index
int64 | repo_id
string | file_path
string | content
string |
|---|---|---|---|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/incremental/MajorityLabelsetUpdateable.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multilabel.incremental;
import meka.classifiers.incremental.IncrementalEvaluation;
import meka.classifiers.multilabel.IncrementalMultiLabelClassifier;
import meka.classifiers.multilabel.MajorityLabelset;
import weka.core.Instance;
/**
* MajorityLabelsetUpdateable.java - Updateable version of MajorityLabelset.
* @see MajorityLabelset
* @author Jesse Read
* @version September 2011
*/
public class MajorityLabelsetUpdateable extends MajorityLabelset implements IncrementalMultiLabelClassifier {

    /** for serialization. */
    private static final long serialVersionUID = -6454034593889787500L;

    /**
     * Description to display in the GUI.
     *
     * @return the description
     */
    @Override
    public String globalInfo() {
        return "Updateable Majority Labelset Classifier";
    }

    /**
     * Incrementally incorporates one instance by updating the labelset counts.
     *
     * @param x the new training instance
     * @throws Exception if the update fails
     */
    @Override
    public void updateClassifier(Instance x) throws Exception {
        // the class index doubles as the number of labels in this data format
        super.updateCount(x, x.classIndex());
    }

    public static void main(String args[]) {
        IncrementalEvaluation.runExperiment(new MajorityLabelsetUpdateable(), args);
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/incremental/PSUpdateable.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multilabel.incremental;
import meka.classifiers.incremental.IncrementalEvaluation;
import meka.classifiers.multilabel.IncrementalMultiLabelClassifier;
import meka.classifiers.multilabel.PS;
import meka.core.LabelSet;
import meka.core.MLUtils;
import meka.core.OptionUtils;
import meka.core.PSUtils;
import weka.classifiers.UpdateableClassifier;
import weka.classifiers.trees.HoeffdingTree;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import java.util.*;
/**
* PSUpdateable.java - Pruned Sets Updateable.
* Can be given any base classifier, since it must rebuild when the buffer is full anyway.
* <br>
* While the initial training set is being buffered, it will predict the majority labelset. Note that this version buffers training examples, not just combinations.
* @see PS
* @author Jesse Read
* @version September, 2011
*/
public class PSUpdateable extends PS implements IncrementalMultiLabelClassifier {

    /** for serialization. */
    private static final long serialVersionUID = -3909203248118831224L;

    /** Buffer size: the number of instances to collect before building PS. */
    protected int m_Limit = 1000;

    /** The maximum number of label combinations (class values) to keep when building PS. */
    protected int m_Support = 10;

    /** The number of labels; set in buildClassifier. */
    protected int L = -1;

    /** Counts of the label combinations observed in the buffer. */
    protected HashMap<LabelSet,Integer> combinations = null;

    /** The buffer of collected training instances. */
    protected Instances batch = null;

    /** Predicts the majority labelset while the buffer fills; set to null once PS is built. */
    protected MajorityLabelsetUpdateable mlu = new MajorityLabelsetUpdateable();

    @Override
    public String globalInfo() {
        return "Updateable PS";
    }

    public PSUpdateable() {
        // default classifier for GUI
        this.m_Classifier = new HoeffdingTree();
    }

    @Override
    protected String defaultClassifierString() {
        // default classifier for CLI
        return "weka.classifiers.trees.HoeffdingTree";
    }

    /**
     * Builds the classifier. If the supplied data already reaches the buffer limit, PS is
     * built immediately; otherwise the data is buffered and a majority-labelset model is
     * used until enough instances arrive (see updateClassifier).
     *
     * @param D the training data
     * @throws Exception if building fails
     */
    @Override
    public void buildClassifier(Instances D) throws Exception {
        testCapabilities(D);
        L = D.classIndex();
        batch = new Instances(D);
        if (batch.numInstances() >= getLimit()) {
            // we already have at least the limit, build!
            if (getDebug()) System.out.println("Train on instances 0 ... "+batch.numInstances());
            buildPSFromBatch();
            mlu = null; // We won't be needing the majority-labelset classifier!
        }
        else {
            // not enough yet: keep collecting, and predict the majority labelset meanwhile
            if (getDebug()) System.out.println("Continue collection batch from instance "+batch.numInstances());
            mlu.buildClassifier(batch);
        }
    }

    /**
     * Builds the internal PS model from the current buffer (shared by buildClassifier and
     * updateClassifier, which previously duplicated this code). Counts the label combinations,
     * prunes rare ones, and -- so that no more than getSupport() class values remain -- keeps
     * raising the pruning threshold m_P until the count is small enough (note: the while loop
     * is a slow way to do this). m_P is restored to its configured value afterwards.
     *
     * @throws Exception if the underlying PS build fails
     */
    private void buildPSFromBatch() throws Exception {
        combinations = PSUtils.countCombinationsSparse(batch,L);
        MLUtils.pruneCountHashMap(combinations,m_P);
        int p = m_P;
        while(combinations.size() > getSupport()) {
            m_P++;
            MLUtils.pruneCountHashMap(combinations,m_P);
        }
        super.buildClassifier(batch);
        m_P = p;
    }

    /**
     * Updates the classifier with one instance: while the buffer is filling, the instance is
     * stored (and the majority-labelset model updated); once the limit is reached, PS is built
     * and subsequent updates are passed, PS-transformed, to the updateable base classifier.
     *
     * @param x the new training instance
     * @throws Exception if the update fails
     */
    @Override
    public void updateClassifier(Instance x) throws Exception {
        if (batch.numInstances() < getLimit() && mlu != null) {
            // still buffering: store the example
            batch.add(x);
            if (batch.numInstances() >= getLimit()) {
                // we now have enough instances to build PS!
                buildPSFromBatch();
                batch.clear();
                mlu = null;
            }
            else {
                // not enough instances in the batch yet, just update the majority-labelset classifier
                mlu.updateClassifier(x);
            }
        }
        else {
            // PS is built: update the internal sl classifier (e.g. HoeffdingTree) on each transformed instance
            for (Instance x_i : PSUtils.PSTransformation(x,L,combinations,m_N,super.m_InstancesTemplate)) {
                ((UpdateableClassifier)m_Classifier).updateClassifier(x_i);
            }
        }
    }

    @Override
    public double[] distributionForInstance(Instance x) throws Exception {
        if (mlu != null) {
            // we're still using the majority-labelset classifier: return the most common combination
            return mlu.distributionForInstance(x);
        }
        // we've built PS already, return a PS prediction!
        return super.distributionForInstance(x);
    }

    @Override
    public String [] getOptions() {
        List<String> result = new ArrayList<>();
        OptionUtils.add(result, 'I', getLimit());
        OptionUtils.add(result, "support", getSupport());
        OptionUtils.add(result, super.getOptions());
        return OptionUtils.toArray(result);
    }

    @Override
    public void setOptions(String[] options) throws Exception {
        setLimit(OptionUtils.parse(options, 'I', 1000));
        setSupport(OptionUtils.parse(options, "support", 10));
        super.setOptions(options);
    }

    @Override
    public Enumeration listOptions() {
        Vector<Option> result = new Vector<>();
        result.addElement(new Option("\tSets the buffer size \n\tdefault: 1000", "I", 1, "-I <value>"));
        result.addElement(new Option("\tSets the max. num. of combs.\n\tdefault: 10", "support", 1, "-support <value>"));
        OptionUtils.add(result, super.listOptions());
        return OptionUtils.toEnumeration(result);
    }

    public int getLimit() {
        return m_Limit;
    }

    public void setLimit(int m_Limit) {
        this.m_Limit = m_Limit;
    }

    public String limitTipText() {
        return "The buffer size (num. of instances to collect before training PS).";
    }

    public int getSupport() {
        return m_Support;
    }

    public void setSupport(int m_Support) {
        this.m_Support = m_Support;
    }

    public String supportTipText() {
        return "The maximum number of class values (i.e., label combinations) to consider.";
    }

    public static void main(String args[]) {
        IncrementalEvaluation.runExperiment(new PSUpdateable(),args);
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/incremental/RTUpdateable.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multilabel.incremental;
import meka.classifiers.incremental.IncrementalEvaluation;
import meka.classifiers.multilabel.IncrementalMultiLabelClassifier;
import meka.classifiers.multilabel.RT;
import weka.classifiers.UpdateableClassifier;
import weka.classifiers.trees.HoeffdingTree;
import weka.core.Instance;
/**
* RTUpdateable.java - Updateable RT.
* Must be given an UpdateableClassifier base classifier.
* @see RT
* @author Jesse Read
* @version October, 2011
*/
public class RTUpdateable extends RT implements IncrementalMultiLabelClassifier {

    /** for serialization. */
    private static final long serialVersionUID = 3766003607269541755L;

    @Override
    public String globalInfo() {
        return "Updateable RT\nMust be run with an Updateable base classifier.";
    }

    /** Default constructor: HoeffdingTree is the base classifier shown in the GUI. */
    public RTUpdateable() {
        m_Classifier = new HoeffdingTree();
    }

    @Override
    protected String defaultClassifierString() {
        // the base classifier used from the command line by default
        return "weka.classifiers.trees.HoeffdingTree";
    }

    /**
     * Incrementally trains on one instance: for every label that is set, a converted
     * single-label copy is fed to the updateable base classifier.
     *
     * @param x the new training instance
     * @throws Exception if the base classifier cannot be updated
     */
    @Override
    public void updateClassifier(Instance x) throws Exception {
        int numLabels = x.classIndex();
        for (int j = 0; j < numLabels; j++) {
            // note: a strictly-positive test also skips missing (NaN) label values
            if (x.value(j) > 0.0) {
                Instance converted = convertInstance(x);
                converted.setClassValue(j);
                ((UpdateableClassifier) m_Classifier).updateClassifier(converted);
            }
        }
    }

    public static void main(String args[]) {
        IncrementalEvaluation.runExperiment(new RTUpdateable(), args);
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/incremental
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/incremental/meta/BaggingMLUpdateable.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multilabel.incremental.meta;
import meka.classifiers.multilabel.IncrementalMultiLabelClassifier;
import meka.classifiers.multilabel.incremental.BRUpdateable;
import meka.classifiers.incremental.IncrementalEvaluation;
import meka.classifiers.multilabel.meta.EnsembleML;
import weka.classifiers.UpdateableClassifier;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import java.util.Random;
/**
* BaggingMLUpdateable.java - Using the OzaBag scheme (see OzaBag.java from MOA).
* See also: N. Oza and S. Russell. Online bagging and boosting. In Artificial Intelligence and Statistics 2001, pages 105-112. Morgan Kaufmann, 2001.
* @version Jan 2013
* @author Jesse Read
*/
public class BaggingMLUpdateable extends EnsembleML implements IncrementalMultiLabelClassifier, TechnicalInformationHandler {

    private static final long serialVersionUID = 4978269895923479962L;

    /** Source of the Poisson draws; seeded in buildClassifier for reproducibility. */
    protected Random random = null;

    /**
     * Description to display in the GUI.
     *
     * @return the description
     */
    @Override
    public String globalInfo() {
        return "Incremental Bagging";
    }

    /** Default constructor: BRUpdateable is the base classifier shown in the GUI. */
    public BaggingMLUpdateable() {
        m_Classifier = new BRUpdateable();
    }

    @Override
    protected String defaultClassifierString() {
        // the base classifier used from the command line by default
        return "meka.classifiers.multilabel.incremental.BRUpdateable";
    }

    @Override
    public void buildClassifier(Instances D) throws Exception {
        // (re)seed before the ensemble is built so the online Poisson draws are reproducible
        random = new Random(m_Seed);
        super.buildClassifier(D);
    }

    /**
     * OzaBag-style online update: each ensemble member trains on the instance k ~ Poisson(1)
     * times, simulated by scaling the instance's weight by k.
     *
     * @param x the new training instance
     * @throws Exception if a base classifier cannot be updated
     */
    @Override
    public void updateClassifier(Instance x) throws Exception {
        for (int i = 0; i < m_NumIterations; i++) {
            int k = poisson(1.0, random);
            if (m_BagSizePercent == 100) {
                k = 1; // full-size bags: every member trains on every instance exactly once
            }
            if (k > 0) {
                Instance weighted = (Instance) x.copy();
                weighted.setWeight(x.weight() * k);
                ((UpdateableClassifier) m_Classifiers[i]).updateClassifier(weighted);
            }
        }
    }

    /**
     * Draws a Poisson(lambda)-distributed integer: inverse-transform sampling for small
     * lambda, and a Gaussian approximation (clamped at zero) for lambda >= 100.
     */
    protected static int poisson(double lambda, Random r) {
        if (lambda < 100.0) {
            // walk the (exp(lambda)-scaled) CDF until it passes a uniform threshold
            double threshold = r.nextDouble() * Math.exp(lambda);
            double term = 1.0;
            double cumulative = 1.0;
            int k = 1;
            int cap = Math.max(100, 10 * (int) Math.ceil(lambda));
            while (k < cap && cumulative <= threshold) {
                term *= lambda / k;
                cumulative += term;
                k++;
            }
            return k - 1;
        }
        double approx = lambda + Math.sqrt(lambda) * r.nextGaussian();
        return (approx < 0.0) ? 0 : (int) Math.floor(approx);
    }

    @Override
    public TechnicalInformation getTechnicalInformation() {
        TechnicalInformation info = new TechnicalInformation(Type.ARTICLE);
        info.setValue(Field.AUTHOR, "N.Oza, S. Russell");
        info.setValue(Field.TITLE, "Online bagging and boosting");
        info.setValue(Field.JOURNAL, "Artificial Intelligence and Statistics");
        info.setValue(Field.PUBLISHER, "Morgan Kaufmann");
        info.setValue(Field.YEAR, "2001");
        info.setValue(Field.PAGES, "105-112");
        return info;
    }

    public static void main(String args[]) {
        IncrementalEvaluation.runExperiment(new BaggingMLUpdateable(), args);
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/meta/BaggingML.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multilabel.meta;
import java.util.Random;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Randomizable;
import weka.core.RevisionUtils;
/**
* BaggingML.java - Combining several multi-label classifiers using Bootstrap AGGregatING.
*
* @author Jesse Read (jmr30@cs.waikato.ac.nz)
*/
public class BaggingML extends MetaProblemTransformationMethod {

    /** for serialization. */
    private static final long serialVersionUID = -6208337124420497991L;

    /**
     * Description to display in the GUI.
     *
     * @return the description
     */
    @Override
    public String globalInfo() {
        return "Combining several multi-label classifiers using Bootstrap AGGregatING";
    }

    /** Default constructor: classic bagging draws bags as large as the training set. */
    public BaggingML() {
        m_BagSizePercent = 100;
    }

    /**
     * Builds the ensemble: each member trains on a bootstrap sample of the data, where
     * resampling is expressed via instance weights (how often each instance was drawn).
     *
     * @param train the training data
     * @throws Exception if a member cannot be built or the thread is interrupted
     */
    @Override
    public void buildClassifier(Instances train) throws Exception {
        testCapabilities(train);
        if (getDebug()) {
            System.out.print("-: Models: ");
        }
        // work on a copy, since instance weights are mutated below
        train = new Instances(train);
        m_Classifiers = ProblemTransformationMethod.makeCopies((MultiLabelClassifier) m_Classifier, m_NumIterations);
        for (int i = 0; i < m_NumIterations; i++) {
            if (Thread.currentThread().isInterrupted()) {
                throw new InterruptedException("Thread has been interrupted.");
            }
            Random rand = new Random(m_Seed + i);
            Instances bag = new Instances(train, 0);
            if (m_Classifiers[i] instanceof Randomizable) {
                ((Randomizable) m_Classifiers[i]).setSeed(m_Seed + i);
            }
            if (getDebug()) {
                System.out.print(i + " ");
            }
            // bootstrap: count how often each instance is drawn (with replacement) ...
            int counts[] = new int[train.numInstances()];
            for (int j = 0; j < counts.length; j++) {
                counts[rand.nextInt(counts.length)]++;
            }
            // ... then add each drawn instance once, weighted by its draw count
            for (int j = 0; j < counts.length; j++) {
                if (Thread.currentThread().isInterrupted()) {
                    throw new InterruptedException("Thread has been interrupted.");
                }
                if (counts[j] > 0) {
                    Instance inst = train.instance(j);
                    inst.setWeight(counts[j]);
                    bag.add(inst);
                }
            }
            m_Classifiers[i].buildClassifier(bag);
        }
        if (getDebug()) {
            System.out.println(":-");
        }
    }

    @Override
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 9117 $");
    }

    public static void main(final String args[]) {
        ProblemTransformationMethod.evaluation(new BaggingML(), args);
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/meta/BaggingMLdup.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multilabel.meta;
import java.util.Random;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import weka.core.Instances;
import weka.core.Randomizable;
import weka.core.RevisionUtils;
/**
* BaggingMLdup.java - A version of BaggingML where Instances are duplicated instead of assigned
* higher weights. Duplicates Instances instead of assigning higher weights -- should work for
* methods that do not handle weights at all.
*
* @author Jesse Read (jmr30@cs.waikato.ac.nz)
*/
public class BaggingMLdup extends MetaProblemTransformationMethod {

    /** for serialization. */
    private static final long serialVersionUID = -5606278379913020097L;

    /**
     * Description to display in the GUI.
     *
     * @return the description
     */
    @Override
    public String globalInfo() {
        return "Combining several multi-label classifiers using Bootstrap AGGregatING.\n"
                + "Duplicates Instances instead of assigning higher weights -- should work for methods that do not handle weights at all.";
    }

    /**
     * Builds the ensemble: each member trains on a bag filled by sampling the training data
     * with replacement; repeated draws are stored as duplicate instances rather than weights.
     *
     * @param train the training data
     * @throws Exception if a member cannot be built or the thread is interrupted
     */
    @Override
    public void buildClassifier(final Instances train) throws Exception {
        testCapabilities(train);
        if (getDebug()) {
            System.out.print("-: Models: ");
        }
        m_Classifiers = ProblemTransformationMethod.makeCopies((ProblemTransformationMethod) m_Classifier, m_NumIterations);
        for (int i = 0; i < m_NumIterations; i++) {
            if (Thread.currentThread().isInterrupted()) {
                throw new InterruptedException("Thread has been interrupted.");
            }
            Random rand = new Random(m_Seed + i);
            Instances bag = new Instances(train, 0);
            if (m_Classifiers[i] instanceof Randomizable) {
                ((Randomizable) m_Classifiers[i]).setSeed(m_Seed + i);
            }
            if (getDebug()) {
                System.out.print(i + " ");
            }
            // fill the bag by sampling with replacement until the requested size is reached
            int bagSize = m_BagSizePercent * train.numInstances() / 100;
            while (bag.numInstances() < bagSize) {
                if (Thread.currentThread().isInterrupted()) {
                    throw new InterruptedException("Thread has been interrupted.");
                }
                bag.add(train.instance(rand.nextInt(train.numInstances())));
            }
            m_Classifiers[i].buildClassifier(bag);
        }
        if (getDebug()) {
            System.out.println(":-");
        }
    }

    @Override
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 9117 $");
    }

    public static void main(final String args[]) {
        ProblemTransformationMethod.evaluation(new BaggingMLdup(), args);
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/meta/CM.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multilabel.meta;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import meka.core.MLUtils;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
/**
* CM.java - Classification Maximization using any multi-label classifier.
*
* A specified multi-label classifier is built on the training data. This model is then used to
* classify unlabelled data (e.g., the test data) The classifier is then retrained with all data,
* and the cycle continues ... (for I iterations).
*
* @version July, 2014
* @author Jesse Read
* @see EM
*/
public class CM extends EM {

    /** for serialization. */
    private static final long serialVersionUID = -6297505619194774433L;

    @Override
    public String globalInfo() {
        return "Train a classifier using labelled and unlabelled data (semi-supervised) using Classification Expectation algorithm (a hard version of EM). Unlike EM, can use any classifier here, not necessarily one which gives good probabilistic output.";
    }

    /**
     * Builds the classifier: an initial model is trained on the labelled data, then for m_I
     * iterations the combined (labelled + unlabelled) data is hard-classified and the model
     * retrained on the result.
     *
     * @param D the labelled training data
     * @throws Exception if the base classifier cannot be built or the thread is interrupted
     */
    @Override
    public void buildClassifier(final Instances D) throws Exception {
        this.testCapabilities(D);
        if (this.getDebug()) {
            System.out.println("Initial build ...");
        }
        this.m_Classifier.buildClassifier(D);
        // D_ is the unlabelled data supplied via introduceUnlabelledData (inherited from EM)
        Instances DA = MLUtils.combineInstances(D, this.D_);
        if (this.getDebug()) {
            System.out.print("Performing " + this.m_I + " 'CM' Iterations: [");
        }
        for (int i = 0; i < this.m_I; i++) {
            if (Thread.currentThread().isInterrupted()) {
                throw new InterruptedException("Thread has been interrupted.");
            }
            if (this.getDebug()) {
                System.out.print(".");
            }
            // classification (hard assignment of labels)
            this.updateWeights((ProblemTransformationMethod) this.m_Classifier, DA);
            // maximization (of parameters)
            this.m_Classifier.buildClassifier(DA);
        }
        // FIX: the opening '[' is only printed in debug mode, so only close it in debug mode
        if (this.getDebug()) {
            System.out.println("]");
        }
    }

    /**
     * Hard-classifies each instance: label j is overwritten with 1 if its posterior is
     * >= 0.5, else 0. Unlike EM's soft reweighting, the label values themselves change.
     *
     * @param h the current model
     * @param D the data to relabel (modified in place)
     * @throws Exception if prediction fails or the thread is interrupted
     */
    @Override
    protected void updateWeights(final ProblemTransformationMethod h, final Instances D) throws Exception {
        for (Instance x : D) {
            if (Thread.currentThread().isInterrupted()) {
                throw new InterruptedException("Thread has been interrupted.");
            }
            double y[] = h.distributionForInstance(x);
            for (int j = 0; j < y.length; j++) {
                x.setValue(j, (y[j] < 0.5) ? 0. : 1.);
            }
        }
    }

    @Override
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 9117 $");
    }

    public static void main(final String args[]) {
        ProblemTransformationMethod.evaluation(new CM(), args);
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/meta/DeepML.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multilabel.meta;
import Jama.Matrix;
import rbms.RBM;
import rbms.DBM;
import weka.core.*;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import meka.core.*;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import meka.classifiers.multilabel.NN.*;
import meka.classifiers.multilabel.BR;
/**
* DeepML.java - Deep Multi-label Classification.
* Trains an RBM/DBM on the feature space of the training data; then train on it (with the labels) with whichever multi-label classifier is specified.
* <br>
* See: Jesse Read and Jaakko Hollmen. <i>A Deep Interpretation of Classifier Chains</i>. IDA 2014.
* <br>
* The first RBM will have h = d / 2 hidden units, the second h = h / 2, and so on, where d is the number of original (visible) input feature attributes.
*
* @author Jesse Read
* @version December 2012
*/
public class DeepML extends AbstractDeepNeuralNet implements TechnicalInformationHandler {

/** for serialization. */
private static final long serialVersionUID = 3388606529764305098L;

/** The trained feature-space model: an RBM when m_N == 1, else a stacked DBM. */
protected RBM dbm = null;

/** Wall-clock milliseconds spent training the RBM/DBM (reported by toString). */
protected long rbm_time = 0;

/**
 * CreateDBM - make a RBM if N=1 or a DBM otherwise.
 * Both are configured from this classifier's current option string.
 *
 * @param d the number of visible (input) attributes -- NOTE(review): not used here; presumably the model reads its dimensions from the options or at train time -- confirm
 * @return the feature-space model
 * @throws Exception if the model cannot be constructed from the options
 */
protected RBM createDBM(int d) throws Exception {
return ((m_N == 1) ?
new RBM(this.getOptions()) : // RBM
new DBM(this.getOptions())) ; // DBM
}

/** Default constructor: BR is the default base classifier (shown in the GUI). */
public DeepML() {
// default classifier for GUI
this.m_Classifier = new BR();
}

@Override
protected String defaultClassifierString() {
// default classifier for CLI
return "meka.classifiers.multilabel.BR";
}

/**
 * Builds the classifier: trains an RBM/DBM on the input-feature matrix (unsupervised),
 * maps the inputs into the hidden-unit space Z, then trains the base multi-label
 * classifier on Z (with the original labels).
 *
 * @param D the training data
 * @throws Exception if RBM training or the base classifier build fails
 */
@Override
public void buildClassifier(Instances D) throws Exception {
testCapabilities(D);
// Extract variables
int L = D.classIndex();
int d = D.numAttributes()-L; // number of input attributes (excluding the L label attributes)
double X_[][] = MLUtils.getXfromD(D); // the input feature matrix, labels stripped
// Pre Tune ===========================
/*
if (m_M < 0 || m_R < 0 || m_H < 0) {
System.out.println("We will do some pre-tuning here ...");
//BR h = new BR();
//h.setClassifier(new SMO());
String ops[] = RBMTools.tuneRBM((MultilabelClassifier)m_Classifier,D,m_R,m_M,m_H,m_E);
System.out.println("got: "+Arrays.toString(ops));
this.setOptions(ops);
}
*/
// ====================================
// Build DBM
dbm = createDBM(d);
dbm.setSeed(m_Seed);
dbm.setE(m_E); // number of training epochs
// Train RBM, get Z; the training time is recorded for reporting in toString()
long before = System.currentTimeMillis();
dbm.train(X_,L);
rbm_time = System.currentTimeMillis() - before;
double Z[][] = dbm.prob_Z(X_); // hidden-unit activations for each training instance
if (getDebug()) {
Matrix tW[] = dbm.getWs();
System.out.println("X = \n"+ MatrixUtils.toString(X_));
System.out.println("W = \n"+ MatrixUtils.toString(tW[0].getArray()));
System.out.println("Y = \n"+ MatrixUtils.toString(MLUtils.getYfromD(D), 0));
System.out.println("Z = \n"+ MatrixUtils.toString(MatrixUtils.threshold(Z, 0.5), 0));
/*
Instances newD = RBMTools.makeDataset(D,M.threshold(Z,0.5));
System.out.println(""+newD);
ArffSaver saver = new ArffSaver();
saver.setInstances(newD);
saver.setFile(new File("newD.arff"));
saver.writeBatch();
System.exit(1);
*/
}
// Train Classifier on the new feature space; the template is reused at prediction time
m_InstancesTemplate = new Instances(MLUtils.replaceZasAttributes(D,Z,L)); // did not clear
m_Classifier.buildClassifier(m_InstancesTemplate);
}

/**
 * Predicts for one instance: maps its input features through the trained RBM/DBM into
 * the hidden space, splices them into the template, and queries the base classifier.
 *
 * @param xy the instance (labels first, inputs after the class index)
 * @return the label distribution from the base classifier
 * @throws Exception if prediction fails
 */
@Override
public double[] distributionForInstance(Instance xy) throws Exception {
int L = xy.classIndex();
double z[] = dbm.prob_z(MLUtils.getxfromInstance(xy)); // hidden-unit activations for this instance
Instance zy = (Instance)m_InstancesTemplate.firstInstance().copy();
MLUtils.setValues(zy,z,L);
zy.setDataset(m_InstancesTemplate);
return m_Classifier.distributionForInstance(zy);
}

@Override
public String toString() {
return super.toString() + ", RBM-Build_Time="+rbm_time;
}

/*
 * TODO: Make a generic abstract -dependency_user- class that has this option, and extend it here
 */
public String globalInfo() {
return
"Create a new feature space using a stack of RBMs, then employ a multi-label classifier on top. "
+ "For more information see:\n"
+ getTechnicalInformation().toString();
}

@Override
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(Type.INPROCEEDINGS);
result.setValue(Field.AUTHOR, "Jesse Read and Jaako Hollmen");
result.setValue(Field.TITLE, "A Deep Interpretation of Classifier Chains");
result.setValue(Field.BOOKTITLE, "Advances in Intelligent Data Analysis {XIII} - 13th International Symposium, {IDA} 2014");
result.setValue(Field.PAGES, "251--262");
result.setValue(Field.YEAR, "2014");
return result;
}

public static void main(String args[]) throws Exception {
ProblemTransformationMethod.evaluation(new DeepML(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/meta/EM.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multilabel.meta;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
import meka.classifiers.multilabel.CC;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import meka.classifiers.multilabel.SemisupervisedClassifier;
import meka.core.MLUtils;
import meka.core.OptionUtils;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
/**
* EM.java - Expectation Maximization using any multi-label classifier.
*
* A specified multi-label classifier is built on the training data. This model is then used to
* classify the test data. The confidence with which instances are classified is used to reweight
* them. This data is then used to retrain the classifier. This cycle continues ('EM'-style) for I
* iterations. The final model is used to officially classifier the test data. <br>
* Because of the weighting, it is advised to use a classifier which gives good confidence
* (probabalistic) outputs. <br>
*
* @version 2010
* @author Jesse Read
*/
public class EM extends ProblemTransformationMethod implements SemisupervisedClassifier, TechnicalInformationHandler {
private static final long serialVersionUID = 2622231824673975335L;
protected int m_I = 10;
protected Instances D_ = null;
public EM() {
    // CC is the default base classifier, as shown in the GUI
    m_Classifier = new CC();
}
@Override
protected String defaultClassifierString() {
    // the default base classifier for the command-line interface
    return "meka.classifiers.multilabel.CC";
}
@Override
public void introduceUnlabelledData(final Instances D) {
    // keep a reference; buildClassifier combines this with the labelled data
    this.D_ = D;
}
@Override
public String globalInfo() {
    // the GUI description of this scheme
    return "A specified multi-label classifier is built on the training data. This model is then used to classify the test data. "
            + "The confidence with which instances are classified is used to reweight them. This data is then used to retrain the classifier. "
            + "This cycle continues ('EM'-style) for I iterations. The final model is used to officially classifier the test data. "
            + "Because of the weighting, it is advised to use a classifier which gives good confidence (probabalistic) outputs. ";
}
@Override
public void buildClassifier(final Instances D) throws Exception {
this.testCapabilities(D);
if (this.getDebug()) {
System.out.println("Initial build ...");
}
this.m_Classifier.buildClassifier(D);
Instances DA = MLUtils.combineInstances(D, this.D_);
if (this.getDebug()) {
System.out.print("Performing " + this.m_I + " 'EM' Iterations: [");
}
for (int i = 0; i < this.m_I; i++) {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException("Thread has been interrupted.");
}
if (this.getDebug()) {
System.out.print(".");
}
// expectation (classify + update weights)
this.updateWeights((ProblemTransformationMethod) this.m_Classifier, DA);
// maximization of parameters (training)
this.m_Classifier.buildClassifier(DA);
}
System.out.println("]");
}
protected void updateWeights(final ProblemTransformationMethod h, final Instances D) throws Exception {
for (Instance x : D) {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException("Thread has been interrupted.");
}
double w = 1.0; // weight (product of probability)
double y[] = h.distributionForInstance(x);
// convert ML distribution into probability vector, and multiply to w as we go ..
for (int j = 0; j < y.length; j++) {
w *= (y[j] < 0.5) ? 1. - y[j] : y[j];
}
x.setWeight(w);
}
}
@Override
public double[] distributionForInstance(final Instance x) throws Exception {
return this.m_Classifier.distributionForInstance(x);
}
public void setIterations(final int i) {
this.m_I = i;
}
public int getIterations() {
return this.m_I;
}
public String iterationsTipText() {
return "The number of EM iterations to perform.";
}
@Override
public Enumeration listOptions() {
Vector result = new Vector();
result.addElement(new Option("\tThe number of iterations of EM to carry out (default: 10)", "I", 1, "-I <value>"));
OptionUtils.add(result, super.listOptions());
return OptionUtils.toEnumeration(result);
}
@Override
public void setOptions(final String[] options) throws Exception {
this.setIterations(OptionUtils.parse(options, 'I', 10));
super.setOptions(options);
}
@Override
public String[] getOptions() {
List<String> result = new ArrayList<>();
OptionUtils.add(result, 'I', this.getIterations());
OptionUtils.add(result, super.getOptions());
return OptionUtils.toArray(result);
}
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision: 9117 $");
}
@Override
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(Type.ARTICLE);
result.setValue(Field.AUTHOR, "Nigam, Kamal and Mccallum, Andrew K. and Thrun, Sebastian and Mitchell, Tom M.");
result.setValue(Field.TITLE, "Text classification from Labeled and Unlabeled Documents using EM");
result.setValue(Field.JOURNAL, "Machine Learning");
result.setValue(Field.VOLUME, "39");
result.setValue(Field.NUMBER, "2/3");
result.setValue(Field.PAGES, "103--134");
result.setValue(Field.YEAR, "2010");
return result;
}
public static void main(final String args[]) {
ProblemTransformationMethod.evaluation(new EM(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/meta/EnsembleML.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multilabel.meta;
import java.util.Random;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import weka.core.Instances;
import weka.core.Randomizable;
import weka.core.RevisionUtils;
/**
* EnsembleML.java - Combines several multi-label classifiers in a simple-subset ensemble.
*
* @author Jesse Read (jmr30@cs.waikato.ac.nz)
*/
public class EnsembleML extends MetaProblemTransformationMethod {

	/** for serialization. */
	private static final long serialVersionUID = 835659467275068411L;

	/**
	 * Description to display in the GUI.
	 *
	 * @return the description
	 */
	@Override
	public String globalInfo() {
		return "Combining several multi-label classifiers in a simple-subset ensemble.";
	}

	/**
	 * Trains each ensemble member on the leading (m_BagSizePercent)% of a freshly
	 * shuffled copy of the training data, using a per-member random seed.
	 *
	 * @param train
	 *          the training data
	 * @throws Exception
	 *           if a member fails to build or the thread is interrupted
	 */
	@Override
	public void buildClassifier(Instances train) throws Exception {
		this.testCapabilities(train);
		if (this.getDebug()) {
			System.out.print("-: Models: ");
		}
		// work on a copy so the caller's dataset is never reordered
		Instances shuffled = new Instances(train);
		this.m_Classifiers = ProblemTransformationMethod.makeCopies((ProblemTransformationMethod) this.m_Classifier, this.m_NumIterations);
		final int bagSize = shuffled.numInstances() * this.m_BagSizePercent / 100;
		for (int model = 0; model < this.m_NumIterations; model++) {
			if (Thread.currentThread().isInterrupted()) {
				throw new InterruptedException("Thread has been interrupted.");
			}
			if (this.getDebug()) {
				System.out.print(model + " ");
			}
			if (this.m_Classifiers[model] instanceof Randomizable) {
				((Randomizable) this.m_Classifiers[model]).setSeed(model);
			}
			// fresh shuffle per member, then cut the first bagSize instances
			shuffled.randomize(new Random(this.m_Seed + model));
			this.m_Classifiers[model].buildClassifier(new Instances(shuffled, 0, bagSize));
		}
		if (this.getDebug()) {
			System.out.println(":-");
		}
	}

	@Override
	public String getRevision() {
		return RevisionUtils.extract("$Revision: 9117 $");
	}

	public static void main(final String args[]) {
		ProblemTransformationMethod.evaluation(new EnsembleML(), args);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/meta/FilteredClassifier.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* FilteredClassifier.java
* Copyright (C) 2017-2018 University of Waikato, Hamilton, NZ
*/
package meka.classifiers.multilabel.meta;
import java.util.Random;
import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import weka.classifiers.Classifier;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Instances;
import weka.core.Randomizable;
import weka.core.WeightedAttributesHandler;
import weka.core.WeightedInstancesHandler;
import weka.filters.AllFilter;
import weka.filters.Filter;
/**
* Allows the application of a filter in conjunction with a multi-label classifier.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class FilteredClassifier extends weka.classifiers.meta.FilteredClassifier implements MultiLabelClassifier {

	private static final long serialVersionUID = 4466454723202805056L;

	/**
	 * Default constructor.
	 *
	 * Turns off check for modified class attribute.
	 */
	public FilteredClassifier() {
		super();
		this.setDoNotCheckForModifiedClassAttribute(true);
		// defaults: Binary Relevance base classifier over a pass-through (identity) filter
		this.m_Classifier = new BR();
		this.m_Filter = new AllFilter();
	}

	/**
	 * String describing default classifier.
	 */
	@Override
	protected String defaultClassifierString() {
		return BR.class.getName();
	}

	/**
	 * Set the base learner. Rejects anything that is not a {@link MultiLabelClassifier}.
	 *
	 * @param newClassifier
	 *          the classifier to use.
	 * @throws IllegalArgumentException
	 *           if the classifier is not a MultiLabelClassifier
	 */
	@Override
	public void setClassifier(final Classifier newClassifier) {
		if (!(newClassifier instanceof MultiLabelClassifier)) {
			throw new IllegalArgumentException("Classifier must be a " + MultiLabelClassifier.class.getName() + "!");
		}
		super.setClassifier(newClassifier);
	}

	/**
	 * TestCapabilities. Make sure the training data is suitable.
	 *
	 * NOTE(review): this treats each of the first classIndex() attributes as a class
	 * attribute -- presumably the Meka convention where classIndex() encodes the
	 * number of labels; confirm against the rest of the project.
	 *
	 * @param D
	 *          the data
	 */
	public void testCapabilities(final Instances D) throws Exception {
		// get the classifier's capabilities, enable all class attributes and do the usual test
		Capabilities cap = this.getCapabilities();
		cap.enableAllClasses();
		// get the capabilities again, test class attributes individually
		int L = D.classIndex();
		for (int j = 0; j < L; j++) {
			Attribute c = D.attribute(j);
			cap.testWithFail(c, true);
		}
	}

	/**
	 * Sets up the filter and runs checks.
	 *
	 * NOTE(review): buildClassifier below calls setUp(data, r), which presumably
	 * resolves to a two-argument overload inherited from
	 * weka.classifiers.meta.FilteredClassifier; this one-argument variant does not
	 * appear to be called from within this class -- confirm before relying on it.
	 *
	 * @return filtered data
	 */
	protected Instances setUp(Instances data) throws Exception {
		String relName;
		String classAtt;
		// remember relation name and class attribute name, restored after filtering
		relName = data.relationName();
		classAtt = data.classAttribute().name();
		if (this.m_Classifier == null) {
			throw new Exception("No base classifiers have been set!");
		}
		this.getCapabilities().testWithFail(data);
		// get fresh instances object
		data = new Instances(data);
		// keep a copy of the class attribute to detect modification by the filter
		Attribute classAttribute = (Attribute) data.classAttribute().copy();
		this.m_Filter.setInputFormat(data); // filter capabilities are checked here
		data = Filter.useFilter(data, this.m_Filter);
		if ((!classAttribute.equals(data.classAttribute())) && (!this.m_DoNotCheckForModifiedClassAttribute)) {
			throw new IllegalArgumentException("Cannot proceed: " + this.getFilterSpec() + " has modified the class attribute!");
		}
		data.setRelationName(relName);
		data.setClassIndex(data.attribute(classAtt).index());
		// can classifier handle the data?
		this.testCapabilities(data);
		// header-only (string-free) copy of the filtered structure, kept for later use
		this.m_FilteredInstances = data.stringFreeStructure();
		return data;
	}

	/**
	 * Build the classifier on the filtered data.
	 *
	 * @param data
	 *          the training data
	 * @throws Exception
	 *           if the classifier could not be built successfully
	 */
	@Override
	public void buildClassifier(Instances data) throws Exception {
		if (this.m_Classifier == null) {
			throw new Exception("No base classifier has been set!");
		}
		this.getCapabilities().testWithFail(data);
		// derive the RNG from the data when possible, for reproducible resampling
		Random r = (data.numInstances() > 0) ? data.getRandomNumberGenerator(this.getSeed()) : new Random(this.getSeed());
		data = this.setUp(data, r);
		// if the base classifier cannot honour instance weights, simulate them by resampling
		if (!data.allInstanceWeightsIdentical() && !(this.m_Classifier instanceof WeightedInstancesHandler)) {
			data = data.resampleWithWeights(r); // The filter may have assigned weights.
		}
		// likewise for attribute weights (resampleAttributes is inherited from the superclass)
		if (!data.allAttributeWeightsIdentical() && !(this.m_Classifier instanceof WeightedAttributesHandler)) {
			data = this.resampleAttributes(data, false, r);
		}
		if (this.m_Classifier instanceof Randomizable) {
			((Randomizable) this.m_Classifier).setSeed(r.nextInt());
		}
		this.m_Classifier.buildClassifier(data);
	}

	/**
	 * Returns a string representation of the model.
	 *
	 * @return the model
	 */
	@Override
	public String getModel() {
		if (this.m_Classifier instanceof MultiLabelClassifier) {
			return ((MultiLabelClassifier) this.m_Classifier).getModel();
		} else {
			return this.toString();
		}
	}

	public static void main(final String args[]) {
		ProblemTransformationMethod.evaluation(new FilteredClassifier(), args);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/meta/MBR.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multilabel.meta;
import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import weka.classifiers.AbstractClassifier;
import weka.core.Attribute;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
/**
* MBR.java - Meta BR: BR stacked with feature outputs into another BR. Described in: Godbole and
* Sarawagi, <i>Discriminative Methods for Multi-labeled Classification</i>.
*
* @version June 2009
* @author Jesse Read (jmr30@cs.waikato.ac.nz)
*/
public class MBR extends ProblemTransformationMethod implements TechnicalInformationHandler {

	/** for serialization. */
	private static final long serialVersionUID = 865889198021748917L;

	// Level-0 BR model, trained on the original input features.
	protected BR m_BASE = null;

	// Level-1 (meta) BR model, trained on the features augmented with the base model's confidences.
	protected BR m_META = null;

	public MBR() {
		// default classifier for GUI
		this.m_Classifier = new BR();
	}

	/**
	 * Description to display in the GUI.
	 *
	 * @return the description
	 */
	@Override
	public String globalInfo() {
		return "BR stacked with feature outputs.\nFor more information see:\n" + this.getTechnicalInformation().toString();
	}

	@Override
	protected String defaultClassifierString() {
		return BR.class.getName();
	}

	@Override
	public TechnicalInformation getTechnicalInformation() {
		TechnicalInformation result;
		result = new TechnicalInformation(Type.INPROCEEDINGS);
		result.setValue(Field.AUTHOR, "Shantanu Godbole, Sunita Sarawagi");
		result.setValue(Field.TITLE, "Discriminative Methods for Multi-labeled Classification");
		result.setValue(Field.BOOKTITLE, "Advances in Knowledge Discovery and Data Mining");
		result.setValue(Field.YEAR, "2004");
		result.setValue(Field.PAGES, "22-30");
		result.setValue(Field.SERIES, "LNCS");
		return result;
	}

	/**
	 * Trains the two-level stack: a base BR on the raw data, then a meta BR on the
	 * data augmented with one binary "metaclass" attribute per label holding the
	 * base model's confidence.
	 *
	 * @param data
	 *          the training data (classIndex() is the number of labels, per Meka convention)
	 * @throws Exception
	 *           if either level fails to build, or the thread is interrupted
	 */
	@Override
	public void buildClassifier(final Instances data) throws Exception {
		this.testCapabilities(data);
		int c = data.classIndex();
		// Base BR
		if (this.getDebug()) {
			System.out.println("Build BR Base (" + c + " models)");
		}
		// fresh instance of the configured classifier (same class + options)
		this.m_BASE = (BR) AbstractClassifier.forName(this.getClassifier().getClass().getName(), ((AbstractClassifier) this.getClassifier()).getOptions());
		this.m_BASE.buildClassifier(data);
		// Meta BR
		if (this.getDebug()) {
			System.out.println("Prepare Meta data ");
		}
		Instances meta_data = new Instances(data);
		// NOTE(review): FastVector is deprecated in recent Weka; kept as-is here
		FastVector BinaryClass = new FastVector(c);
		BinaryClass.addElement("0");
		BinaryClass.addElement("1");
		// insert one binary meta attribute per label, directly after the label columns
		for (int i = 0; i < c; i++) {
			meta_data.insertAttributeAt(new Attribute("metaclass" + i, BinaryClass), c);
		}
		// fill the meta attributes with the base model's confidences for each instance
		for (int i = 0; i < data.numInstances(); i++) {
			if (Thread.currentThread().isInterrupted()) {
				throw new InterruptedException("Thread has been interrupted.");
			}
			double cfn[] = this.m_BASE.distributionForInstance(data.instance(i));
			for (int a = 0; a < cfn.length; a++) {
				meta_data.instance(i).setValue(a + c, cfn[a]);
			}
		}
		meta_data.setClassIndex(c);
		// header template used to re-bind instances at prediction time
		this.m_InstancesTemplate = new Instances(meta_data, 0);
		if (this.getDebug()) {
			System.out.println("Build BR Meta (" + c + " models)");
		}
		this.m_META = (BR) AbstractClassifier.forName(this.getClassifier().getClass().getName(), ((AbstractClassifier) this.getClassifier()).getOptions());
		this.m_META.buildClassifier(meta_data);
	}

	/**
	 * Predicts by first querying the base model, then feeding its confidences to
	 * the meta model.
	 *
	 * NOTE(review): this mutates the passed instance in place (inserts c attributes
	 * and re-binds its dataset to the meta template) -- callers should not reuse the
	 * instance afterwards expecting its original layout; confirm this is intended.
	 *
	 * @param instance
	 *          the instance to classify
	 * @return the meta model's distribution over the labels
	 */
	@Override
	public double[] distributionForInstance(final Instance instance) throws Exception {
		int c = instance.classIndex();
		double result[] = this.m_BASE.distributionForInstance(instance);
		// detach from the old dataset so attributes can be inserted
		instance.setDataset(null);
		for (int i = 0; i < c; i++) {
			instance.insertAttributeAt(c);
		}
		if (Thread.currentThread().isInterrupted()) {
			throw new InterruptedException("Thread has been interrupted.");
		}
		instance.setDataset(this.m_InstancesTemplate);
		// copy the base confidences into the newly inserted meta attributes
		for (int i = 0; i < c; i++) {
			instance.setValue(c + i, result[i]);
		}
		return this.m_META.distributionForInstance(instance);
	}

	@Override
	public String getRevision() {
		return RevisionUtils.extract("$Revision: 9117 $");
	}

	public static void main(final String args[]) {
		ProblemTransformationMethod.evaluation(new MBR(), args);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/meta/MetaProblemTransformationMethod.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multilabel.meta;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
import meka.classifiers.multilabel.CC;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import meka.core.OptionUtils;
import weka.core.Instance;
import weka.core.Option;
import weka.core.Randomizable;
/**
* MultilabelMetaClassifier.java - For ensembles of multi-label methods.
*
* @author Jesse Read (jmr30@cs.waikato.ac.nz)
*/
public abstract class MetaProblemTransformationMethod extends ProblemTransformationMethod implements Randomizable {

	/** for serialization. */
	private static final long serialVersionUID = -6604797895790690612L;

	/** The ensemble members; instantiated by subclasses in buildClassifier. */
	protected MultiLabelClassifier m_Classifiers[] = null;

	/** Seed used when randomizing/sampling the data. */
	protected int m_Seed = 1;

	/** Number of ensemble members to build. */
	protected int m_NumIterations = 10;

	/** Size of each bag, as a percentage of the training set size. */
	protected int m_BagSizePercent = 67;

	/**
	 * Description to display in the GUI.
	 *
	 * @return the description
	 */
	@Override
	public String globalInfo() {
		return "For ensembles of multi-label methods.";
	}

	public MetaProblemTransformationMethod() {
		// default classifier for GUI
		this.m_Classifier = new CC();
	}

	@Override
	protected String defaultClassifierString() {
		// default classifier for CLI
		return "meka.classifiers.multilabel.CC";
	}

	/**
	 * Averages the members' votes into a per-label confidence in [0,1].
	 *
	 * @param x
	 *          the instance to classify (classIndex() is the number of labels)
	 * @return the averaged distribution over labels
	 * @throws Exception
	 *           if a member fails to predict or the thread is interrupted
	 */
	@Override
	public double[] distributionForInstance(final Instance x) throws Exception {
		double p[] = new double[x.classIndex()];
		for (int i = 0; i < this.m_NumIterations; i++) {
			if (Thread.currentThread().isInterrupted()) {
				throw new InterruptedException("Thread has been interrupted.");
			}
			double d[] = this.m_Classifiers[i].distributionForInstance(x);
			for (int j = 0; j < d.length; j++) {
				p[j] += d[j];
			}
		}
		// turn votes into a [0,1] confidence for each label
		for (int j = 0; j < p.length; j++) {
			p[j] = p[j] / this.m_NumIterations;
		}
		return p;
	}

	public int getNumIterations() {
		return this.m_NumIterations;
	}

	public void setNumIterations(final int n) {
		this.m_NumIterations = n;
	}

	public String numIterationsTipText() {
		return "The number of iterations to perform.";
	}

	public int getBagSizePercent() {
		return this.m_BagSizePercent;
	}

	public void setBagSizePercent(final int p) {
		this.m_BagSizePercent = p;
	}

	public String bagSizePercentTipText() {
		return "The size of the bag in percent (0-100).";
	}

	@Override
	public void setSeed(final int s) {
		this.m_Seed = s;
	}

	@Override
	public int getSeed() {
		return this.m_Seed;
	}

	public String seedTipText() {
		return "The seed value for randomizing the data.";
	}

	@Override
	public Enumeration listOptions() {
		// typed Vector instead of the raw type used previously
		Vector<Option> result = new Vector<>();
		result.addElement(new Option("\tSets the number of models (default 10)", "I", 1, "-I <num>"));
		result.addElement(new Option("\tSize of each bag, as a percentage of total training size (default 67)", "P", 1, "-P <size percentage>"));
		result.addElement(new Option("\tRandom number seed for sampling (default 1)", "S", 1, "-S <seed>"));
		OptionUtils.add(result, super.listOptions());
		return OptionUtils.toEnumeration(result);
	}

	@Override
	public void setOptions(final String[] options) throws Exception {
		this.setSeed(OptionUtils.parse(options, 'S', 1));
		this.setNumIterations(OptionUtils.parse(options, 'I', 10));
		this.setBagSizePercent(OptionUtils.parse(options, 'P', 67));
		super.setOptions(options);
	}

	@Override
	public String[] getOptions() {
		List<String> result = new ArrayList<>();
		OptionUtils.add(result, 'S', this.getSeed());
		OptionUtils.add(result, 'I', this.getNumIterations());
		OptionUtils.add(result, 'P', this.getBagSizePercent());
		OptionUtils.add(result, super.getOptions());
		return OptionUtils.toArray(result);
	}

	/**
	 * Returns a string representation of the model.
	 *
	 * @return the model, or a placeholder string per member with no representation
	 */
	@Override
	public String getModel() {
		if (this.m_Classifiers == null) {
			return this.getClass().getName() + ": No model built yet";
		}
		StringBuilder result = new StringBuilder();
		for (int i = 0; i < this.m_Classifiers.length; i++) {
			if (i > 0) {
				result.append("\n\n");
			}
			// proper append chain instead of string concatenation inside append()
			result.append(this.getClass().getName()).append(": Model #").append(i + 1).append("\n\n");
			String model = this.m_Classifiers[i].getModel();
			if (model.length() > 0) {
				result.append(model);
			} else {
				result.append("No model representation available");
			}
		}
		return result.toString();
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/meta/MultiSearch.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* MultiSearch.java
* Copyright (C) 2008-2017 University of Waikato, Hamilton, New Zealand
*/
package meka.classifiers.multilabel.meta;
import meka.classifiers.AbstractMultiSearch;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.classifiers.multilabel.RAkEL;
import weka.classifiers.Classifier;
import weka.core.RevisionUtils;
import weka.core.SerializedObject;
import weka.core.setupgenerator.AbstractParameter;
import weka.core.setupgenerator.MathParameter;
/**
<!-- globalinfo-start -->
* Performs a search of an arbitrary number of parameters of a classifier and chooses the best combination found.<br>
* The properties being explored are totally up to the user.<br>
* <br>
* E.g., if you have a FilteredClassifier selected as base classifier, sporting a PLSFilter and you want to explore the number of PLS components, then your property will be made up of the following components:<br>
* - filter: referring to the FilteredClassifier's property (= PLSFilter)<br>
* - numComponents: the actual property of the PLSFilter that we want to modify<br>
* And assembled, the property looks like this:<br>
* filter.numComponents<br>
* <br>
* <br>
* The best classifier setup can be accessed after the buildClassifier call via the getBestClassifier method.<br>
* <br>
* The trace of setups evaluated can be accessed after the buildClassifier call as well, using the following methods:<br>
* - getTrace()<br>
* - getTraceSize()<br>
* - getTraceValue(int)<br>
* - getTraceFolds(int)<br>
* - getTraceClassifierAsCli(int)<br>
* - getTraceParameterSettings(int)<br>
* <br>
* Using the weka.core.setupgenerator.ParameterGroup parameter, it is possible to group dependent parameters. In this case, all top-level parameters must be of type weka.core.setupgenerator.ParameterGroup.
* <br><br>
<!-- globalinfo-end -->
*
<!-- options-start -->
* Valid options are: <p>
*
* <pre> -E <ACC|JIDX|HSCORE|EM|JDIST|HLOSS|ZOLOSS|HARSCORE|OE|RLOSS|AVGPREC|LOGLOSSL|LOGLOSSD|F1MICRO|F1MACROEX|F1MACROLBL|AUPRC|AUROC|LCARD|LDIST>
* Determines the parameter used for evaluation:
* ACC = Accuracy
* JIDX = Jaccard index
* HSCORE = Hamming score
* EM = Exact match
* JDIST = Jaccard distance
* HLOSS = Hamming loss
* ZOLOSS = ZeroOne loss
* HARSCORE = Harmonic score
* OE = One error
* RLOSS = Rank loss
* AVGPREC = Avg precision
* LOGLOSSL = Log Loss (lim. L)
* LOGLOSSD = Log Loss (lim. D)
* F1MICRO = F1 (micro averaged)
* F1MACROEX = F1 (macro averaged by example)
* F1MACROLBL = F1 (macro averaged by label)
* AUPRC = AUPRC (macro averaged)
* AUROC = AUROC (macro averaged)
* LCARD = Label cardinality (predicted)
* LDIST = Levenshtein distance
* (default: ACC)</pre>
*
* <pre> -search "<classname options>"
* A property search setup.
* </pre>
*
* <pre> -algorithm "<classname options>"
* A search algorithm.
* </pre>
*
* <pre> -log-file <filename>
* The log file to log the messages to.
* (default: none)</pre>
*
* <pre> -S <num>
* Random number seed.
* (default 1)</pre>
*
* <pre> -W
* Full name of base classifier.
* (default: meka.classifiers.multilabel.RAkEL)</pre>
*
* <pre> -output-debug-info
* If set, classifier is run in debug mode and
* may output additional info to the console</pre>
*
* <pre> -do-not-check-capabilities
* If set, classifier capabilities are not checked before classifier is built
* (use with caution).</pre>
*
* <pre> -num-decimal-places
* The number of decimal places for the output of numbers in the model (default 2).</pre>
*
* <pre> -batch-size
* The desired batch size for batch prediction (default 100).</pre>
*
* <pre>
* Options specific to classifier meka.classifiers.multilabel.RAkEL:
* </pre>
*
* <pre> -M <num>
* Sets M (default 10): the number of subsets</pre>
*
* <pre> -k <num>
* The number of labels in each subset (must be at least 1 and less than the number of labels) </pre>
*
* <pre> -P <value>
* Sets the pruning value, defining an infrequent labelset as one which occurs <= P times in the data (P = 0 defaults to LC).
* default: 0 (LC)</pre>
*
* <pre> -N <value>
* Sets the (maximum) number of frequent labelsets to subsample from the infrequent labelsets.
* default: 0 (none)
* n N = n
* -n N = n, or 0 if LCard(D) >= 2
* n-m N = random(n,m)</pre>
*
* <pre> -S <value>
* The seed value for randomization
* default: 0</pre>
*
* <pre> -W
* Full name of base classifier.
* (default: weka.classifiers.trees.J48)</pre>
*
* <pre> -output-debug-info
* If set, classifier is run in debug mode and
* may output additional info to the console</pre>
*
* <pre> -do-not-check-capabilities
* If set, classifier capabilities are not checked before classifier is built
* (use with caution).</pre>
*
* <pre> -num-decimal-places
* The number of decimal places for the output of numbers in the model (default 2).</pre>
*
* <pre> -batch-size
* The desired batch size for batch prediction (default 100).</pre>
*
* <pre>
* Options specific to classifier weka.classifiers.trees.J48:
* </pre>
*
* <pre> -U
* Use unpruned tree.</pre>
*
* <pre> -O
* Do not collapse tree.</pre>
*
* <pre> -C <pruning confidence>
* Set confidence threshold for pruning.
* (default 0.25)</pre>
*
* <pre> -M <minimum number of instances>
* Set minimum number of instances per leaf.
* (default 2)</pre>
*
* <pre> -R
* Use reduced error pruning.</pre>
*
* <pre> -N <number of folds>
* Set number of folds for reduced error
* pruning. One fold is used as pruning set.
* (default 3)</pre>
*
* <pre> -B
* Use binary splits only.</pre>
*
* <pre> -S
* Do not perform subtree raising.</pre>
*
* <pre> -L
* Do not clean up after the tree has been built.</pre>
*
* <pre> -A
* Laplace smoothing for predicted probabilities.</pre>
*
* <pre> -J
* Do not use MDL correction for info gain on numeric attributes.</pre>
*
* <pre> -Q <seed>
* Seed for random data shuffling (default 1).</pre>
*
* <pre> -doNotMakeSplitPointActualValue
* Do not make split point actual value.</pre>
*
* <pre> -output-debug-info
* If set, classifier is run in debug mode and
* may output additional info to the console</pre>
*
* <pre> -do-not-check-capabilities
* If set, classifier capabilities are not checked before classifier is built
* (use with caution).</pre>
*
* <pre> -num-decimal-places
* The number of decimal places for the output of numbers in the model (default 2).</pre>
*
* <pre> -batch-size
* The desired batch size for batch prediction (default 100).</pre>
*
<!-- options-end -->
*
* @author fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision: 4521 $
*/
public class MultiSearch extends AbstractMultiSearch implements MultiLabelClassifier {

	/** for serialization. */
	private static final long serialVersionUID = -5129316523575906233L;

	/**
	 * Returns the default classifier to use.
	 *
	 * @return the default classifier
	 */
	protected Classifier defaultClassifier() {
		return new RAkEL();
	}

	/**
	 * Builds one search dimension over the given property: min..max in the given
	 * step, on base 10 with expression "I".
	 */
	private static MathParameter makeParameter(final String property, final int min, final int max, final int step) {
		MathParameter param = new MathParameter();
		param.setProperty(property);
		param.setMin(min);
		param.setMax(max);
		param.setStep(step);
		param.setBase(10);
		param.setExpression("I");
		return param;
	}

	/**
	 * Returns the default search parameters: RAkEL's M in {5,10,15} and K in {1,2,3}.
	 *
	 * @return the parameters (a deep copy, or an empty array if copying fails)
	 */
	protected AbstractParameter[] defaultSearchParameters() {
		AbstractParameter[] defaults = new AbstractParameter[] { makeParameter("M", 5, 15, 5), makeParameter("K", 1, 3, 1) };
		try {
			// deep copy via serialization so callers cannot mutate the shared defaults
			return (AbstractParameter[]) new SerializedObject(defaults).getObject();
		} catch (Exception e) {
			System.err.println("Failed to create copy of default parameters!");
			e.printStackTrace();
			return new AbstractParameter[0];
		}
	}

	/**
	 * Set the base learner.
	 *
	 * @param newClassifier the classifier to use.
	 */
	@Override
	public void setClassifier(Classifier newClassifier) {
		if (!(newClassifier instanceof MultiLabelClassifier)) {
			throw new IllegalStateException("Base classifier must implement " + MultiLabelClassifier.class.getName() + ", provided: " + newClassifier.getClass().getName());
		}
		super.setClassifier(newClassifier);
	}

	/**
	 * Returns the revision string.
	 *
	 * @return the revision
	 */
	@Override
	public String getRevision() {
		return RevisionUtils.extract("$Revision: 4521 $");
	}

	/**
	 * Main method for running this classifier from commandline.
	 *
	 * @param args the options
	 */
	public static void main(String[] args) {
		runClassifier(new MultiSearch(), args);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/meta/RandomSubspaceML.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multilabel.meta;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.List;
import java.util.Random;
import java.util.Vector;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import meka.core.A;
import meka.core.F;
import meka.core.MLUtils;
import meka.core.OptionUtils;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.Randomizable;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
/**
* RandomSubspaceML.java - Subsample the attribute space and instance space randomly for each
* ensemble member. Basically a generalized version of Random Forests. It is computationally cheaper
* than EnsembleML for the same number of models. <br>
* As used with CC in: Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank. <i>Classifier
* Chains for Multi-label Classification</i>. Machine Learning Journal. Springer. Vol. 85(3), pp
* 333-359. (May 2011). <br>
* In earlier versions of Meka this class was called <i>BaggingMLq</i> and used Bagging procedure.
* Now it uses a simple ensemble cut. <br>
*
* @author Jesse Read
* @version June 2014
*/
public class RandomSubspaceML extends MetaProblemTransformationMethod implements TechnicalInformationHandler {
/** for serialization. */
private static final long serialVersionUID = 3608541911971484299L;
protected int m_AttSizePercent = 50;
protected int m_IndicesCut[][] = null;
protected Instances m_InstancesTemplates[] = null;
protected Instance m_InstanceTemplates[] = null;
@Override
public void buildClassifier(final Instances D) throws Exception {
    this.testCapabilities(D);
    // One template Instance/Instances pair per ensemble member, used at prediction
    // time to map an incoming instance into that member's reduced attribute space.
    this.m_InstancesTemplates = new Instances[this.m_NumIterations];
    this.m_InstanceTemplates = new Instance[this.m_NumIterations];
    if (this.getDebug()) {
        System.out.println("-: Models: ");
    }
    this.m_Classifiers = ProblemTransformationMethod.makeCopies((ProblemTransformationMethod) this.m_Classifier, this.m_NumIterations);
    Random r = new Random(this.m_Seed);
    // Number of instances kept per member (a simple ensemble cut, not bootstrap sampling).
    int N_sub = (D.numInstances() * this.m_BagSizePercent / 100);
    int L = D.classIndex();
    // d = number of non-label attributes; d_new = how many of them each member keeps.
    int d = D.numAttributes() - L;
    int d_new = d * this.m_AttSizePercent / 100;
    this.m_IndicesCut = new int[this.m_NumIterations][];
    for (int i = 0; i < this.m_NumIterations; i++) {
        if (Thread.currentThread().isInterrupted()) {
            throw new InterruptedException("Thread has been interrupted.");
        }
        // Downsize the instance space (exactly like in EnsembleML.java).
        if (this.getDebug()) {
            System.out.print("\t" + (i + 1) + ": ");
        }
        D.randomize(r);
        Instances D_cut = new Instances(D, 0, N_sub);
        if (this.getDebug()) {
            System.out.print("N=" + D.numInstances() + " -> N'=" + D_cut.numInstances() + ", ");
        }
        // Downsize the attribute space: draw (d - d_new) attribute indices at random
        // to REMOVE, leaving d_new attributes. The class index must be unset while
        // columns are removed.
        D_cut.setClassIndex(-1);
        int indices_a[] = A.make_sequence(L, d + L);
        A.shuffle(indices_a, r);
        indices_a = Arrays.copyOfRange(indices_a, 0, d - d_new);
        Arrays.sort(indices_a);
        // Store the complement (the KEPT column indices) for use at prediction time.
        this.m_IndicesCut[i] = A.invert(indices_a, D.numAttributes());
        D_cut = F.remove(D_cut, indices_a, false);
        D_cut.setClassIndex(L);
        if (this.getDebug()) {
            System.out.print(" A:=" + (D.numAttributes() - L) + " -> A'=" + (D_cut.numAttributes() - L) + " (" + this.m_IndicesCut[i][L] + ",...,"
                    + this.m_IndicesCut[i][this.m_IndicesCut[i].length - 1] + ")");
        }
        // Train the member with a member-specific seed where the base supports it.
        if (this.m_Classifiers[i] instanceof Randomizable) {
            ((Randomizable) this.m_Classifiers[i]).setSeed(this.m_Seed + i);
        }
        if (this.getDebug()) {
            System.out.println(".");
        }
        this.m_Classifiers[i].buildClassifier(D_cut);
        // NOTE(review): uses instance(1) (the SECOND instance) as the template; this
        // throws if N_sub < 2 -- presumably instance(0) would do. TODO confirm.
        this.m_InstanceTemplates[i] = D_cut.instance(1);
        this.m_InstancesTemplates[i] = new Instances(D_cut, 0);
    }
    if (this.getDebug()) {
        System.out.println(":-");
    }
}
@Override
public double[] distributionForInstance(final Instance x) throws Exception {
    int L = x.classIndex();
    // Accumulated (unnormalised) votes over all ensemble members.
    double p[] = new double[L];
    for (int i = 0; i < this.m_NumIterations; i++) {
        if (Thread.currentThread().isInterrupted()) {
            throw new InterruptedException("Thread has been interrupted.");
        }
        // Use a template Instance from training, and copy values over
        // (this is faster than copying x and cutting it to shape).
        // NOTE(review): the stored template is mutated in place, so this method
        // is not safe for concurrent prediction calls on the same model.
        Instance x_ = this.m_InstanceTemplates[i];
        MLUtils.copyValues(x_, x, this.m_IndicesCut[i]);
        x_.setDataset(this.m_InstancesTemplates[i]);
        // TODO, use generic voting scheme somewhere?
        double d[] = ((ProblemTransformationMethod) this.m_Classifiers[i]).distributionForInstance(x_);
        for (int j = 0; j < d.length; j++) {
            p[j] += d[j];
        }
    }
    // Confidences are summed over members (not divided by m_NumIterations).
    return p;
}
/**
 * Returns an enumeration of the available options (the -A flag plus the
 * superclass options).
 */
@Override
public Enumeration listOptions() {
    Vector options = new Vector();
    Option attOption = new Option(
            "\tSize of attribute space, as a percentage of total attribute space size (must be between 1 and 100, default: 50)",
            "A", 1, "-A <size percentage>");
    options.addElement(attOption);
    OptionUtils.add(options, super.listOptions());
    return OptionUtils.toEnumeration(options);
}
/**
 * Parses the options, consuming -A (attribute size percentage, default 50)
 * before delegating to the superclass.
 */
@Override
public void setOptions(final String[] options) throws Exception {
    final int attPercent = OptionUtils.parse(options, 'A', 50);
    this.setAttSizePercent(attPercent);
    super.setOptions(options);
}
/**
 * Returns the current option settings: the -A flag followed by the
 * superclass options.
 */
@Override
public String[] getOptions() {
    final List<String> opts = new ArrayList<>();
    OptionUtils.add(opts, 'A', this.getAttSizePercent());
    OptionUtils.add(opts, super.getOptions());
    return OptionUtils.toArray(opts);
}
/**
 * Command-line entry point: evaluates RandomSubspaceML with the given options.
 *
 * @param args the command-line options
 */
public static void main(final String args[]) {
    final RandomSubspaceML classifier = new RandomSubspaceML();
    ProblemTransformationMethod.evaluation(classifier, args);
}
/**
 * Description shown in the GUI.
 *
 * @return the description
 */
@Override
public String globalInfo() {
    final String info = "Combining several multi-label classifiers in an ensemble where the attribute space for each model is a random subset of the original space.";
    return info;
}
/**
 * Returns the bibliographic reference for the method (Read et al., 2011,
 * Classifier Chains for Multi-label Classification).
 */
@Override
public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation info = new TechnicalInformation(Type.ARTICLE);
    info.setValue(Field.AUTHOR, "Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank");
    info.setValue(Field.TITLE, "Classifier Chains for Multi-label Classification");
    info.setValue(Field.JOURNAL, "Machine Learning Journal");
    info.setValue(Field.YEAR, "2011");
    info.setValue(Field.VOLUME, "85");
    info.setValue(Field.NUMBER, "3");
    info.setValue(Field.PAGES, "333-359");
    return info;
}
/** Returns the revision string of this class. */
@Override
public String getRevision() {
    final String revision = "$Revision: 9117 $";
    return RevisionUtils.extract(revision);
}
/**
 * Sets the percentage of attributes to sample from the original set.
 *
 * @param value the percentage of attributes each ensemble member keeps; must lie in [1, 100]
 * @throws IllegalArgumentException if {@code value} is outside [1, 100]
 */
public void setAttSizePercent(final int value) {
    // The -A option is documented as "must be between 1 and 100"; enforce it here
    // so an invalid setting fails fast instead of silently producing an empty
    // (or oversized) attribute subset during training.
    if (value < 1 || value > 100) {
        throw new IllegalArgumentException(
                "Attribute size percentage must be in [1, 100], provided: " + value);
    }
    this.m_AttSizePercent = value;
}
/**
 * Gets the percentage of attributes to sample from the original set.
 *
 * @return the attribute size percentage
 */
public int getAttSizePercent() {
    final int percent = this.m_AttSizePercent;
    return percent;
}
/** Tool-tip shown next to the attribute-size option in the GUI. */
public String attSizePercentTipText() {
    final String tip = "Size of attribute space, as a percentage of total attribute space size";
    return tip;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/meta/SubsetMapper.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multilabel.meta;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Random;
import java.util.Set;
import java.util.Vector;
import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import meka.core.MLUtils;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
/**
* Maps the output of a multi-label classifier to a known label combination using the hamming
* distance. described in <i>Improved Boosting Algorithms Using Confidence-rated Predictions</i> by
* Schapire, Robert E. and Singer, Yoram
*
* @author Jesse Read (jmr30@cs.waikato.ac.nz)
*/
public class SubsetMapper extends ProblemTransformationMethod implements TechnicalInformationHandler {
/** for serialization. */
private static final long serialVersionUID = -6587406787943635084L;
/**
 * Description to display in the GUI.
 *
 * @return the description
 */
@Override
public String globalInfo() {
    // Fix: the two literals used to concatenate without a separator, rendering
    // "...hamming distance.For more information..." in the GUI.
    return "Maps the output of a multi-label classifier to a known label combination using the hamming distance. " + "For more information see:\n"
            + this.getTechnicalInformation().toString();
}
/** Default constructor: uses BR as the base classifier (shown in the GUI). */
public SubsetMapper() {
    // default classifier for GUI
    this.m_Classifier = new BR();
}
/** Returns the class name of the default base classifier for the CLI. */
@Override
protected String defaultClassifierString() {
    final String defaultClassifier = "meka.classifiers.multilabel.BR";
    return defaultClassifier;
}
/**
 * Returns the bibliographic reference (Schapire &amp; Singer, 1999, Improved
 * Boosting Algorithms Using Confidence-rated Predictions).
 */
@Override
public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation info = new TechnicalInformation(Type.ARTICLE);
    info.setValue(Field.AUTHOR, "Robert E. Schapire, Yoram Singer ");
    info.setValue(Field.TITLE, "Improved Boosting Algorithms Using Confidence-rated Predictions");
    info.setValue(Field.JOURNAL, "Machine Learning Journal");
    info.setValue(Field.YEAR, "1999");
    info.setValue(Field.VOLUME, "37");
    info.setValue(Field.NUMBER, "3");
    info.setValue(Field.PAGES, "297-336");
    return info;
}
protected HashMap<String, Integer> m_Count = new HashMap<>();
/**
 * Maps a real-valued prediction to the closest labelset seen during training.
 * The prediction is rounded to a bit string; if that exact combination was
 * observed it is returned directly, otherwise the training labelset with the
 * smallest Hamming distance wins, with ties broken by the stored count.
 *
 * NOTE(review): the counts in m_Count are only ever written as 0 (nothing
 * increments them), so the count-based tie-break appears inert -- TODO confirm.
 *
 * @param d the predicted per-label confidences
 * @return the nearest known labelset as a 0/1 double array
 * @throws Exception if the thread is interrupted
 */
protected double[] nearestSubset(final double d[]) throws Exception {
    String comb = MLUtils.toBitString(doubles2ints(d));
    // If the exact combination exists, return it unchanged.
    if (this.m_Count.get(comb) != null) {
        return MLUtils.fromBitString(comb);
    }
    int closest_count = 0;
    int min_distance = Integer.MAX_VALUE;
    String nearest = comb;
    // Candidates are visited in a deterministic shuffled order (fixed seed).
    for (String current : this.shuffle(this.m_Count.keySet())) {
        if (Thread.currentThread().isInterrupted()) {
            throw new InterruptedException("Thread has been interrupted.");
        }
        int distance = hammingDistance(current, comb);
        if (distance == min_distance) {
            // Same distance as the current best: prefer the more frequent labelset.
            int count = this.m_Count.get(current);
            if (count > closest_count) {
                nearest = current;
                closest_count = count;
            }
        }
        if (distance < min_distance) {
            // Strictly better: adopt this candidate and remember its count.
            min_distance = distance;
            nearest = current;
            closest_count = this.m_Count.get(nearest);
        }
    }
    return MLUtils.fromBitString(nearest);
}
/**
 * Returns the label subsets in a deterministically shuffled order
 * (fixed seed, so the order is reproducible across calls).
 */
private Collection<String> shuffle(final Set<String> labelSubsets) {
    final int seed = 1;
    Vector<String> shuffled = new Vector<>(labelSubsets.size());
    shuffled.addAll(labelSubsets);
    Collections.shuffle(shuffled, new Random(seed));
    return shuffled;
}
@Override
public void buildClassifier(final Instances D) throws Exception {
    this.testCapabilities(D);
    // Record every labelset occurring in the training data. The stored value is
    // always 0 -- nothing in this class increments it.
    for (int i = 0; i < D.numInstances(); i++) {
        if (Thread.currentThread().isInterrupted()) {
            throw new InterruptedException("Thread has been interrupted.");
        }
        this.m_Count.put(MLUtils.toBitString(D.instance(i), D.classIndex()), 0);
    }
    // Train the wrapped multi-label base classifier on the full data.
    this.m_Classifier.buildClassifier(D);
}
@Override
public double[] distributionForInstance(final Instance TestInstance) throws Exception {
    // Get the base classifier's raw prediction, then snap it to the nearest
    // labelset observed during training.
    double r[] = ((ProblemTransformationMethod) this.m_Classifier).distributionForInstance(TestInstance);
    return this.nearestSubset(r);
}
/** Rounds each entry of the array to the nearest integer. */
private static final int[] doubles2ints(final double d[]) {
    final int n = d.length;
    int rounded[] = new int[n];
    for (int i = 0; i < n; i++) {
        rounded[i] = (int) Math.round(d[i]);
    }
    return rounded;
}
/**
 * Hamming distance between two bit strings, compared only over their
 * common prefix (up to the length of the shorter string).
 */
private static final int hammingDistance(final String s1, final String s2) {
    final int len = Math.min(s1.length(), s2.length());
    int distance = 0;
    for (int i = 0; i < len; i++) {
        distance += Math.abs(MLUtils.char2int(s1.charAt(i)) - MLUtils.char2int(s2.charAt(i)));
    }
    return distance;
}
/** Returns the revision string of this class. */
@Override
public String getRevision() {
    final String revision = "$Revision: 9117 $";
    return RevisionUtils.extract(revision);
}
/**
 * Command-line entry point: evaluates SubsetMapper with the given options.
 *
 * @param args the command-line options
 */
public static void main(final String args[]) {
    final SubsetMapper classifier = new SubsetMapper();
    ProblemTransformationMethod.evaluation(classifier, args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/neurofuzzy/ARAMNetwork.java
|
/*
*
*
* Adapted from NaiveBayes.java
*
* Copyright (C) 2016 Fernando Benites
* @author Fernando Benites
*/
package meka.classifiers.multilabel.neurofuzzy;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
import java.util.Arrays;
import meka.classifiers.multilabel.*;
import weka.classifiers.Classifier;
import meka.classifiers.multilabel.Evaluation;
import weka.classifiers.UpdateableClassifier;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Utils;
import meka.core.MLUtils;
import weka.core.WeightedInstancesHandler;
import weka.core.RevisionUtils;
/**
* ****REPLACE THE FOLLOWING WITH SIMILAR INFORMATION.
* Class for a Naive Bayes classifier using estimator classes. Numeric
* estimator precision values are chosen based on analysis of the
* training data. For this reason, the classifier is not an
* UpdateableClassifier (which in typical usage are initialized with zero
* training instances) -- if you need the UpdateableClassifier functionality,
* use the NaiveBayesUpdateable classifier. The NaiveBayesUpdateable
* classifier will use a default precision of 0.1 for numeric attributes
* when buildClassifier is called with zero training instances.
* <p>
* For more information on Naive Bayes classifiers, see<p>
*
* George H. John and Pat Langley (1995). <i>Estimating
* Continuous Distributions in Bayesian Classifiers</i>. Proceedings
* of the Eleventh Conference on Uncertainty in Artificial
* Intelligence. pp. 338-345. Morgan Kaufmann, San Mateo.<p>
*
* Valid options are:<p>
*
* -K <br>
* Use kernel estimation for modelling numeric attributes rather than
* a single normal distribution.<p>
*
* -D <br>
* Use supervised discretization to process numeric attributes.<p>
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @author Rushi Bhatt (rushi@cns.bu.edu)
* @version $Revision: 1.16 $
* Modified by Rushi for use as a CN710 template
*/
public class ARAMNetwork extends ARAMNetworkClass {
//**** THIS IS WHERE CLASSIFIER WEIGHTS ETC GO ****
//define stuff like weight matrices, classifier parameters etc.
//e.g., protected double rho_a_bar=0.0;
public int numFeatures = -1;
public int numClasses = -1;
public double threshold=0.02;
int numCategories = 0;
double roa = 0.9;
double rob = 1.0;
double alpha = 0.0001;
double[][] weightsA = null;
double[] sweightsA = null;
double sweightsA0=0;
double[][] weightsB = null;
int learningRate = 1;
int weightblearnmethod= 0;
int maxNumCategories = 20000;
boolean m_userankstoclass=false;
boolean learningphase=true;
int[] neuronsactivated=null;
double[] neuronsactivity=null;
List<Integer> order=null;
int nrinstclassified=0;
String activity_report="";
/** Returns the indices of the neurons activated by the last prediction. */
public int[] getneuronsactivated() {
    return this.neuronsactivated;
}
/** Returns the activation values of the neurons used by the last prediction. */
public double[] getneuronsactivity() {
    return this.neuronsactivity;
}
/**
 * Builds a network with the given (complement-coded, i.e. already doubled)
 * feature- and class-layer sizes, vigilance rho and activation threshold.
 */
public ARAMNetwork(int fnumFeatures, int fnumClasses, double fro, double fthreshold) {
    initARAM(fnumFeatures, fnumClasses, fro, fthreshold);
}
/** Default constructor; the network is sized lazily on the first buildClassifier call. */
public ARAMNetwork(){
}
/**
 * Initialises the network with a single uncommitted category node.
 *
 * @param fnumFeatures size of the (complement-coded) feature layer
 * @param fnumClasses  size of the (complement-coded) class layer
 * @param fro          vigilance parameter rho for the feature layer
 * @param fthreshold   activation threshold used during prediction
 */
private void initARAM(int fnumFeatures, int fnumClasses, double fro, double fthreshold) {
    numFeatures = fnumFeatures;
    numClasses = fnumClasses;
    // Fix: the vigilance argument was previously ignored, so the
    // ARAMNetwork(int,int,double,double) constructor silently discarded rho.
    // (buildClassifier passes the current field back in, so it is unaffected.)
    roa = fro;
    threshold = fthreshold;
    // One uncommitted node: all-ones feature weights (matches any input),
    // all-zeros label weights (marks the node as uncommitted).
    weightsA = new double[1][numFeatures];
    Arrays.fill(weightsA[0], 1);
    weightsB = new double[1][numClasses];
    Arrays.fill(weightsB[0], 0);
    numCategories = 1;
}
/**
 * Returns a string describing this classifier.
 *
 * @return a description of the classifier suitable for displaying in the
 *         explorer/experimenter gui.
 */
public String globalInfo() {
    // Fix: replaced the placeholder text ("This is ARAM.") left over from the
    // template with an actual description.
    return "ML-ARAM: a multi-label Adaptive Resonance Associative Map classifier, "
            + "a neuro-fuzzy ARTMAP-based method.";
}
/**
 * Trains the network by presenting each training instance once, in the stored
 * {@code order}, to {@link #updateClassifier}. On the first call the network
 * is sized from the data; later calls with incompatible layer sizes are
 * silently skipped.
 *
 * @param D set of instances serving as training data
 * @exception Exception if the classifier has not been generated successfully
 */
public void buildClassifier(Instances D) throws Exception {
    int L = D.classIndex();
    // Complement coding doubles both the feature and the label vector lengths.
    int featlength = (D.numAttributes() - L) * 2;
    int numSamples = D.numInstances();
    int classlength = L * 2;
    // Default presentation order is simply 0..N-1 unless one was set beforehand.
    if (this.order == null) {
        order = new ArrayList<Integer>();
        for (int j = 0; j < D.numInstances(); j++) {
            order.add(j);
        }
    }
    if (numFeatures == -1) {
        // First call: size the network from the data.
        initARAM(featlength, classlength, roa, threshold);
    } else {
        // Already initialised: silently skip incompatible data.
        // NOTE(review): returning without any error may hide a mis-sized dataset.
        if (featlength != numFeatures) {
            return;
        }
        if (classlength != numClasses) {
            return;
        }
    }
    // Present each training instance once, in the configured order.
    for (int i = 0; i < D.numInstances(); i++) {
        Instance instance = D.get(order.get(i));
        updateClassifier(instance);
    }
    System.out.println("Training done, used " + numCategories + " neurons with rho =" + roa + ".");
}
// ****THIS IS THE WEIGHT UPDATE ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
/**
 * Presents a single training instance to the network, ARTMAP-style:
 * activations are computed for all category nodes, candidates are tried
 * best-first, and the first node whose feature match (&gt;= roa) and label match
 * (&gt;= rob) both pass vigilance is updated. If the winning node is the
 * uncommitted one, it is committed to this sample and a fresh uncommitted
 * node is appended.
 *
 * @param instance the new training instance to include in the model
 * @exception Exception if the instance could not be incorporated in the model.
 */
public void updateClassifier(Instance instance) throws Exception {
    // Called once for each instance; a no-op outside the learning phase.
    if (!learningphase) {
        return;
    }
    // Undo the complement-coding doubling to get the raw layer sizes.
    int num_classes = (int) (0.5 * numClasses);
    int num_features = (int) (0.5 * numFeatures);
    double[] data = new double[numFeatures];
    double[] labels = new double[numClasses];
    int numChanges = 0;
    if (!instance.classIsMissing()) {
        // Complement-code the inputs: [x, 1-x]. Label attributes are assumed to
        // occupy the first num_classes columns of the instance -- data is expected
        // to be normalised to [0,1] (not checked here).
        for (int j = 0; j < num_features; j++) {
            data[j] = instance.value(num_classes + j);
            data[j + num_features] = 1 - data[j];
        }
        for (int j = 0; j < num_classes; j++) {
            labels[j] = instance.value(j);
            labels[j + num_classes] = 1 - labels[j];
        }
        // Rank all category nodes by activation, best first.
        SortPair[] cateacti = ARTActivateCategories(data);
        java.util.Arrays.sort(cateacti);
        boolean resonance = false;
        int currentSortedIndex = 0;
        int currentCategory = -1;
        double matchA = 0;
        double matchB = 0;
        while (!resonance) {
            currentCategory = cateacti[currentSortedIndex].getOriginalIndex();
            matchA = ART_Calculate_Match(data, weightsA[currentCategory]);
            // An all-zero label weight vector marks the uncommitted node,
            // which always passes the label vigilance test.
            if (sumArray(weightsB[currentCategory]) == 0) {
                matchB = 1;
            } else {
                matchB = ART_Calculate_Match(labels, weightsB[currentCategory]);
            }
            if (matchA >= roa && matchB >= rob) {
                if (currentCategory == numCategories - 1) {
                    // The winner is the uncommitted node.
                    // NOTE(review): this compares the search position, not the node
                    // count, against maxNumCategories -- TODO confirm intent.
                    if (currentSortedIndex == maxNumCategories) {
                        System.out.println("WARNING: The maximum number of categories has been reached.");
                        resonance = true;
                    } else {
                        // Commit the node: copy the sample into it ...
                        for (int j = 0; j < data.length; j++) {
                            weightsA[currentCategory][j] = data[j];
                        }
                        for (int j = 0; j < weightsB[currentCategory].length; j++) {
                            weightsB[currentCategory][j] = labels[j];
                        }
                        // ... and append a fresh uncommitted node.
                        ARAMm_Add_New_Category();
                        numChanges = numChanges + 1;
                        resonance = true;
                    }
                } else {
                    // Resonance with a committed node: update its weights.
                    double weightChange = ARAMm_Update_Weights(data, labels, currentCategory);
                    if (weightChange == 1) {
                        numChanges += 1;
                    }
                    resonance = true;
                }
            } else {
                // Vigilance failed: try the next-best node.
                currentSortedIndex += 1;
                resonance = false;
            }
        }
    }
}
//****THIS IS THE CLASSIFICATION ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
/**
 * Calculates the per-label ranking for the given test instance: category
 * activations are computed, the group of nodes whose activation lies within a
 * threshold-controlled band below the best one is selected, and their label
 * weight vectors are combined with activation-proportional weights. The
 * activated neurons and their activities are recorded for later inspection.
 *
 * @param instance the instance to be classified
 * @return per-label ranking scores (or a hard 0/1 assignment when
 *         m_userankstoclass is set)
 * @exception Exception if there is a problem generating the prediction
 */
public double[] distributionForInstance(Instance instance) throws Exception {
    int num_classes = (int) (0.5 * numClasses);
    int num_features = (int) (0.5 * numFeatures);
    double[] dist = new double[num_classes];
    double[] currentData = new double[numFeatures];
    double[] ranking = new double[num_classes];
    // Complement-code the test features (labels assumed to come first).
    for (int j = 0; j < num_features; j++) {
        currentData[j] = instance.value(num_classes + j);
        currentData[num_features + j] = 1 - currentData[j];
    }
    SortPair[] sortedActivations = ARTActivateCategories(currentData);
    java.util.Arrays.sort(sortedActivations);
    // Spread between the best and the second-worst activation.
    // NOTE(review): index numCategories - 2 underflows when numCategories < 2 -- TODO confirm.
    double diff_act = sortedActivations[0].getValue()
            - sortedActivations[numCategories - 2].getValue();
    int largest_activ = 1;
    double activ_change = 0;
    // Count how many top nodes fall within the relative activation band.
    for (int i = 1; i < numCategories; i++) {
        activ_change = (sortedActivations[0].getValue() - sortedActivations[i]
                .getValue())
                / sortedActivations[0].getValue();
        if (activ_change > threshold * diff_act) {
            break;
        }
        largest_activ = largest_activ + 1;
    }
    double[] best_matches = new double[largest_activ];
    java.util.Arrays.fill(best_matches, 1);
    for (int i = 0; i < largest_activ; i++) {
        best_matches[i] = sortedActivations[i].getValue();
    }
    // Normalise the selected activations so they sum to 1.
    double sum_mat = sumArray(best_matches);
    int currentCategory = 0;
    this.neuronsactivated = new int[largest_activ];
    this.neuronsactivity = new double[largest_activ];
    for (int i = 0; i < largest_activ; i++) {
        this.neuronsactivity[i] = best_matches[i];
        best_matches[i] = best_matches[i] / sum_mat;
        currentCategory = sortedActivations[i].getOriginalIndex();
        this.neuronsactivated[i] = currentCategory;
        // Accumulate the activation-weighted label weights.
        for (int j = 0; j < num_classes; j++) {
            ranking[j] = ranking[j]
                    + best_matches[i] * weightsB[currentCategory][j];
        }
    }
    if (m_userankstoclass) {
        return ARAMm_Ranking2Class(ranking);
    }
    return ranking;
}
/**
 * Converts a ranking into a hard 0/1 label assignment: scores are sorted in
 * descending order, the largest gap between consecutive scores is located,
 * and every label ranked above that gap is switched on.
 *
 * @param rankings per-label ranking scores
 * @return a 0/1 array of the same length
 */
public double[] ARAMm_Ranking2Class(double[] rankings) {
    int columns = rankings.length;
    double[] classes = new double[columns];
    SortPair[] sortedRanks = new SortPair[columns];
    for (int j = 0; j < columns; j++) {
        sortedRanks[j] = new SortPair(rankings[j], j);
    }
    java.util.Arrays.sort(sortedRanks);
    // change[j-1] holds the drop between rank j-1 and rank j (descending order).
    SortPair[] change = new SortPair[columns - 1];
    for (int j = 1; j < columns; j++) {
        change[j - 1] = new SortPair(sortedRanks[j - 1].getValue() - sortedRanks[j].getValue(), j);
    }
    java.util.Arrays.sort(change);
    // Cut at the position of the largest drop; everything ranked before it becomes 1.
    int ind = change[0].getOriginalIndex();
    for (int j = 0; j < ind; j++) {
        classes[sortedRanks[j].getOriginalIndex()] = 1;
    }
    return classes;
}
/**
 * Computes the fuzzy ART choice function for every category node:
 * activation = |min(input, w)| / (alpha + |w|), where |.| is the
 * city-block norm. Returns one SortPair (activation, node index) per node.
 */
private SortPair[] ARTActivateCategories(double[] input) {
    SortPair[] activations = new SortPair[numCategories];
    for (int category = 0; category < numCategories; category++) {
        double overlap = 0;    // |min(input, w)| -- fuzzy AND overlap
        double weightNorm = 0; // |w|
        for (int j = 0; j < numFeatures; j++) {
            double w = weightsA[category][j];
            double m = (input[j] < w) ? input[j] : w;
            overlap += m;
            weightNorm += w;
        }
        activations[category] = new SortPair(overlap / (alpha + weightNorm), category);
    }
    return activations;
}
// ****YOU SHOULDN'T NEED TO CHANGE THIS
/**
 * Classifies the given test instance. The instance has to belong to a
 * dataset when it's being classified. Note that a classifier MUST
 * implement either this or distributionForInstance().
 *
 * @param instance the instance to be classified
 * @return the predicted most likely class for the instance or
 *         Instance.missingValue() if no prediction is made
 * @exception Exception if an error occurred during the prediction
 */
public double classifyInstance(Instance instance) throws Exception {
    double[] dist = distributionForInstance(instance);
    if (dist == null) {
        throw new Exception("Null distribution predicted");
    }
    switch (instance.classAttribute().type()) {
    case Attribute.NOMINAL:
        // Return the index of the largest strictly-positive probability.
        double max = 0;
        int maxIndex = 0;
        for (int i = 0; i < dist.length; i++) {
            if (dist[i] > max) {
                maxIndex = i;
                max = dist[i];
            }
        }
        if (max > 0) {
            return maxIndex;
        } else {
            // NOTE(review): nothing is returned here and there is no break, so
            // when max == 0 control falls through to the NUMERIC case and
            // returns dist[0] -- TODO confirm this fall-through is intentional.
        }
    case Attribute.NUMERIC:
        return dist[0];
    default:
        return -1;
    }
}
// ****ANY OPTIONS/PARAMETERS GO HERE****
/**
 * Returns an enumeration describing the available options (-P and -K).
 *
 * @return an enumeration of all the available options.
 */
public Enumeration listOptions() {
    Vector options = new Vector(2);
    Option rhoOption = new Option("\tChange generalization parameter Rho\n", "P", 0, "-P");
    Option rankOption = new Option("\tUse ranking to class function special dev. for ARAM.\n", "K", 0, "-K");
    options.addElement(rhoOption);
    options.addElement(rankOption);
    return options.elements();
}
//****OPTIONS HERE SHOULD MATCH THOSE ADDED ABOVE****
/**
 * Parses a given list of options.
 *
 * -P &lt;rho&gt; <br>
 * Sets the vigilance (generalization) parameter rho.<p>
 *
 * -K <br>
 * Use the ranking-to-class conversion developed for ARAM.<p>
 *
 * @param options the list of options as an array of strings
 * @exception Exception if an option is not supported
 */
public void setOptions(String[] options) throws Exception {
    // getOptionPos is checked first because getOption consumes the flag.
    roa = (Utils.getOptionPos("P", options) >= 0) ? Double.parseDouble(Utils.getOption("P", options)) : roa;
    m_userankstoclass = (Utils.getOptionPos("K", options) >= 0);
    super.setOptions(options);
}
//****MORE OPTION PARSING STUFF****
/**
 * Gets the current settings of the classifier.
 *
 * Fix: this used to return the hard-coded string "-P 0.9 -K" regardless of
 * the actual field values; it now reflects the live configuration so that
 * setOptions(getOptions()) round-trips.
 *
 * @return an array of strings suitable for passing to setOptions
 */
public String[] getOptions() {
    List<String> options = new ArrayList<String>();
    options.add("-P");
    options.add(String.valueOf(roa));
    if (m_userankstoclass) {
        options.add("-K");
    }
    return options.toArray(new String[0]);
}
//****ANY INFORMATION LIKE NO. OF UNITS ETC PRINTED HERE
/**
 * Returns a short textual description of the classifier.
 *
 * @return a description of the classifier as a string.
 */
public String toString() {
    StringBuffer description = new StringBuffer();
    description.append("ML ARAM classifier");
    return description.toString();
}
//****MORE GUI RELATED STUFF AND PARAMETER ACCESS METHODS
// /**
// * Returns the tip text for this property
// * @return tip text for this property suitable for
// * displaying in the explorer/experimenter gui
// */
// public String useKernelEstimatorTipText() {
// return "Use a kernel estimator for numeric attributes rather than a "
// +"normal distribution.";
// }
// /**
// * Gets if kernel estimator is being used.
// *
// * @return Value of m_UseKernelEstimatory.
// */
// public boolean getUseKernelEstimator() {
//
// return m_UseKernelEstimator;
// }
//
// /**
// * Sets if kernel estimator is to be used.
// *
// * @param v Value to assign to m_UseKernelEstimatory.
// */
// public void setUseKernelEstimator(boolean v) {
//
// m_UseKernelEstimator = v;
// if (v) {
// setUseSupervisedDiscretization(false);
// }
// }
//
// /**
// * Returns the tip text for this property
// * @return tip text for this property suitable for
// * displaying in the explorer/experimenter gui
// */
// public String useSupervisedDiscretizationTipText() {
// return "Use supervised discretization to convert numeric attributes to nominal "
// +"ones.";
// }
//
// /**
// * Get whether supervised discretization is to be used.
// *
// * @return true if supervised discretization is to be used.
// */
// public boolean getUseSupervisedDiscretization() {
//
// return m_UseDiscretization;
// }
//
// /**
// * Set whether supervised discretization is to be used.
// *
// * @param newblah true if supervised discretization is to be used.
// */
// public void setUseSupervisedDiscretization(boolean newblah) {
//
// m_UseDiscretization = newblah;
// if (newblah) {
// setUseKernelEstimator(false);
// }
// }
/**
 * Updates the weights of a resonating category node. Feature weights take the
 * fuzzy AND (element-wise minimum) with the input; label weights are either
 * summed (weightblearnmethod == 0) or fuzzy-AND'ed like the features. With
 * learningRate == 1 (the default) this is fast learning: the (1 - learningRate)
 * term vanishes.
 *
 * NOTE(review): weightChange is only flagged by the label-weight loop; a
 * feature-only change returns 0 -- TODO confirm this is intended.
 *
 * @param data     complement-coded feature vector
 * @param labels   complement-coded label vector
 * @param category index of the node to update
 * @return 1 if the label weights changed, otherwise 0
 */
private double ARAMm_Update_Weights(double[] data, double[] labels,
        int category) {
    double weightChange = 0;
    for (int i = 0; i < numFeatures; i++) {
        if (data[i] < weightsA[category][i]) {
            weightsA[category][i] = (learningRate * data[i])
                    + (1 - learningRate) * weightsA[category][i];
        }
    }
    for (int i = 0; i < numClasses; i++) {
        if (weightblearnmethod == 0) {
            // Accumulate label evidence.
            weightsB[category][i] = labels[i] + weightsB[category][i];
            weightChange = 1;
        } else {
            // Normalised / fuzzy-AND label learning.
            if (labels[i] < weightsB[category][i]) {
                weightsB[category][i] = (learningRate * labels[i]) + (1 - learningRate) * weightsB[category][i];
                weightChange = 1;
            }
        }
    }
    return weightChange;
}
/**
 * Computes the ART match function: |min(Data, w)| / |Data| (fuzzy-AND overlap
 * relative to the input norm). Returns 0 for mismatched lengths or an
 * all-zero input.
 */
private double ART_Calculate_Match(double[] Data, double[] fweights) {
    if (Data.length != fweights.length) {
        return 0.0;
    }
    double overlap = 0;
    double inputNorm = 0;
    for (int j = 0; j < Data.length; j++) {
        double m = (Data[j] < fweights[j]) ? Data[j] : fweights[j];
        overlap += m;
        inputNorm += Data[j];
    }
    return (inputNorm == 0) ? 0.0 : overlap / inputNorm;
}
/**
 * Grows both weight matrices by one row and appends a fresh, uncommitted node:
 * all-ones in the feature layer (matches anything), all-zeros in the label layer.
 */
private void ARAMm_Add_New_Category() {
    int newSize = numCategories + 1;
    weightsA = Arrays.copyOf(weightsA, newSize);
    weightsB = Arrays.copyOf(weightsB, newSize);
    weightsA[numCategories] = new double[numFeatures];
    Arrays.fill(weightsA[numCategories], 1.0);
    weightsB[numCategories] = new double[numClasses];
    Arrays.fill(weightsB[numCategories], 0.0);
    numCategories = newSize;
}
/** Returns the sum of all entries of the array. */
private double sumArray(double[] arr) {
    double total = 0;
    for (double v : arr) {
        total += v;
    }
    return total;
}
/**
 * Command-line entry point: evaluates ARAMNetwork with the given options.
 *
 * @param argv the command-line options
 */
public static void main(String[] argv) {
    try {
        MultiLabelClassifier classifier = new ARAMNetwork();
        Evaluation.runExperiment(classifier, argv);
    } catch (Exception e) {
        e.printStackTrace();
        System.err.println(e.getMessage());
    }
}
@Override
public String getModel() {
    // Not implemented: no textual model representation is provided.
    return null;
}
@Override
public boolean isThreaded() {
    // Threading is not supported by this implementation.
    return false;
}
@Override
public void setThreaded(boolean setv) {
    // No-op: threading is not supported by this implementation.
}
@Override
public double[][] distributionForInstanceM(Instances i) throws Exception {
    // Batch prediction is not implemented; callers must use distributionForInstance.
    return null;
}
@Override
public void setDebug(boolean debug) {
    // No-op: debug output cannot be toggled on this implementation.
}
@Override
public boolean getDebug() {
    // Debug mode is permanently off (see setDebug, which ignores its argument).
    return false;
}
@Override
public String debugTipText() {
    // No GUI tool-tip is provided for the (unsupported) debug option.
    return null;
}
@Override
public Capabilities getCapabilities() {
    // NOTE(review): returning null may break weka code that queries capabilities
    // before training -- TODO confirm callers tolerate this.
    return null;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/neurofuzzy/ARAMNetworkClass.java
|
/*
*
*
* Adapted from NaiveBayes.java
*
* Copyright (C) 2016 Fernando Benites
* @author Fernando Benites
*/
package meka.classifiers.multilabel.neurofuzzy;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
import java.util.Arrays;
import java.util.HashMap;
import meka.classifiers.multilabel.*;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.SingleClassifierEnhancer;
import weka.classifiers.UpdateableClassifier;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Utils;
import meka.core.MLUtils;
import weka.core.WeightedInstancesHandler;
import weka.core.RevisionUtils;
/**
 * Abstract base class for the ARAM (Adaptive Resonance Associative Map)
 * multi-label neuro-fuzzy classifiers in this package.
 * <p>
 * Holds the shared network state — category weight matrices, the A/B-field
 * vigilance parameters {@code roa}/{@code rob}, the choice parameter
 * {@code alpha}, learning-rate settings, and bookkeeping for the neurons
 * activated during prediction — together with the common capability check.
 * Concrete subclasses provide dense or sparse weight representations and
 * implement the actual training and prediction routines.
 *
 * @author Fernando Benites
 */
public abstract class ARAMNetworkClass extends SingleClassifierEnhancer implements MultiLabelClassifierThreaded,
IncrementalMultiLabelClassifier{

    // Number of input features after complement coding; -1 until initialised.
    public int numFeatures = -1;
    // Number of label outputs after complement coding; -1 until initialised.
    public int numClasses = -1;
    // Relative activation-difference threshold used when selecting the group
    // of top-activated categories at prediction time.
    public double threshold=0.02;
    // Number of category nodes, including the single uncommitted one.
    int numCategories = 0;
    // Vigilance parameter of the A-field (features).
    double roa = 0.9;
    // Vigilance parameter of the B-field (labels).
    double rob = 1.0;
    // Choice parameter added to the weight-sum denominator in the
    // activation function.
    double alpha = 0.0001;
    // Per-category A-side (feature) weights, dense representation.
    double[][] weightsA = null;
    // Per-category B-side (label) weights, dense representation.
    double[][] weightsB = null;
    // Cached per-category sums of the A-side weights.
    double[] sweightsA =null;
    // A-weight sum of a fresh, uncommitted category.
    double sweightsA0=0;
    // Learning rate (1 = fast learning).
    int learningRate = 1;
    // How B-side weights are learned: 0 = accumulate, otherwise min-update.
    int weightblearnmethod= 0;
    // Hard cap on the number of categories the network may commit.
    int maxNumCategories = 20000;
    // When true, map the predicted ranking to a hard class assignment (-K).
    boolean m_userankstoclass=false;
    // When false, updateClassifier() in subclasses becomes a no-op.
    boolean learningphase=true;
    // Indices of the neurons activated by the most recent prediction.
    protected int[] neuronsactivated=null;
    // Activation values of those neurons.
    protected double[] neuronsactivity=null;
    // Optional training order over instance indices; natural order if null.
    List<Integer> order=null;
    // Number of instances classified so far.
    int nrinstclassified=0;
    // Free-form report of neuron activity, filled by subclasses.
    protected String activity_report="";

    /**
     * Checks every class attribute of the dataset against this classifier's
     * capabilities, enabling all class types first.
     *
     * @param D the dataset to test
     * @throws Exception if a class attribute fails the capability test
     */
    public void testCapabilities(Instances D) throws Exception {
        // get the classifier's capabilities and enable all class attributes
        Capabilities cap = getCapabilities();
        cap.enableAllClasses();
        // test each class attribute individually
        int L = D.classIndex();
        for(int j = 0; j < L; j++) {
            Attribute c = D.attribute(j);
            cap.testWithFail(c,true);
        }
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/neurofuzzy/ARAMNetworkSparse.java
|
/*
*
*
* Adapted from NaiveBayes.java
*
* Copyright (C) 2016 Fernando Benites
* @author Fernando Benites
*/
package meka.classifiers.multilabel.neurofuzzy;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Vector;
import java.util.Arrays;
import java.util.HashMap;
import meka.classifiers.multilabel.*;
import weka.classifiers.Classifier;
import meka.classifiers.multilabel.Evaluation;
import weka.classifiers.UpdateableClassifier;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Utils;
import meka.core.MLUtils;
import weka.core.WeightedInstancesHandler;
import weka.core.RevisionUtils;
/**
 * Multi-label ARAM (Adaptive Resonance Associative Map) classifier with a
 * sparse weight representation: category prototypes are stored in
 * {@code SparseVector}s using complement coding, which keeps memory usage
 * low for high-dimensional, sparse feature spaces.
 * <p>
 * Valid options are:<p>
 *
 * -P &lt;value&gt; <br>
 * Set the vigilance parameter rho of the feature (A) field.<p>
 *
 * -K <br>
 * Convert the predicted label ranking into a hard class assignment using
 * the ARAM-specific ranking-to-class heuristic.<p>
 *
 * @author Fernando Benites
 */
public class ARAMNetworkSparse extends ARAMNetworkClass {
//**** THIS IS WHERE CLASSIFIER WEIGHTS ETC GO ****
//define stuff like weight matrices, classifier parameters etc.
//e.g., protected double rho_a_bar=0.0;
SparseVector[] weightsA = null;
double[] sweightsA = null;
double sweightsA0;
SparseVector[] weightsB = null;
HashMap<String, Vector> hmclasses = null;
int snumFeatures=0;
int snumClasses=0;
int numinstances=0;
/**
 * Builds an ARAM network with known dimensionalities.
 *
 * @param fnumFeatures number of (complement-coded) input features
 * @param fnumClasses  number of (complement-coded) label outputs
 * @param fro          vigilance parameter rho
 * @param fthreshold   activation threshold used during prediction
 */
public ARAMNetworkSparse(int fnumFeatures, int fnumClasses, double fro, double fthreshold) {
    initARAM(fnumFeatures, fnumClasses, fro, fthreshold);
}
/** Default constructor; the network is lazily initialised in buildClassifier(). */
public ARAMNetworkSparse(){
}
/**
 * Initialises the network state for the given (complement-coded) feature
 * and class dimensionalities, creating the single initial uncommitted
 * category and clearing the label-combination index.
 *
 * @param fnumFeatures number of features after complement coding
 * @param fnumClasses  number of classes after complement coding
 * @param fro          vigilance parameter (not stored here; kept for
 *                     signature compatibility)
 * @param fthreshold   activation-difference threshold used at prediction time
 */
private void initARAM(int fnumFeatures, int fnumClasses, double fro, double fthreshold){
    numFeatures = fnumFeatures;
    snumFeatures = (int)(0.5*numFeatures);
    numClasses = fnumClasses;
    snumClasses= (int)(0.5*numClasses);
    threshold = fthreshold;
    weightsA = new SparseVector[1];
    weightsA[0] = new SparseVector(numFeatures);
    sweightsA = new double[1];
    // The uncommitted category's A-weights are implicitly all ones, so their
    // sum is simply the feature count (replaces the former O(n) increment loop).
    sweightsA[0] = numFeatures;
    sweightsA0 = sweightsA[0];
    weightsB = new SparseVector[1];
    weightsB[0] = new SparseVector(snumClasses);
    numCategories = 1;
    hmclasses = new HashMap<String, Vector>();
}
/**
 * Returns a string describing this classifier for the MEKA/WEKA GUIs.
 *
 * @return a description of the classifier suitable for
 * displaying in the explorer/experimenter gui
 */
public String globalInfo() {
    return "This is ARAM.";
}
/**
 * Trains the network on the given dataset by running the incremental
 * update once per instance (optionally in a caller-supplied order).
 * Lazily initialises the network dimensions from the data on first call.
 *
 * @param D set of instances serving as training data
 * @exception Exception if the classifier has not been generated
 * successfully
 */
public void buildClassifier(Instances D) throws Exception {
    int L = D.classIndex();
    // Complement coding doubles both the feature and the label dimensions.
    int featlength = (D.numAttributes() -L)*2;
    int numSamples = D.numInstances();
    int classlength = L * 2;
    if (this.order==null){
        // Default to the natural instance order.
        order = new ArrayList<Integer>();
        for (int j=0; j<D.numInstances();j++){
            order.add(j);
        }
    }
    if (numFeatures==-1){
        initARAM( featlength,classlength ,roa , threshold );
    }else{
        // NOTE(review): dimension mismatches are silently ignored here —
        // consider signalling an error instead; confirm intended behaviour.
        if (featlength != numFeatures) {
            return ;
        }
        if (classlength != numClasses) {
            return ;
        }}
    // Train incrementally, one instance at a time, in the chosen order.
    for(int i=0; i<D.numInstances();i++){
        Instance instance = D.get(order.get(i));
        updateClassifier(instance);
    }
    System.out.println("Training done, used "+numCategories+" neurons with rho ="+roa+".");
}
// ****THIS IS THE WEIGHT UPDATE ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
/**
 * Updates the network with a single training instance (fuzzy-ARTMAP-style
 * learning): complement-codes the input, activates the categories indexed
 * under the instance's exact label combination, searches for a resonating
 * category and either updates its weights or commits a new category.
 *
 * @param instance the new training instance to include in the model
 * @exception Exception if the instance could not be incorporated in
 * the model.
 */
public void updateClassifier(Instance instance) throws Exception {
    //called once for each instance.
    if(!learningphase){
        return;
    }
    int num_classes=(int) (snumClasses);
    int num_features=(int) (snumFeatures);
    double[] data = new double[num_features];
    double[] labels = new double[num_classes];
    int numChanges = 0;
    // Index of the last category, i.e. the uncommitted node.
    int numCategories_1=numCategories -1;
    numinstances+=1;
    if (!instance.classIsMissing()) {
        //Do the weight updates using the instance.
        double suminputA=0;
        double suminputB=0;
        // Read feature values; under complement coding each feature
        // contributes exactly 1 to the input sum, so suminputA counts them.
        for (int j = 0; j <num_features; j++) {
            data[j] = instance.value(num_classes+j);
            suminputA+=1;
        }
        for (int j = 0; j < num_classes ; j++) {
            labels[j] = instance.value(j);
            suminputB+=labels[j];
        }
        // Activate only categories stored under this exact label combination
        // (plus the uncommitted node), then visit them by activation order.
        SortPair[] cateacti = ARTActivateCategories(data,labels);
        java.util.Arrays.sort(cateacti);
        boolean resonance = false;
        int currentSortedIndex = 0;
        int currentCategory = -1;
        double matchA = 0;
        double matchB = 0;
        while (!resonance && currentSortedIndex<cateacti.length) {
            currentCategory = cateacti[currentSortedIndex]
                    .getOriginalIndex();
            if (currentCategory == numCategories_1) {
                // The uncommitted node always passes the vigilance test.
                matchB=1;
                matchA=1;
            }
            else{
                matchA = ART_Calculate_MatchA(data, weightsA[currentCategory],suminputA);
                if (weightsB[currentCategory].sum() == 0) {
                    matchB = 1;
                } else {
                    matchB = ART_Calculate_MatchB(labels,
                            weightsB[currentCategory],suminputB);
                }
            }
            if (matchA >= roa && matchB >= rob) {
                if (currentCategory == numCategories_1) {
                    if (currentSortedIndex == maxNumCategories) {
                        System.out
                                .println("WARNING: The maximum number of categories has been reached.");
                        resonance = true;
                    } else {
                        // Commit the uncommitted node: copy the complement-coded
                        // input into its A-weights, the labels into its B-weights,
                        // and index it under the label-combination key.
                        sweightsA[currentCategory]=0;
                        for (int j = 0; j < snumFeatures; j++) {
                            weightsA[currentCategory].put(j,data[j]);
                            sweightsA[currentCategory]+=data[j];
                        }
                        for (int j = snumFeatures; j < numFeatures; j++) {
                            double da=data[j-snumFeatures];
                            weightsA[currentCategory].put(j, da);
                            sweightsA[currentCategory]+=1-da;
                        }
                        for (int j = 0; j < weightsB[currentCategory].size(); j++) {
                            weightsB[currentCategory].put(j,labels[j]);
                        }
                        String s = Arrays.toString(labels);
                        if (hmclasses.containsKey(s)){
                            hmclasses.get(s).add(currentCategory);
                            hmclasses.put(s,hmclasses.get(s));
                        }else{
                            Vector v = new Vector();
                            v.add(currentCategory);
                            hmclasses.put(s,v);
                        }
                        ARAMm_Add_New_Category();
                        // Count the newly committed category as a change.
                        numChanges = numChanges + 1;
                        resonance = true;
                        break;
                    }
                } else {
                    // Resonating committed category: move its weights
                    // towards the current input.
                    double weightChange = ARAMm_Update_Weights(data,
                            labels, currentCategory);
                    if (weightChange == 1) {
                        numChanges += 1;
                    }
                    resonance = true;
                    break;
                }
            } else {
                // Vigilance failed: try the next-best category.
                currentSortedIndex += 1;
                resonance = false;
            }
        }
        if(!resonance && currentSortedIndex>=cateacti.length)
        {
            // No candidate resonated: commit the uncommitted node directly
            // (same commit procedure as above).
            sweightsA[numCategories_1]=0;
            for (int j = 0; j < snumFeatures; j++) {
                weightsA[numCategories_1].put(j, data[j]);
                sweightsA[numCategories_1]+=data[j];
            }
            for (int j = snumFeatures; j < numFeatures; j++) {
                double da=data[j-snumFeatures];
                weightsA[numCategories_1].put(j, da);
                sweightsA[numCategories_1]+=1-da;
            }
            for (int j = 0; j < weightsB[numCategories_1].size(); j++) {
                weightsB[numCategories_1].put(j, labels[j]);
            }
            String s = Arrays.toString(labels);
            if (hmclasses.containsKey(s)){
                hmclasses.get(s).add(numCategories_1);
                hmclasses.put(s,hmclasses.get(s));
            }else{
                Vector v = new Vector();
                v.add(numCategories_1);
                hmclasses.put(s,v);
            }
            ARAMm_Add_New_Category();
            numChanges = numChanges + 1;
        }
    }
}
//****THIS IS THE CLASSIFICATION ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
/**
 * Computes a per-label score (ranking) for the given test instance by
 * activating all committed categories, selecting the group of top-activated
 * ones (within {@code threshold} of the best, relative to the activation
 * spread) and blending their B-side label weights, weighted by normalised
 * activation.
 *
 * @param instance the instance to be classified
 * @return the label ranking, or a hard 0/1 assignment when the
 *         ranking-to-class option (-K) is set
 * @exception Exception if there is a problem generating the prediction
 */
public double[] distributionForInstance(Instance instance) throws Exception {
    int num_classes=(int) (snumClasses);
    int num_features=(int) (snumFeatures);
    double[] currentData = new double[numFeatures];
    double[] ranking = new double[num_classes];
    // Complement-code the input: [x, 1 - x].
    for (int j = 0; j < num_features; j++) {
        currentData[j] = instance.value(num_classes+j);
        currentData[num_features+j] = 1 - currentData[j];
    }
    // Robustness fix: with no committed category the activation array below
    // would be empty and indexing it would throw — return the zero ranking.
    if (numCategories < 2) {
        return ranking;
    }
    SortPair[] sortedActivations = ARTActivateCategories(currentData);
    java.util.Arrays.sort(sortedActivations);
    // Spread between the best and the worst committed activation
    // (numCategories - 2 is the last index of the length numCategories-1 array).
    double diff_act = sortedActivations[0].getValue()
            - sortedActivations[numCategories - 2].getValue();
    int largest_activ = 1;
    double activ_change = 0;
    // Count how many top categories lie within the relative threshold.
    for (int i = 1; i < sortedActivations.length; i++) {
        activ_change = (sortedActivations[0].getValue() - sortedActivations[i]
                .getValue())
                / sortedActivations[0].getValue();
        if (activ_change > threshold * diff_act) {
            break;
        }
        largest_activ = largest_activ + 1;
    }
    double[] best_matches = new double[largest_activ];
    java.util.Arrays.fill(best_matches, 1);
    for (int i = 0; i < largest_activ; i++) {
        best_matches[i] = sortedActivations[i].getValue();
    }
    double sum_mat = sumArray(best_matches);
    int currentCategory = 0;
    // Record which neurons fired and how strongly (exposed for inspection).
    this.neuronsactivated=new int[largest_activ];
    this.neuronsactivity=new double[largest_activ];
    for (int i = 0; i < largest_activ; i++) {
        this.neuronsactivity[i]=best_matches[i];
        best_matches[i] = best_matches[i] / sum_mat;
        currentCategory = sortedActivations[i].getOriginalIndex();
        this.neuronsactivated[i]=currentCategory;
        // Blend this category's label weights into the ranking.
        for (int j = 0; j < num_classes; j++) {
            ranking[j] = ranking[j]
                    + best_matches[i] * weightsB[currentCategory].get(j);
        }
    }
    if(m_userankstoclass) {
        return ARAMm_Ranking2Class(ranking);
    }
    return ranking;
}
/**
 * Converts a continuous label ranking into a hard 0/1 assignment: sorts the
 * scores, finds the largest gap between consecutive sorted scores, and
 * switches "on" every label ranked above that gap.
 *
 * @param rankings per-label scores
 * @return binary vector with 1 for every label above the largest score gap
 */
public double[] ARAMm_Ranking2Class(double[] rankings) {
    int columns=rankings.length;
    double[] classes= new double[columns ];
    SortPair[] sortedRanks = new SortPair[columns];
    for (int j=0;j<columns;j++){
        sortedRanks[j]= new SortPair(rankings[j],j);
    }
    java.util.Arrays.sort(sortedRanks);
    // Gaps between consecutive sorted scores; the stored index marks the cut
    // position after which labels are considered "off".
    SortPair[] change=new SortPair[columns-1];
    for(int j =1; j<columns;j++){
        change[j-1] = new SortPair(sortedRanks[j-1].getValue()-sortedRanks[j].getValue(),j);
    }
    java.util.Arrays.sort(change);
    // The largest gap comes first after sorting.
    int ind=change[0].getOriginalIndex();
    for (int j =0; j<ind;j++){
        classes[sortedRanks[j].getOriginalIndex()] = 1;
    }
    return classes;
}
/**
 * Computes the ART choice (activation) of every committed category for the
 * complement-coded input: |min(input, w)| / (alpha + |w|), using the cached
 * weight sums in {@code sweightsA}.
 *
 * @param Data complement-coded input of length {@code numFeatures}
 * @return one SortPair (activation, category index) per committed category
 */
private SortPair[] ARTActivateCategories(double[] Data) {
    // The last category (index numCategories-1) is the uncommitted node and
    // is deliberately excluded here.
    SortPair[] catacti = new SortPair[numCategories-1];
    double[] matchVector = new double[numFeatures];
    for (int i = 0; i < numCategories-1; i++) {
        double sumvector = 0;
        // First half: element-wise min of input and weight.
        for (int j = 0; j < (int)snumFeatures; j++) {
            double wa=weightsA[i].get(j);
            matchVector[j] = ((Data[j] < wa) ? Data[j]
                    : wa);
            sumvector += matchVector[j];
        }
        // Complement-coded half: min computed in (1 - x) space, using the
        // first half of the input vector.
        for (int j = snumFeatures; j < numFeatures; j++) {
            double wa=1-weightsA[i].get(j);
            double da=1-Data[j-snumFeatures];
            matchVector[j] = (((da) < wa) ? da: wa);
            sumvector += matchVector[j];
        }
        catacti[i] = new SortPair(sumvector / (alpha + sweightsA[i]), i);
    }
    return catacti;
}
/**
 * Activation restricted to the categories previously stored under the exact
 * label combination {@code labels}; when the combination is unseen, only
 * the uncommitted node is returned, with activation 1.
 *
 * @param Data   complement-coded input
 * @param labels label vector, used as key into {@code hmclasses}
 * @return SortPairs (activation, category index) of the candidate categories
 */
private SortPair[] ARTActivateCategories(double[] Data, double[] labels) {
    String s = Arrays.toString(labels);
    Vector lclasses = hmclasses.get(s);
    SortPair[] catacti = null;
    if (lclasses==null||lclasses.size()==0){
        // Unseen label combination: propose only the uncommitted node.
        catacti=new SortPair[1];
        catacti[0] = new SortPair(1,numCategories-1);
        return catacti;
    }
    catacti = new SortPair[lclasses.size()];
    double[] matchVector = new double[numFeatures];
    for (int i = 0; i < lclasses.size(); i++) {
        double sumvector = 0;
        int k = ((Integer)lclasses.get(i)).intValue();
        // First half: element-wise min of input and weight.
        for (int j = 0; j < snumFeatures; j++) {
            double wa=weightsA[k].get(j);
            matchVector[j] = ((Data[j] < wa) ? Data[j]
                    : wa);
            sumvector += matchVector[j];
        }
        // Complement-coded half, computed in (1 - x) space.
        for (int j = snumFeatures; j < numFeatures; j++) {
            double wa=1-weightsA[k].get(j);
            double da=1-Data[j-snumFeatures];
            matchVector[j] = (((da) < wa) ? da: wa);
            sumvector += matchVector[j];
        }
        catacti[i] = new SortPair(sumvector / (alpha + sweightsA[k]), k);
    }
    return catacti;
}
/**
 * Classifies the given test instance by picking the label with the largest
 * predicted score.
 * <p>
 * Fix: the NOMINAL case previously fell through into the NUMERIC branch
 * when no score was positive, returning {@code dist[0]} (a score) as if it
 * were a class index; it now reports a missing prediction instead.
 *
 * @param instance the instance to be classified
 * @return the predicted most likely class for the instance, or
 *         a missing value if no prediction is made
 * @exception Exception if an error occurred during the prediction
 */
public double classifyInstance(Instance instance) throws Exception {
    double[] dist = distributionForInstance(instance);
    if (dist == null) {
        throw new Exception("Null distribution predicted");
    }
    switch (instance.classAttribute().type()) {
    case Attribute.NOMINAL:
        double max = 0;
        int maxIndex = 0;
        for (int i = 0; i < dist.length; i++) {
            if (dist[i] > max) {
                maxIndex = i;
                max = dist[i];
            }
        }
        if (max > 0) {
            return maxIndex;
        }
        return Utils.missingValue();
    case Attribute.NUMERIC:
        return dist[0];
    default:
        return -1;
    }
}
// ****ANY OPTIONS/PARAMETERS GO HERE****
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
public Enumeration listOptions() {
    // Typed Vector avoids raw-type warnings; capacity matches the 2 options.
    Vector<Option> newVector = new Vector<Option>(2);
    newVector.addElement(
        new Option("\tChange generalization parameter Rho\n",
            "P", 0,"-P"));
    newVector.addElement(
        new Option("\tUse ranking to class function special dev. for ARAM.\n",
            "K", 0,"-K"));
    return newVector.elements();
}
//****OPTIONS HERE SHOULD MATCH THOSE ADDED ABOVE****
/**
 * Parses a given list of options. Valid options are:<p>
 *
 * -P &lt;value&gt; <br>
 * Set the vigilance parameter rho of the feature (A) field.<p>
 *
 * -K <br>
 * Convert the predicted ranking into a hard class assignment.<p>
 *
 * @param options the list of options as an array of strings
 * @exception Exception if an option is not supported
 */
public void setOptions(String[] options) throws Exception {
    // -P overrides the vigilance parameter only when present.
    roa = (Utils.getOptionPos("P",options) >= 0) ? Double.parseDouble(Utils.getOption("P", options)) : roa;
    // -K is a flag-style option: true exactly when present.
    m_userankstoclass= (Utils.getOptionPos("K",options) >= 0);
    super.setOptions(options);
}
//****MORE OPTION PARSING STUFF****
/**
 * Gets the current settings of the classifier.
 * <p>
 * Fix: previously returned the hard-coded string "-P 0.9 -K" regardless of
 * the actual configuration; it now reflects the live parameter values so
 * that {@code setOptions(getOptions())} round-trips.
 *
 * @return an array of strings suitable for passing to setOptions
 */
public String [] getOptions() {
    ArrayList<String> options = new ArrayList<String>();
    options.add("-P");
    options.add(String.valueOf(roa));
    if (m_userankstoclass) {
        options.add("-K");
    }
    return options.toArray(new String[0]);
}
//****ANY INFORMATION LIKE NO. OF UNITS ETC PRINTED HERE
/**
 * Returns a brief textual description of the classifier.
 *
 * @return a description of the classifier as a string.
 */
public String toString() {
    return "ML ARAM classifier";
}
//****MORE GUI RELATED STUFF AND PARAMETER ACCESS METHODS
// /**
// * Returns the tip text for this property
// * @return tip text for this property suitable for
// * displaying in the explorer/experimenter gui
// */
// public String useKernelEstimatorTipText() {
// return "Use a kernel estimator for numeric attributes rather than a "
// +"normal distribution.";
// }
// /**
// * Gets if kernel estimator is being used.
// *
// * @return Value of m_UseKernelEstimatory.
// */
// public boolean getUseKernelEstimator() {
//
// return m_UseKernelEstimator;
// }
//
// /**
// * Sets if kernel estimator is to be used.
// *
// * @param v Value to assign to m_UseKernelEstimatory.
// */
// public void setUseKernelEstimator(boolean v) {
//
// m_UseKernelEstimator = v;
// if (v) {
// setUseSupervisedDiscretization(false);
// }
// }
//
// /**
// * Returns the tip text for this property
// * @return tip text for this property suitable for
// * displaying in the explorer/experimenter gui
// */
// public String useSupervisedDiscretizationTipText() {
// return "Use supervised discretization to convert numeric attributes to nominal "
// +"ones.";
// }
//
// /**
// * Get whether supervised discretization is to be used.
// *
// * @return true if supervised discretization is to be used.
// */
// public boolean getUseSupervisedDiscretization() {
//
// return m_UseDiscretization;
// }
//
// /**
// * Set whether supervised discretization is to be used.
// *
// * @param newblah true if supervised discretization is to be used.
// */
// public void setUseSupervisedDiscretization(boolean newblah) {
//
// m_UseDiscretization = newblah;
// if (newblah) {
// setUseKernelEstimator(false);
// }
// }
/**
 * Updates the weights of a resonating category towards the current input
 * (fast learning when {@code learningRate} is 1): A-side weights move to
 * the fuzzy AND (element-wise min) of weight and complement-coded input,
 * while B-side label weights are either accumulated (method 0) or
 * min-updated.
 *
 * @param data     feature values of the instance (first, uncoded half)
 * @param labels   label vector of the instance
 * @param category index of the category to update
 * @return 1 when any B-side weight changed, else 0
 */
private double ARAMm_Update_Weights(double[] data, double[] labels,
        int category) {
    double weightChange = 0;
    // Recompute the cached A-weight sum while updating.
    sweightsA[category]=0;
    for (int i = 0; i < snumFeatures; i++) {
        double wa=weightsA[category].get(i);
        if (data[i] < wa ){
            wa = (learningRate * data[i])
                    + (1 - learningRate) * wa;
            weightsA[category].put(i, wa);
        }
        sweightsA[category]+=wa;
    }
    // Complement-coded half: update in (1 - x) space, store back as 1 - w.
    for (int i = snumFeatures; i < numFeatures; i++) {
        double wa=1-weightsA[category].get(i);
        double da=1-data[i-snumFeatures];
        if (da < wa ){
            wa = (learningRate * da)
                    + (1 - learningRate) * wa;
            weightsA[category].put(i, 1-wa);
        }
        sweightsA[category]+=wa;
    }
    for (int i = 0; i < snumClasses; i++) {
        double wb=weightsB[category].get(i);
        if(weightblearnmethod== 0){
            // Accumulate label counts.
            weightsB[category].put(i, labels[i] + wb);
            weightChange = 1;
        }else{
            // Min-update (normalising variant).
            if ( labels[i]< wb){
                weightsB[category].put(i, (learningRate * labels[i] )+ (1 - learningRate) *wb);
                weightChange = 1;
            }
        }
    }
    return weightChange;
}
/**
 * Computes the ART match of the complement-coded input against a
 * category's A-side weights: sum of element-wise minima over both halves,
 * divided by the precomputed input sum.
 *
 * @param Data     first-half feature values (length {@code snumFeatures})
 * @param fweights A-side weights of the candidate category
 * @param suminput precomputed input sum (equals the feature count under
 *                 complement coding)
 * @return match value; 0 when sizes disagree or {@code suminput} is 0
 */
private double ART_Calculate_MatchA(double[] Data, SparseVector fweights, double suminput
        ) {
    if (suminput == 0) {
        return 0.0;
    }
    int lnumFeatures = Data.length;
    if (lnumFeatures*2 != fweights.size()) {
        return 0.0;
    }
    double[] matchVector = new double[numFeatures];
    double summatch = 0;
    // First half: element-wise min of input and weight.
    for (int j = 0; j < lnumFeatures; j++) {
        double w = fweights.get(j);
        matchVector[j] = ((Data[j] < w) ? Data[j] :w);
        summatch += matchVector[j];
    }
    // Complement-coded half, derived from the first-half input values.
    for (int j = snumFeatures; j < numFeatures; j++) {
        double w =1- fweights.get(j);
        double da= 1-Data[j-snumFeatures];
        matchVector[j] = ((da< w) ? da :w);
        summatch += matchVector[j];
    }
    return summatch / suminput;
}
/**
 * Computes the ART match of the label vector against a category's B-side
 * weights: sum of element-wise minima divided by the precomputed input sum.
 *
 * @param Data     label vector of the current instance
 * @param fweights B-side weights of the candidate category
 * @param suminput precomputed sum of {@code Data}'s entries
 * @return match value; 0 when sizes disagree or {@code suminput} is 0
 */
private double ART_Calculate_MatchB(double[] Data, SparseVector fweights, double suminput
        ) {
    if (suminput == 0) {
        return 0.0;
    }
    int n = Data.length;
    if (n != fweights.size()) {
        return 0.0;
    }
    double overlap = 0;
    for (int j = 0; j < n; j++) {
        double w = fweights.get(j);
        overlap += (Data[j] < w) ? Data[j] : w;
    }
    return overlap / suminput;
}
/**
 * Appends a fresh uncommitted category: grows all per-category arrays by
 * one and initialises the new slot with empty sparse weight vectors. The
 * A-weight sum of an uncommitted node equals the full feature count,
 * cached in {@code sweightsA0}.
 */
private void ARAMm_Add_New_Category() {
    int idx = numCategories;
    weightsA = Arrays.copyOf(weightsA, idx + 1);
    sweightsA = Arrays.copyOf(sweightsA, idx + 1);
    weightsB = Arrays.copyOf(weightsB, idx + 1);
    weightsA[idx] = new SparseVector(numFeatures);
    weightsB[idx] = new SparseVector(snumClasses);
    sweightsA[idx] = sweightsA0;
    numCategories = idx + 1;
}
/** Returns the sum of the entries of {@code arr}. */
private double sumArray(double[] arr) {
    double total = 0.0;
    for (double value : arr) {
        total += value;
    }
    return total;
}
/**
 * Command-line entry point: runs the standard MEKA evaluation experiment.
 * <p>
 * Fix: previously instantiated {@code WvARAM} (apparent copy-paste from a
 * sibling file); the main of this class should evaluate
 * {@code ARAMNetworkSparse} itself, matching the pattern used by the other
 * ARAM variants.
 *
 * @param argv the options
 */
public static void main(String [] argv) {
    try {
        Evaluation.runExperiment(((MultiLabelClassifier) new ARAMNetworkSparse()), argv);
    } catch (Exception e) {
        e.printStackTrace();
        System.err.println(e.getMessage());
    }
}
@Override
public boolean isThreaded() {
    // Stub: threaded evaluation is not supported.
    return false;
}
@Override
public void setThreaded(boolean setv) {
    // Stub: threaded evaluation is not supported, so the flag is ignored.
}
@Override
public double[][] distributionForInstanceM(Instances i) throws Exception {
    // Stub: batch (multi-instance) prediction is not implemented; callers
    // must use distributionForInstance(Instance) per instance instead.
    return null;
}
@Override
public String getModel() {
    // Stub: no textual model representation is provided for this classifier.
    return null;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/neurofuzzy/ARAMNetworkSparseH.java
|
/*
*
*
* Adapted from NaiveBayes.java
*
* Copyright (C) 2016 Fernando Benites
* @author Fernando Benites
*/
package meka.classifiers.multilabel.neurofuzzy;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Vector;
import java.util.Arrays;
import java.util.HashMap;
import meka.classifiers.multilabel.*;
import weka.classifiers.Classifier;
import meka.classifiers.multilabel.Evaluation;
import weka.classifiers.UpdateableClassifier;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Utils;
import meka.core.MLUtils;
import weka.core.WeightedInstancesHandler;
import weka.core.RevisionUtils;
/**
 * Multi-label ARAM (Adaptive Resonance Associative Map) classifier storing
 * category prototypes in {@code HashMap}s — a sparse representation suited
 * to high-dimensional feature spaces with few non-zero entries.
 * <p>
 * Valid options are:<p>
 *
 * -P &lt;value&gt; <br>
 * Set the vigilance parameter rho of the feature (A) field.<p>
 *
 * -K <br>
 * Convert the predicted label ranking into a hard class assignment using
 * the ARAM-specific ranking-to-class heuristic.<p>
 *
 * @author Fernando Benites
 */
public class ARAMNetworkSparseH extends ARAMNetworkClass {
//**** THIS IS WHERE CLASSIFIER WEIGHTS ETC GO ****
//define stuff like weight matrices, classifier parameters etc.
//e.g., protected double rho_a_bar=0.0;
HashMap[] weightsA = null;
double[] sweightsA = null;
double sweightsA0;
HashMap[] weightsB = null;
HashMap<String, Vector> hmclasses = null;
int snumFeatures=0;
int snumClasses=0;
int numinstances=0;
/**
 * Builds an ARAM network with known dimensionalities.
 *
 * @param fnumFeatures number of (complement-coded) input features
 * @param fnumClasses  number of (complement-coded) label outputs
 * @param fro          vigilance parameter rho
 * @param fthreshold   activation threshold used during prediction
 */
public ARAMNetworkSparseH(int fnumFeatures, int fnumClasses, double fro, double fthreshold) {
    initARAM(fnumFeatures, fnumClasses, fro, fthreshold);
}
/** Default constructor; the network is lazily initialised in buildClassifier(). */
public ARAMNetworkSparseH(){
}
/**
 * Initialises the network state for the given (complement-coded) feature
 * and class dimensionalities, creating the single initial uncommitted
 * category and clearing the label-combination index.
 *
 * @param fnumFeatures number of features after complement coding
 * @param fnumClasses  number of classes after complement coding
 * @param fro          vigilance parameter (not stored here; kept for
 *                     signature compatibility)
 * @param fthreshold   activation-difference threshold used at prediction time
 */
private void initARAM(int fnumFeatures, int fnumClasses, double fro, double fthreshold){
    numFeatures = fnumFeatures;
    snumFeatures = (int)(0.5*numFeatures);
    numClasses = fnumClasses;
    snumClasses= (int)(0.5*numClasses);
    threshold = fthreshold;
    weightsA = new HashMap[1];
    weightsA[0] = new HashMap();
    sweightsA = new double[1];
    // The uncommitted category's A-weights are implicitly all ones, so their
    // sum is simply the feature count (replaces the former O(n) increment loop).
    sweightsA[0] = numFeatures;
    sweightsA0 = sweightsA[0];
    weightsB = new HashMap[1];
    weightsB[0] = new HashMap();
    numCategories = 1;
    hmclasses = new HashMap<String, Vector>();
}
/**
 * Returns a string describing this classifier for the MEKA/WEKA GUIs.
 *
 * @return a description of the classifier suitable for
 * displaying in the explorer/experimenter gui
 */
public String globalInfo() {
    return "This is ARAM.";
}
/**
 * Trains the network on the given dataset by running the incremental
 * update once per instance (optionally in a caller-supplied order).
 * Lazily initialises the network dimensions from the data on first call.
 *
 * @param D set of instances serving as training data
 * @exception Exception if the classifier has not been generated
 * successfully
 */
public void buildClassifier(Instances D) throws Exception {
    int L = D.classIndex();
    // Complement coding doubles both the feature and the label dimensions.
    int featlength = (D.numAttributes() -L)*2;
    int numSamples = D.numInstances();
    int classlength = L * 2;
    if (this.order==null){
        // Default to the natural instance order.
        order = new ArrayList<Integer>();
        for (int j=0; j<D.numInstances();j++){
            order.add(j);
        }
    }
    if (numFeatures==-1){
        initARAM( featlength,classlength ,roa , threshold );
    }else{
        // NOTE(review): dimension mismatches are silently ignored here —
        // consider signalling an error instead; confirm intended behaviour.
        if (featlength != numFeatures) {
            return ;
        }
        if (classlength != numClasses) {
            return ;
        }}
    // Train incrementally, one instance at a time, in the chosen order.
    for(int i=0; i<D.numInstances();i++){
        Instance instance = D.get(order.get(i));
        updateClassifier(instance);
    }
    System.out.println("Training done, used "+numCategories+" neurons with rho ="+roa+".");
}
// ****THIS IS THE WEIGHT UPDATE ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
/**
 * Incrementally trains the network on one instance: fuzzy-ART category
 * activation, vigilance (match) test, then either resonance learning on the
 * winning neuron or commitment of a new category neuron.
 *
 * @param instance the new training instance to include in the model
 * @exception Exception if the instance could not be incorporated
 */
public void updateClassifier(Instance instance) throws Exception {
    if (!learningphase) {
        return;
    }
    int num_classes = (int) (snumClasses);
    int num_features = (int) (snumFeatures);
    double[] data = new double[num_features];
    double[] labels = new double[num_classes];
    int numChanges = 0;
    int numCategories_1 = numCategories - 1; // index of the uncommitted neuron
    numinstances += 1;
    if (!instance.classIsMissing()) {
        double suminputA = 0;
        double suminputB = 0;
        // Feature part; with complement coding the input norm is constant,
        // hence suminputA just counts features.
        for (int j = 0; j < num_features; j++) {
            data[j] = instance.value(num_classes + j);
            suminputA += 1;
        }
        for (int j = 0; j < num_classes; j++) {
            labels[j] = instance.value(j);
            suminputB += labels[j];
        }
        // Only neurons already committed to this exact labelset compete
        // (plus the uncommitted neuron as fallback).
        SortPair[] cateacti = ARTActivateCategories(data, labels);
        java.util.Arrays.sort(cateacti);
        boolean resonance = false;
        int currentSortedIndex = 0;
        int currentCategory = -1;
        double matchA = 0;
        double matchB = 0;
        while (!resonance && currentSortedIndex < cateacti.length) {
            currentCategory = cateacti[currentSortedIndex].getOriginalIndex();
            if (currentCategory == numCategories_1) {
                // The uncommitted neuron always passes the vigilance test.
                matchB = 1;
                matchA = 1;
            } else {
                matchA = ART_Calculate_MatchA(data, weightsA[currentCategory], suminputA);
                if (sumArray(weightsB[currentCategory]) == 0) {
                    matchB = 1;
                } else {
                    matchB = ART_Calculate_MatchB(labels, weightsB[currentCategory], suminputB);
                }
            }
            if (matchA >= roa && matchB >= rob) {
                if (currentCategory == numCategories_1) {
                    if (currentSortedIndex == maxNumCategories) {
                        System.out
                                .println("WARNING: The maximum number of categories has been reached.");
                        resonance = true;
                    } else {
                        // Commit a new category neuron with complement-coded weights.
                        sweightsA[currentCategory] = 0;
                        for (int j = 0; j < snumFeatures; j++) {
                            weightsA[currentCategory].put(j, data[j]);
                            sweightsA[currentCategory] += data[j];
                        }
                        for (int j = snumFeatures; j < numFeatures; j++) {
                            double da = data[j - snumFeatures];
                            weightsA[currentCategory].put(j, da);
                            sweightsA[currentCategory] += 1 - da;
                        }
                        for (int j = 0; j < snumClasses; j++) {
                            weightsB[currentCategory].put(j, labels[j]);
                        }
                        // Register the neuron under its labelset key.
                        String s = Arrays.toString(labels);
                        if (hmclasses.containsKey(s)) {
                            hmclasses.get(s).add(currentCategory);
                            hmclasses.put(s, hmclasses.get(s));
                        } else {
                            Vector v = new Vector();
                            v.add(currentCategory);
                            hmclasses.put(s, v);
                        }
                        ARAMm_Add_New_Category();
                        numChanges = numChanges + 1;
                        resonance = true;
                        break;
                    }
                } else {
                    // Resonance on an existing neuron: learn.
                    double weightChange = ARAMm_Update_Weights(data,
                            labels, currentCategory);
                    if (weightChange == 1) {
                        numChanges += 1;
                    }
                    resonance = true;
                    break;
                }
            } else {
                // Vigilance failed: try the next-best neuron.
                currentSortedIndex += 1;
                resonance = false;
            }
        }
        if (!resonance && currentSortedIndex >= cateacti.length) {
            // No neuron resonated: commit a new category neuron.
            sweightsA[numCategories_1] = 0;
            for (int j = 0; j < snumFeatures; j++) {
                weightsA[numCategories_1].put(j, data[j]);
                sweightsA[numCategories_1] += data[j];
            }
            for (int j = snumFeatures; j < numFeatures; j++) {
                double da = data[j - snumFeatures];
                weightsA[numCategories_1].put(j, da);
                sweightsA[numCategories_1] += 1 - da;
            }
            for (int j = 0; j < snumClasses; j++) {
                weightsB[numCategories_1].put(j, labels[j]);
            }
            String s = Arrays.toString(labels);
            if (hmclasses.containsKey(s)) {
                hmclasses.get(s).add(numCategories_1);
                hmclasses.put(s, hmclasses.get(s));
            } else {
                Vector v = new Vector();
                v.add(numCategories_1);
                hmclasses.put(s, v);
            }
            ARAMm_Add_New_Category();
            numChanges = numChanges + 1;
        }
    }
}
//****THIS IS THE CLASSIFICATION ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
/**
 * Calculates the label ranking for the given test instance by a weighted
 * vote over the most activated category neurons ("voting" ARAM).
 *
 * FIX: removed the unused local array {@code dist} (allocated but never
 * read or written); no behavioral change.
 *
 * @param instance the instance to be classified
 * @return per-label ranking scores, or a 0/1 vector if
 *         {@code m_userankstoclass} is set
 * @exception Exception if there is a problem generating the prediction
 */
public double[] distributionForInstance(Instance instance) throws Exception {
    int num_classes = (int) (snumClasses);
    int num_features = (int) (snumFeatures);
    double[] currentData = new double[numFeatures];
    double[] ranking = new double[num_classes];
    // Complement-code the input: [x, 1-x].
    for (int j = 0; j < num_features; j++) {
        currentData[j] = instance.value(num_classes + j);
        currentData[num_features + j] = 1 - currentData[j];
    }
    SortPair[] sortedActivations = ARTActivateCategories(currentData);
    java.util.Arrays.sort(sortedActivations);
    double diff_act = sortedActivations[0].getValue()
            - sortedActivations[numCategories - 2].getValue();
    // Select the leading group of neurons whose relative activation drop
    // stays within threshold * diff_act.
    int largest_activ = 1;
    double activ_change = 0;
    for (int i = 1; i < sortedActivations.length; i++) {
        activ_change = (sortedActivations[0].getValue() - sortedActivations[i].getValue())
                / sortedActivations[0].getValue();
        if (activ_change > threshold * diff_act) {
            break;
        }
        largest_activ = largest_activ + 1;
    }
    double[] best_matches = new double[largest_activ];
    java.util.Arrays.fill(best_matches, 1);
    for (int i = 0; i < largest_activ; i++) {
        best_matches[i] = sortedActivations[i].getValue();
    }
    double sum_mat = sumArray(best_matches);
    int currentCategory = 0;
    this.neuronsactivated = new int[largest_activ];
    this.neuronsactivity = new double[largest_activ];
    for (int i = 0; i < largest_activ; i++) {
        this.neuronsactivity[i] = best_matches[i];
        // Normalize activations so the votes form a convex combination.
        best_matches[i] = best_matches[i] / sum_mat;
        currentCategory = sortedActivations[i].getOriginalIndex();
        this.neuronsactivated[i] = currentCategory;
        for (int j = 0; j < num_classes; j++) {
            ranking[j] = ranking[j]
                    + best_matches[i] * (Double) weightsB[currentCategory].get(j);
        }
    }
    if (m_userankstoclass) {
        return ARAMm_Ranking2Class(ranking);
    }
    return ranking;
}
/**
 * Thresholds a ranking into a 0/1 label vector: labels are sorted by score
 * and the cut is placed at the largest gap between consecutive scores.
 *
 * @param rankings per-label ranking scores
 * @return binary label assignment (1 = predicted positive)
 */
public double[] ARAMm_Ranking2Class(double[] rankings) {
    int n = rankings.length;
    double[] classes = new double[n];
    // Pair each score with its label index and sort.
    SortPair[] ranked = new SortPair[n];
    for (int j = 0; j < n; j++) {
        ranked[j] = new SortPair(rankings[j], j);
    }
    java.util.Arrays.sort(ranked);
    // Gaps between consecutive sorted scores; the biggest one defines the cut.
    SortPair[] gaps = new SortPair[n - 1];
    for (int j = 1; j < n; j++) {
        gaps[j - 1] = new SortPair(ranked[j - 1].getValue() - ranked[j].getValue(), j);
    }
    java.util.Arrays.sort(gaps);
    int cut = gaps[0].getOriginalIndex();
    for (int j = 0; j < cut; j++) {
        classes[ranked[j].getOriginalIndex()] = 1;
    }
    return classes;
}
/**
 * Computes the fuzzy-ART choice-function activation of every committed
 * category neuron for a complement-coded input:
 * |min(Data, w)| / (alpha + |w|), using the cached weight sums sweightsA.
 *
 * @param Data complement-coded input of length numFeatures
 * @return one SortPair (activation, neuron index) per committed neuron
 */
private SortPair[] ARTActivateCategories(double[] Data) {
    // The last neuron (index numCategories-1) is uncommitted and excluded here.
    SortPair[] catacti = new SortPair[numCategories - 1];
    double[] matchVector = new double[numFeatures];
    for (int i = 0; i < numCategories - 1; i++) {
        double sumvector = 0;
        // Lower half: fuzzy AND of input and weight.
        for (int j = 0; j < (int) snumFeatures; j++) {
            double wa = (Double) weightsA[i].get(j);
            matchVector[j] = ((Data[j] < wa) ? Data[j] : wa);
            sumvector += matchVector[j];
        }
        // Upper (complement) half: compare 1-x against 1-w.
        for (int j = snumFeatures; j < numFeatures; j++) {
            double wa = 1 - (Double) weightsA[i].get(j);
            double da = 1 - Data[j - snumFeatures];
            matchVector[j] = (((da) < wa) ? da : wa);
            sumvector += matchVector[j];
        }
        catacti[i] = new SortPair(sumvector / (alpha + sweightsA[i]), i);
    }
    return catacti;
}
/**
 * Training-time activation: only neurons already committed to exactly this
 * labelset compete. If none exists, the uncommitted neuron is returned with
 * activation 1 so a new category will be created.
 *
 * @param Data   complement-coded input features
 * @param labels label vector; its Arrays.toString form is the labelset key
 * @return SortPairs of (activation, neuron index) for the candidate neurons
 */
private SortPair[] ARTActivateCategories(double[] Data, double[] labels) {
    String s = Arrays.toString(labels);
    Vector lclasses = hmclasses.get(s);
    SortPair[] catacti = null;
    if (lclasses == null || lclasses.size() == 0) {
        // Unseen labelset: fall back to the uncommitted neuron.
        catacti = new SortPair[1];
        catacti[0] = new SortPair(1, numCategories - 1);
        return catacti;
    }
    catacti = new SortPair[lclasses.size()];
    double[] matchVector = new double[numFeatures];
    for (int i = 0; i < lclasses.size(); i++) {
        double sumvector = 0;
        int k = ((Integer) lclasses.get(i)).intValue();
        for (int j = 0; j < snumFeatures; j++) {
            double wa = (Double) weightsA[k].get(j);
            matchVector[j] = ((Data[j] < wa) ? Data[j] : wa);
            sumvector += matchVector[j];
        }
        // Complement half.
        for (int j = snumFeatures; j < numFeatures; j++) {
            double wa = 1 - (Double) weightsA[k].get(j);
            double da = 1 - Data[j - snumFeatures];
            matchVector[j] = (((da) < wa) ? da : wa);
            sumvector += matchVector[j];
        }
        // NOTE: the original index stored here is the neuron id k, not the loop index.
        catacti[i] = new SortPair(sumvector / (alpha + sweightsA[k]), k);
    }
    return catacti;
}
/**
 * Classifies the given test instance.
 *
 * FIX: for NOMINAL class attributes with an all-zero distribution, control
 * previously fell through into the NUMERIC case and wrongly returned
 * {@code dist[0]}; the intended missing-value return (present but commented
 * out in the original) is restored via {@link Utils#missingValue()}.
 *
 * @param instance the instance to be classified
 * @return the predicted most likely class for the instance, or a missing
 *         value if no prediction can be made
 * @exception Exception if an error occurred during the prediction
 */
public double classifyInstance(Instance instance) throws Exception {
    double[] dist = distributionForInstance(instance);
    if (dist == null) {
        throw new Exception("Null distribution predicted");
    }
    switch (instance.classAttribute().type()) {
        case Attribute.NOMINAL:
            double max = 0;
            int maxIndex = 0;
            for (int i = 0; i < dist.length; i++) {
                if (dist[i] > max) {
                    maxIndex = i;
                    max = dist[i];
                }
            }
            if (max > 0) {
                return maxIndex;
            }
            return Utils.missingValue();
        case Attribute.NUMERIC:
            return dist[0];
        default:
            return -1;
    }
}
// ****ANY OPTIONS/PARAMETERS GO HERE****
/**
 * Returns an enumeration describing the available options (-P rho, -K
 * ranking-to-class).
 *
 * @return an enumeration of all the available options
 */
public Enumeration listOptions() {
    Vector result = new Vector(2);
    result.addElement(new Option("\tChange generalization parameter Rho\n", "P", 0, "-P"));
    result.addElement(new Option("\tUse ranking to class function special dev. for ARAM.\n", "K", 0, "-K"));
    return result.elements();
}
//****OPTIONS HERE SHOULD MATCH THOSE ADDED ABOVE****
/**
 * Parses a given list of options. Valid options are:<p>
 *
 * -P &lt;rho&gt; <br>
 * Set the generalization (vigilance) parameter rho.<p>
 *
 * -K <br>
 * Use the ranking-to-class thresholding function.<p>
 *
 * @param options the list of options as an array of strings
 * @exception Exception if an option is not supported
 */
public void setOptions(String[] options) throws Exception {
    // Only overwrite rho when -P was actually supplied.
    if (Utils.getOptionPos("P", options) >= 0) {
        roa = Double.parseDouble(Utils.getOption("P", options));
    }
    m_userankstoclass = (Utils.getOptionPos("K", options) >= 0);
    super.setOptions(options);
}
//****MORE OPTION PARSING STUFF****
/**
 * Gets the current settings of the classifier.
 *
 * FIX: previously returned the hard-coded defaults {@code "-P 0.9 -K"}
 * regardless of configuration, breaking the setOptions/getOptions
 * round-trip; it now reflects the live field values.
 *
 * @return an array of strings suitable for passing to setOptions
 */
public String[] getOptions() {
    if (m_userankstoclass) {
        return new String[]{"-P", String.valueOf(roa), "-K"};
    }
    return new String[]{"-P", String.valueOf(roa)};
}
//****ANY INFORMATION LIKE NO. OF UNITS ETC PRINTED HERE
/**
 * Returns a short description of the classifier. The original built the
 * same constant string through a StringBuffer; the dead commented-out
 * model-dump code has been dropped.
 *
 * @return a description of the classifier as a string
 */
public String toString() {
    return "ML ARAM classifier";
}
//****MORE GUI RELATED STUFF AND PARAMETER ACCESS METHODS
// /**
// * Returns the tip text for this property
// * @return tip text for this property suitable for
// * displaying in the explorer/experimenter gui
// */
// public String useKernelEstimatorTipText() {
// return "Use a kernel estimator for numeric attributes rather than a "
// +"normal distribution.";
// }
// /**
// * Gets if kernel estimator is being used.
// *
// * @return Value of m_UseKernelEstimatory.
// */
// public boolean getUseKernelEstimator() {
//
// return m_UseKernelEstimator;
// }
//
// /**
// * Sets if kernel estimator is to be used.
// *
// * @param v Value to assign to m_UseKernelEstimatory.
// */
// public void setUseKernelEstimator(boolean v) {
//
// m_UseKernelEstimator = v;
// if (v) {
// setUseSupervisedDiscretization(false);
// }
// }
//
// /**
// * Returns the tip text for this property
// * @return tip text for this property suitable for
// * displaying in the explorer/experimenter gui
// */
// public String useSupervisedDiscretizationTipText() {
// return "Use supervised discretization to convert numeric attributes to nominal "
// +"ones.";
// }
//
// /**
// * Get whether supervised discretization is to be used.
// *
// * @return true if supervised discretization is to be used.
// */
// public boolean getUseSupervisedDiscretization() {
//
// return m_UseDiscretization;
// }
//
// /**
// * Set whether supervised discretization is to be used.
// *
// * @param newblah true if supervised discretization is to be used.
// */
// public void setUseSupervisedDiscretization(boolean newblah) {
//
// m_UseDiscretization = newblah;
// if (newblah) {
// setUseKernelEstimator(false);
// }
// }
/**
 * Fuzzy-ART resonance learning for one category neuron: feature weights move
 * toward the fuzzy AND of input and weight (rate {@code learningRate}), and
 * the cached weight sum sweightsA is rebuilt. Label weights are either
 * accumulated (weightblearnmethod == 0) or learned with the same rate.
 * (The misplaced "Main method" javadoc that used to sit here described
 * {@code main}, not this method.)
 *
 * @param data     complement-coded feature input (first half only)
 * @param labels   label vector
 * @param category index of the resonating neuron
 * @return 1 if any weight changed, 0 otherwise
 */
private double ARAMm_Update_Weights(double[] data, double[] labels,
        int category) {
    double weightChange = 0;
    sweightsA[category] = 0;
    for (int i = 0; i < snumFeatures; i++) {
        double wa = (Double) weightsA[category].get(i);
        if (data[i] < wa) {
            wa = (learningRate * data[i])
                    + (1 - learningRate) * wa;
            weightsA[category].put(i, wa);
        }
        sweightsA[category] += wa;
    }
    // Complement half: learn on 1-w vs 1-x, store back as 1-wa.
    for (int i = snumFeatures; i < numFeatures; i++) {
        double wa = 1 - (Double) weightsA[category].get(i);
        double da = 1 - data[i - snumFeatures];
        if (da < wa) {
            wa = (learningRate * da)
                    + (1 - learningRate) * wa;
            weightsA[category].put(i, 1 - wa);
        }
        sweightsA[category] += wa;
    }
    for (int i = 0; i < snumClasses; i++) {
        double wb = (Double) weightsB[category].get(i);
        if (weightblearnmethod == 0) {
            // Accumulate label counts (voting scheme).
            weightsB[category].put(i, labels[i] + wb);
            weightChange = 1;
        } else {
            // Normalized learning.
            if (labels[i] < wb) {
                weightsB[category].put(i, (learningRate * labels[i]) + (1 - learningRate) * wb);
                weightChange = 1;
            }
        }
    }
    return weightChange;
}
/**
 * Vigilance match on the feature side: |min(Data, w)| / |Data| over the
 * complement-coded vector. Returns 0 when the input sum is zero or when the
 * weight map does not have exactly twice as many entries as Data (sanity
 * check on the complement coding).
 *
 * @param Data     first (non-complement) half of the input features
 * @param fweights category weight map over all numFeatures indices
 * @param suminput |Data| of the complement-coded input (precomputed)
 * @return match value in [0, 1]
 */
private double ART_Calculate_MatchA(double[] Data, HashMap fweights, double suminput
        ) {
    if (suminput == 0) {
        return 0.0;
    }
    int lnumFeatures = Data.length;
    if (lnumFeatures * 2 != fweights.size()) {
        return 0.0;
    }
    double[] matchVector = new double[numFeatures];
    double summatch = 0;
    for (int j = 0; j < lnumFeatures; j++) {
        double w = (Double) fweights.get(j);
        matchVector[j] = ((Data[j] < w) ? Data[j] : w);
        summatch += matchVector[j];
    }
    // Complement half: compare 1-x against 1-w.
    for (int j = snumFeatures; j < numFeatures; j++) {
        double w = 1 - (Double) fweights.get(j);
        double da = 1 - Data[j - snumFeatures];
        matchVector[j] = ((da < w) ? da : w);
        summatch += matchVector[j];
    }
    return summatch / suminput;
}
/**
 * Vigilance match on the label side: |min(Data, w)| / |Data|. Returns 0
 * when the input sum is zero or the weight map size does not match.
 *
 * @param Data     label vector
 * @param fweights category label-weight map
 * @param suminput |Data| (precomputed)
 * @return match value in [0, 1]
 */
private double ART_Calculate_MatchB(double[] Data, HashMap fweights, double suminput) {
    if (suminput == 0) {
        return 0.0;
    }
    int n = Data.length;
    if (n != fweights.size()) {
        return 0.0;
    }
    // Accumulate the fuzzy AND directly; the per-element scratch array the
    // original kept was never read elsewhere.
    double summatch = 0;
    for (int j = 0; j < n; j++) {
        double w = (Double) fweights.get(j);
        summatch += (Data[j] < w) ? Data[j] : w;
    }
    return summatch / suminput;
}
/**
 * Grows every per-neuron array by one and initializes the new slot as an
 * uncommitted neuron (empty weight maps, weight sum of an all-ones vector).
 */
private void ARAMm_Add_New_Category() {
    int newSize = numCategories + 1;
    weightsA = Arrays.copyOf(weightsA, newSize);
    sweightsA = Arrays.copyOf(sweightsA, newSize);
    weightsB = Arrays.copyOf(weightsB, newSize);
    weightsA[numCategories] = new HashMap();
    weightsB[numCategories] = new HashMap();
    sweightsA[numCategories] = sweightsA0;
    numCategories = newSize;
}
/**
 * Sums all entries of the array.
 *
 * @param arr values to sum
 * @return the sum
 */
private double sumArray(double[] arr) {
    double total = 0;
    for (double v : arr) {
        total += v;
    }
    return total;
}
/**
 * Sums all Double values of the map. HashMap.values() iterates in the same
 * order as keySet(), so the floating-point summation order matches the
 * original keySet-then-get implementation.
 *
 * @param arr map whose values are Doubles
 * @return the sum of the values
 */
private double sumArray(HashMap arr) {
    double total = 0;
    for (Object value : arr.values()) {
        total += (Double) value;
    }
    return total;
}
/**
 * Command-line entry point: runs a MEKA evaluation experiment.
 * NOTE(review): this instantiates WvARAM (presumably a voting wrapper)
 * rather than ARAMNetworkSparseH itself — looks intentional, but confirm.
 *
 * @param argv the command-line options
 */
public static void main(String[] argv) {
    try {
        Evaluation.runExperiment(new WvARAM(), argv);
    } catch (Exception e) {
        e.printStackTrace();
        System.err.println(e.getMessage());
    }
}
/** {@inheritDoc} Threading is not supported by this implementation. */
@Override
public boolean isThreaded() {
    return false;
}
/** {@inheritDoc} No-op: threading is not supported. */
@Override
public void setThreaded(boolean setv) {
    // intentionally empty: threading is not supported
}
/** {@inheritDoc} Batch prediction is not implemented; always returns null. */
@Override
public double[][] distributionForInstanceM(Instances i) throws Exception {
    return null;
}
/** {@inheritDoc} No textual model representation is provided; always returns null. */
@Override
public String getModel() {
    return null;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/neurofuzzy/ARAMNetworkSparseHT.java
|
/*
*
*
* Adapted from NaiveBayes.java
*
* Copyright (C) 2016 Fernando Benites
* @author Fernando Benites
*/
package meka.classifiers.multilabel.neurofuzzy;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.Vector;
import meka.classifiers.multilabel.Evaluation;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.Utils;
/**
 * ML-ARAM multi-label classifier with sparse, hashed weight storage.
 * <p>
 * Implements a fuzzy Adaptive Resonance Associative Map (ARAM) neural
 * network for multi-label classification. Inputs are complement-coded and
 * category-neuron weights are kept in hash maps, which is efficient for
 * high-dimensional sparse data. (This file was adapted from the Weka
 * NaiveBayes.java template; the previous javadoc still described Naive
 * Bayes and did not apply to this class.)
 * <p>
 * Valid options are:<p>
 *
 * -P &lt;rho&gt; <br>
 * Set the vigilance (generalization) parameter rho.<p>
 *
 * -K <br>
 * Use the ranking-to-class thresholding function.<p>
 *
 * @author Len Trigg (trigg@cs.waikato.ac.nz) (original template)
 * @author Eibe Frank (eibe@cs.waikato.ac.nz) (original template)
 * @author Fernando Benites (ARAM adaptation)
 * @version $Revision: 1.16 $
 */
public class ARAMNetworkSparseHT extends ARAMNetworkClass {

    //**** THIS IS WHERE CLASSIFIER WEIGHTS ETC GO ****
    // Sparse feature-side weights: one HashMap per category neuron (index -> weight).
    HashMap[] weightsA = null;
    // Per-neuron set of complement-coded (upper-half) indices explicitly stored in weightsA.
    HashSet[] upweightsA = null;
    // Per-neuron cached sum of weightsA values (denominator of the choice function).
    double[] sweightsA = null;
    // Weight sum of an uncommitted neuron (all-ones vector).
    double sweightsA0;
    // Label-side weights: one HashMap per category neuron (label index -> weight).
    HashMap[] weightsB = null;
    // Labelset (string key) -> indices of category neurons committed to it.
    HashMap<String, Vector> hmclasses = null;
    // Non-complement-coded feature count (numFeatures / 2).
    int snumFeatures = 0;
    // Non-complement-coded class count (numClasses / 2).
    int snumClasses = 0;
    // Number of training instances seen so far.
    int numinstances = 0;
    // Debug counter referenced only from commented-out diagnostics.
    int activated = 0;
    /**
     * Creates and initializes a network with the given dimensions.
     *
     * @param fnumFeatures number of (complement-coded) input features
     * @param fnumClasses  number of (complement-coded) classes
     * @param fro          vigilance parameter rho
     * @param fthreshold   activation-spread threshold used at prediction time
     */
    public ARAMNetworkSparseHT(int fnumFeatures, int fnumClasses, double fro, double fthreshold) {
        initARAM(fnumFeatures, fnumClasses, fro, fthreshold);
    }

    /** Default constructor; the network is initialized lazily in buildClassifier(). */
    public ARAMNetworkSparseHT() {
    }
private void initARAM(int fnumFeatures, int fnumClasses, double fro, double fthreshold){
numFeatures = fnumFeatures;
snumFeatures = (int)(0.5*numFeatures);
numClasses = fnumClasses;
snumClasses= (int)(0.5*numClasses);
threshold = fthreshold;
weightsA = new HashMap[1];
weightsA[0] = new HashMap();
upweightsA = new HashSet[1];
upweightsA[0] = new HashSet();
sweightsA = new double[1];
sweightsA[0]=0;
for(int i=0;i<numFeatures;i++){
sweightsA[0]+=1;
}
sweightsA0=sweightsA[0];
weightsB = new HashMap[1];
weightsB[0] = new HashMap();
numCategories = 1;
hmclasses = new HashMap<String, Vector>();
}
/**
* Returns a string describing this classifier
* @return a description of the classifier suitable for
* displaying in the explorer/experimenter gui.
* ****MODIFY WITH CORRECT INFORMATION****
*/
public String globalInfo() {
return "This is ARAM.";
}
    /**
     * Trains the network on the given dataset by feeding each instance
     * through {@link #updateClassifier(Instance)} in the order given by
     * {@code order} (identity order if none was set).
     *
     * @param D set of instances serving as training data; class attributes
     *          (labels) occupy indices 0..classIndex()-1, MEKA-style
     * @exception Exception if the classifier could not be built
     */
    public void buildClassifier(Instances D) throws Exception {
        int L = D.classIndex();
        // Features and classes are complement-coded, hence the factor 2.
        int featlength = (D.numAttributes() - L) * 2;
        int numSamples = D.numInstances();
        int classlength = L * 2;
        if (this.order == null) {
            order = new ArrayList<Integer>();
            for (int j = 0; j < D.numInstances(); j++) {
                order.add(j);
            }
        }
        if (numFeatures == -1) {
            // First call: size the network from the data.
            initARAM(featlength, classlength, roa, threshold);
        } else {
            // Re-invocation with mismatched dimensions is silently ignored.
            if (featlength != numFeatures) {
                return;
            }
            if (classlength != numClasses) {
                return;
            }
        }
        for (int i = 0; i < D.numInstances(); i++) {
            Instance instance = D.get(order.get(i));
            updateClassifier(instance);
        }
        System.out.println("Training done, used "+numCategories+" neurons with rho ="+roa+".");
    }
    // ****THIS IS THE WEIGHT UPDATE ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
    /**
     * Incrementally trains the network on one instance, sparse variant: only
     * non-zero attribute values are read (via Instance.index/value), inputs
     * and labels are kept in hash maps, and the feature-side vigilance is
     * derived from the activation's raw value instead of a separate pass.
     *
     * @param instance the new training instance to include in the model
     * @exception Exception if the instance could not be incorporated
     */
    public void updateClassifier(Instance instance) throws Exception {
        if (!learningphase) {
            return;
        }
        int num_classes = (int) (snumClasses);
        HashMap<Integer, Double> data = new HashMap<Integer, Double>();
        HashMap<Integer, Double> labels = new HashMap<Integer, Double>();
        int numChanges = 0;
        int numCategories_1 = numCategories - 1; // index of the uncommitted neuron
        numinstances += 1;
        if (!instance.classIsMissing()) {
            // Split the sparse instance into label and feature maps,
            // skipping zero values.
            for (Integer tj = 0; tj < instance.numValues(); tj++) {
                int j = instance.index(tj);
                double da = instance.value(j);
                if (da == 0) {
                    continue;
                }
                if (j < num_classes) {
                    labels.put(j, da);
                } else {
                    data.put(j - num_classes, da);
                }
            }
            // Only neurons committed to this exact labelset compete.
            SortPair2[] cateacti = ARTActivateCategories(data, labels);
            java.util.Arrays.sort(cateacti);
            boolean resonance = false;
            int currentSortedIndex = 0;
            int currentCategory = -1;
            double matchA = 0;
            while (!resonance && currentSortedIndex < cateacti.length) {
                currentCategory = cateacti[currentSortedIndex].getOriginalIndex();
                if (currentCategory == numCategories_1) {
                    // The uncommitted neuron always passes the vigilance test.
                    matchA = 1;
                } else {
                    // Match ratio recovered from the raw activation numerator.
                    matchA = (cateacti[currentSortedIndex].getRawValue() / snumFeatures);
                }
                if (matchA >= roa) {
                    if (currentCategory == numCategories_1) {
                        if (currentSortedIndex == maxNumCategories) {
                            System.out
                                    .println("WARNING: The maximum number of categories has been reached.");
                            resonance = true;
                        } else {
                            // Commit a new category neuron: store value and
                            // complement entry for each non-zero feature.
                            sweightsA[currentCategory] = 0;
                            Set<Integer> s1 = data.keySet();
                            for (Integer j : s1) {
                                Double da = data.get(j);
                                weightsA[currentCategory].put(j, da);
                                weightsA[currentCategory].put(j + snumFeatures, da);
                                upweightsA[currentCategory].add(j + snumFeatures);
                            }
                            // Rebuild the cached weight sum; absent upper-half
                            // entries implicitly contribute 1 each.
                            Set<Integer> s2 = weightsA[currentCategory].keySet();
                            int count = 0;
                            for (int j : s2) {
                                double da = (Double) weightsA[currentCategory].get(j);
                                if (j < snumFeatures) {
                                    sweightsA[currentCategory] += da;
                                } else {
                                    sweightsA[currentCategory] += 1 - da;
                                    count += 1;
                                }
                            }
                            sweightsA[currentCategory] += snumFeatures - count;
                            s1 = labels.keySet();
                            for (int j : s1) {
                                weightsB[currentCategory].put(j, labels.get(j));
                            }
                            // Register the neuron under its labelset key.
                            String s = labels.keySet().toString();
                            if (hmclasses.containsKey(s)) {
                                hmclasses.get(s).add(currentCategory);
                                hmclasses.put(s, hmclasses.get(s));
                            } else {
                                Vector<Integer> v = new Vector<Integer>();
                                v.add(currentCategory);
                                hmclasses.put(s, v);
                            }
                            ARAMm_Add_New_Category();
                            numChanges = numChanges + 1;
                            resonance = true;
                            break;
                        }
                    } else {
                        // Resonance on an existing neuron: learn.
                        double weightChange = ARAMm_Update_Weights(data,
                                labels, currentCategory);
                        if (weightChange == 1) {
                            numChanges += 1;
                        }
                        resonance = true;
                        break;
                    }
                } else {
                    // Vigilance failed: try the next-best neuron.
                    currentSortedIndex += 1;
                    resonance = false;
                }
            }
            if (!resonance && currentSortedIndex >= cateacti.length) {
                // No neuron resonated: commit a new category neuron.
                sweightsA[numCategories_1] = 0;
                Set<Integer> s1 = data.keySet();
                int count = 0;
                for (int j : s1) {
                    double da = data.get(j);
                    weightsA[numCategories_1].put(j, da);
                    sweightsA[numCategories_1] += da;
                    weightsA[numCategories_1].put(j + snumFeatures, da);
                    sweightsA[numCategories_1] += 1 - da;
                    upweightsA[numCategories_1].add(j + snumFeatures);
                    count += 1;
                }
                sweightsA[numCategories_1] += snumFeatures - count;
                s1 = labels.keySet();
                for (int j : s1) {
                    weightsB[numCategories_1].put(j, labels.get(j));
                }
                String s = labels.keySet().toString();
                if (hmclasses.containsKey(s)) {
                    hmclasses.get(s).add(numCategories_1);
                    hmclasses.put(s, hmclasses.get(s));
                } else {
                    Vector<Integer> v = new Vector<Integer>();
                    v.add(numCategories_1);
                    hmclasses.put(s, v);
                }
                ARAMm_Add_New_Category();
                numChanges = numChanges + 1;
            }
        }
    }
    //****THIS IS THE CLASSIFICATION ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
    /**
     * Calculates the label ranking for the given test instance by a weighted
     * vote over the most activated category neurons; the sparse activation
     * routine reads the instance directly, so no dense copy is built.
     *
     * @param instance the instance to be classified
     * @return per-label ranking scores, or a 0/1 vector if
     *         {@code m_userankstoclass} is set
     * @exception Exception if there is a problem generating the prediction
     */
    public double[] distributionForInstance(Instance instance) throws Exception {
        int num_classes = (int) (snumClasses);
        double[] ranking = new double[num_classes];
        SortPair[] sortedActivations = ARTActivateCategories(instance);
        java.util.Arrays.sort(sortedActivations);
        double s0 = sortedActivations[0].getValue();
        double diff_act = s0
                - sortedActivations[numCategories - 2].getValue();
        // Select the leading group of neurons whose relative activation drop
        // stays within threshold * diff_act.
        int largest_activ = 1;
        double activ_change = 0;
        for (int i = 1; i < sortedActivations.length; i++) {
            activ_change = (s0 - sortedActivations[i].getValue())
                    / s0;
            if (activ_change > threshold * diff_act) {
                break;
            }
            largest_activ = largest_activ + 1;
        }
        double[] best_matches = new double[largest_activ];
        java.util.Arrays.fill(best_matches, 1);
        best_matches[0] = s0;
        for (int i = 1; i < largest_activ; i++) {
            best_matches[i] = sortedActivations[i].getValue();
        }
        double sum_mat = sumArray(best_matches);
        int currentCategory = 0;
        this.neuronsactivated = new int[largest_activ];
        this.neuronsactivity = new double[largest_activ];
        for (int i = 0; i < largest_activ; i++) {
            this.neuronsactivity[i] = best_matches[i];
            // Normalize activations so the votes form a convex combination.
            best_matches[i] = best_matches[i] / sum_mat;
            currentCategory = sortedActivations[i].getOriginalIndex();
            this.neuronsactivated[i] = currentCategory;
            // Only labels the neuron actually stores contribute to the vote.
            Set<Integer> s1 = weightsB[currentCategory].keySet();
            for (int j : s1) {
                ranking[j] = ranking[j]
                        + best_matches[i] * (Double) weightsB[currentCategory].get(j);
            }
        }
        this.nrinstclassified += 1;
        if (m_userankstoclass) {
            return ARAMm_Ranking2Class(ranking);
        }
        return ranking;
    }
public double[] ARAMm_Ranking2Class(double[] rankings) {
int columns=rankings.length;
double[] classes= new double[columns ];
SortPair[] sortedRanks = new SortPair[columns];
for (int j=0;j<columns;j++){
sortedRanks[j]= new SortPair(rankings[j],j);
}
java.util.Arrays.sort(sortedRanks);
// sortedActivations[0].getValue()sortedActivations[i].getOriginalIndex()
SortPair[] change=new SortPair[columns-1];
for(int j =1; j<columns;j++){
change[j-1] = new SortPair(sortedRanks[j-1].getValue()-sortedRanks[j].getValue(),j);
}
java.util.Arrays.sort(change);
// double val= change[0].getValue();
int ind=change[0].getOriginalIndex();
for (int j =0; j<ind;j++){
classes[sortedRanks[j].getOriginalIndex()] = 1;
}
return classes;
}
/**
 * Computes the ART choice (activation) value of every committed category
 * for the given instance, using a fuzzy-AND similarity between the sparse
 * complement-coded input and each category prototype in {@code weightsA}.
 * The returned activation for category i is |x AND w_i| / (alpha + |w_i|),
 * with |w_i| cached in {@code sweightsA}.
 *
 * NOTE(review): the many unused {@code long st*} locals are leftover
 * nano-timing instrumentation; they have no effect on the result.
 *
 * @param Data the input instance; attribute indices are shifted by
 *             {@code snumClasses} to skip the label attributes
 * @return one {@link SortPair} (activation, category index) per committed
 *         category; the uncommitted node {@code numCategories-1} is excluded
 */
private SortPair[] ARTActivateCategories(Instance Data) {
    // One slot per committed category (the last node is uncommitted).
    SortPair[] catacti = new SortPair[numCategories-1];
    // double[] catacti=new double[numCategories];
    //Set<Integer> s1=new HashSet<Integer>();
    //Set<Integer> s1=Data.keySet();
    // long startMilli = System.currentTimeMillis();
    for (int i = 0; i < numCategories-1; i++) {
        // long startMilliss = System.nanoTime();
        double sumvector = 0;
        // double sumweight = 0;
        int count=0;
        // Working copy of the category's stored complement-weight keys;
        // entries matched against the input are removed as we go.
        List<Integer> s2=new ArrayList<Integer>(upweightsA[i]);
        //List<Integer> s2=new ArrayList<Integer>(weightsA[i].keySet());
        //for (Integer j: s1) {
        //double da=(Double)Data.get(j);
        long st3 =0;
        long st4 =0;
        // Iterate only over the instance's non-zero (sparse) values.
        for (int tj=0; tj<Data.numValues(); tj++){
            long startMillisst = System.nanoTime();
            int sj=Data.index(tj);
            double da = Data.value(sj);
            // Map the attribute index to a feature index (labels come first).
            Integer j=sj-snumClasses;
            //s1.add(j);
            if (da==0){
                //s2.remove((Integer)j);
                continue;
            }
            long st10 = System.nanoTime();
            count+=1;
            long st1 = System.nanoTime();
            Double wa=(Double)weightsA[i].get(j);
            long st1a = System.nanoTime();
            if(wa!=null){
                // Fuzzy AND on the direct part: min(input, weight).
                sumvector += ((da < wa) ?da
                        : wa);
                //s2.remove((Integer)j);
            }
            long st2 = System.nanoTime();
            // Complement-coded part is stored at offset snumFeatures.
            Integer j1=j+snumFeatures;
            Double wat=(Double)weightsA[i].get(j1);
            if(wat!=null){
                wat=1-wat;
                double dat=1-da;
                sumvector += (((dat) < wat) ? dat: wat);
                st3 = System.nanoTime();
                s2.remove((Integer)j1);
                st4 = System.nanoTime();
            }else{
                // Missing complement weight is treated as 1 (uncommitted).
                sumvector+=1-da;
            }
            // long endMillisst = System.nanoTime();
            int jti=1;
            // System.out.println("it took " + (endMillisst - startMillisst) + " milli(s)");
        }
        // Complement weights not touched by any non-zero input value: the
        // input's complement there is 1, so min(1, 1-w) = 1-w.
        for (Integer j: s2) {
            double wat=1-(Double)weightsA[i].get(j);
            sumvector +=wat;
            count+=1;
        }
        // Remaining features contribute min(1,1) = 1 each.
        sumvector+=snumFeatures-count;
        //sumweight=sweightsA[i];
        // long endMilliss = System.nanoTime();
        catacti[i] = new SortPair(sumvector / (alpha + sweightsA[i]), i);
        //System.out.println("it took " + (endMilliss - startMilliss) + " milli(s)");
        // catacti[i] = new SortPair(sumvector / (alpha + sumweight), i);
        //System.out.println("sumweight "+(sumweight-sweightsA[i]));
        //if(activated==121){
        // System.out.println(i+ " "+sumvector+" "+sweightsA[i]);
        //}
    }
    // long endMilli = System.currentTimeMillis();
    //activated+=1;
    return catacti;
}
/**
 * Training-time activation: computes the choice value only for categories
 * already committed to the exact labelset of the current example (looked up
 * via {@code hmclasses}). If no category has seen this labelset yet, a
 * single maximal activation for the uncommitted node is returned so a new
 * category will be created.
 *
 * @param Data   sparse feature map (feature index -> value), zeros omitted
 * @param labels sparse label map of the current training example
 * @return activations as {@link SortPair2} (choice value, category index,
 *         raw fuzzy-AND sum), one per candidate category
 */
private SortPair2[] ARTActivateCategories(Map<Integer, Double> Data, HashMap<Integer, Double> labels) {
    // Categories are bucketed by the string form of their labelset.
    String s = labels.keySet().toString();
    Vector lclasses = (Vector)hmclasses.get(s);
    SortPair2[] catacti = null;
    if (lclasses==null||lclasses.size()==0){
        // Unseen labelset: activate only the uncommitted node maximally.
        catacti=new SortPair2[1];
        catacti[0] = new SortPair2(1,numCategories-1,1);
        return catacti;
    }
    catacti = new SortPair2[lclasses.size()];
    // double[] catacti=new double[numCategories];
    for (int i = 0; i < lclasses.size(); i++) {
        double sumvector = 0;
        // double sumweight = 0;
        int k = ((Integer)lclasses.get(i)).intValue();
        // Working copy of this category's complement-weight keys; matched
        // entries are removed, leftovers are handled after the loop.
        List<Integer> s2=new ArrayList<Integer>(upweightsA[k]);
        int counter=0;
        //double dt = instance.value(num_classes+j);
        for (Map.Entry<Integer, Double> entry : Data.entrySet()) {
            int j=entry.getKey();
            double da=(Double)entry.getValue();
            // for (Integer tj=0; tj<Data.numValues(); tj++){
            // int j=Data.index(tj);
            //
            // double da = Data.value(j-snumClasses);
            // if (da==0){
            // s2.remove((Integer)j);
            // continue;
            // }
            Double wa=(Double)weightsA[k].get(j);
            if(wa!=null){
                if(wa==0){
                    continue;
                }
                // Fuzzy AND on the direct part: min(input, weight).
                sumvector += ((da < wa) ? da
                        : wa);
                // s2.remove((Integer)j);
            }
            // Complement-coded part lives at offset snumFeatures.
            Integer j1=j+snumFeatures;
            double dat=1-da;
            Double wat=(Double)weightsA[k].get(j1);
            if(wat!=null){
                wat=1-wat;
                // sumweight += weightsA[k][j];
                sumvector +=(((dat) < wat) ? dat: wat);
                s2.remove((Object)j1);
                counter+=1;
            }else{
                // Missing complement weight behaves like weight 1.
                sumvector += dat;
                counter+=1;
                //s2.remove((Integer)j1);
            }
        }
        // Complement weights untouched by non-zero inputs: min(1, 1-w) = 1-w.
        for (Integer j: s2) {
            counter+=1;
            sumvector += 1-(Double)weightsA[k].get(j);
        }
        // Remaining features contribute min(1,1) = 1 each.
        sumvector += snumFeatures-counter;
        //sumweight=sweightsA[k];
        //catacti[i] = new SortPair(sumvector / (alpha + sumweight), k);
        //System.out.println("sumweight "+(sumweight-sweightsA[k]));
        catacti[i] = new SortPair2(sumvector / (alpha + sweightsA[k]), k,sumvector);
    }
    return catacti;
}
/**
 * Classifies the given test instance via {@link #distributionForInstance}.
 * For a nominal class the argmax of the distribution is returned; when the
 * whole distribution is zero (no winner), the first entry is returned, which
 * mirrors the original implementation's switch fall-through. For a numeric
 * class the first distribution entry is returned directly.
 *
 * @param instance the instance to be classified
 * @return the predicted class value, or -1 for unsupported class types
 * @throws Exception if the distribution cannot be computed
 */
public double classifyInstance(Instance instance) throws Exception {
    double[] dist = distributionForInstance(instance);
    if (dist == null) {
        throw new Exception("Null distribution predicted");
    }
    int classType = instance.classAttribute().type();
    if (classType == Attribute.NOMINAL) {
        double best = 0;
        int bestIndex = 0;
        for (int i = 0; i < dist.length; i++) {
            if (dist[i] > best) {
                bestIndex = i;
                best = dist[i];
            }
        }
        if (best > 0) {
            return bestIndex;
        }
        // All-zero distribution: fall back to the first entry (this keeps
        // the behaviour of the original implicit case fall-through).
        return dist[0];
    }
    if (classType == Attribute.NUMERIC) {
        return dist[0];
    }
    return -1;
}
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
public Enumeration<Option> listOptions() {
    Vector<Option> result = new Vector<Option>(2);
    result.add(new Option("\tChange generalization parameter Rho\n",
            "P", 0, "-P"));
    result.add(new Option("\tUse ranking to class function special dev. for ARAM.\n",
            "K", 0, "-K"));
    return result.elements();
}
/**
 * Parses a given list of options. Valid options are:<p>
 *
 * -P &lt;rho&gt; <br>
 * Sets the generalization (vigilance) parameter rho.<p>
 *
 * -K <br>
 * Enables the ranking-to-class conversion special to ARAM.
 *
 * @param options the list of options as an array of strings
 * @exception Exception if an option is not supported
 */
public void setOptions(String[] options) throws Exception {
    // -P is optional; keep the previous rho when it is absent.
    if (Utils.getOptionPos("P", options) >= 0) {
        roa = Double.parseDouble(Utils.getOption("P", options));
    }
    m_userankstoclass = Utils.getOptionPos("K", options) >= 0;
    super.setOptions(options);
}
//****MORE OPTION PARSING STUFF****
/**
 * Gets the current settings of the classifier. Unlike the previous
 * version, which always returned the hard-coded template "-P 0.9 -K",
 * this reports the actual current values so that
 * {@code setOptions(getOptions())} round-trips.
 *
 * @return an array of strings suitable for passing to setOptions
 */
public String [] getOptions() {
    if (m_userankstoclass) {
        return new String[]{"-P", String.valueOf(roa), "-K"};
    }
    return new String[]{"-P", String.valueOf(roa)};
}
//****ANY INFORMATION LIKE NO. OF UNITS ETC PRINTED HERE
/**
 * Returns a description of the classifier.
 *
 * @return a description of the classifier as a string.
 */
public String toString() {
    // Fixed description; no per-model details are printed.
    return "ML ARAM classifier";
}
/**
 * Updates the winning category's ART-A prototype (feature side) and ART-B
 * weights (label side) towards the current training example using the
 * fast-learning rule w' = lr*min(x,w) + (1-lr)*w, and recomputes the cached
 * prototype magnitude {@code sweightsA[category]}. (The javadoc that used
 * to sit here described "main" and was misplaced.)
 *
 * @param data     sparse feature map of the example (zeros omitted)
 * @param labels   sparse label map of the example
 * @param category index of the resonating category to update
 * @return 1 if any ART-B weight changed, 0 otherwise
 */
private double ARAMm_Update_Weights(HashMap<Integer, Double> data, HashMap<Integer, Double> labels,
        int category) {
    double weightChange = 0;
    // Magnitude |w| is rebuilt from scratch during the update.
    sweightsA[category]=0;
    Set<Integer> s1=new TreeSet<Integer>(data.keySet());
    // Keys of the stored prototype not matched by the input; handled below.
    Set<Integer> s2=new HashSet<Integer>(weightsA[category].keySet());
    int count=0;
    for (Integer i: s1) {
        if (i>=snumFeatures){
            continue;
        }
        double da=data.get(i);
        // Direct part: shrink the weight towards min(input, weight).
        Double wa=(Double)weightsA[category].get(i);
        if(wa!=null){
            if (da < wa ){
                wa = (learningRate * da)
                        + (1 - learningRate) * wa;
                if(wa==0){
                    // Zero weights are dropped to keep the map sparse.
                    weightsA[category].remove(i);
                }else{
                    weightsA[category].put(i, wa);
                }
            }
            sweightsA[category]+=wa;
            s2.remove(i);
        }
        double dat=1-da;
        //}
        // for (Integer i: s1) {
        // Complement part is stored (as 1-w) at offset snumFeatures.
        int j1= i+snumFeatures;
        // double dat=1-data.get(j1-snumFeatures);
        Double wat=(Double)weightsA[category].get(j1);
        // Double wat=(Double)weightsA[category].get(j1);
        if(wat!=null){
            wat=1-wat;
            if (dat < wat ){
                wat = ((learningRate * dat)
                        + (1 - learningRate) * wat);
                if (wat==1){
                    // A complement weight of 1 is the implicit default:
                    // remove it from the sparse maps.
                    weightsA[category].remove(j1);
                    upweightsA[category].remove(j1);
                }else{
                    weightsA[category].put(j1, 1-wat);
                    upweightsA[category].add(j1);
                    count+=1;
                }
            }else{
                if (wat!=1){
                    count+=1;
                }
            }
            sweightsA[category]+=wat;
            s2.remove(j1);
        }else{
            // No stored complement weight: treat the old value as 1.
            wat=(learningRate * dat);
            if (wat==1){
                weightsA[category].remove(j1);
                upweightsA[category].remove(j1);
            }else{
                weightsA[category].put((Integer)j1,(Double) (1-wat));
                upweightsA[category].add(j1);
                count+=1;
            }
            sweightsA[category]+=wat;
        }
    }
    // Prototype keys never matched by the input: direct-part weights shrink
    // to zero (removed); complement-part weights keep contributing 1-w.
    for (Integer i: s2) {
        if (i<snumFeatures){
            try{
                weightsA[category].remove(i);
                upweightsA[category].remove(i);
            }catch(Exception e){
                // NOTE(review): exception is deliberately swallowed; e.getClass()
                // has no effect. Presumably guards against concurrent changes.
                e.getClass();
            }
        }
        else{
            double wat=1-(Double)weightsA[category].get(i);
            sweightsA[category]+=wat;
            count+=1;
        }
    }
    // Remaining (implicit) complement weights contribute 1 each.
    sweightsA[category]+=snumFeatures-count;
    // ART-B side: accumulate or normalise label weights.
    s1=labels.keySet();
    for (Integer i: s1) {
        double lb=labels.get(i);
        Double wb=(Double)weightsB[category].get(i);
        if(wb!=null){
            if(weightblearnmethod== 0){
                // Accumulation mode: count label co-occurrences.
                weightsB[category].put(i, lb + wb);
                weightChange = 1;
            }else{
                // %normalise
                if ( lb< wb){
                    weightsB[category].put(i, (learningRate * lb )+ (1 - learningRate) *wb);
                    weightChange = 1;
                }
            }}
    }
    return weightChange;
}
/**
 * Commits the current uncommitted node and appends a fresh uncommitted
 * category: grows all per-category arrays by one and initialises the new
 * slot with empty weight maps and the default prototype magnitude.
 */
private void ARAMm_Add_New_Category() {
    final int newIndex = numCategories;
    weightsA = Arrays.copyOf(weightsA, newIndex + 1);
    sweightsA = Arrays.copyOf(sweightsA, newIndex + 1);
    weightsB = Arrays.copyOf(weightsB, newIndex + 1);
    upweightsA = Arrays.copyOf(upweightsA, newIndex + 1);
    weightsA[newIndex] = new HashMap<Integer, Double>();
    weightsB[newIndex] = new HashMap<Integer, Double>();
    upweightsA[newIndex] = new HashSet();
    sweightsA[newIndex] = sweightsA0;
    numCategories = newIndex + 1;
}
/**
 * Sums the entries of the given array.
 *
 * @param arr the values to sum
 * @return the sum (0 for an empty array)
 */
private double sumArray(double[] arr) {
    double total = 0;
    for (double value : arr) {
        total += value;
    }
    return total;
}
/**
 * Command-line entry point: runs a MEKA experiment with a WvARAM model.
 *
 * @param argv the options passed through to the evaluation framework
 */
public static void main(String [] argv) {
    try {
        Evaluation.runExperiment((MultiLabelClassifier) new WvARAM(), argv);
    } catch (Exception e) {
        // Print the trace first, then the short message, as before.
        e.printStackTrace();
        System.err.println(e.getMessage());
    }
}
/**
 * Batch prediction hook; not implemented for this classifier.
 * NOTE(review): always returns null — callers must not rely on this method.
 */
public double[][] distributionForInstanceM(Instances i) throws Exception {
    // TODO Auto-generated method stub
    return null;
}
class SortPair2 implements Comparable<SortPair2> {
private int originalIndex;
private double value;
private double rawvalue;
public SortPair2(double value, int originalIndex, double rawvalue) {
this.value = value;
this.originalIndex = originalIndex;
this.rawvalue = rawvalue;
}
public int compareTo(SortPair2 o) {
return Double.compare(o.getValue(), value);
}
public int getOriginalIndex() {
return originalIndex;
}
public double getValue() {
return value;
}
public double getRawValue() {
return rawvalue;
}
}
@Override
public boolean isThreaded() {
    // Threading is not supported by this implementation.
    return false;
}
@Override
public void setThreaded(boolean setv) {
    // Intentionally a no-op: threading is not supported (see isThreaded()).
}
@Override
public String getModel() {
    // No textual model representation is provided.
    // NOTE(review): returns null — callers must handle this.
    return null;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/neurofuzzy/ARAMNetworkSparseHT_Strange.java
|
/*
* ClassifierTemplate.java
*
* <<Your Name Here>>
* CN 710
* Dept. of Cognitive & Neural Systems
* Boston University
* <<Date here>>
*
* Copyright (c) 2006, Boston University
*
* Adapted from NaiveBayes.java
 * Copyright (C) 1999 Eibe Frank, Len Trigg
*/
package meka.classifiers.multilabel.neurofuzzy;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.Vector;
import java.util.Arrays;
import java.util.HashMap;
import meka.classifiers.multilabel.*;
import weka.classifiers.Classifier;
import meka.classifiers.multilabel.Evaluation;
import weka.classifiers.UpdateableClassifier;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Utils;
import meka.core.MLUtils;
import weka.core.WeightedInstancesHandler;
import weka.core.RevisionUtils;
/**
* ****REPLACE THE FOLLOWING WITH SIMILAR INFORMATION.
* Class for a Naive Bayes classifier using estimator classes. Numeric
* estimator precision values are chosen based on analysis of the
* training data. For this reason, the classifier is not an
* UpdateableClassifier (which in typical usage are initialized with zero
* training instances) -- if you need the UpdateableClassifier functionality,
* use the NaiveBayesUpdateable classifier. The NaiveBayesUpdateable
* classifier will use a default precision of 0.1 for numeric attributes
* when buildClassifier is called with zero training instances.
* <p>
* For more information on Naive Bayes classifiers, see<p>
*
* George H. John and Pat Langley (1995). <i>Estimating
* Continuous Distributions in Bayesian Classifiers</i>. Proceedings
* of the Eleventh Conference on Uncertainty in Artificial
* Intelligence. pp. 338-345. Morgan Kaufmann, San Mateo.<p>
*
* Valid options are:<p>
*
* -K <br>
* Use kernel estimation for modelling numeric attributes rather than
* a single normal distribution.<p>
*
* -D <br>
* Use supervised discretization to process numeric attributes.<p>
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @author Rushi Bhatt (rushi@cns.bu.edu)
* @version $Revision: 1.16 $
* Modified by Rushi for use as a CN710 template
*/
public class ARAMNetworkSparseHT_Strange extends ARAMNetworkClass {
    //**** THIS IS WHERE CLASSIFIER WEIGHTS ETC GO ****
    //define stuff like weight matrices, classifier parameters etc.
    //e.g., protected double rho_a_bar=0.0;
    // Sparse ART-A prototypes: one map per F2 category, keyed by
    // (complement-coded) feature index. Raw HashMap kept for compatibility.
    HashMap[] weightsA = null;
    // Cached prototype magnitude |w| per category, used in the choice function.
    double[] sweightsA = null;
    // Initial |w| for a freshly created (uncommitted) category.
    double sweightsA0;
    // ART-B label weights: one map per category, keyed by label index.
    HashMap[] weightsB = null;
    // Labelset string (labels.keySet().toString()) -> category indices
    // committed to exactly that labelset.
    HashMap<String, Vector> hmclasses = null;
    int snumFeatures=0; // number of raw (non-complement-coded) features
    int snumClasses=0;  // number of raw labels
    int numinstances=0; // training examples processed so far
    /**
     * Builds a network for the given complement-coded dimensions.
     *
     * @param fnumFeatures complement-coded feature count (2x raw features)
     * @param fnumClasses  complement-coded class count (2x raw labels)
     * @param fro          vigilance parameter rho
     * @param fthreshold   activation-spread threshold used at prediction time
     */
    public ARAMNetworkSparseHT_Strange(int fnumFeatures, int fnumClasses, double fro, double fthreshold) {
        initARAM(fnumFeatures, fnumClasses, fro, fthreshold);
    }

    /** No-argument constructor; the network is initialised lazily in buildClassifier. */
    public ARAMNetworkSparseHT_Strange(){
    }
private void initARAM(int fnumFeatures, int fnumClasses, double fro, double fthreshold){
numFeatures = fnumFeatures;
snumFeatures = (int)(0.5*numFeatures);
numClasses = fnumClasses;
snumClasses= (int)(0.5*numClasses);
threshold = fthreshold;
weightsA = new HashMap[1];
weightsA[0] = new HashMap();
sweightsA = new double[1];
sweightsA[0]=0;
for(int i=0;i<numFeatures;i++){
sweightsA[0]+=1;
}
sweightsA0=sweightsA[0];
weightsB = new HashMap[1];
weightsB[0] = new HashMap();
numCategories = 1;
hmclasses = new HashMap<String, Vector>();
}
/**
* Returns a string describing this classifier
* @return a description of the classifier suitable for
* displaying in the explorer/experimenter gui.
* ****MODIFY WITH CORRECT INFORMATION****
*/
public String globalInfo() {
return "This is ARAM.";
}
    /**
     * Trains the network on the given dataset by streaming every instance
     * (in {@code order}, identity order by default) through
     * {@link #updateClassifier(Instance)}. On first use the network is sized
     * from the data; on re-use with mismatching dimensions training is
     * silently skipped.
     *
     * @param D set of instances serving as training data; the first
     *          {@code classIndex()} attributes are the labels
     * @exception Exception if the classifier has not been generated successfully
     */
    public void buildClassifier(Instances D) throws Exception {
        int L = D.classIndex();
        // Complement coding doubles both feature and label dimensions.
        int featlength = (D.numAttributes() -L)*2;
        int numSamples = D.numInstances();
        int classlength = L * 2;
        if (this.order==null){
            // Default presentation order: 0..n-1.
            order = new ArrayList<Integer>();
            for (int j=0; j<D.numInstances();j++){
                order.add(j);
            }
        }
        if (numFeatures==-1){
            initARAM( featlength,classlength ,roa , threshold );
        }else{
            // NOTE(review): dimension mismatches are silently ignored and
            // no training happens in that case.
            if (featlength != numFeatures) {
                return ;
            }
            if (classlength != numClasses) {
                return ;
            }}
        // Copy the instances so we don't mess up the original data.
        // Function calls do not deep copy the arguments..
        //Instances m_Instances = new Instances(instances);
        // Use the enumeration of instances to train classifier.
        // Do any sanity checks (e.g., missing attributes etc here
        // before calling updateClassifier for the actual learning
        for(int i=0; i<D.numInstances();i++){
            Instance instance = D.get(order.get(i));
            updateClassifier(instance);
        }
        System.out.println("Training done, used "+numCategories+" neurons with rho ="+roa+".");
        // Alternatively, you can put the training logic within this method,
        // rather than updateClassifier(...). However, if you omit the
        // updateClassifier(...) method, you should remove
        // UpdateableClassifier from the class declaration above.
    }
    // ****THIS IS THE WEIGHT UPDATE ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
    /**
     * Updates the classifier with one training instance: extracts the sparse
     * feature/label maps, activates the categories committed to the same
     * labelset, searches for a resonating category (match >= vigilance), and
     * either updates the winner's weights or commits a new category.
     *
     * @param instance the new training instance to include in the model
     * @exception Exception if the instance could not be incorporated in
     * the model.
     */
    public void updateClassifier(Instance instance) throws Exception {
        //called once for each instance.
        if(!learningphase){
            return;
        }
        int num_classes=(int) (snumClasses);
        int num_features=(int) (snumFeatures);
        // Sparse views of the example: only non-zero entries are stored.
        SortedMap<Integer,Double> data = new TreeMap<Integer,Double>();
        HashMap labels = new HashMap();
        int numChanges = 0;
        // Index of the current uncommitted node (last slot).
        int numCategories_1=numCategories -1;
        numinstances+=1;
        if (!instance.classIsMissing()) {
            //Do the weight updates using the instance.
            double suminputA=0;
            double suminputB=0;
            // Labels occupy attribute indices [0, num_classes); features follow.
            for (int j = 0; j <num_features; j++) {
                double dt = instance.value(num_classes+j);
                if (dt!=0){
                    data.put(j, dt);
                }
                suminputA+=1;
                //if (data[j]<0 || data[j]>1){
                // System.out.println("Data not normalized, this will cause error!");
                //}
            }
            for (int j = 0; j < num_classes ; j++) {
                double lt = instance.value(j);
                if (lt!=0){
                    labels.put(j, lt);
                }
                suminputB+=lt;
            }
            // Activate only categories that share this example's labelset.
            SortPair[] cateacti = ARTActivateCategories(data,labels);
            java.util.Arrays.sort(cateacti);
            boolean resonance = false;
            int currentSortedIndex = 0;
            int currentCategory = -1;
            double matchA = 0;
            double matchB = 0;
            // Search loop: try candidates in descending activation order
            // until one passes both vigilance tests.
            while (!resonance && currentSortedIndex<cateacti.length) {
                currentCategory = cateacti[currentSortedIndex]
                        .getOriginalIndex();
                if (currentCategory == numCategories_1) {
                    // The uncommitted node always matches perfectly.
                    matchB=1;
                    matchA=1;
                }
                else{
                    matchA = ART_Calculate_MatchA(data, weightsA[currentCategory],suminputA);
                    if (weightsB[currentCategory].isEmpty()) {
                        matchB = 1;
                    } else {
                        matchB = ART_Calculate_MatchB(labels,
                                weightsB[currentCategory],suminputB);
                    }
                }
                if (matchA >= roa&& matchB >= rob) {
                    if (currentCategory == numCategories_1) {
                        if (currentSortedIndex == maxNumCategories) {
                            System.out
                                    .println("WARNING: The maximum number of categories has been reached.");
                            resonance = true;
                        } else {
                            // Add a new category: copy the example into the
                            // uncommitted node, then append a fresh node.
                            sweightsA[currentCategory]=0;
                            Set<Integer> s1=data.keySet();
                            for (int j :s1) {
                                double da=(Double)data.get(j);
                                weightsA[currentCategory].put(j,da);
                                // Complement part stored at offset snumFeatures.
                                weightsA[currentCategory].put(j+snumFeatures, da);
                            }
                            // Recompute the prototype magnitude |w|.
                            Set<Integer> s2=weightsA[currentCategory].keySet();
                            int count=0;
                            for (int j :s2) {
                                double da=(Double)weightsA[currentCategory].get(j);
                                if (j<snumFeatures){
                                    sweightsA[currentCategory]+=da;
                                }
                                else{
                                    sweightsA[currentCategory]+=1-da;
                                    count+=1;
                                }
                            }
                            // Implicit (absent) complement weights count 1 each.
                            sweightsA[currentCategory]+=snumFeatures-count;
                            s1=labels.keySet();
                            for (int j :s1) {
                                weightsB[currentCategory].put(j,labels.get(j));
                            }
                            // Register the new category under its labelset key.
                            String s = labels.keySet().toString();
                            if (hmclasses.containsKey(s)){
                                hmclasses.get(s).add(currentCategory);
                                hmclasses.put(s,hmclasses.get(s));
                            }else{
                                Vector v = new Vector();
                                v.add(currentCategory);
                                hmclasses.put(s,v);
                            }
                            ARAMm_Add_New_Category();
                            // fprintf(FileID,'Add a new category of %d\n',
                            // network.numCategories);
                            // Increment the number of changes since we added a
                            // new category.
                            numChanges = numChanges + 1;
                            resonance = true;
                            break;
                        }
                    } else {
                        // % Update weights
                        double weightChange = ARAMm_Update_Weights(data,
                                labels, currentCategory);
                        // NOTE(review): result discarded; looks like leftover
                        // debug code and may index weightsA[1] out of bounds
                        // for very small networks — confirm before removing.
                        sumArrayF(this.weightsA[1]);
                        if (weightChange == 1) {
                            numChanges += 1;
                        }
                        resonance = true;
                        break;
                    }
                } else {
                    // Vigilance failed: try the next-best category.
                    currentSortedIndex += 1;
                    resonance = false;
                }
            }
            // No candidate resonated: commit the example as a new category.
            if(!resonance && currentSortedIndex>=cateacti.length)
            {
                // Add a new category
                sweightsA[numCategories_1]=0;
                Set<Integer> s1=data.keySet();
                int count=0;
                for (int j: s1) {
                    double da=(Double)data.get(j);
                    weightsA[numCategories_1].put(j, da);
                    sweightsA[numCategories_1]+=da;
                    weightsA[numCategories_1].put(j+snumFeatures, da);
                    sweightsA[numCategories_1]+=1-da;
                    count+=1;
                }
                sweightsA[numCategories_1]+=snumFeatures-count;
                s1=labels.keySet();
                for (int j : s1) {
                    weightsB[numCategories_1].put(j, labels.get(j));
                }
                String s = labels.keySet().toString();
                if (hmclasses.containsKey(s)){
                    hmclasses.get(s).add(numCategories_1);
                    hmclasses.put(s,hmclasses.get(s));
                }else{
                    Vector v = new Vector();
                    v.add(numCategories_1);
                    hmclasses.put(s,v);
                }
                ARAMm_Add_New_Category();
                // fprintf(FileID,'Add a new category of %d\n',
                // network.numCategories);
                // Increment the number of changes since we added a
                // new category.
                numChanges = numChanges + 1;
            }
        }
    }
    //****THIS IS THE CLASSIFICATION ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
    //****classifyInstance() uses this method, so implement the
    //****nuts-and-bolts of your algorithm here.
    /**
     * Calculates the class membership probabilities for the given test
     * instance: activates all committed categories, keeps the top group
     * whose activation drop stays within {@code threshold}, and blends their
     * ART-B label weights (weighted by normalised activation) into a ranking.
     *
     * @param instance the instance to be classified
     * @return predicted class probability distribution (or a hard 0/1
     *         assignment when {@code m_userankstoclass} is set)
     * @exception Exception if there is a problem generating the prediction
     */
    public double[] distributionForInstance(Instance instance) throws Exception {
        int num_classes=(int) (snumClasses);
        int num_features=(int) (snumFeatures);
        // NOTE(review): dist is allocated but never used in this variant.
        double[] dist = new double[num_classes];
        // Sparse feature map of the test instance (zeros omitted).
        HashMap currentData = new HashMap();
        double[] ranking = new double[num_classes];
        for (int j = 0; j < num_features; j++) {
            double dt=instance.value(num_classes+j);
            if (dt!=0){
                currentData.put(j, dt);
            }
        }
        SortPair[] sortedActivations = ARTActivateCategories(currentData);
        java.util.Arrays.sort(sortedActivations);
        // Spread between the best and worst committed activation.
        // NOTE(review): numCategories - 2 assumes at least two committed
        // categories; with a single category this indexes -1 — confirm.
        double diff_act = sortedActivations[0].getValue()
                - sortedActivations[numCategories - 2].getValue();
        int largest_activ = 1;
        double activ_change = 0;
        // Count how many top categories stay within the allowed relative drop.
        for (int i = 1; i < sortedActivations.length; i++) {
            activ_change = (sortedActivations[0].getValue() - sortedActivations[i]
                    .getValue())
                    / sortedActivations[0].getValue();
            if (activ_change > threshold * diff_act) {
                break;
            }
            largest_activ = largest_activ + 1;
        }
        // % largest_activ =5;
        double[] best_matches = new double[largest_activ];
        java.util.Arrays.fill(best_matches, 1);
        for (int i = 0; i < largest_activ; i++) {
            // % best_matches(i) = matches(sortedCategories(i));
            best_matches[i] = sortedActivations[i].getValue();
        }
        // % min_mat = min(best_matches);
        // % max_mat = max(best_matches);
        double sum_mat = sumArray(best_matches);
        int currentCategory = 0;
        // Record which neurons fired and how strongly (for introspection).
        this.neuronsactivated=new int[largest_activ];
        this.neuronsactivity=new double[largest_activ];
        for (int i = 0; i < largest_activ; i++) {
            this.neuronsactivity[i]=best_matches[i];
            // Normalise activations so the blend weights sum to 1.
            best_matches[i] = best_matches[i] / sum_mat;
            currentCategory = sortedActivations[i].getOriginalIndex();
            this.neuronsactivated[i]=currentCategory;
            // % Fill return vector with weightB values
            Set <Integer> s1= weightsB[currentCategory].keySet();
            for (int j :s1) {
                ranking[j] = ranking[j]
                        + best_matches[i] * (Double)weightsB[currentCategory].get(j);
            }
        }
        if(m_userankstoclass) {
            return ARAMm_Ranking2Class(ranking);
        }
        return ranking;
    }
public double[] ARAMm_Ranking2Class(double[] rankings) {
int columns=rankings.length;
double[] classes= new double[columns ];
SortPair[] sortedRanks = new SortPair[columns];
for (int j=0;j<columns;j++){
sortedRanks[j]= new SortPair(rankings[j],j);
}
java.util.Arrays.sort(sortedRanks);
// sortedActivations[0].getValue()sortedActivations[i].getOriginalIndex()
SortPair[] change=new SortPair[columns-1];
for(int j =1; j<columns;j++){
change[j-1] = new SortPair(sortedRanks[j-1].getValue()-sortedRanks[j].getValue(),j);
}
java.util.Arrays.sort(change);
// double val= change[0].getValue();
int ind=change[0].getOriginalIndex();
for (int j =0; j<ind;j++){
classes[sortedRanks[j].getOriginalIndex()] = 1;
}
return classes;
}
    /**
     * Prediction-time activation: computes the ART choice value of every
     * committed category for a sparse feature map, using fuzzy-AND similarity
     * against the complement-coded prototypes in {@code weightsA}.
     *
     * @param Data sparse feature map (feature index -> non-zero value)
     * @return one {@link SortPair} (activation, category index) per committed
     *         category; the uncommitted node is excluded
     */
    private SortPair[] ARTActivateCategories(HashMap Data) {
        SortPair[] catacti = new SortPair[numCategories-1];
        // double[] catacti=new double[numCategories];
        // Scratch buffer for per-feature min(input, weight) terms.
        double[] matchVector = new double[numFeatures];
        Set<Integer> s1=Data.keySet();
        for (int i = 0; i < numCategories-1; i++) {
            double sumvector = 0;
            // double sumweight = 0;
            for (Integer j: s1) {
                double da=(Double)Data.get(j);
                // Direct part: min(input, weight).
                if(weightsA[i].containsKey(j)){
                    double wa=(Double)weightsA[i].get(j);
                    matchVector[j] = ((da < wa) ?da
                            : wa);
                    sumvector += matchVector[j];
                }
                // Complement part is stored at offset snumFeatures.
                int j1=j+snumFeatures;
                if(weightsA[i].containsKey(j1)){
                    double wat=1-(Double)weightsA[i].get(j1);
                    double dat=1-da;
                    matchVector[j1] = (((dat) < wat) ? dat: wat);
                    sumvector += matchVector[j1];
                }
            }
            //sumweight=sweightsA[i];
            // Choice function: |x AND w| / (alpha + |w|), |w| cached in sweightsA.
            catacti[i] = new SortPair(sumvector / (alpha + sweightsA[i]), i);
            // catacti[i] = new SortPair(sumvector / (alpha + sumweight), i);
            //System.out.println("sumweight "+(sumweight-sweightsA[i]));
        }
        return catacti;
    }
    /**
     * Training-time activation: computes the choice value only for categories
     * already committed to the exact labelset of the current example (looked
     * up via {@code hmclasses}). If no category has seen this labelset, a
     * single maximal activation for the uncommitted node is returned so a new
     * category will be created.
     *
     * @param Data   sparse feature map of the training example
     * @param labels sparse label map of the training example
     * @return activations as {@link SortPair} (choice value, category index)
     */
    private SortPair[] ARTActivateCategories(SortedMap Data, HashMap labels) {
        // Categories are bucketed by the string form of their labelset.
        String s = labels.keySet().toString();
        Vector lclasses = hmclasses.get(s);
        SortPair[] catacti = null;
        if (lclasses==null||lclasses.size()==0){
            // Unseen labelset: activate only the uncommitted node maximally.
            catacti=new SortPair[1];
            catacti[0] = new SortPair(1,numCategories-1);
            return catacti;
        }
        catacti = new SortPair[lclasses.size()];
        // double[] catacti=new double[numCategories];
        // Scratch buffer for per-feature min(input, weight) terms.
        double[] matchVector = new double[numFeatures];
        Set<Integer> s1=Data.keySet();
        for (int i = 0; i < lclasses.size(); i++) {
            double sumvector = 0;
            // double sumweight = 0;
            int k = ((Integer)lclasses.get(i)).intValue();
            for (Integer j: s1) {
                double da=(Double)Data.get(j);
                // Direct part: min(input, weight); zero weights are skipped.
                if(weightsA[k].containsKey(j)){
                    double wa=0;
                    try{
                        wa=(Double)weightsA[k].get(j);
                    }catch(Exception e){
                        // NOTE(review): swallowed; e.getClass() is a no-op.
                        e.getClass();
                    }
                    if(wa==0){
                        continue;
                    }
                    matchVector[j] = ((da < wa) ? da
                            : wa);
                    sumvector += matchVector[j];
                }
                // Complement part is stored at offset snumFeatures.
                int j1=j+snumFeatures;
                if(weightsA[k].containsKey(j1)){
                    double wat=1-(Double)weightsA[k].get(j1);
                    // sumweight += weightsA[k][j];
                    if(wat==0){
                        continue;
                    }
                    double dat=1-da;
                    matchVector[j1] = (((dat) < wat) ? dat: wat);
                    sumvector += matchVector[j1];
                }
            }
            //sumweight=sweightsA[k];
            //catacti[i] = new SortPair(sumvector / (alpha + sumweight), k);
            //System.out.println("sumweight "+(sumweight-sweightsA[k]));
            // Choice function: |x AND w| / (alpha + |w|).
            catacti[i] = new SortPair(sumvector / (alpha + sweightsA[k]), k);
        }
        return catacti;
    }
// ****YOU SHOULDN'T NEED TO CHANGE THIS
/**
* Classifies the given test instance. The instance has to belong to a
* dataset when it's being classified. Note that a classifier MUST
* implement either this or distributionForInstance().
*
* @param instance the instance to be classified
* @return the predicted most likely class for the instance or
* Instance.missingValue() if no prediction is made
* @exception Exception if an error occurred during the prediction
*/
public double classifyInstance(Instance instance) throws Exception {
double[] dist = distributionForInstance(instance);
if (dist == null) {
throw new Exception("Null distribution predicted");
}
switch (instance.classAttribute().type()) {
case Attribute.NOMINAL:
double max = 0;
int maxIndex = 0;
for (int i = 0; i < dist.length; i++) {
if (dist[i] > max) {
maxIndex = i;
max = dist[i];
}
}
if (max > 0) {
return maxIndex;
} else {
//return Instance.missingValue();
}
case Attribute.NUMERIC:
return dist[0];
default:
return -1;
}
}
// ****ANY OPTIONS/PARAMETERS GO HERE****
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
public Enumeration listOptions() {
//These are just examples, modify to suit your algorithm
Vector newVector = new Vector(2);
newVector.addElement(
new Option("\tChange generalization parameter Rho\n",
"P", 0,"-P"));
newVector.addElement(
new Option("\tUse ranking to class function special dev. for ARAM.\n",
"K", 0,"-K"));
return newVector.elements();
}
//****OPTIONS HERE SHOULD MATCH THOSE ADDED ABOVE****
/**
* Parses a given list of options. Valid options are:<p>
*
* -K <br>
* Use kernel estimation for modelling numeric attributes rather than
* a single normal distribution.<p>
*
* -D <br>
* Use supervised discretization to process numeric attributes.
*
* @param options the list of options as an array of strings
* @exception Exception if an option is not supported
*/
public void setOptions(String[] options) throws Exception {
//These are just examples, modify to suit your algorithm
// boolean k = Utils.getFlag('K', options);
// boolean d = Utils.getFlag('D', options);
// if (k && d) {
// throw new IllegalArgumentException(
// "Can't use both kernel density estimation and discretization!");
// }
// setUseSupervisedDiscretization(d);
// setUseKernelEstimator(k);
roa = (Utils.getOptionPos("P",options) >= 0) ? Double.parseDouble(Utils.getOption("P", options)) : roa;
m_userankstoclass= (Utils.getOptionPos("K",options) >= 0);
super.setOptions(options);
}
//****MORE OPTION PARSING STUFF****
/**
* Gets the current settings of the classifier.
*
* @return an array of strings suitable for passing to setOptions
*/
public String [] getOptions() {
//These are just examples, modify to suit your algorithm
String [] options = new String [3];
try{
options =weka.core.Utils.splitOptions("-P 0.9 -K");
}catch (Exception ex) {
System.out.println(ex.getMessage());
}
return options;
}
//****ANY INFORMATION LIKE NO. OF UNITS ETC PRINTED HERE
/**
* Returns a description of the classifier.
*
* @return a description of the classifier as a string.
*/
public String toString() {
//These are just examples, modify to suit your algorithm
StringBuffer text = new StringBuffer();
text.append("ML ARAM classifier");
// if (m_Instances == null) {
// text.append(": No model built yet.");
// } else {
// try {
// for (int i = 0; i < m_Distributions[0].length; i++) {
// text.append("\n\nClass " + m_Instances.classAttribute().value(i) +
// ": Prior probability = " + Utils.
// doubleToString(m_ClassDistribution.getProbability(i),
// 4, 2) + "\n\n");
// Enumeration enumAtts = m_Instances.enumerateAttributes();
// int attIndex = 0;
// while (enumAtts.hasMoreElements()) {
// Attribute attribute = (Attribute) enumAtts.nextElement();
// text.append(attribute.name() + ": "
// + m_Distributions[attIndex][i]);
// attIndex++;
// }
// }
// } catch (Exception ex) {
// text.append(ex.getMessage());
// }
// }
return text.toString();
}
//****MORE GUI RELATED STUFF AND PARAMETER ACCESS METHODS
// /**
// * Returns the tip text for this property
// * @return tip text for this property suitable for
// * displaying in the explorer/experimenter gui
// */
// public String useKernelEstimatorTipText() {
// return "Use a kernel estimator for numeric attributes rather than a "
// +"normal distribution.";
// }
// /**
// * Gets if kernel estimator is being used.
// *
// * @return Value of m_UseKernelEstimatory.
// */
// public boolean getUseKernelEstimator() {
//
// return m_UseKernelEstimator;
// }
//
// /**
// * Sets if kernel estimator is to be used.
// *
// * @param v Value to assign to m_UseKernelEstimatory.
// */
// public void setUseKernelEstimator(boolean v) {
//
// m_UseKernelEstimator = v;
// if (v) {
// setUseSupervisedDiscretization(false);
// }
// }
//
// /**
// * Returns the tip text for this property
// * @return tip text for this property suitable for
// * displaying in the explorer/experimenter gui
// */
// public String useSupervisedDiscretizationTipText() {
// return "Use supervised discretization to convert numeric attributes to nominal "
// +"ones.";
// }
//
// /**
// * Get whether supervised discretization is to be used.
// *
// * @return true if supervised discretization is to be used.
// */
// public boolean getUseSupervisedDiscretization() {
//
// return m_UseDiscretization;
// }
//
// /**
// * Set whether supervised discretization is to be used.
// *
// * @param newblah true if supervised discretization is to be used.
// */
// public void setUseSupervisedDiscretization(boolean newblah) {
//
// m_UseDiscretization = newblah;
// if (newblah) {
// setUseKernelEstimator(false);
// }
// }
/**
 * ARAM weight-update step for a resonating category: shrinks the
 * category's ART-a prototype (fuzzy-AND with the complement-coded input)
 * and updates the ART-b label weights.
 *
 * (The Javadoc previously attached here -- "Main method for testing this
 * class" -- described main(String[]) and was misplaced.)
 *
 * @param data     sparse input vector (feature index -> value)
 * @param labels   sparse label vector (label index -> value)
 * @param category index of the category (neuron) to update
 * @return 1 if any ART-b label weight changed, 0 otherwise
 */
private double ARAMm_Update_Weights(SortedMap data, HashMap labels,
int category) {
double weightChange = 0;
// The cached weight sum for this category is rebuilt from scratch below.
sweightsA[category]=0;
Set<Integer> s1=data.keySet();
int count=0;
// Pass 1: fuzzy-AND the lower (non-complemented) half of the prototype
// with the input; only indices present in both contribute.
for (Integer i: s1) {
count+=1;
double da=(Double)data.get(i);
if(weightsA[category].containsKey(i)){
double wa=(Double)weightsA[category].get(i);
if (da < wa ){
// Convex combination towards the (smaller) input value.
wa = (learningRate * da)
+ (1 - learningRate) * wa;
if(wa==0){
// Zero weights are not stored explicitly in the sparse map.
weightsA[category].remove(i);
}else{
weightsA[category].put(i, wa);
}
}
sweightsA[category]+=wa;
//System.out.println(sweightsA[category]);
}
}
// Pass 2: update the complement-coded half. Entries at index
// i + snumFeatures are stored in complemented form (value = 1 - weight).
for (Integer i: s1) {
double da=(Double)data.get(i);
// else{
// double wa=(learningRate * da);
// weightsA[category].put(i, wa);
// sweightsA[category]+=wa;
//
// }
int j1= i+snumFeatures;
double dat=1-da;
if(weightsA[category].containsKey(j1)){
double wat=1-(Double)weightsA[category].get(j1);
if (dat < wat ){
wat = ((learningRate * dat)
+ (1 - learningRate) * wat);
if (wat==0){
weightsA[category].remove(j1);
}else{
// Re-complement before storing.
weightsA[category].put(j1, 1-wat);
}
}
sweightsA[category]+=wat;
//System.out.println(sweightsA[category]);
}else{
double wa=(learningRate * dat);
if (wa==0){
weightsA[category].remove(j1);
}else{
weightsA[category].put(j1, 1-wa);
sweightsA[category]+=wa;
//System.out.println(sweightsA[category]);
}
}
}
// Prototype entries that did not appear in the input: fuzzy-AND with 0
// clears lower-half weights, so collect them for removal.
Set<Integer> s2=weightsA[category].keySet();
List<Integer> s3=new ArrayList<Integer>();
try{
for (Integer i: s2) {
if (s1.contains(i)){
continue;
}
if (i<snumFeatures){
s3.add(i);
}
// else{
// double wat=1-(Double)weightsA[category].get(i);
// sweightsA[category]+=wat;
// }
}
// Removal happens outside the key-set iteration to avoid a
// ConcurrentModificationException.
for (Integer i: s3){
count+=1;
weightsA[category].remove(i);
}
}catch(Exception e){
e.getClass();
}
// Complement entries that are not stored explicitly implicitly hold 1.
sweightsA[category]+=snumFeatures-count;
// ART-b side: update weights only for labels this category already
// knows; new labels are NOT added here.
s1=labels.keySet();
for (Integer i: s1) {
double lb=(Double)labels.get(i);
if(weightsB[category].containsKey(i)){
double wb=(Double)weightsB[category].get(i);
if(weightblearnmethod== 0){
weightsB[category].put(i, lb + wb);
weightChange = 1;
}else{
// %normalise
if ( lb< wb){
weightsB[category].put(i, (learningRate * lb )+ (1 - learningRate) *wb);
weightChange = 1;
}
}}
}
return weightChange;
}
/**
 * Computes the ART-a match of a sparse input against one category's
 * feature weights: sum(min(input, weight)) / sum(input).
 *
 * For every input index j both the direct weight (j) and the
 * complement-coded counterpart (j + snumFeatures, stored as 1 - w)
 * are considered.
 *
 * @param Data     sparse input vector (feature index -> value)
 * @param fweights one category's feature-weight map
 * @param suminput precomputed sum of the input vector
 * @return the match value; 0 when suminput is 0
 */
private double ART_Calculate_MatchA(SortedMap Data, HashMap fweights, double suminput
) {
if (suminput == 0) {
return 0.0;
}
double[] matchVector = new double[numFeatures];
double summatch = 0;
//double suminput = 0;
Set<Integer> s1=Data.keySet();
for (Integer j: s1) {
double da= (Double)Data.get(j);
if(fweights.containsKey(j)){
double wa = (Double)fweights.get(j);
if(wa==0){
// NOTE(review): this `continue` also skips the complement-coded
// entry j + snumFeatures below -- confirm that is intended.
continue;
}
matchVector[j] = (( da< wa) ? da :wa);
summatch += matchVector[j];
}
//suminput += Data[j];
int j1= j+snumFeatures;
if(fweights.containsKey(j1)){
double wat =1- (Double)fweights.get(j1);
if(wat==0){
continue;
}
double dat= 1-da;
// The complement contribution reuses slot j (overwriting it); only
// the running sum matters, so no information is lost.
matchVector[j] = ((dat< wat) ? dat :wat);
summatch += matchVector[j];
}
}
// if (suminput == 0) {
// return 0.0;
// }
return summatch / suminput;
}
/**
 * Computes the ART-b (label-side) match of a sparse label vector against
 * one category's label weights: sum(min(label, weight)) / sum(labels).
 *
 * @param Data     sparse label vector (label index -> value)
 * @param fweights one category's label-weight map
 * @param suminput precomputed sum of the label vector
 * @return the match value; 0 when suminput is 0
 */
private double ART_Calculate_MatchB(HashMap Data, HashMap fweights, double suminput
) {
    // Degenerate label vector: nothing can match.
    if (suminput == 0) {
        return 0.0;
    }
    double[] mins = new double[snumClasses];
    double total = 0;
    for (Object key : Data.keySet()) {
        Integer idx = (Integer) key;
        // Weight first, then data -- preserves the original lookup order.
        double weightVal = (Double) fweights.get(idx);
        double dataVal = (Double) Data.get(idx);
        // Fuzzy-AND: element-wise minimum.
        mins[idx] = (dataVal < weightVal) ? dataVal : weightVal;
        total += mins[idx];
    }
    return total / suminput;
}
/**
 * Appends one fresh, uncommitted category: grows every per-category
 * array by a slot and initialises it with empty weight maps and the
 * all-ones weight sum.
 */
private void ARAMm_Add_New_Category() {
    int newSize = numCategories + 1;
    weightsA = Arrays.copyOf(weightsA, newSize);
    sweightsA = Arrays.copyOf(sweightsA, newSize);
    weightsB = Arrays.copyOf(weightsB, newSize);
    // Fresh category: empty sparse weights, uncommitted weight sum.
    weightsA[numCategories] = new HashMap();
    weightsB[numCategories] = new HashMap();
    sweightsA[numCategories] = sweightsA0;
    numCategories = newSize;
}
/**
 * Sums the entries of a dense vector.
 *
 * @param arr the values to add up
 * @return the sum of all elements (0 for an empty array)
 */
private double sumArray(double[] arr) {
    // Enhanced for-loop replaces the index-based loop (idiomatic, no
    // behavior change).
    double result = 0;
    for (double v : arr) {
        result += v;
    }
    return result;
}
/**
 * Sums all values stored in a sparse (index -> value) map.
 * A value that cannot be read as a Double is reported and skipped
 * rather than aborting the whole sum (matching the original
 * best-effort behavior).
 *
 * @param arr map from attribute index to value
 * @return the sum of all readable values
 */
private double sumArray(HashMap arr) {
    // The unused `counter` local of the original implementation was removed.
    double result = 0;
    for (Object key : arr.keySet()) {
        try {
            result += (Double) arr.get(key);
        } catch (Exception e) {
            System.out.println(e);
        }
    }
    return result;
}
/**
 * Sums a sparse complement-coded feature map: direct entries
 * (index < snumFeatures) contribute their value, complement-coded
 * entries contribute 1 - value, and every complement entry NOT stored
 * in the map implicitly contributes 1 (added via snumFeatures - counter).
 *
 * @param arr sparse feature-weight map
 * @return the effective sum of the complement-coded vector
 */
private double sumArrayF(HashMap arr) {
double result = 0;
int counter=0;
for (Object key: arr.keySet()) {
try{
if ((Integer)key<snumFeatures){
result += (Double)arr.get(key);
}else{
// Complement entries are stored as 1 - weight.
result += 1-(Double)arr.get(key);
counter+=1;
}
}catch(Exception e){
System.out.println(e);
}
}
// Missing complement entries each count as 1.
result+=snumFeatures-counter;
return result;
}
/** Command-line entry point: runs a MEKA evaluation experiment with WvARAM. */
public static void main(String [] argv) {
    try {
        WvARAM classifier = new WvARAM();
        Evaluation.runExperiment(classifier, argv);
    } catch (Exception e) {
        // Full trace first, then the short message, exactly as before.
        e.printStackTrace();
        System.err.println(e.getMessage());
    }
}
@Override
public boolean isThreaded() {
// Threading is not supported by this classifier.
return false;
}
@Override
public void setThreaded(boolean setv) {
// Threading is not supported; the flag is ignored.
}
@Override
public double[][] distributionForInstanceM(Instances i) throws Exception {
// Batch prediction is not implemented; use distributionForInstance.
return null;
}
@Override
public String getModel() {
// No textual model representation is provided.
return null;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/neurofuzzy/ARAMNetworkSparseV.java
|
/*
*
*
* Adapted from NaiveBayes.java
*
* Copyright (C) 2016 Fernando Benites
* @author Fernando Benites
*/
package meka.classifiers.multilabel.neurofuzzy;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.Vector;
import meka.classifiers.multilabel.Evaluation;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.Utils;
/**
* ****REPLACE THE FOLLOWING WITH SIMILAR INFORMATION.
* Class for a Naive Bayes classifier using estimator classes. Numeric
* estimator precision values are chosen based on analysis of the
* training data. For this reason, the classifier is not an
* UpdateableClassifier (which in typical usage are initialized with zero
* training instances) -- if you need the UpdateableClassifier functionality,
* use the NaiveBayesUpdateable classifier. The NaiveBayesUpdateable
* classifier will use a default precision of 0.1 for numeric attributes
* when buildClassifier is called with zero training instances.
* <p>
* For more information on Naive Bayes classifiers, see<p>
*
* George H. John and Pat Langley (1995). <i>Estimating
* Continuous Distributions in Bayesian Classifiers</i>. Proceedings
* of the Eleventh Conference on Uncertainty in Artificial
* Intelligence. pp. 338-345. Morgan Kaufmann, San Mateo.<p>
*
* Valid options are:<p>
*
* -K <br>
* Use kernel estimation for modelling numeric attributes rather than
* a single normal distribution.<p>
*
* -D <br>
* Use supervised discretization to process numeric attributes.<p>
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @author Rushi Bhatt (rushi@cns.bu.edu)
* @version $Revision: 1.16 $
* Modified by Rushi for use as a CN710 template
*/
public class ARAMNetworkSparseV extends ARAMNetworkClass {
//**** THIS IS WHERE CLASSIFIER WEIGHTS ETC GO ****
//define stuff like weight matrices, classifier parameters etc.
//e.g., protected double rho_a_bar=0.0;
// Per-category ART-a (feature) weights, complement-coded and sparse.
SparseArray[] weightsA = null;
// Markers for complement-coded weightsA entries; used by the activation
// routines to account quickly for entries absent from the input.
SparseArray[] upweightsA=null;
// Cached sum of each category's ART-a weight vector.
double[] sweightsA = null;
// Weight sum of an uncommitted (all-ones) category.
double sweightsA0;
// Per-category ART-b (label) weights.
SparseArray[] weightsB = null;
// Maps a labelset key string to the categories trained on that labelset.
HashMap<String, Vector> hmclasses = null;
// Feature/class counts BEFORE complement coding (half of numFeatures /
// numClasses).
int snumFeatures=0;
int snumClasses=0;
// Number of training instances presented so far.
int numinstances=0;
int activated=0;
public ARAMNetworkSparseV(int fnumFeatures, int fnumClasses, double fro, double fthreshold) {
initARAM(fnumFeatures, fnumClasses, fro, fthreshold);
}
// No-arg constructor for Weka/MEKA reflection; initARAM runs lazily from
// buildClassifier.
public ARAMNetworkSparseV(){
}
/**
 * Initialises the network structures for the given (complement-coded)
 * dimensions, starting with a single uncommitted category.
 *
 * @param fnumFeatures number of features AFTER complement coding (2x raw)
 * @param fnumClasses  number of class entries AFTER complement coding (2x raw)
 * @param fro          vigilance parameter (see note below)
 * @param fthreshold   activation-cutoff threshold used at prediction time
 */
private void initARAM(int fnumFeatures, int fnumClasses, double fro, double fthreshold){
    numFeatures = fnumFeatures;
    snumFeatures = (int)(0.5*numFeatures);
    numClasses = fnumClasses;
    snumClasses= (int)(0.5*numClasses);
    threshold = fthreshold;
    // NOTE(review): fro is accepted but never assigned to roa -- confirm
    // whether the vigilance parameter should be set here.
    weightsA = new SparseArray[1];
    weightsA[0] = new SparseArray();
    upweightsA = new SparseArray[1];
    upweightsA[0] = new SparseArray();
    sweightsA = new double[1];
    // An uncommitted (all-ones) prototype sums to numFeatures; the original
    // accumulated this one element at a time in a loop.
    sweightsA[0] = numFeatures;
    sweightsA0 = sweightsA[0];
    weightsB = new SparseArray[1];
    weightsB[0] = new SparseArray();
    numCategories = 1;
    hmclasses = new HashMap<String, Vector>();
}
/**
 * Returns a short description of this classifier for display in the
 * explorer/experimenter GUI.
 *
 * @return a human-readable description of the classifier
 */
public String globalInfo() {
return "This is ARAM.";
}
/**
 * Trains the network by presenting every instance once to
 * updateClassifier, in the order given by {@code order} (dataset order
 * by default).
 *
 * @param D training data; the class index gives the number of labels L
 * @exception Exception if the classifier has not been generated
 * successfully
 */
public void buildClassifier(Instances D) throws Exception {
int L = D.classIndex();
// Complement coding doubles both feature and label dimensionality.
int featlength = (D.numAttributes() -L)*2;
int numSamples = D.numInstances();
int classlength = L * 2;
if (this.order==null){
// Default presentation order: dataset order.
order = new ArrayList<Integer>();
for (int j=0; j<D.numInstances();j++){
order.add(j);
}
}
if (numFeatures==-1){
// First call: size the network lazily from the data.
initARAM( featlength,classlength ,roa , threshold );
}else{
// NOTE(review): a dimensionality mismatch is silently ignored and the
// model is left untrained -- confirm whether throwing would be better.
if (featlength != numFeatures) {
return ;
}
if (classlength != numClasses) {
return ;
}}
// Copy the instances so we don't mess up the original data.
// Function calls do not deep copy the arguments..
//Instances m_Instances = new Instances(instances);
// Use the enumeration of instances to train classifier.
// Do any sanity checks (e.g., missing attributes etc here
// before calling updateClassifier for the actual learning
for(int i=0; i<D.numInstances();i++){
Instance instance = D.get(order.get(i));
updateClassifier(instance);
}
System.out.println("Training done, used "+numCategories+" neurons with rho ="+roa+".");
// Alternatively, you can put the training logic within this method,
// rather than updateClassifier(...). However, if you omit the
// updateClassifier(...) method, you should remove
// UpdateableClassifier from the class declaration above.
}
// ****THIS IS THE WEIGHT UPDATE ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
/**
 * Presents one training instance: activates candidate categories for the
 * instance's labelset, resonates with the best candidate that passes the
 * vigilance test (updating its weights), or commits a new category.
 *
 * @param instance the new training instance to include in the model
 * @exception Exception if the instance could not be incorporated in
 * the model.
 */
public void updateClassifier(Instance instance) throws Exception {
//called once for each instance.
if(!learningphase){
return;
}
int num_classes=(int) (snumClasses);
SparseArray data = new SparseArray();
SparseArray labels = new SparseArray();
int numChanges = 0;
// Index of the uncommitted ("blank") category.
int numCategories_1=numCategories -1;
numinstances+=1;
if (!instance.classIsMissing()) {
//Do the weight updates using the instance.
// Split the sparse instance into labels (index < num_classes) and
// features (shifted down by num_classes); zeros are skipped.
for (Integer tj=0; tj<instance.numValues(); tj++){
int j=instance.index(tj);
double da = instance.value(j);
if (da==0){
continue;
}
if(j<num_classes){
labels.put(j, da);
}else{
data.put(j-num_classes, da);
}
}
// Activations of categories sharing this labelset (plus the
// uncommitted one), best first after sorting (descending comparator).
SortPair2[] cateacti = ARTActivateCategories(data,labels);
java.util.Arrays.sort(cateacti);
boolean resonance = false;
int currentSortedIndex = 0;
int currentCategory = -1;
double matchA = 0;
while (!resonance && currentSortedIndex<cateacti.length) {
currentCategory = cateacti[currentSortedIndex]
.getOriginalIndex();
if (currentCategory == numCategories_1) {
// The uncommitted category always passes the vigilance test.
matchA=1;
}
else{
matchA = (cateacti[currentSortedIndex].getRawValue()/
snumFeatures);
// System.out.println("Ma: "+matchA+" "+cateacti[currentSortedIndex].getValue()
// +" "+numinstances+" "+currentCategory+" S:"+sweightsA[currentCategory]);
}
if (matchA >= roa) {
if (currentCategory == numCategories_1) {
if (currentSortedIndex == maxNumCategories) {
System.out
.println("WARNING: The maximum number of categories has been reached.");
resonance = true;
} else {
// Add a new category
sweightsA[currentCategory]=0;
int[] s1=data.getKeys();
int sit=data.size();
int count=0;
int j=0;
// Commit the uncommitted category: copy the input into both the
// direct and complement-coded weight halves.
for (int jt=0;jt<sit;jt++) {
j=s1[jt];
Double da=data.get(j);
weightsA[currentCategory].put(j,da);
weightsA[currentCategory].put(j+snumFeatures, da);
upweightsA[currentCategory].put(j+snumFeatures,1.0);
/*}
int[] s2=weightsA[currentCategory].getKeys();
int si2=weightsA[currentCategory].size();
for (int j=0 ;j<si2;j++) {
// System.out.println(j+" "+s2[j]);
double da=(Double)weightsA[currentCategory].get(s2[j]);*/
// if (s2[j]<snumFeatures){
sweightsA[currentCategory]+=da;
/*}
else{*/
sweightsA[currentCategory]+=1-da;
count+=1;
//}
}
// Complement entries not stored explicitly implicitly hold 1 each.
sweightsA[currentCategory]+=snumFeatures-count;
s1=labels.getKeys();
sit=labels.size();
for (int jt=0;jt<sit;jt++) {
j=s1[jt];
weightsB[currentCategory].put(j,labels.get(j));
}
// Register the new category under its labelset key.
String s = labels.getKeysString();
if (hmclasses.containsKey(s)){
hmclasses.get(s).add(currentCategory);
hmclasses.put(s,hmclasses.get(s));
}else{
Vector<Integer> v = new Vector<Integer>();
v.add(currentCategory);
hmclasses.put(s,v);
}
ARAMm_Add_New_Category();
//System.out.println(numinstances+" "+numCategories);
// fprintf(FileID,'Add a new category of %d\n',
// network.numCategories);
// Increment the number of changes since we added a
// new category.
numChanges = numChanges + 1;
resonance = true;
break;
}
} else {
// % Update weights
double weightChange = ARAMm_Update_Weights(data,
labels, currentCategory);
//System.out.println(numinstances+" "+currentCategory+" S:"+sweightsA[currentCategory]);
//sumArrayF(this.weightsA[1]);
if (weightChange == 1) {
numChanges += 1;
}
resonance = true;
break;
}
} else {
// Vigilance failed: try the next-best category.
currentSortedIndex += 1;
resonance = false;
}
}
if(!resonance && currentSortedIndex>=cateacti.length)
{
// No candidate resonated: commit a new category directly (same
// procedure as committing the uncommitted category above).
// Add a new category
sweightsA[numCategories_1]=0;
int[] s1=data.getKeys();
int sit=data.size();
int j=0;
int count=0;
for (int jt=0;jt<sit;jt++) {
j=s1[jt];
double da=data.get(j);
weightsA[numCategories_1].put(j, da);
sweightsA[numCategories_1]+=da;
weightsA[numCategories_1].put(j+snumFeatures, da);
sweightsA[numCategories_1]+=1-da;
upweightsA[numCategories_1].put(j+snumFeatures,1.0);
count+=1;
}
sweightsA[numCategories_1]+=snumFeatures-count;
s1=labels.getKeys();
sit=labels.size();
j=0;
for (int jt=0;jt<sit;jt++) {
j=s1[jt];
weightsB[numCategories_1].put(j, labels.get(j));
}
String s = labels.getKeysString();
if (hmclasses.containsKey(s)){
hmclasses.get(s).add(numCategories_1);
hmclasses.put(s,hmclasses.get(s));
}else{
Vector<Integer> v = new Vector<Integer>();
v.add(numCategories_1);
hmclasses.put(s,v);
}
ARAMm_Add_New_Category();
//System.out.println(numinstances+" "+numCategories);
// fprintf(FileID,'Add a new category of %d\n',
// network.numCategories);
// Increment the number of changes since we added a
// new category.
numChanges = numChanges + 1;
}
}
}
//****THIS IS THE CLASSIFICATION ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
//****classifyInstance() uses this method, so implement the
//****nuts-and-bolts of your algorithm here.
/**
 * Computes a per-label ranking for the given test instance by blending
 * the ART-b label weights of the most strongly activated categories
 * (those within an activation-drop window controlled by threshold).
 *
 * @param instance the instance to be classified
 * @return per-label ranking scores, or a hard 0/1 assignment when
 *         m_userankstoclass is set
 * @exception Exception if there is a problem generating the prediction
 */
public double[] distributionForInstance(Instance instance) throws Exception {
int num_classes=(int) (snumClasses);
double[] ranking = new double[num_classes];
// long startMilli = System.currentTimeMillis();
// for (int j = 0; j < num_features; j++) {
//
// double dt=instance.value(num_classes+j);
// if (dt!=0){
// currentData.put(j, dt);
// }
// }
//TODO use instance here
SortPair[] sortedActivations = ARTActivateCategories(instance);
java.util.Arrays.sort(sortedActivations);
double s0=sortedActivations[0].getValue();
// NOTE(review): numCategories - 2 underflows when only the uncommitted
// category exists -- confirm at least two categories are guaranteed here.
double diff_act = s0
- sortedActivations[numCategories - 2].getValue();
int largest_activ = 1;
double activ_change = 0;
// Count how many top categories stay within the allowed relative
// activation drop.
for (int i = 1; i < sortedActivations.length; i++) {
activ_change = (s0 - sortedActivations[i]
.getValue())
/ s0;
if (activ_change > threshold * diff_act) {
break;
}
largest_activ = largest_activ + 1;
}
// % largest_activ =5;
double[] best_matches = new double[largest_activ];
java.util.Arrays.fill(best_matches, 1);
best_matches[0]=s0;
for (int i = 1; i < largest_activ; i++) {
// % best_matches(i) = matches(sortedCategories(i));
best_matches[i] = sortedActivations[i].getValue();
}
// % min_mat = min(best_matches);
// % max_mat = max(best_matches);
double sum_mat = sumArray(best_matches);
int currentCategory = 0;
this.neuronsactivated=new int[largest_activ];
this.neuronsactivity=new double[largest_activ];
// Accumulate each selected category's label weights, weighted by its
// normalised activation.
for (int i = 0; i < largest_activ; i++) {
this.neuronsactivity[i]=best_matches[i];
best_matches[i] = best_matches[i] / sum_mat;
currentCategory = sortedActivations[i].getOriginalIndex();
this.neuronsactivated[i]=currentCategory;
// % Fill return vector with weightB values
//Set <Integer> s1= weightsB[currentCategory].keySet();
int[] s1=weightsB[currentCategory].getKeys();
int sit=weightsB[currentCategory].size();
int j=0;
for (int jt=0;jt<sit;jt++) {
j=s1[jt];
ranking[j] = ranking[j]
+ best_matches[i] * (Double)weightsB[currentCategory].get(j);
}
}
this.nrinstclassified+=1;
if(m_userankstoclass) {
return ARAMm_Ranking2Class(ranking);
}
// long endMilli = System.currentTimeMillis();
return ranking;
}
/**
 * Converts a ranking vector into a hard 0/1 label assignment by cutting
 * the descending ranking at its single largest score drop.
 *
 * @param rankings per-label ranking scores
 * @return 0/1 vector with 1 for every label ranked above the largest gap
 */
public double[] ARAMm_Ranking2Class(double[] rankings) {
    int n = rankings.length;
    double[] result = new double[n];
    // Pair each score with its label index, then order by score
    // (descending, per SortPair's comparator).
    SortPair[] ordered = new SortPair[n];
    for (int k = 0; k < n; k++) {
        ordered[k] = new SortPair(rankings[k], k);
    }
    java.util.Arrays.sort(ordered);
    // Score drop between neighbours in the ordered list; each pair's
    // index records how many labels precede that gap.
    SortPair[] gaps = new SortPair[n - 1];
    for (int k = 1; k < n; k++) {
        gaps[k - 1] = new SortPair(ordered[k - 1].getValue() - ordered[k].getValue(), k);
    }
    java.util.Arrays.sort(gaps);
    // Every label ranked before the widest gap is predicted positive.
    int cut = gaps[0].getOriginalIndex();
    for (int k = 0; k < cut; k++) {
        result[ordered[k].getOriginalIndex()] = 1;
    }
    return result;
}
/**
 * Prediction-time activation of all committed categories for a Weka
 * instance: activation = |min(input, w)| / (alpha + |w|), computed
 * sparsely. Complement-coded weight entries not covered by the input are
 * accounted for via a clone of upweightsA; everything else defaults to 1.
 *
 * @param Data the instance; label attributes occupy the first
 *             snumClasses positions and are shifted away
 * @return one SortPair (activation, category index) per committed category
 */
private SortPair[] ARTActivateCategories(Instance Data) {
SortPair[] catacti = new SortPair[numCategories-1];
// double[] catacti=new double[numCategories];
//Set<Integer> s1=new HashSet<Integer>();
//Set<Integer> s1=Data.keySet();
long startMilli = System.currentTimeMillis();
for (int i = 0; i < numCategories-1; i++) {
long startMilliss = System.nanoTime();
double sumvector = 0;
// double sumweight = 0;
int count=0;
// Start from a copy of the complement markers and remove entries as
// the input covers them; leftovers are handled after the loop.
SparseArray s2=upweightsA[i].clone();
// HashMapIVector s2=new HashMapIVector(upweightsA[i]);
//List<Integer> s2=new ArrayList<Integer>(weightsA[i].keySet());
//for (Integer j: s1) {
//double da=(Double)Data.get(j);
long st3 =0;
long st4 =0;
for (int tj=0; tj<Data.numValues(); tj++){
long startMillisst = System.nanoTime();
int sj=Data.index(tj);
double da = Data.value(sj);
// Shift past the label attributes.
int j=sj-snumClasses;
//s1.add(j);
if (da==0){
//s2.remove((Integer)j);
continue;
}
long st10 = System.nanoTime();
count+=1;
long st1 = System.nanoTime();
Double wa=(Double)weightsA[i].get(j);
long st1a = System.nanoTime();
if(wa!=null){
sumvector += ((da < wa) ?da
: wa);
//s2.remove((Integer)j);
}
long st2 = System.nanoTime();
Integer j1=j+snumFeatures;
//int j1=j+snumFeatures;
Double wat=(Double)weightsA[i].get(j1);
if(wat!=null){
// Complement entries are stored as 1 - weight.
wat=1-wat;
double dat=1-da;
sumvector += (((dat) < wat) ? dat: wat);
st3 = System.nanoTime();
// s2.remove(j1);
s2.remove(j1);
st4 = System.nanoTime();
}else{
// Missing complement weight defaults to 1, so min(1-da, 1) = 1-da.
sumvector+=1-da;
}
long endMillisst = System.nanoTime();
int jti=1;
// System.out.println("it took " + (endMillisst - startMillisst) + " milli(s)");
}
//Integer[] s3=s2.keySet();
/* int[] s3=s2.TableKeyRaw();
if(s3!=null){
for (int j=snumFeatures; j<s3.length;j++) {
if (s3[j]==-1){
continue;
}*/
// Complement-coded weights the input never touched: input complement
// is 1, so they contribute their (un-complemented) weight value.
int[] s1=s2.getKeys();
int sit=s2.size();
int j=0;
for (int jt=0;jt<sit;jt++) {
j=s1[jt];
// double wat=1-(Double)weightsA[i].get(s3[j]);
double wat=1-(Double)weightsA[i].get(j);
sumvector +=wat;
count+=1;
}
//}
// Remaining implicit complement entries contribute 1 each.
sumvector+=snumFeatures-count;
//sumweight=sweightsA[i];
long endMilliss = System.nanoTime();
catacti[i] = new SortPair(sumvector / (alpha + sweightsA[i]), i);
//System.out.println("it took " + (endMilliss - startMilliss) + " milli(s)");
// catacti[i] = new SortPair(sumvector / (alpha + sumweight), i);
//System.out.println("sumweight "+(sumweight-sweightsA[i]));
//if(activated==121){
// System.out.println(i+ " "+sumvector+" "+sweightsA[i]);
//}
}
long endMilli = System.currentTimeMillis();
//activated+=1;
return catacti;
}
/**
 * Training-time activation: only categories previously trained on the
 * SAME labelset (looked up via hmclasses) are evaluated. A labelset that
 * was never seen yields a single entry for the uncommitted category.
 *
 * @param Data   sparse feature vector
 * @param labels sparse label vector; its key string selects the candidates
 * @return SortPair2 entries (normalised activation, category, raw match sum)
 */
private SortPair2[] ARTActivateCategories(SparseArray Data, SparseArray labels) {
String s = labels.getKeysString();
Vector lclasses = (Vector)hmclasses.get(s);
SortPair2[] catacti = null;
if (lclasses==null||lclasses.size()==0){
// Unknown labelset: only the uncommitted category is a candidate.
catacti=new SortPair2[1];
catacti[0] = new SortPair2(1,numCategories-1,1);
return catacti;
}
catacti = new SortPair2[lclasses.size()];
// double[] catacti=new double[numCategories];
for (int i = 0; i < lclasses.size(); i++) {
double sumvector = 0;
// double sumweight = 0;
int k = ((Integer)lclasses.get(i)).intValue();
// Copy of complement markers; entries are removed as the input
// covers them, leftovers handled after the loop.
SparseArray s2=upweightsA[k].clone();
int counter=0;
//double dt = instance.value(num_classes+j);
//for (Map.Entry<Integer, Double> entry : Data.entrySet()) {
int[] s1=Data.getKeys();
int sit=Data.size();
int j=0;
for (int jt=0;jt<sit;jt++) {
j=s1[jt];
//int j=entry.getKey();
double da=(Double)Data.get(j);
// for (Integer tj=0; tj<Data.numValues(); tj++){
// int j=Data.index(tj);
//
// double da = Data.value(j-snumClasses);
// if (da==0){
// s2.remove((Integer)j);
// continue;
// }
Double wa=(Double)weightsA[k].get(j);
if(wa!=null){
if(wa==0){
continue;
}
sumvector += ((da < wa) ? da
: wa);
// s2.remove((Integer)j);
}
Integer j1=j+snumFeatures;
double dat=1-da;
Double wat=(Double)weightsA[k].get(j1);
if(wat!=null){
// Complement entries are stored as 1 - weight.
wat=1-wat;
// sumweight += weightsA[k][j];
sumvector +=(((dat) < wat) ? dat: wat);
s2.remove(j1);
counter+=1;
}else{
sumvector += dat;
counter+=1;
//s2.remove((Integer)j1);
}
}
// Complement-coded weights the input never touched contribute their
// un-complemented value.
s1=s2.getKeys();
sit=s2.size();
//int j=0;
for (int jt=0;jt<sit;jt++) {
j=s1[jt];
counter+=1;
sumvector += 1-(Double)weightsA[k].get(j);
}
// Remaining implicit complement entries contribute 1 each.
sumvector += snumFeatures-counter;
//sumweight=sweightsA[k];
//catacti[i] = new SortPair(sumvector / (alpha + sumweight), k);
//System.out.println("sumweight "+(sumweight-sweightsA[k]));
catacti[i] = new SortPair2(sumvector / (alpha + sweightsA[k]), k,sumvector);
}
return catacti;
}
// ****YOU SHOULDN'T NEED TO CHANGE THIS
/**
 * Classifies the given test instance. The instance has to belong to a
 * dataset when it's being classified. Note that a classifier MUST
 * implement either this or distributionForInstance().
 *
 * @param instance the instance to be classified
 * @return the predicted most likely class for the instance, or -1 for
 * unsupported attribute types (see fall-through note below)
 * @exception Exception if an error occurred during the prediction
 */
public double classifyInstance(Instance instance) throws Exception {
double[] dist = distributionForInstance(instance);
if (dist == null) {
throw new Exception("Null distribution predicted");
}
switch (instance.classAttribute().type()) {
case Attribute.NOMINAL:
double max = 0;
int maxIndex = 0;
// Pick the label with the largest strictly-positive score.
for (int i = 0; i < dist.length; i++) {
if (dist[i] > max) {
maxIndex = i;
max = dist[i];
}
}
if (max > 0) {
return maxIndex;
} else {
//return Instance.missingValue();
}
// NOTE(review): when max == 0 control falls through into the NUMERIC
// case and returns dist[0] -- confirm this stand-in for a "missing"
// prediction is intentional.
case Attribute.NUMERIC:
return dist[0];
default:
return -1;
}
}
// ****ANY OPTIONS/PARAMETERS GO HERE****
/**
 * Returns an enumeration describing the available options: the vigilance
 * parameter (-P) and the ranking-to-class flag (-K).
 *
 * @return an enumeration of all the available options.
 */
public Enumeration<Option> listOptions() {
    Vector<Option> options = new Vector<Option>(2);
    options.addElement(
        new Option("\tChange generalization parameter Rho\n",
            "P", 0, "-P"));
    options.addElement(
        new Option("\tUse ranking to class function special dev. for ARAM.\n",
            "K", 0, "-K"));
    return options.elements();
}
//****OPTIONS HERE SHOULD MATCH THOSE ADDED ABOVE****
/**
 * Parses a given list of options. Valid options are:<p>
 *
 * -P &lt;rho&gt; <br>
 * Set the vigilance parameter rho (unchanged when absent).<p>
 *
 * -K <br>
 * Convert per-label rankings to a hard labelset via
 * ARAMm_Ranking2Class.<p>
 *
 * (The previous Javadoc describing kernel estimation / discretization was
 * left over from the NaiveBayes template this class was adapted from.)
 *
 * @param options the list of options as an array of strings
 * @exception Exception if an option is not supported
 */
public void setOptions(String[] options) throws Exception {
//These are just examples, modify to suit your algorithm
// boolean k = Utils.getFlag('K', options);
// boolean d = Utils.getFlag('D', options);
// if (k && d) {
// throw new IllegalArgumentException(
// "Can't use both kernel density estimation and discretization!");
// }
// setUseSupervisedDiscretization(d);
// setUseKernelEstimator(k);
roa = (Utils.getOptionPos("P",options) >= 0) ? Double.parseDouble(Utils.getOption("P", options)) : roa;
m_userankstoclass= (Utils.getOptionPos("K",options) >= 0);
super.setOptions(options);
}
//****MORE OPTION PARSING STUFF****
/**
 * Gets the current settings of the classifier.
 *
 * The previous implementation returned the hard-coded string
 * "-P 0.9 -K" regardless of the actual configuration; this version
 * reports the live rho value and ranking-to-class flag so that
 * setOptions(getOptions()) round-trips.
 *
 * @return an array of strings suitable for passing to setOptions
 */
public String [] getOptions() {
    ArrayList<String> options = new ArrayList<String>();
    options.add("-P");
    options.add(String.valueOf(roa));
    if (m_userankstoclass) {
        options.add("-K");
    }
    return options.toArray(new String[0]);
}
//****ANY INFORMATION LIKE NO. OF UNITS ETC PRINTED HERE
/**
 * Returns a brief, fixed description of the classifier. (A large
 * commented-out block inherited from the NaiveBayes template, which
 * printed per-attribute distributions, has been removed.)
 *
 * @return a description of the classifier as a string.
 */
public String toString() {
//These are just examples, modify to suit your algorithm
StringBuffer text = new StringBuffer();
text.append("ML ARAM classifier");
return text.toString();
}
/**
 * ARAM weight-update step (sparse variant): fuzzy-ANDs the resonating
 * category's ART-a prototype with the complement-coded input, maintains
 * the upweightsA complement markers, and updates the ART-b label weights.
 *
 * (The Javadoc previously attached here -- "Main method for testing this
 * class" -- described main(String[]) and was misplaced.)
 *
 * @param data     sparse feature vector
 * @param labels   sparse label vector
 * @param category index of the category (neuron) to update
 * @return 1 if any ART-b label weight changed, 0 otherwise
 */
private double ARAMm_Update_Weights(SparseArray data, SparseArray labels,
int category) {
double weightChange = 0;
// The cached weight sum is rebuilt from scratch below.
sweightsA[category]=0;
//Set<Integer> s1=data.keySet();
// Snapshot of current prototype keys; entries covered by the input are
// removed, leftovers are reconciled after the main loop.
SparseArray s2=weightsA[category].clone();
int count=0;
int[] s1=data.getKeys();
int sit=data.size();
int i=0;
for (int jt=0;jt<sit;jt++) {
i=s1[jt];
//for (Integer i: s1) {
double da=data.get(i);
Double wa=(Double)weightsA[category].get(i);
if(wa!=null){
if (da < wa ){
// Convex combination towards the (smaller) input value.
wa = (learningRate * da)
+ (1 - learningRate) * wa;
if(wa==0){
weightsA[category].remove(i);
}else{
weightsA[category].put(i, wa);
}
}
sweightsA[category]+=wa;
s2.remove(i);
}
double dat=1-da;
//}
// for (Integer i: s1) {
// Complement-coded entry, stored as 1 - weight.
int j1= i+snumFeatures;
// double dat=1-data.get(j1-snumFeatures);
Double wat=(Double)weightsA[category].get(j1);
// Double wat=(Double)weightsA[category].get(j1);
if(wat!=null){
wat=1-wat;
if (dat < wat ){
wat = ((learningRate * dat)
+ (1 - learningRate) * wat);
if (wat==1){
weightsA[category].remove(j1);
upweightsA[category].remove(j1);
}else{
weightsA[category].put(j1, 1-wat);
upweightsA[category].put(j1,0.0);
count+=1;
}
}else{
if (wat!=1){
count+=1;
}
}
sweightsA[category]+=wat;
s2.remove(j1);
}else{
wat=(learningRate * dat);
if (wat==1){
weightsA[category].remove(j1);
upweightsA[category].remove(j1);
}else{
weightsA[category].put((Integer)j1,(Double) (1-wat));
upweightsA[category].put(j1,1.0);
count+=1;
}
sweightsA[category]+=wat;
}
}
//System.out.println(this.numinstances);
// Prototype entries the input did not cover: lower-half weights are
// cleared (fuzzy-AND with 0); complement entries keep contributing.
s1=s2.getKeys();
sit=s2.size();
//int j=0;
for (int jt=0;jt<sit;jt++) {
i=s1[jt];
if (i<snumFeatures){
try{
weightsA[category].remove(i);
upweightsA[category].remove(i);
}catch(Exception e){
e.getClass();
}
}
else{
double wat=1-(Double)weightsA[category].get(i);
sweightsA[category]+=wat;
count+=1;
}
}
// Implicit complement entries each count as 1.
sweightsA[category]+=snumFeatures-count;
//s1=labels.keySet();
// ART-b side: update weights only for labels this category already knows.
s1=labels.getKeys();
sit=labels.size();
//int j=0;
for (int jt=0;jt<sit;jt++) {
i=s1[jt];
//for (Integer i: s1) {
double lb=labels.get(i);
Double wb=(Double)weightsB[category].get(i);
if(wb!=null){
if(weightblearnmethod== 0){
weightsB[category].put(i, lb + wb);
weightChange = 1;
}else{
// %normalise
if ( lb< wb){
weightsB[category].put(i, (learningRate * lb )+ (1 - learningRate) *wb);
weightChange = 1;
}
}}
}
return weightChange;
}
/**
 * Appends one fresh, uncommitted category: grows every per-category
 * structure by a slot and initialises it with empty sparse weight maps
 * and the all-ones weight sum.
 */
private void ARAMm_Add_New_Category() {
    int newSize = numCategories + 1;
    weightsA = Arrays.copyOf(weightsA, newSize);
    sweightsA = Arrays.copyOf(sweightsA, newSize);
    weightsB = Arrays.copyOf(weightsB, newSize);
    upweightsA = Arrays.copyOf(upweightsA, newSize);
    // Fresh category: empty sparse weights, uncommitted weight sum.
    weightsA[numCategories] = new SparseArray();
    weightsB[numCategories] = new SparseArray();
    upweightsA[numCategories] = new SparseArray();
    sweightsA[numCategories] = sweightsA0;
    numCategories = newSize;
}
/**
 * Sums the entries of a dense vector.
 *
 * @param arr the values to add up
 * @return the sum of all elements (0 for an empty array)
 */
private double sumArray(double[] arr) {
    // Enhanced for-loop replaces the index-based loop (idiomatic, no
    // behavior change).
    double result = 0;
    for (double v : arr) {
        result += v;
    }
    return result;
}
/** Command-line entry point: runs a MEKA evaluation experiment with WvARAM. */
public static void main(String [] argv) {
    try {
        MultiLabelClassifier classifier = (MultiLabelClassifier) new WvARAM();
        Evaluation.runExperiment(classifier, argv);
    } catch (Exception e) {
        // Full trace first, then the short message, exactly as before.
        e.printStackTrace();
        System.err.println(e.getMessage());
    }
}
/**
 * Batch prediction is not implemented for this network; use
 * distributionForInstance per instance instead.
 */
public double[][] distributionForInstanceM(Instances i) throws Exception {
// Not implemented.
return null;
}
class SortPair2 implements Comparable<SortPair2> {
private int originalIndex;
private double value;
private double rawvalue;
public SortPair2(double value, int originalIndex, double rawvalue) {
this.value = value;
this.originalIndex = originalIndex;
this.rawvalue = rawvalue;
}
public int compareTo(SortPair2 o) {
return Double.compare(o.getValue(), value);
}
public int getOriginalIndex() {
return originalIndex;
}
public double getValue() {
return value;
}
public double getRawValue() {
return rawvalue;
}
}
@Override
public boolean isThreaded() {
// Threading is not supported by this classifier.
return false;
}
@Override
public void setThreaded(boolean setv) {
// Threading is not supported; the flag is ignored.
}
@Override
public String getModel() {
// No textual model representation is provided.
return null;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/neurofuzzy/ARAMNetworkfast.java
|
/*
* ClassifierTemplate.java
*
* <<Your Name Here>>
* CN 710
* Dept. of Cognitive & Neural Systems
* Boston University
* <<Date here>>
*
* Copyright (c) 2006, Boston University
*
* Adapted from NaiveBayes.java
* Copyright (C) 1999 Eibe Frank,Len Trigg
*/
package meka.classifiers.multilabel.neurofuzzy;
import java.io.BufferedWriter;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Vector;
import java.util.Arrays;
import java.util.HashMap;
import meka.classifiers.multilabel.*;
import meka.classifiers.multilabel.neurofuzzy.ARAMNetworkSparseHT.SortPair2;
import meka.classifiers.multilabel.Evaluation;
import weka.classifiers.Classifier;
//import weka.classifiers.Evaluation;
import weka.classifiers.UpdateableClassifier;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Utils;
import meka.core.MLUtils;
import weka.core.WeightedInstancesHandler;
import weka.core.RevisionUtils;
/**
* ****REPLACE THE FOLLOWING WITH SIMILAR INFORMATION.
* Class for a Naive Bayes classifier using estimator classes. Numeric
* estimator precision values are chosen based on analysis of the
* training data. For this reason, the classifier is not an
* UpdateableClassifier (which in typical usage are initialized with zero
* training instances) -- if you need the UpdateableClassifier functionality,
* use the NaiveBayesUpdateable classifier. The NaiveBayesUpdateable
* classifier will use a default precision of 0.1 for numeric attributes
* when buildClassifier is called with zero training instances.
* <p>
* For more information on Naive Bayes classifiers, see<p>
*
* George H. John and Pat Langley (1995). <i>Estimating
* Continuous Distributions in Bayesian Classifiers</i>. Proceedings
* of the Eleventh Conference on Uncertainty in Artificial
* Intelligence. pp. 338-345. Morgan Kaufmann, San Mateo.<p>
*
* Valid options are:<p>
*
* -K <br>
* Use kernel estimation for modelling numeric attributes rather than
* a single normal distribution.<p>
*
* -D <br>
* Use supervised discretization to process numeric attributes.<p>
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @author Rushi Bhatt (rushi@cns.bu.edu)
* @version $Revision: 1.16 $
* Modified by Rushi for use as a CN710 template
*/
public class ARAMNetworkfast extends ARAMNetworkClass {
//**** THIS IS WHERE CLASSIFIER WEIGHTS ETC GO ****
//define stuff like weight matrices, classifier parameters etc.
//e.g., protected double rho_a_bar=0.0;
HashMap<String, Vector> hmclasses = null;
int numinstancestr=0;
int activated=0;
private Writer writer = null;
/**
 * Builds an ARAM network with the given geometry and parameters.
 * @param fnumFeatures number of (complement-coded) input features
 * @param fnumClasses number of (complement-coded) label outputs
 * @param fro vigilance parameter rho for the input field
 * @param fthreshold activation-spread threshold used during prediction
 */
public ARAMNetworkfast(int fnumFeatures, int fnumClasses, double fro, double fthreshold) {
initARAM(fnumFeatures, fnumClasses, fro, fthreshold);
}
/** No-arg constructor; the network is initialized lazily in buildClassifier(). */
public ARAMNetworkfast(){
}
// Initializes the network with a single uncommitted category: weightsA is
// all ones (fuzzy-ART initial weights), weightsB all zeros, and sweightsA
// caches each category's A-weight sum (here equal to numFeatures).
// NOTE(review): the vigilance argument fro is not stored here — roa is
// presumably inherited from the superclass default or set via options;
// confirm this is intended.
private void initARAM(int fnumFeatures, int fnumClasses, double fro, double fthreshold){
numFeatures = fnumFeatures;
numClasses = fnumClasses;
threshold = fthreshold;
weightsA = new double[1][numFeatures];
sweightsA = new double[1];
sweightsA[0]=0;
Arrays.fill(weightsA[0], 1);
for(int i=0;i<numFeatures;i++){
sweightsA[0]+=weightsA[0][i];
}
// Remember an uncommitted category's weight sum so new categories can reuse it.
sweightsA0=sweightsA[0];
weightsB = new double[1][numClasses];
Arrays.fill(weightsB[0], 0);
numCategories = 1;
// Maps a label combination (Arrays.toString of the label vector) to the
// indices of the categories encoding it.
hmclasses = new HashMap<String, Vector>();
}
/**
 * Returns a string describing this classifier.
 * @return a description suitable for displaying in the explorer/experimenter gui
 */
public String globalInfo() {
return "This is ARAM.";
}
/**
 * Trains the network on the given multi-label dataset by presenting each
 * instance once (in the order given by {@code order}) to updateClassifier.
 *
 * @param D set of instances serving as training data; the first
 *          {@code D.classIndex()} attributes are the labels
 * @exception Exception if the classifier has not been generated successfully
 */
public void buildClassifier(Instances D) throws Exception {
int L = D.classIndex();
// Features and labels are complement coded, hence the factor 2.
int featlength = (D.numAttributes() -L)*2;
int numSamples = D.numInstances();
int classlength = L * 2;
// Default presentation order is simply 0..n-1.
if (this.order==null){
order = new ArrayList<Integer>();
for (int j=0; j<D.numInstances();j++){
order.add(j);
}
}
if (numFeatures==-1){
// Lazy initialization on the first call.
initARAM( featlength,classlength ,roa , threshold );
}else{
// A network built for a different geometry is silently left untouched.
if (featlength != numFeatures) {
return ;
}
if (classlength != numClasses) {
return ;
}}
// Incremental training: present every instance once.
for(int i=0; i<D.numInstances();i++){
Instance instance = D.get(order.get(i));
updateClassifier(instance);
}
System.out.println("Training done, used "+numCategories+" neurons with rho ="+roa+".");
}
/**
 * Presents one training instance to the network (incremental fuzzy ARAM
 * learning). The instance's first numClasses/2 attributes are the labels,
 * the remainder the features; both are complement coded before matching.
 * Only categories that already encode exactly this label set compete (see
 * ARTActivateCategories(double[], double[])); the best one passing the
 * vigilance test roa is updated, otherwise a new category is committed.
 *
 * Refactoring note: the original duplicated the whole "commit new
 * category" sequence in two places; it is extracted into
 * commitNewCategory(). Unused locals (numChanges, suminputB, matchB)
 * were removed. Behavior is otherwise unchanged.
 *
 * @param instance the new training instance to include in the model
 * @exception Exception if the instance could not be incorporated in the model
 */
public void updateClassifier(Instance instance) throws Exception {
	if (!learningphase) {
		return;
	}
	int num_classes = (int) (0.5 * numClasses);
	int num_features = (int) (0.5 * numFeatures);
	double[] data = new double[numFeatures];
	double[] labels = new double[numClasses];
	int lastCategory = numCategories - 1; // index of the uncommitted category
	numinstancestr += 1;
	if (instance.classIsMissing()) {
		return;
	}
	// Complement code the features; suminputA = |I| is the match denominator.
	double suminputA = 0;
	for (int j = 0; j < num_features; j++) {
		data[j] = instance.value(num_classes + j);
		data[j + num_features] = 1 - data[j];
		suminputA += data[j] + data[j + num_features];
	}
	// Complement code the labels.
	for (int j = 0; j < num_classes; j++) {
		labels[j] = instance.value(j);
		labels[j + num_classes] = 1 - labels[j];
	}
	SortPair2[] cateacti = ARTActivateCategories(data, labels);
	java.util.Arrays.sort(cateacti); // descending activation
	for (int sorted = 0; sorted < cateacti.length; sorted++) {
		int currentCategory = cateacti[sorted].getOriginalIndex();
		// The uncommitted category always passes the vigilance test.
		double matchA = (currentCategory == lastCategory)
				? 1 : cateacti[sorted].getRawValue() / suminputA;
		if (matchA < roa) {
			continue; // vigilance failed: try the next-best category
		}
		if (currentCategory == lastCategory) {
			if (sorted == maxNumCategories) {
				System.out
						.println("WARNING: The maximum number of categories has been reached.");
			} else {
				commitNewCategory(data, labels, currentCategory);
			}
		} else {
			// Resonance with an existing category: fuzzy-AND weight update.
			ARAMm_Update_Weights(data, labels, currentCategory);
		}
		return; // resonance reached, stop searching
	}
	// No category resonated at all: commit the uncommitted one.
	commitNewCategory(data, labels, lastCategory);
}
/**
 * Copies the sample and its labels into the (uncommitted) category slot,
 * refreshes the cached A-weight sum, registers the category under its
 * label-set key and appends a fresh uncommitted category.
 */
private void commitNewCategory(double[] data, double[] labels, int category) {
	sweightsA[category] = 0;
	for (int j = 0; j < data.length; j++) {
		weightsA[category][j] = data[j];
		sweightsA[category] += data[j];
	}
	for (int j = 0; j < weightsB[category].length; j++) {
		weightsB[category][j] = labels[j];
	}
	String key = Arrays.toString(labels);
	Vector members = hmclasses.get(key);
	if (members == null) {
		members = new Vector();
		hmclasses.put(key, members);
	}
	members.add(category);
	ARAMm_Add_New_Category();
}
/**
 * Dumps every category's A-weights plus a compact summary of its label
 * weights to the given writer (debug output for the activity report).
 * I/O errors are swallowed: the report is best-effort only.
 * NOTE(review): {@code value} keeps only the LAST non-zero label weight
 * of each category while {@code pos} collects all non-zero positions —
 * confirm that printing a single representative value is intended.
 */
private void print_weights(Writer twriter){
for (int i=0;i<weightsA.length;i++){
try {
twriter.write(i+":");
twriter.write(Arrays.toString(weightsA[i]));
twriter.write("[");
double value=0;
String pos="";
// Only the first half of the B-vector carries the labels proper
// (the second half is the complement code).
for (int j=0;j<weightsB[i].length/2;j++){
if (weightsB[i][j]!=0){
value=weightsB[i][j];
pos+=j+",";
}
}
twriter.write(value+":"+pos+"]\n");
} catch (IOException ex) {
// best-effort debug output; ignore
}
}
}
//****THIS IS THE CLASSIFICATION ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
//****classifyInstance() uses this method, so implement the
//****nuts-and-bolts of your algorithm here.
/**
 * Calculates the label ranking (or thresholded class vector) for the given
 * test instance. The instance's features are complement coded, all
 * committed categories are activated, and the top activations — those
 * within a threshold-scaled band below the best one — vote on the labels,
 * each weighted by its normalized activation. As a side effect the
 * activated neurons are recorded in neuronsactivated/neuronsactivity and,
 * when activity_report is set, appended to a debug report file.
 *
 * NOTE(review): the index sortedActivations[numCategories - 2] assumes at
 * least two committed categories — confirm callers guarantee this.
 * NOTE(review): the report writer is opened lazily but never closed or
 * flushed here.
 *
 * @param instance the instance to be classified
 * @return predicted ranking (or, if m_userankstoclass is set, a 0/1 class vector)
 * @exception Exception if there is a problem generating the prediction
 */
public double[] distributionForInstance(Instance instance) throws Exception {
int num_classes=(int) (0.5 * numClasses);
int num_features=(int) (0.5 * numFeatures);
double[] dist = new double[num_classes];
double[] currentData = new double[numFeatures];
double[] ranking = new double[num_classes];
// Complement code the feature part of the instance.
for (int j = 0; j < num_features; j++) {
currentData[j] = instance.value(num_classes+j);
currentData[num_features+j] = 1 - currentData[j];
}
SortPair2[] sortedActivations = ARTActivateCategories(currentData);
java.util.Arrays.sort(sortedActivations);
// s0 = strongest activation; diff_act = spread between best and weakest.
double s0=sortedActivations[0].getValue();
double diff_act = s0
- sortedActivations[numCategories - 2].getValue();
int largest_activ = 1;
double activ_change = 0;
// Count how many categories stay within the allowed activation drop.
for (int i = 1; i < sortedActivations.length; i++) {
activ_change = (s0 - sortedActivations[i]
.getValue())
/ s0;
if (activ_change > threshold * diff_act) {
break;
}
largest_activ = largest_activ + 1;
}
double[] best_matches = new double[largest_activ];
java.util.Arrays.fill(best_matches, 1);
best_matches[0]=s0;
for (int i = 1; i < largest_activ; i++) {
best_matches[i] = sortedActivations[i].getValue();
}
double sum_mat = sumArray(best_matches);
int currentCategory = 0;
this.neuronsactivated=new int[largest_activ];
this.neuronsactivity=new double[largest_activ];
// Lazily open the activity-report file on first use.
if (this.activity_report.compareTo("")!=0){
try {
if (writer==null){
writer = new BufferedWriter(new OutputStreamWriter(
new FileOutputStream(this.activity_report), "utf-8"));
writer.write("Neurons");
print_weights(writer);
}
writer.write("this.nrinstclassified "+this.nrinstclassified+"\n");
} catch (IOException ex) {
// best-effort debug output; ignore
}
}
// Each selected category votes on the labels, weighted by its
// activation normalized over the selected set.
for (int i = 0; i < largest_activ; i++) {
this.neuronsactivity[i]=best_matches[i];
best_matches[i] = best_matches[i] / sum_mat;
currentCategory = sortedActivations[i].getOriginalIndex();
this.neuronsactivated[i]=currentCategory;
if (this.activity_report.compareTo("")!=0){
try {
writer.write(this.neuronsactivity[i]+","+this.neuronsactivated[i]+";");
} catch (IOException ex) {
// best-effort debug output; ignore
}
}
for (int j = 0; j < num_classes; j++) {
ranking[j] = ranking[j]
+ best_matches[i] * weightsB[currentCategory][j];
}
}
this.nrinstclassified+=1;
if (this.activity_report.compareTo("")!=0){
try {
writer.write(";;");
for (int i = 1; i < sortedActivations.length; i++) {
writer.write(sortedActivations[i].getOriginalIndex()+":"+String.format("%.6f",sortedActivations[i].getValue())+",");
}
writer.write("\n");
} catch (IOException ex) {
// best-effort debug output; ignore
}
}
if(m_userankstoclass) {
return ARAMm_Ranking2Class(ranking);
}
return ranking;
}
/**
 * Thresholds a label ranking into a 0/1 class vector by cutting at the
 * largest gap: the rankings are sorted descending, consecutive differences
 * are computed, and every label ranked above the biggest drop is set to 1.
 *
 * @param rankings one score per label
 * @return binary class vector of the same length
 */
public double[] ARAMm_Ranking2Class(double[] rankings) {
int columns=rankings.length;
double[] classes= new double[columns ];
SortPair[] sortedRanks = new SortPair[columns];
for (int j=0;j<columns;j++){
sortedRanks[j]= new SortPair(rankings[j],j);
}
// SortPair sorts descending by value.
java.util.Arrays.sort(sortedRanks);
// change[j-1] = drop between rank j-1 and rank j; the original index
// stored is the cut position, not a label index.
SortPair[] change=new SortPair[columns-1];
for(int j =1; j<columns;j++){
change[j-1] = new SortPair(sortedRanks[j-1].getValue()-sortedRanks[j].getValue(),j);
}
java.util.Arrays.sort(change);
// change[0] holds the largest drop; everything before it is positive.
int ind=change[0].getOriginalIndex();
for (int j =0; j<ind;j++){
classes[sortedRanks[j].getOriginalIndex()] = 1;
}
return classes;
}
/**
 * Computes the choice (activation) value of every committed category for
 * the given complement-coded input: the fuzzy-AND match |I ^ w| divided by
 * (alpha + |w|), with |w| taken from the cached sums. The trailing
 * uncommitted category is excluded.
 *
 * @param Data complement-coded feature vector
 * @return one pair (normalized activation, category index, raw match) per
 *         committed category
 */
private SortPair2[] ARTActivateCategories(double[] Data) {
	SortPair2[] activations = new SortPair2[numCategories - 1];
	for (int cat = 0; cat < numCategories - 1; cat++) {
		double match = 0;
		for (int f = 0; f < numFeatures; f++) {
			match += Math.min(Data[f], weightsA[cat][f]);
		}
		activations[cat] = new SortPair2(match / (alpha + sweightsA[cat]), cat, match);
	}
	return activations;
}
/**
 * Activation restricted to the categories that already encode exactly the
 * given label combination (looked up via hmclasses). When no such category
 * exists, only the uncommitted category is returned with full activation.
 *
 * @param Data complement-coded feature vector
 * @param labels complement-coded label vector (lookup key)
 * @return activations of the candidate categories
 */
private SortPair2[] ARTActivateCategories(double[] Data, double[] labels) {
	Vector candidates = hmclasses.get(Arrays.toString(labels));
	if (candidates == null || candidates.size() == 0) {
		// No category with this label set yet: offer the uncommitted one.
		return new SortPair2[] { new SortPair2(1, numCategories - 1, (int) (0.5 * numFeatures)) };
	}
	SortPair2[] activations = new SortPair2[candidates.size()];
	for (int c = 0; c < candidates.size(); c++) {
		int cat = ((Integer) candidates.get(c)).intValue();
		double match = 0;
		for (int f = 0; f < numFeatures; f++) {
			match += Math.min(Data[f], weightsA[cat][f]);
		}
		activations[c] = new SortPair2(match / (alpha + sweightsA[cat]), cat, match);
	}
	return activations;
}
/**
 * Classifies the given test instance via distributionForInstance().
 * For a nominal class the index of the largest probability is returned;
 * when the whole distribution is zero the original implementation fell
 * through the switch into the numeric branch, i.e. returned dist[0] —
 * that behavior is kept, but made explicit here. Numeric classes return
 * dist[0] directly; any other attribute type yields -1.
 *
 * @param instance the instance to be classified
 * @return the predicted most likely class for the instance
 * @exception Exception if an error occurred during the prediction
 */
public double classifyInstance(Instance instance) throws Exception {
	double[] dist = distributionForInstance(instance);
	if (dist == null) {
		throw new Exception("Null distribution predicted");
	}
	switch (instance.classAttribute().type()) {
	case Attribute.NOMINAL:
		int maxIndex = 0;
		double max = 0;
		for (int i = 0; i < dist.length; i++) {
			if (dist[i] > max) {
				max = dist[i];
				maxIndex = i;
			}
		}
		// All-zero distribution: same result as the former fall-through.
		return (max > 0) ? maxIndex : dist[0];
	case Attribute.NUMERIC:
		return dist[0];
	default:
		return -1;
	}
}
/**
 * Returns an enumeration describing the available options: -P (vigilance
 * rho), -K (ranking-to-class conversion) and -Rt (activity report file).
 * Note the Vector is presized to 2 but holds 3 options; harmless, it grows.
 *
 * @return an enumeration of all the available options.
 */
public Enumeration listOptions() {
Vector newVector = new Vector(2);
newVector.addElement(
new Option("\tChange generalization parameter Rho\n",
"P", 0,"-P"));
newVector.addElement(
new Option("\tUse ranking to class function special dev. for ARAM.\n",
"K", 0,"-K"));
newVector.addElement(
new Option("\tUse report file to output debug specific information of ARAM.\n",
"Rt", 0,"-Rt"));
return newVector.elements();
}
/**
 * Parses a given list of options. Recognized options:<p>
 *
 * -P &lt;rho&gt; — vigilance parameter roa (unchanged when absent).<br>
 * -K — use the ranking-to-class conversion in distributionForInstance.<br>
 * -Rt &lt;file&gt; — write an activity report to the given file.<p>
 *
 * Remaining options are delegated to the superclass.
 *
 * @param options the list of options as an array of strings
 * @exception Exception if an option is not supported
 */
public void setOptions(String[] options) throws Exception {
roa = (Utils.getOptionPos("P",options) >= 0) ? Double.parseDouble(Utils.getOption("P", options)) : roa;
m_userankstoclass= (Utils.getOptionPos("K",options) >= 0);
activity_report = (Utils.getOptionPos("Rt",options) >= 0) ? Utils.getOption("Rt", options) : "";
super.setOptions(options);
}
/**
 * Gets the current settings of the classifier.
 *
 * Bug fix: the previous implementation returned the hard-coded example
 * string "-P 0.9 -K" regardless of the actual configuration. This version
 * reflects the live field values so that setOptions(getOptions())
 * round-trips.
 *
 * @return an array of strings suitable for passing to setOptions
 */
public String [] getOptions() {
	ArrayList<String> options = new ArrayList<String>();
	options.add("-P");
	options.add(String.valueOf(roa));
	if (m_userankstoclass) {
		options.add("-K");
	}
	if (activity_report != null && activity_report.length() > 0) {
		options.add("-Rt");
		options.add(activity_report);
	}
	return options.toArray(new String[0]);
}
/**
 * Returns a short textual description of the classifier.
 *
 * @return a description of the classifier as a string.
 */
public String toString() {
	return "ML ARAM classifier";
}
/**
 * Resonance update of an existing category: the A-side weights move
 * towards the fuzzy AND with the input (they can only shrink) and the
 * cached sum sweightsA is rebuilt during the sweep. The B-side is either
 * accumulated (weightblearnmethod == 0) or updated with the same
 * fuzzy-AND rule.
 *
 * @param data complement-coded features
 * @param labels complement-coded labels
 * @param category index of the category to update
 * @return 1 when any B-weight changed, 0 otherwise
 */
private double ARAMm_Update_Weights(double[] data, double[] labels,
int category) {
double weightChange = 0;
// The cached A-weight sum is recomputed from scratch below.
sweightsA[category]=0;
for (int i = 0; i < numFeatures; i++) {
// Fuzzy-AND learning: weights only move towards smaller inputs.
if (data[i] < weightsA[category][i]){
weightsA[category][i] = (learningRate * data[i])
+ (1 - learningRate) * weightsA[category][i];
}
sweightsA[category]+=weightsA[category][i];
}
for (int i = 0; i < numClasses; i++) {
if(weightblearnmethod== 0){
// Accumulation mode: label evidence is summed up.
weightsB[category][i] = labels[i] + weightsB[category][i];
weightChange = 1;
}else{
// Fuzzy-AND mode on the label side.
if ( labels[i]< weightsB[category][i]){
weightsB[category][i] = (learningRate * labels[i] )+ (1 - learningRate) *weightsB[category][i];
weightChange = 1;
}
}
}
return weightChange;
}
/**
 * Appends a fresh, uncommitted category: A-weights all ones (with the
 * cached sum sweightsA0 computed at initialization), B-weights all zeros.
 */
private void ARAMm_Add_New_Category() {
weightsA = Arrays.copyOf(weightsA, numCategories + 1);
sweightsA = Arrays.copyOf(sweightsA, numCategories + 1);
weightsB = Arrays.copyOf(weightsB, numCategories + 1);
weightsA[numCategories] = new double[numFeatures];
weightsB[numCategories] = new double[numClasses];
Arrays.fill(weightsA[numCategories], 1.0);
Arrays.fill(weightsB[numCategories], 0.0);
sweightsA[numCategories]=sweightsA0;
numCategories += 1;
}
/** Sums all entries of the given array. */
private double sumArray(double[] arr) {
	double total = 0;
	for (double v : arr) {
		total += v;
	}
	return total;
}
/**
 * Main method for testing this class: runs a MEKA evaluation experiment.
 *
 * Bug fix: the original instantiated {@code WvARAM} here, which appears to
 * be a copy-paste slip — the entry point of ARAMNetworkfast should
 * evaluate ARAMNetworkfast itself.
 *
 * @param argv the options
 */
public static void main(String [] argv) {
	try {
		Evaluation.runExperiment((MultiLabelClassifier) new ARAMNetworkfast(), argv);
	} catch (Exception e) {
		e.printStackTrace();
		System.err.println(e.getMessage());
	}
}
/**
 * Immutable triple (normalized activation, category index, raw match)
 * that sorts in DESCENDING order of the activation value.
 */
class SortPair2 implements Comparable<SortPair2> {
	private final int originalIndex;
	private final double value;
	private final double rawvalue;

	public SortPair2(double value, int originalIndex, double rawvalue) {
		this.value = value;
		this.originalIndex = originalIndex;
		this.rawvalue = rawvalue;
	}

	/** Descending order: larger activations sort first. */
	public int compareTo(SortPair2 other) {
		return Double.compare(other.value, value);
	}

	/** @return the category index this pair was created for */
	public int getOriginalIndex() {
		return originalIndex;
	}

	/** @return the normalized activation used for sorting */
	public double getValue() {
		return value;
	}

	/** @return the raw (unnormalized) match value */
	public double getRawValue() {
		return rawvalue;
	}
}
@Override
public double[][] distributionForInstanceM(Instances i) throws Exception {
// Batch prediction is not implemented for this classifier.
return null;
}
@Override
public boolean isThreaded() {
// Threading is not supported by this classifier; always reports false.
return false;
}
@Override
public void setThreaded(boolean setv) {
// Intentionally a no-op: this implementation has no threaded mode.
}
@Override
public String getModel() {
// No textual model representation is provided.
return null;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/neurofuzzy/ARTCluster.java
|
package meka.classifiers.multilabel.neurofuzzy;
import java.util.Arrays;
import meka.classifiers.multilabel.*;
import weka.core.Instance;
import weka.core.Instances;
/*
Copyright (C) 2016 Fernando Benites
@author Fernando Benites
*/
//based on ARTCluster_v3.py from DAMIART
/**
 * Flat fuzzy-ART clustering of sample vectors (port of ARTCluster_v3.py
 * from DAMIART); used by HARAMNetwork to group ARAM category prototypes.
 *
 * Bug fixes relative to the original:
 *  1. learn() computed every sample's activation against data[0] instead
 *     of the current sample data[i].
 *  2. learn() never signalled resonance after a successful weight update,
 *     so the search loop could spin forever; full clusters were also
 *     updated despite being skipped.
 *  3. update_Weights()/addsample() maintained the cached prototype sum
 *     weightsAsum with the wrong index (numCategories instead of the
 *     updated category) and without subtracting the old weight.
 */
public class ARTCluster {

	/** Input-side vigilance: minimum match for a sample to join a cluster. */
	public double vigilance = 0.7;
	/** One prototype (weight vector) per cluster. */
	double[][] weightsA = null;
	/** Cached sum of each prototype's components (activation denominator). */
	double[] weightsAsum = null;
	int numCategories = 0;
	int numFeatures = 0;
	/** Fast learning by default: prototypes jump straight to the fuzzy AND. */
	double learningRate = 1;
	/** Ids of the samples absorbed by each cluster. */
	Integer[][] Ids = null;
	/** Maximum number of members a single cluster may hold. */
	int maxidsperc = 100000;
	int maxids = 3;
	final int maxNumCategories = 100000;

	/**
	 * @param fnumFeatures dimensionality of the samples to cluster
	 * @param fro vigilance threshold in [0,1]
	 */
	public ARTCluster(int fnumFeatures, double fro) {
		numFeatures = fnumFeatures;
		vigilance = fro;
	}

	/**
	 * Clusters the given samples. Every sample is matched against all
	 * prototypes; the best-matching cluster whose input-side match passes
	 * the vigilance absorbs it, otherwise a new cluster is created.
	 *
	 * @param data one sample per row
	 * @param ids one id per sample, recorded as cluster membership
	 */
	public void learn(double[][] data, int[] ids) {
		if (weightsA == null || weightsA.length == 0) {
			// Seed the network with the first sample as its own cluster.
			addsample(data[0], ids[0], -1);
		}
		// Parameter to assure a prototype does not get too big.
		if (maxids == 0) {
			maxids = 3;
		}
		for (int i = 0; i < data.length; i++) {
			if (i % 1000 == 0) {
				System.out.println("Processing" + i + weightsA.length);
			}
			double[] activationn = new double[weightsA.length];
			double[] activationi = new double[weightsA.length];
			// BUG FIX (1): use the current sample, not data[0].
			double[] fc = data[i];
			double fcs = sum(fc);
			SortPair[] sortedActivations = new SortPair[weightsA.length];
			for (int i2 = 0; i2 < weightsA.length; i2++) {
				double minnfs = ART_Calculate_Match(weightsA[i2], fc);
				// Match normalized by the input (vigilance test) and by the
				// prototype (choice / ordering).
				activationi[i2] = minnfs / fcs;
				activationn[i2] = minnfs / weightsAsum[i2];
				sortedActivations[i2] = new SortPair(activationn[i2], i2);
			}
			if (max(activationn) == 0) {
				// Nothing overlaps at all: open a new cluster right away.
				addsample(data[i], ids[i], fcs);
				continue;
			}
			java.util.Arrays.sort(sortedActivations); // descending
			int currentSortedIndex = 0;
			boolean resonance = false;
			while (!resonance) {
				int currentCategory = sortedActivations[currentSortedIndex]
						.getOriginalIndex();
				if (activationi[currentCategory] > vigilance) {
					if (currentCategory == numCategories - 1) {
						if (currentSortedIndex == maxNumCategories) {
							System.out
									.println("WARNING: The maximum number of categories has been reached.");
							resonance = true;
						} else {
							addsample(data[i], ids[i], -1);
							resonance = true;
						}
					} else if (Ids[currentCategory].length > maxidsperc) {
						// BUG FIX (2): a full cluster is skipped, not updated.
						currentSortedIndex += 1;
					} else {
						update_Weights(data[i], ids[i], currentCategory);
						// BUG FIX (2): stop searching once a cluster absorbed
						// the sample (previously looped forever).
						resonance = true;
					}
				} else {
					if (currentCategory == numCategories - 1) {
						// Even the uncommitted slot failed: open a new cluster.
						addsample(data[i], ids[i], -1);
						resonance = true;
					} else {
						currentSortedIndex += 1;
					}
				}
			}
		}
	}

	/**
	 * Fuzzy-AND match |a ^ b| normalized by |Data|; returns 0 on length
	 * mismatch or an all-zero input.
	 */
	private double ART_Calculate_Match(double[] Data, double[] fweights) {
		int lnumFeatures = Data.length;
		if (lnumFeatures != fweights.length) {
			return 0.0;
		}
		double summatch = 0;
		double suminput = 0;
		for (int j = 0; j < lnumFeatures; j++) {
			summatch += Math.min(Data[j], fweights[j]);
			suminput += Data[j];
		}
		if (suminput == 0) {
			return 0.0;
		}
		return summatch / suminput;
	}

	/**
	 * Creates a new cluster seeded with the given sample.
	 *
	 * @param sample the sample vector
	 * @param id its membership id
	 * @param sum precomputed component sum of the resulting prototype, or a
	 *            negative value to have it recomputed here
	 */
	public void addsample(double[] sample, int id, double sum) {
		numFeatures = sample.length;
		if (weightsA == null) {
			weightsA = new double[1][numFeatures];
			Arrays.fill(weightsA[0], 1);
			weightsAsum = new double[1];
			Ids = new Integer[1][1];
			Ids[numCategories][0] = id;
		} else {
			weightsA = Arrays.copyOf(weightsA, numCategories + 1);
			weightsA[numCategories] = new double[numFeatures];
			weightsAsum = Arrays.copyOf(weightsAsum, numCategories + 1);
			Ids = Arrays.copyOf(Ids, numCategories + 1);
			Ids[numCategories] = new Integer[1];
			Ids[numCategories][0] = id;
		}
		int category = numCategories;
		if (sum < 0) {
			// Recompute the prototype sum while folding the sample in.
			weightsAsum[numCategories] = 0;
			for (int i = 0; i < numFeatures; i++) {
				if (sample[i] < weightsA[category][i]) {
					weightsA[category][i] = (learningRate * sample[i])
							+ (1 - learningRate) * weightsA[category][i];
				}
				// BUG FIX (3): every component contributes to the cached sum,
				// not only the ones that just shrank.
				weightsAsum[numCategories] += weightsA[category][i];
			}
		} else {
			weightsAsum[numCategories] = sum;
			for (int i = 0; i < numFeatures; i++) {
				if (sample[i] < weightsA[category][i]) {
					weightsA[category][i] = (learningRate * sample[i])
							+ (1 - learningRate) * weightsA[category][i];
				}
			}
		}
		numCategories += 1;
	}

	/**
	 * Folds a sample into an existing cluster (fuzzy-AND learning) and
	 * records its id as a member.
	 */
	public void update_Weights(double[] sample, int id, int category) {
		for (int i = 0; i < numFeatures; i++) {
			double old = weightsA[category][i];
			if (sample[i] < old) {
				double updated = (learningRate * sample[i]) + (1 - learningRate) * old;
				weightsA[category][i] = updated;
				// BUG FIX (3): keep weightsAsum[category] equal to the sum of
				// weightsA[category] by applying the delta to the right row
				// (previously weightsAsum[numCategories] was blindly increased).
				weightsAsum[category] += updated - old;
			}
		}
		int maxl = Ids[category].length;
		Ids[category] = Arrays.copyOf(Ids[category], maxl + 1);
		Ids[category][maxl] = id;
	}

	/** Computes the match of the input against every prototype. */
	public SortPair[] activate(double[] data) {
		SortPair[] activations = new SortPair[weightsA.length];
		for (int i = 0; i < weightsA.length; i++) {
			activations[i] = new SortPair(ART_Calculate_Match(data, weightsA[i]), i);
		}
		return activations;
	}

	/** Sum of all components of the vector. */
	double sum(double[] data) {
		double result = 0;
		for (int i = 0; i < data.length; i++) {
			result += data[i];
		}
		return result;
	}

	/** Largest component of the vector (0 when all entries are <= 0). */
	double max(double[] data) {
		double result = 0;
		for (int i = 0; i < data.length; i++) {
			if (result < data[i])
				result = data[i];
		}
		return result;
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/neurofuzzy/HARAMNetwork.java
|
/*
*
*
* Adapted from NaiveBayes.java
*
* Copyright (C) 2016 Fernando Benites
* @author Fernando Benites
*/
package meka.classifiers.multilabel.neurofuzzy;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
import java.util.Arrays;
import meka.classifiers.multilabel.*;
import weka.classifiers.Classifier;
import meka.classifiers.multilabel.Evaluation;
//import weka.classifiers.Evaluation;
import weka.classifiers.UpdateableClassifier;
import weka.core.*;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
/**
* ****REPLACE THE FOLLOWING WITH SIMILAR INFORMATION.
* Class for a Naive Bayes classifier using estimator classes. Numeric
* estimator precision values are chosen based on analysis of the
* training data. For this reason, the classifier is not an
* UpdateableClassifier (which in typical usage are initialized with zero
* training instances) -- if you need the UpdateableClassifier functionality,
* use the NaiveBayesUpdateable classifier. The NaiveBayesUpdateable
* classifier will use a default precision of 0.1 for numeric attributes
* when buildClassifier is called with zero training instances.
* <p>
* For more information on Naive Bayes classifiers, see<p>
*
* George H. John and Pat Langley (1995). <i>Estimating
* Continuous Distributions in Bayesian Classifiers</i>. Proceedings
* of the Eleventh Conference on Uncertainty in Artificial
* Intelligence. pp. 338-345. Morgan Kaufmann, San Mateo.<p>
*
* Valid options are:<p>
*
* -K <br>
* Use kernel estimation for modelling numeric attributes rather than
* a single normal distribution.<p>
*
* -D <br>
* Use supervised discretization to process numeric attributes.<p>
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @author Rushi Bhatt (rushi@cns.bu.edu)
* @version $Revision: 1.16 $
* Modified by Rushi for use as a CN710 template
*/
public class HARAMNetwork extends ARAMNetworkClass
implements OptionHandler, WeightedInstancesHandler, UpdateableClassifier,
Randomizable, TechnicalInformationHandler, MultiLabelClassifier{
/**
*
*/
private static final long serialVersionUID = -1532576398976144661L;
/**
*
*/
//**** THIS IS WHERE CLASSIFIER WEIGHTS ETC GO ****
//define stuff like weight matrices, classifier parameters etc.
//e.g., protected double rho_a_bar=0.0;
double clustervig=0.7;
ARTCluster ARTCs=null;
int[] pids =null;
private boolean relearned=false;
/**
 * Builds a hierarchical ARAM network.
 * @param fnumFeatures number of (complement-coded) input features
 * @param fnumClasses number of (complement-coded) label outputs
 * @param fro vigilance parameter rho for the input field
 * @param fthreshold activation-spread threshold used during prediction
 * @param cvig vigilance used for the ARTCluster prototype clustering
 */
public HARAMNetwork(int fnumFeatures, int fnumClasses, double fro, double fthreshold, double cvig) {
initARAM(fnumFeatures, fnumClasses, fro, fthreshold, cvig);
}
/** No-arg constructor; the network is initialized lazily in buildClassifier(). */
public HARAMNetwork(){
}
// Initializes the network with a single uncommitted category (A-weights
// all ones, B-weights all zeros) and stores the clustering vigilance.
// NOTE(review): the vigilance argument fro is not stored here — roa is
// presumably inherited from the superclass default or set via options;
// confirm this is intended.
private void initARAM(int fnumFeatures, int fnumClasses, double fro, double fthreshold, double cvig){
numFeatures = fnumFeatures;
numClasses = fnumClasses;
threshold = fthreshold;
weightsA = new double[1][numFeatures];
Arrays.fill(weightsA[0], 1);
weightsB = new double[1][numClasses];
Arrays.fill(weightsB[0], 0);
numCategories = 1;
clustervig=cvig;
}
/**
 * Trains the network on the given multi-label dataset by presenting each
 * instance once (in the order given by {@code order}) to updateClassifier.
 *
 * @param D set of instances serving as training data; the first
 *          {@code D.classIndex()} attributes are the labels
 * @exception Exception if the classifier has not been generated successfully
 */
public void buildClassifier(Instances D) throws Exception {
int L = D.classIndex();
// Features and labels are complement coded, hence the factor 2.
int featlength = (D.numAttributes() -L)*2;
int numSamples = D.numInstances();
int classlength = L * 2;
// Default presentation order is simply 0..n-1.
if (this.order==null){
order = new ArrayList<Integer>();
for (int j=0; j<D.numInstances();j++){
order.add(j);
}
}
if (numFeatures==-1){
// Lazy initialization on the first call.
initARAM( featlength,classlength ,roa , threshold,clustervig);
}else{
// A network built for a different geometry is silently left untouched.
if (featlength != numFeatures) {
return ;
}
if (classlength != numClasses) {
return ;
}}
// Incremental training: present every instance once.
for(int i=0; i<D.numInstances();i++){
Instance instance = D.get(order.get(i));
updateClassifier(instance);
}
System.out.println("Training done, used "+numCategories+" neurons with rho ="+roa+".");
}
/**
 * Presents one training instance to the network (fuzzy ARTMAP learning).
 * Features and labels are complement coded; categories are tried in order
 * of decreasing activation, and the first one passing BOTH vigilance tests
 * (input side roa, label side rob) is updated. If the winner is the
 * uncommitted category, a new one is committed instead. Sets
 * {@code relearned} so prediction re-clusters the prototypes.
 *
 * @param instance the new training instance to include in the model
 * @exception Exception if the instance could not be incorporated in the model
 */
public void updateClassifier(Instance instance) throws Exception {
//called once for each instance.
if(!learningphase){
return;
}
// Mark the prototype clustering as stale (rebuilt lazily on predict).
relearned=true;
int num_classes=(int) (0.5 * numClasses);
int num_features=(int) (0.5 * numFeatures);
double[] data = new double[numFeatures];
double[] labels = new double[numClasses];
int numChanges = 0;
if (!instance.classIsMissing()) {
// Complement code the features.
for (int j = 0; j <num_features; j++) {
data[j] = instance.value(num_classes+j);
data[j+num_features] = 1 - data[j];
}
// Complement code the labels.
for (int j = 0; j < num_classes ; j++) {
labels[j] = instance.value(j);
labels[j+num_classes] = 1 - labels[j];
}
SortPair[] cateacti = ARTActivateCategories(data);
java.util.Arrays.sort(cateacti);
boolean resonance = false;
int currentSortedIndex = 0;
int currentCategory = -1;
double matchA = 0;
double matchB = 0;
// Search categories in order of decreasing activation.
while (!resonance) {
currentCategory = cateacti[currentSortedIndex]
.getOriginalIndex();
matchA = ART_Calculate_Match(data, weightsA[currentCategory]);
// An all-zero B-vector marks the uncommitted category: it matches anything.
if (sumArray(weightsB[currentCategory]) == 0) {
matchB = 1;
} else {
matchB = ART_Calculate_Match(labels,
weightsB[currentCategory]);
}
if (matchA >= roa && matchB >= rob) {
if (currentCategory == numCategories -1) {
if (currentSortedIndex == maxNumCategories) {
System.out
.println("WARNING: The maximum number of categories has been reached.");
resonance = true;
} else {
// Commit the uncommitted category with this sample.
for (int j = 0; j < data.length; j++) {
weightsA[currentCategory][j] = data[j];
}
for (int j = 0; j < weightsB[currentCategory].length; j++) {
weightsB[currentCategory][j] = labels[j];
}
ARAMm_Add_New_Category();
numChanges = numChanges + 1;
resonance = true;
}
} else {
// Resonance with an existing category: fuzzy-AND weight update.
double weightChange = ARAMm_Update_Weights(data,
labels, currentCategory);
if (weightChange == 1) {
numChanges += 1;
}
resonance = true;
}
} else {
// Vigilance failed: try the next-best category.
currentSortedIndex += 1;
resonance = false;
}
}
}
}
/**
 * (Re)builds the hierarchical ART clustering over the category
 * prototypes. The prototype id array is refreshed whenever its length
 * no longer matches the number of categories, then a new ARTCluster is
 * trained on the A-side weights and installed.
 */
public void PrepareHClusters(){
	int numPrototypes = weightsA.length;
	// Re-initialise prototype ids 0..n-1 when missing or stale.
	if (pids == null || pids.length != numPrototypes) {
		pids = new int[numPrototypes];
		for (int idx = 0; idx < numPrototypes; idx++) {
			pids[idx] = idx;
		}
	}
	ARTCluster cluster = new ARTCluster(numFeatures, clustervig);
	cluster.learn(weightsA, pids);
	ARTCs = cluster;
	relearned = false;
}
//****THIS IS THE CLASSIFICATION ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
//****classifyInstance() uses this method, so implement the
//****nuts-and-bolts of your algorithm here.
/**
 * Calculates the class membership probabilities for the given test
 * instance.
 *
 * <p>Rebuilds the cluster hierarchy if training occurred since the last
 * call, activates the clusters, gathers the categories of the top
 * clusters, selects the most-activated categories up to a relative
 * activation-drop threshold (no winner-takes-all), and blends their
 * B-side weights into a per-label ranking.
 *
 * @param instance the instance to be classified
 * @return predicted class probability distribution (or a thresholded
 *         0/1 vector when m_userankstoclass is set)
 * @exception Exception if there is a problem generating the prediction
 */
public double[] distributionForInstance(Instance instance) throws Exception {
	// Lazily refresh the cluster tree after any weight update.
	if (relearned==true)
	{
		PrepareHClusters();
	}
	int num_classes=(int) (0.5 * numClasses);
	int num_features=(int) (0.5 * numFeatures);
	double[] dist = new double[num_classes]; // NOTE(review): never used below
	double[] currentData = new double[numFeatures];
	double[] ranking = new double[num_classes];
	// Complement-code the feature part of the instance.
	for (int j = 0; j < num_features; j++) {
		currentData[j] = instance.value(num_classes+j);
		currentData[num_features+j] = 1 - currentData[j];
	}
	SortPair[] sortedActClust=ARTCs.activate(currentData);
	List<Integer> ids=new ArrayList<Integer>();
	java.util.Arrays.sort(sortedActClust);
	// Collect the category ids of the best cluster ...
	int cid=sortedActClust[0].getOriginalIndex();
	int numberofids=0;
	for (int k=0; k<ARTCs.Ids[cid].length;k++){
		ids.add(ARTCs.Ids[cid][k]);
	}
	numberofids+=ARTCs.Ids[cid].length;
	// ... plus those of the next (maxids - 1) clusters.
	int c1=1;
	while(c1<ARTCs.maxids){
		cid=sortedActClust[c1].getOriginalIndex();
		for (int k=0; k<ARTCs.Ids[cid].length;k++){
			ids.add(ARTCs.Ids[cid][k]);
		}
		numberofids+=ARTCs.Ids[cid].length;
		c1+=1;
	}
	// Activate only the gathered categories and sort by activation.
	SortPair[] sortedActivations = ARTActivateCategories(currentData, ids, numberofids);
	java.util.Arrays.sort(sortedActivations);
	double diff_act = sortedActivations[0].getValue()
			- sortedActivations[sortedActivations.length - 1].getValue();
	// Keep categories until the relative activation drop exceeds
	// threshold * (overall activation spread).
	int largest_activ = 1;
	double activ_change = 0;
	for (int i = 1; i < sortedActivations.length; i++) {
		activ_change = (sortedActivations[0].getValue() - sortedActivations[i]
				.getValue())
				/ sortedActivations[0].getValue();
		if (activ_change > threshold * diff_act) {
			break;
		}
		largest_activ = largest_activ + 1;
	}
	double[] best_matches = new double[largest_activ];
	java.util.Arrays.fill(best_matches, 1);
	for (int i = 0; i < largest_activ; i++) {
		best_matches[i] = sortedActivations[i].getValue();
	}
	double sum_mat = sumArray(best_matches);
	int currentCategory = 0;
	// Expose which neurons fired and how strongly (instance fields).
	this.neuronsactivated=new int[largest_activ];
	this.neuronsactivity=new double[largest_activ];
	for (int i = 0; i < largest_activ; i++) {
		this.neuronsactivity[i]=best_matches[i];
		// Normalise activations so the blend weights sum to 1.
		best_matches[i] = best_matches[i] / sum_mat;
		currentCategory = sortedActivations[i].getOriginalIndex();
		this.neuronsactivated[i]=currentCategory;
		// Fill return vector with weightB values
		for (int j = 0; j < num_classes; j++) {
			ranking[j] = ranking[j]
					+ best_matches[i] * weightsB[currentCategory][j];
		}
	}
	if(m_userankstoclass) {
		return ARAMm_Ranking2Class(ranking);
	}
	return ranking;
}
/**
 * Converts a label ranking into a binary class vector by cutting the
 * ranking at its largest score gap: every label ranked above the
 * biggest drop between consecutive (sorted) scores is set to 1.
 *
 * @param rankings per-label ranking scores
 * @return 0/1 vector with 1 for each label above the cut point
 */
public double[] ARAMm_Ranking2Class(double[] rankings) {
	int numLabels = rankings.length;
	double[] classes = new double[numLabels];

	// Pair each score with its label index and sort (SortPair order).
	SortPair[] sortedRanks = new SortPair[numLabels];
	for (int i = 0; i < numLabels; i++) {
		sortedRanks[i] = new SortPair(rankings[i], i);
	}
	java.util.Arrays.sort(sortedRanks);

	// Gap between each adjacent pair of sorted scores, tagged with the
	// position of the lower-ranked element.
	SortPair[] gaps = new SortPair[numLabels - 1];
	for (int i = 1; i < numLabels; i++) {
		gaps[i - 1] = new SortPair(sortedRanks[i - 1].getValue() - sortedRanks[i].getValue(), i);
	}
	java.util.Arrays.sort(gaps);

	// Everything ranked before the widest gap becomes a positive label.
	int cut = gaps[0].getOriginalIndex();
	for (int i = 0; i < cut; i++) {
		classes[sortedRanks[i].getOriginalIndex()] = 1;
	}
	return classes;
}
/**
 * Computes the choice (activation) value of every category for the
 * given complement-coded input: |min(input, w)| / (alpha + |w|), with
 * L1 norms.
 *
 * @param Data complement-coded feature vector of length numFeatures
 * @return one SortPair (activation, category index) per category
 */
private SortPair[] ARTActivateCategories(double[] Data) {
	SortPair[] catacti = new SortPair[numCategories];
	double[] matchVector = new double[numFeatures];
	for (int i = 0; i < numCategories; i++) {
		double sumvector = 0;
		double sumweight = 0;
		for (int j = 0; j < numFeatures; j++) {
			// Fuzzy AND: component-wise minimum of input and weights.
			matchVector[j] = ((Data[j] < weightsA[i][j]) ? Data[j]
					: weightsA[i][j]);
			sumvector += matchVector[j];
			sumweight += weightsA[i][j];
		}
		// alpha is the choice parameter; it also guards the division.
		catacti[i] = new SortPair(sumvector / (alpha + sumweight), i);
	}
	return catacti;
}
/**
 * Computes the choice (activation) value only for the categories listed
 * in {@code ids} (members of the activated clusters).
 *
 * @param Data complement-coded feature vector
 * @param ids category indices to activate
 * @param maxnumberids number of entries of {@code ids} to process.
 *        NOTE(review): the caller always passes ids.size(); if it were
 *        ever smaller, the trailing entries of the returned array would
 *        stay null and break the subsequent Arrays.sort.
 * @return one SortPair (activation, original category index) per id
 */
private SortPair[] ARTActivateCategories(double[] Data,List<Integer> ids, int maxnumberids ) {
	SortPair[] catacti = new SortPair[ids.size()];
	double[] matchVector = new double[numFeatures];
	for (int i1 = 0; i1 < maxnumberids; i1++) {
		double sumvector = 0;
		double sumweight = 0;
		int i=(int)ids.get(i1);
		for (int j = 0; j < numFeatures; j++) {
			// Fuzzy AND of input and the category's A-side weights.
			matchVector[j] = ((Data[j] < weightsA[i][j]) ? Data[j]
					: weightsA[i][j]);
			sumvector += matchVector[j];
			sumweight += weightsA[i][j];
		}
		catacti[i1] = new SortPair(sumvector / (alpha + sumweight), i);
	}
	return catacti;
}
// ****YOU SHOULDN'T NEED TO CHANGE THIS
/**
 * Classifies the given test instance. The instance has to belong to a
 * dataset when it's being classified. Note that a classifier MUST
 * implement either this or distributionForInstance().
 *
 * @param instance the instance to be classified
 * @return the predicted most likely class for the instance or
 * Instance.missingValue() if no prediction is made
 * @exception Exception if an error occurred during the prediction
 */
public double classifyInstance(Instance instance) throws Exception {
	double[] dist = distributionForInstance(instance);
	if (dist == null) {
		throw new Exception("Null distribution predicted");
	}
	switch (instance.classAttribute().type()) {
	case Attribute.NOMINAL:
		// Arg-max over the predicted distribution.
		double max = 0;
		int maxIndex = 0;
		for (int i = 0; i < dist.length; i++) {
			if (dist[i] > max) {
				maxIndex = i;
				max = dist[i];
			}
		}
		if (max > 0) {
			return maxIndex;
		} else {
			//return Instance.missingValue();
		}
		// NOTE(review): there is no break here — when max == 0 control
		// falls through to the NUMERIC case and returns dist[0]. The
		// commented-out missingValue() return above suggests this
		// fall-through may be unintentional; confirm before changing.
	case Attribute.NUMERIC:
		return dist[0];
	default:
		return -1;
	}
}
//****ANY OPTIONS/PARAMETERS GO HERE****
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
public Enumeration listOptions() {
	// Fix: use a typed Vector sized for the four options actually added
	// (the raw Vector was sized 2).
	Vector<Option> newVector = new Vector<Option>(4);
	newVector.addElement(
			new Option("\tChange generalization parameter Rho\n",
					"P", 0,"-P"));
	newVector.addElement(
			new Option("\tChange generalization parameter Rho for Clusters\n",
					"PC", 0,"-PC"));
	newVector.addElement(
			new Option("\tChange threshold to select activated neurons (no-winner-takes-all)\n",
					"THR", 0,"-THR"));
	newVector.addElement(
			new Option("\tUse ranking to class function special dev. for ARAM.\n",
					"K", 0,"-K"));
	return newVector.elements();
}
//****OPTIONS HERE SHOULD MATCH THOSE ADDED ABOVE****
/**
 * Parses a given list of options. Valid options are:<p>
 *
 * -P &lt;double&gt; <br>
 * Set the vigilance parameter rho used for matching (roa).<p>
 *
 * -PC &lt;double&gt; <br>
 * Set the vigilance parameter rho used for the cluster layer.<p>
 *
 * -THR &lt;double&gt; <br>
 * Set the threshold used to select activated neurons
 * (no-winner-takes-all).<p>
 *
 * -K <br>
 * Use the ranking-to-class function specially developed for ARAM.
 *
 * @param options the list of options as an array of strings
 * @exception Exception if an option is not supported
 */
public void setOptions(String[] options) throws Exception {
	// Each parameter keeps its previous value when its option is absent.
	roa = (Utils.getOptionPos("P",options) >= 0) ? Double.parseDouble(Utils.getOption("P", options)) : roa;
	clustervig = (Utils.getOptionPos("PC",options) >= 0) ? Double.parseDouble(Utils.getOption("PC", options)) : clustervig;
	threshold = (Utils.getOptionPos("THR",options) >= 0) ? Double.parseDouble(Utils.getOption("THR", options)) : threshold;
	m_userankstoclass= (Utils.getOptionPos("K",options) >= 0);
	super.setOptions(options);
}
//****MORE OPTION PARSING STUFF****
/**
 * Gets the current settings of the classifier.
 *
 * @return an array of strings suitable for passing to setOptions
 */
public String [] getOptions() {
	Vector<String> result = new Vector<String>();
	try{
		result.add("-P");
		result.add(Double.toString(roa));
		result.add("-THR");
		result.add(Double.toString(threshold));
		result.add("-PC");
		result.add(Double.toString(clustervig));
		// Fix: emit the -K flag when set, so that
		// setOptions(getOptions()) round-trips the ranking-to-class
		// setting (it was parsed in setOptions but never written here).
		if (m_userankstoclass) {
			result.add("-K");
		}
	}catch (Exception ex) {
		System.out.println(ex.getMessage());
	}
	return (String[]) result.toArray(new String[result.size()]);
}
//****ANY INFORMATION LIKE NO. OF UNITS ETC PRINTED HERE
/**
 * Returns a description of the classifier.
 *
 * @return a description of the classifier as a string.
 */
public String toString() {
	// Only a fixed label is reported; no model statistics are printed.
	return "ML ARAM classifier";
}
//****MORE GUI RELATED STUFF AND PARAMETER ACCESS METHODS
// /**
// * Returns the tip text for this property
// * @return tip text for this property suitable for
// * displaying in the explorer/experimenter gui
// */
// public String useKernelEstimatorTipText() {
// return "Use a kernel estimator for numeric attributes rather than a "
// +"normal distribution.";
// }
// /**
// * Gets if kernel estimator is being used.
// *
// * @return Value of m_UseKernelEstimatory.
// */
// public boolean getUseKernelEstimator() {
//
// return m_UseKernelEstimator;
// }
//
// /**
// * Sets if kernel estimator is to be used.
// *
// * @param v Value to assign to m_UseKernelEstimatory.
// */
// public void setUseKernelEstimator(boolean v) {
//
// m_UseKernelEstimator = v;
// if (v) {
// setUseSupervisedDiscretization(false);
// }
// }
//
// /**
// * Returns the tip text for this property
// * @return tip text for this property suitable for
// * displaying in the explorer/experimenter gui
// */
// public String useSupervisedDiscretizationTipText() {
// return "Use supervised discretization to convert numeric attributes to nominal "
// +"ones.";
// }
//
// /**
// * Get whether supervised discretization is to be used.
// *
// * @return true if supervised discretization is to be used.
// */
// public boolean getUseSupervisedDiscretization() {
//
// return m_UseDiscretization;
// }
//
// /**
// * Set whether supervised discretization is to be used.
// *
// * @param newblah true if supervised discretization is to be used.
// */
// public void setUseSupervisedDiscretization(boolean newblah) {
//
// m_UseDiscretization = newblah;
// if (newblah) {
// setUseKernelEstimator(false);
// }
// }
/**
 * Updates the A- and B-side weights of the resonating category.
 * (The previous javadoc here wrongly described this as the main
 * method for testing the class.)
 *
 * @param data complement-coded feature vector
 * @param labels complement-coded label vector
 * @param category index of the category to update
 * @return 1 if any B-side weight changed, 0 otherwise.
 *         NOTE(review): A-side updates alone never set the return flag;
 *         confirm whether that is intended.
 */
private double ARAMm_Update_Weights(double[] data, double[] labels,
		int category) {
	double weightChange = 0;
	// Feature weights move towards the fuzzy AND (min) of weight and
	// input, damped by the learning rate; they can only shrink.
	for (int i = 0; i < numFeatures; i++) {
		if (data[i] < weightsA[category][i]){
			weightsA[category][i] = (learningRate * data[i])
					+ (1 - learningRate) * weightsA[category][i];
		}
	}
	for (int i = 0; i < numClasses; i++) {
		if(weightblearnmethod== 0){
			// Method 0: accumulate label counts on the B side.
			weightsB[category][i] = labels[i] + weightsB[category][i];
			weightChange = 1;
		}else{
			// Otherwise: damped shrink towards the label vector.
			if ( labels[i]< weightsB[category][i]){
				weightsB[category][i] = (learningRate * labels[i] )+ (1 - learningRate) *weightsB[category][i];
				weightChange = 1;
			}
		}
	}
	return weightChange;
}
/**
 * Fuzzy ART match function: |min(data, weights)| / |data| using L1
 * norms. Returns 0 when the lengths differ or the input sums to zero.
 *
 * @param Data input vector
 * @param fweights weight vector of the category being tested
 * @return match value in [0, 1]
 */
private double ART_Calculate_Match(double[] Data, double[] fweights) {
	int n = Data.length;
	if (n != fweights.length) {
		return 0.0;
	}
	double intersection = 0;
	double inputNorm = 0;
	for (int j = 0; j < n; j++) {
		// Component-wise fuzzy AND (minimum).
		double m = (Data[j] < fweights[j]) ? Data[j] : fweights[j];
		intersection += m;
		inputNorm += Data[j];
	}
	return (inputNorm == 0) ? 0.0 : intersection / inputNorm;
}
/**
 * Appends a fresh uncommitted category: A-weights all ones, B-weights
 * all zeros, then bumps the category count.
 */
private void ARAMm_Add_New_Category() {
	int newSize = numCategories + 1;
	weightsA = Arrays.copyOf(weightsA, newSize);
	weightsB = Arrays.copyOf(weightsB, newSize);
	double[] freshA = new double[numFeatures];
	Arrays.fill(freshA, 1.0);
	weightsA[numCategories] = freshA;
	weightsB[numCategories] = new double[numClasses]; // zero-initialised
	numCategories = newSize;
}
/**
 * Sums the entries of the given array.
 *
 * @param arr the array to sum
 * @return the sum of all entries
 */
private double sumArray(double[] arr) {
	double total = 0;
	for (double v : arr) {
		total += v;
	}
	return total;
}
/**
 * Main method for testing this classifier from the command line.
 *
 * @param argv the options
 */
public static void main(String [] argv) {
	try {
		Evaluation.runExperiment(((MultiLabelClassifier) new HARAMNetwork()), argv);
	} catch (Exception e) {
		e.printStackTrace();
		System.err.println(e.getMessage());
	}
}
@Override
public boolean isThreaded() {
	// Threading is not supported by this implementation.
	return false;
}
@Override
public void setThreaded(boolean setv) {
	// Threading is not supported; the requested value is ignored.
}
@Override
public double[][] distributionForInstanceM(Instances i) throws Exception {
	// Batch prediction is not implemented; callers must use
	// distributionForInstance(Instance) one instance at a time.
	return null;
}
/** @return the vigilance parameter rho (roa) used for matching. */
public double getVigilancy(){
	return roa;
}
/** @param vigilancy new vigilance parameter rho (roa). */
public void setVigilancy(double vigilancy){
	roa=vigilancy;
}
/** @param fthreshold threshold for selecting activated neurons. */
public void setThreshold(double fthreshold){
	threshold=fthreshold;
}
/** @return the neuron-activation threshold. */
public double getThreshold(){
	return threshold;
}
/** @param fclustervig vigilance used by the cluster layer. */
public void setClusterVigilance(double fclustervig){
	clustervig=fclustervig;
}
/** @return the cluster-layer vigilance. */
public double getClusterVigilance(){
	return clustervig;
}
/**
 * Description to display in the GUI.
 *
 * @return the description
 */
//@Override
public String globalInfo() {
	return "Voted ML-HARAM. " + "For more information see:\n" + getTechnicalInformation().toString();
}
/**
 * Returns the bibliographic reference for this classifier.
 *
 * @return the technical information
 */
@Override
public TechnicalInformation getTechnicalInformation() {
	TechnicalInformation result;
	result = new TechnicalInformation(Type.INPROCEEDINGS);
	result.setValue(Field.AUTHOR, "Fernando Benites");
	result.setValue(Field.TITLE, "HARAM: a Hierarchical ARAM neural network for large-scale text classification.");
	// NOTE(review): the concatenated BOOKTITLE pieces below are missing
	// separating spaces ("...conjunction with" + "the IEEE..."); fix the
	// literals if the rendered citation matters.
	result.setValue(Field.BOOKTITLE, "HDM 2015, 3rd International Workshop on High "+
			"Dimensional Data Mining, In conjunction with"+
			"the IEEE International Conference on Data"+
			"Mining (IEEE ICDM 2015), 14 November 2015");
	result.setValue(Field.YEAR, "2015");
	return result;
}
@Override
public void setSeed(int seed) {
	// Randomness is not used by this classifier; the seed is ignored.
}
@Override
public int getSeed() {
	// No seed is stored; always reports 0.
	return 0;
}
@Override
public String getModel() {
	// No textual model representation is provided.
	return "";
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/neurofuzzy/ST.java
|
package meka.classifiers.multilabel.neurofuzzy;
/*************************************************************************
* Compilation: javac ST.java
* Execution: java ST
*
* Sorted symbol table implementation using a java.util.TreeMap.
* Does not allow duplicates.
*
* % java ST
*
*************************************************************************/
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.SortedMap;
import java.util.TreeMap;
/**
* The <tt>ST</tt> class represents an ordered symbol table of generic
* key-value pairs.
* It supports the usual <em>put</em>, <em>get</em>, <em>contains</em>,
* <em>delete</em>, <em>size</em>, and <em>is-empty</em> methods.
* It also provides ordered methods for finding the <em>minimum</em>,
* <em>maximum</em>, <em>floor</em>, and <em>ceiling</em>.
* It also provides a <em>keys</em> method for iterating over all of the keys.
* A symbol table implements the <em>associative array</em> abstraction:
* when associating a value with a key that is already in the symbol table,
* the convention is to replace the old value with the new value.
* Unlike {@link java.util.Map}, this class uses the convention that
* values cannot be <tt>null</tt>—setting the
* value associated with a key to <tt>null</tt> is equivalent to deleting the key
* from the symbol table.
* <p>
* This implementation uses a balanced binary search tree. It requires that
* the key type implements the <tt>Comparable</tt> interface and calls the
* <tt>compareTo()</tt> and method to compare two keys. It does not call either
* <tt>equals()</tt> or <tt>hashCode()</tt>.
* The <em>put</em>, <em>contains</em>, <em>remove</em>, <em>minimum</em>,
* <em>maximum</em>, <em>ceiling</em>, and <em>floor</em> operations each take
* logarithmic time in the worst case.
* The <em>size</em>, and <em>is-empty</em> operations take constant time.
* Construction takes constant time.
* <p>
* For additional documentation, see <a href="http://introcs.cs.princeton.edu/44st">Section 4.4</a> of
* <i>Introduction to Programming in Java: An Interdisciplinary Approach</i> by Robert Sedgewick and Kevin Wayne.
*/
public class ST<Key extends Comparable<Key>, Value> implements Iterable<Key> {

    /** Underlying balanced BST; never null after construction. */
    private final TreeMap<Key, Value> st;

    /**
     * Initializes an empty symbol table.
     */
    public ST() {
        st = new TreeMap<Key, Value>();
    }

    /**
     * Returns the value associated with the given key.
     * @param key the key
     * @return the value associated with the given key if the key is in the symbol table
     *     and <tt>null</tt> if the key is not in the symbol table
     * @throws NullPointerException if <tt>key</tt> is <tt>null</tt>
     */
    public Value get(Key key) {
        if (key == null) throw new NullPointerException("called get() with null key");
        return st.get(key);
    }

    /**
     * Inserts the key-value pair into the symbol table, overwriting the old value
     * with the new value if the key is already in the symbol table.
     * If the value is <tt>null</tt>, this effectively deletes the key from the symbol table.
     * @param key the key
     * @param val the value
     * @throws NullPointerException if <tt>key</tt> is <tt>null</tt>
     */
    public void put(Key key, Value val) {
        if (key == null) throw new NullPointerException("called put() with null key");
        if (val == null) st.remove(key);
        else st.put(key, val);
    }

    /**
     * Removes the key and associated value from the symbol table
     * (if the key is in the symbol table).
     * @param key the key
     * @throws NullPointerException if <tt>key</tt> is <tt>null</tt>
     */
    public void delete(Key key) {
        if (key == null) throw new NullPointerException("called delete() with null key");
        st.remove(key);
    }

    /**
     * Does this symbol table contain the given key?
     * @param key the key
     * @return <tt>true</tt> if this symbol table contains <tt>key</tt>, else <tt>false</tt>
     * @throws NullPointerException if <tt>key</tt> is <tt>null</tt>
     */
    public boolean contains(Key key) {
        if (key == null) throw new NullPointerException("called contains() with null key");
        return st.containsKey(key);
    }

    /**
     * Returns the number of key-value pairs in this symbol table.
     * @return the number of key-value pairs in this symbol table
     */
    public int size() {
        return st.size();
    }

    /**
     * Is this symbol table empty?
     * @return <tt>true</tt> if this symbol table is empty and <tt>false</tt> otherwise
     */
    public boolean isEmpty() {
        return size() == 0;
    }

    /**
     * Returns all keys in the symbol table as an <tt>Iterable</tt>.
     * @return all keys in the symbol table as an <tt>Iterable</tt>
     */
    public Iterable<Key> keys() {
        return st.keySet();
    }

    /**
     * Returns all of the keys in the symbol table as an iterator.
     * @return an iterator over all of the keys in the symbol table
     */
    public Iterator<Key> iterator() {
        return st.keySet().iterator();
    }

    /**
     * Returns the smallest key in the symbol table.
     * @return the smallest key in the symbol table
     * @throws NoSuchElementException if the symbol table is empty
     */
    public Key min() {
        if (isEmpty()) throw new NoSuchElementException("called min() with empty symbol table");
        return st.firstKey();
    }

    /**
     * Returns the largest key in the symbol table.
     * @return the largest key in the symbol table
     * @throws NoSuchElementException if the symbol table is empty
     */
    public Key max() {
        if (isEmpty()) throw new NoSuchElementException("called max() with empty symbol table");
        return st.lastKey();
    }

    /**
     * Returns the smallest key in the symbol table greater than or equal to <tt>key</tt>.
     * Improvement: uses {@link TreeMap#ceilingKey}, which expresses the query
     * directly instead of materialising a <tt>tailMap</tt> view.
     * @param key the key
     * @return the smallest key in the symbol table greater than or equal to <tt>key</tt>
     * @throws NoSuchElementException if there is no such key
     * @throws NullPointerException if <tt>key</tt> is <tt>null</tt>
     */
    public Key ceil(Key key) {
        if (key == null) throw new NullPointerException("called ceil() with null key");
        Key k = st.ceilingKey(key);
        if (k == null) throw new NoSuchElementException();
        return k;
    }

    /**
     * Returns the largest key in the symbol table less than or equal to <tt>key</tt>.
     * Improvement: uses {@link TreeMap#floorKey}, replacing the explicit
     * containsKey + headMap workaround (headMap excludes the key itself).
     * @param key the key
     * @return the largest key in the symbol table less than or equal to <tt>key</tt>
     * @throws NoSuchElementException if there is no such key
     * @throws NullPointerException if <tt>key</tt> is <tt>null</tt>
     */
    public Key floor(Key key) {
        if (key == null) throw new NullPointerException("called floor() with null key");
        Key k = st.floorKey(key);
        if (k == null) throw new NoSuchElementException();
        return k;
    }

    /**
     * Unit tests the <tt>ST</tt> data type.
     */
    public static void main(String[] args) {
        ST<String, String> st = new ST<String, String>();

        // insert some key-value pairs
        st.put("www.cs.princeton.edu", "128.112.136.11");
        st.put("www.cs.princeton.edu", "128.112.136.35"); // overwrite old value
        st.put("www.princeton.edu", "128.112.130.211");
        st.put("www.math.princeton.edu", "128.112.18.11");
        st.put("www.yale.edu", "130.132.51.8");
        st.put("www.amazon.com", "207.171.163.90");
        st.put("www.simpsons.com", "209.123.16.34");
        st.put("www.stanford.edu", "171.67.16.120");
        st.put("www.google.com", "64.233.161.99");
        st.put("www.ibm.com", "129.42.16.99");
        st.put("www.apple.com", "17.254.0.91");
        st.put("www.slashdot.com", "66.35.250.150");
        st.put("www.whitehouse.gov", "204.153.49.136");
        st.put("www.espn.com", "199.181.132.250");
        st.put("www.snopes.com", "66.165.133.65");
        st.put("www.movies.com", "199.181.132.250");
        st.put("www.cnn.com", "64.236.16.20");
        st.put("www.iitb.ac.in", "202.68.145.210");

        System.out.println(st.get("www.cs.princeton.edu"));
        System.out.println(st.get("www.harvardsucks.com"));
        System.out.println(st.get("www.simpsons.com"));
        System.out.println();

        System.out.println("ceil(www.simpsonr.com) = " + st.ceil("www.simpsonr.com"));
        System.out.println("ceil(www.simpsons.com) = " + st.ceil("www.simpsons.com"));
        System.out.println("ceil(www.simpsont.com) = " + st.ceil("www.simpsont.com"));
        System.out.println("floor(www.simpsonr.com) = " + st.floor("www.simpsonr.com"));
        System.out.println("floor(www.simpsons.com) = " + st.floor("www.simpsons.com"));
        System.out.println("floor(www.simpsont.com) = " + st.floor("www.simpsont.com"));
        System.out.println();

        System.out.println("min key: " + st.min());
        System.out.println("max key: " + st.max());
        System.out.println("size: " + st.size());
        System.out.println();

        // print out all key-value pairs in lexicographic order
        for (String s : st.keys())
            System.out.println(s + " " + st.get(s));
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/neurofuzzy/SparseArray.java
|
package meka.classifiers.multilabel.neurofuzzy;
import java.util.Arrays;
/*
* Copyright (C) 2006 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* SparseArrays map integers to Objects. Unlike a normal array of Objects,
* there can be gaps in the indices. It is intended to be more efficient
* than using a HashMap to map Integers to Objects.
*/
public class SparseArray {
private static final Double DELETED = null;
private boolean mGarbage = false;
/**
* Creates a new SparseArray containing no mappings.
*/
public SparseArray() {
this(10);
}
/**
* Creates a new SparseArray containing no mappings that will not
* require any additional memory allocation to store the specified
* number of mappings.
*/
public SparseArray(int initialCapacity) {
initialCapacity = idealIntArraySize(initialCapacity);
mKeys = new int[initialCapacity];
Arrays.fill(mKeys, -1);
mValues = new Double[initialCapacity];
mSize = 0;
}
/**
* Gets the Object mapped from the specified key, or <code>null</code>
* if no such mapping has been made.
*/
public Double get(int key) {
return get(key, null);
}
/**
* Gets the Object mapped from the specified key, or the specified Object
* if no such mapping has been made.
*/
public Double get(int key, Double valueIfKeyNotFound) {
int i = binarySearch(mKeys, 0, mSize, key);
if (i < 0 || mValues[i] == DELETED) {
return valueIfKeyNotFound;
} else {
return mValues[i];
}
}
/**
* Removes the mapping from the specified key, if there was any.
*/
public void delete(int key) {
int i = binarySearch(mKeys, 0, mSize, key);
if (i >= 0) {
if (mValues[i] != DELETED) {
mValues[i] = DELETED;
// mKeys[i]=-1;
mGarbage = true;
}
}
}
/**
* Alias for {@link #delete(int)}.
*/
public void remove(int key) {
delete(key);
}
private void gc() {
// Log.e("SparseArray", "gc start with " + mSize);
int n = mSize;
int o = 0;
int[] keys = mKeys;
Object[] values = mValues;
for (int i = 0; i < n; i++) {
Object val = values[i];
if (val != DELETED) {
if (i != o) {
keys[o] = keys[i];
values[o] = val;
}
o++;
}
}
mGarbage = false;
mSize = o;
Arrays.fill(keys,o,keys.length, -1);
// Log.e("SparseArray", "gc end with " + mSize);
}
/**
* Adds a mapping from the specified key to the specified value,
* replacing the previous mapping from the specified key if there
* was one.
*/
public void put(int key, Double value) {
int i = binarySearch(mKeys, 0, mSize, key);
if (i >= 0) {
mValues[i] = value;
} else {
i = ~i;
if (i < mSize && mValues[i] == DELETED) {
mKeys[i] = key;
mValues[i] = value;
return;
}
if (mGarbage && mSize >= mKeys.length) {
gc();
// Search again because indices may have changed.
i = ~binarySearch(mKeys, 0, mSize, key);
}
if (mSize >= mKeys.length) {
int n = idealIntArraySize(mSize + 1);
int[] nkeys = new int[n];
Double[] nvalues = new Double[n];
Arrays.fill(nkeys, -1);
// Log.e("SparseArray", "grow " + mKeys.length + " to " + n);
System.arraycopy(mKeys, 0, nkeys, 0, mKeys.length);
System.arraycopy(mValues, 0, nvalues, 0, mValues.length);
mKeys = nkeys;
mValues = nvalues;
}
if (mSize - i != 0) {
// Log.e("SparseArray", "move " + (mSize - i));
System.arraycopy(mKeys, i, mKeys, i + 1, mSize - i);
System.arraycopy(mValues, i, mValues, i + 1, mSize - i);
}
mKeys[i] = key;
mValues[i] = value;
mSize++;
}
}
/**
* Returns the number of key-value mappings that this SparseArray
* currently stores.
*/
public int size() {
if (mGarbage) {
gc();
}
return mSize;
}
/**
* Given an index in the range <code>0...size()-1</code>, returns
* the key from the <code>index</code>th key-value mapping that this
* SparseArray stores.
*/
public int keyAt(int index) {
if (mGarbage) {
gc();
}
return mKeys[index];
}
/**
* Given an index in the range <code>0...size()-1</code>, returns
* the value from the <code>index</code>th key-value mapping that this
* SparseArray stores.
*/
public Double valueAt(int index) {
if (mGarbage) {
gc();
}
return mValues[index];
}
/**
* Given an index in the range <code>0...size()-1</code>, sets a new
* value for the <code>index</code>th key-value mapping that this
* SparseArray stores.
*/
public void setValueAt(int index, Double value) {
if (mGarbage) {
gc();
}
mValues[index] = value;
}
/**
* Returns the index for which {@link #keyAt} would return the
* specified key, or a negative number if the specified
* key is not mapped.
*/
public int indexOfKey(int key) {
if (mGarbage) {
gc();
}
return binarySearch(mKeys, 0, mSize, key);
}
/**
* Returns an index for which {@link #valueAt} would return the
* specified key, or a negative number if no keys map to the
* specified value.
* Beware that this is a linear search, unlike lookups by key,
* and that multiple keys can map to the same value and this will
* find only one of them.
*/
public int indexOfValue(Double value) {
if (mGarbage) {
gc();
}
for (int i = 0; i < mSize; i++)
if (mValues[i] == value)
return i;
return -1;
}
/**
* Removes all key-value mappings from this SparseArray.
*/
public void clear() {
int n = mSize;
Object[] values = mValues;
for (int i = 0; i < n; i++) {
values[i] = null;
}
mSize = 0;
mGarbage = false;
}
public static int idealIntArraySize(int need) {
need*=4;
for (int i = 4; i < 32; i++)
if (need <= (1 << i) - 12)
return (1 << i) - 12;
return need/4;
}
/**
* Puts a key/value pair into the array, optimizing for the case where
* the key is greater than all existing keys in the array.
*/
public void append(int key, Double value) {
if (mSize != 0 && key <= mKeys[mSize - 1]) {
put(key, value);
return;
}
if (mGarbage && mSize >= mKeys.length) {
gc();
}
int pos = mSize;
if (pos >= mKeys.length) {
int n = idealIntArraySize(pos + 1);
int[] nkeys = new int[n];
Arrays.fill(nkeys, -1);
Double[] nvalues = new Double[n];
// Log.e("SparseArray", "grow " + mKeys.length + " to " + n);
System.arraycopy(mKeys, 0, nkeys, 0, mKeys.length);
System.arraycopy(mValues, 0, nvalues, 0, mValues.length);
mKeys = nkeys;
mValues = nvalues;
}
mKeys[pos] = key;
mValues[pos] = value;
mSize = pos + 1;
}
public String getKeysString(){
String s="";
for(int j =0; j<mSize;j++){
s+=mKeys[j]+",";
}
return s;
}
private static int binarySearch(int[] a, int start, int len, int key) {
int high = start + len, low = start - 1, guess;
while (high - low > 1) {
guess = (high + low) / 2;
if (a[guess] < key)
low = guess;
else
high = guess;
}
if (high == start + len)
return ~(start + len);
else if (a[high] == key)
return high;
else
return ~high;
}
private void checkIntegrity() {
for (int i = 1; i < mSize; i++) {
if (mKeys[i] <= mKeys[i - 1]) {
for (int j = 0; j < mSize; j++) {
// Log.e("FAIL", j + ": " + mKeys[j] + " -> " + mValues[j]);
}
throw new RuntimeException();
}
}
}
/**
 * Returns the internal key array after compacting away garbage entries.
 * NOTE(review): this exposes the live internal array, not a copy, and its
 * length may exceed the number of mappings — only the first size() entries
 * are meaningful. Callers must not mutate it.
 */
public int[] getKeys() {
    if (mGarbage) {
        gc();
    }
    return mKeys;
}
/**
 * Returns a compacted copy of this SparseArray.
 * Compacts (gc) first so that stale deleted entries are not copied into the
 * clone — consistent with the other accessors in this class, which all run
 * gc() before exposing state.
 */
public SparseArray clone() {
    if (mGarbage) {
        gc(); // ensure mKeys/mValues[0..mSize) hold only live mappings
    }
    SparseArray res = new SparseArray(mSize);
    System.arraycopy(mKeys, 0, res.mKeys, 0, mSize);
    System.arraycopy(mValues, 0, res.mValues, 0, mSize);
    res.mSize = mSize;
    return res;
}
// Parallel backing arrays: mKeys[i] maps to mValues[i] for i < mSize,
// with mKeys kept in ascending order.
private int[] mKeys;
private Double[] mValues;
// Number of used slots (may include garbage-flagged entries until gc()).
private int mSize;
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/neurofuzzy/SparseVector.java
|
package meka.classifiers.multilabel.neurofuzzy;
/*
* from http://introcs.cs.princeton.edu/java/44st/SparseVector.java.html
Copyright © 2000–2011, Robert Sedgewick and Kevin Wayne.
*/
/**
 * Sparse vector of doubles with a fixed logical length, backed by an
 * index-to-value symbol table that stores only the nonzero entries.
 * Adapted from Sedgewick &amp; Wayne (introcs.cs.princeton.edu).
 */
public class SparseVector {
    private final int N;            // logical length of the vector
    private ST<Integer, Double> st; // nonzero entries: index -> value

    /** Creates the all-zeros vector of length N. */
    public SparseVector(int N) {
        this.N = N;
        this.st = new ST<Integer, Double>();
    }

    /** Sets entry i to value; storing 0.0 removes the entry entirely. */
    public void put(int i, double value) {
        if (i < 0 || i >= N) throw new RuntimeException("Illegal index");
        if (value == 0.0) {
            st.delete(i);
        } else {
            st.put(i, value);
        }
    }

    /** Returns entry i, or 0.0 when it is not stored. */
    public double get(int i) {
        if (i < 0 || i >= N) throw new RuntimeException("Illegal index");
        return st.contains(i) ? st.get(i) : 0.0;
    }

    /** Returns the number of nonzero entries. */
    public int nnz() {
        return st.size();
    }

    /** Returns the logical length of the vector. */
    public int size() {
        return N;
    }

    /**
     * Returns the dot product of this vector with b, iterating over
     * whichever of the two has fewer nonzero entries.
     */
    public double dot(SparseVector b) {
        if (N != b.N) throw new RuntimeException("Vector lengths disagree");
        SparseVector small = (st.size() <= b.st.size()) ? this : b;
        SparseVector large = (small == this) ? b : this;
        double sum = 0.0;
        for (int i : small.st) {
            if (large.st.contains(i)) {
                sum += small.get(i) * large.get(i);
            }
        }
        return sum;
    }

    /** Returns the sum of all entries (absent entries contribute zero). */
    public double sum() {
        double total = 0.0;
        for (int i : st) {
            total += get(i);
        }
        return total;
    }

    /** Returns the Euclidean (2-)norm of this vector. */
    public double norm() {
        return Math.sqrt(dot(this));
    }

    /** Returns alpha * this as a new vector; this vector is unchanged. */
    public SparseVector scale(double alpha) {
        SparseVector result = new SparseVector(N);
        for (int i : st) {
            result.put(i, alpha * get(i));
        }
        return result;
    }

    /** Returns this + b as a new vector; neither operand is changed. */
    public SparseVector plus(SparseVector b) {
        if (N != b.N) throw new RuntimeException("Vector lengths disagree");
        SparseVector c = new SparseVector(N);
        for (int i : st) c.put(i, get(i));             // c = this
        for (int i : b.st) c.put(i, b.get(i) + c.get(i)); // c = c + b
        return c;
    }

    /** Returns "(index, value) " pairs for every nonzero entry. */
    public String toString() {
        StringBuilder sb = new StringBuilder();
        for (int i : st) {
            sb.append("(").append(i).append(", ").append(st.get(i)).append(") ");
        }
        return sb.toString();
    }

    /** Small smoke test exercising put/dot/plus. */
    public static void main(String[] args) {
        SparseVector a = new SparseVector(10);
        SparseVector b = new SparseVector(10);
        a.put(3, 0.50);
        a.put(9, 0.75);
        a.put(6, 0.11);
        a.put(6, 0.00);
        b.put(3, 0.60);
        b.put(4, 0.90);
        System.out.println("a = " + a);
        System.out.println("b = " + b);
        System.out.println("a dot b = " + a.dot(b));
        System.out.println("a + b = " + a.plus(b));
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/neurofuzzy/WARAM.java
|
/*
* ClassifierTemplate.java
*
* <<Your Name Here>>
* CN 710
* Dept. of Cognitive & Neural Systems
* Boston University
* <<Date here>>
*
* Copyright (c) 2006, Boston University
*
* Adapted from NaiveBayes.java
* Copyright (C) 1999 Eibe Frank,Len Trigg
*/
package meka.classifiers.multilabel.neurofuzzy;
import java.util.Enumeration;
import java.util.Map;
import java.util.Vector;
import java.util.Arrays;
import meka.classifiers.multilabel.*;
import weka.classifiers.Classifier;
//import weka.classifiers.Evaluation;
import meka.classifiers.multilabel.Evaluation;
import weka.classifiers.UpdateableClassifier;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Utils;
import meka.core.MLUtils;
import meka.core.MultiLabelDrawable;
import weka.core.WeightedInstancesHandler;
import weka.core.RevisionUtils;
/**
* ****REPLACE THE FOLLOWING WITH SIMILAR INFORMATION.
* Class for a Naive Bayes classifier using estimator classes. Numeric
* estimator precision values are chosen based on analysis of the
* training data. For this reason, the classifier is not an
* UpdateableClassifier (which in typical usage are initialized with zero
* training instances) -- if you need the UpdateableClassifier functionality,
* use the NaiveBayesUpdateable classifier. The NaiveBayesUpdateable
* classifier will use a default precision of 0.1 for numeric attributes
* when buildClassifier is called with zero training instances.
* <p>
* For more information on Naive Bayes classifiers, see<p>
*
* George H. John and Pat Langley (1995). <i>Estimating
* Continuous Distributions in Bayesian Classifiers</i>. Proceedings
* of the Eleventh Conference on Uncertainty in Artificial
* Intelligence. pp. 338-345. Morgan Kaufmann, San Mateo.<p>
*
* Valid options are:<p>
*
* -K <br>
* Use kernel estimation for modelling numeric attributes rather than
* a single normal distribution.<p>
*
* -D <br>
* Use supervised discretization to process numeric attributes.<p>
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @author Rushi Bhatt (rushi@cns.bu.edu)
* @version $Revision: 1.16 $
* Modified by Rushi for use as a CN710 template
*/
public class WARAM extends ARAMNetworkClass
implements MultiLabelClassifierThreaded, OptionHandler, WeightedInstancesHandler, UpdateableClassifier, MultiLabelDrawable {
//**** THIS IS WHERE CLASSIFIER WEIGHTS ETC GO ****
//define stuff like weight matrices, classifier parameters etc.
//e.g., protected double rho_a_bar=0.0;
/**
 * Constructs a WARAM network with the given dimensions and parameters.
 *
 * @param fnumFeatures number of (complement-coded) input features
 * @param fnumClasses  number of (complement-coded) class outputs
 * @param fro          vigilance parameter rho for the input side
 * @param fthreshold   activation threshold used at prediction time
 */
public WARAM(int fnumFeatures, int fnumClasses, double fro, double fthreshold) {
    initARAM(fnumFeatures, fnumClasses, fro, fthreshold);
}

/** No-argument constructor; the network is initialized lazily in buildClassifier. */
public WARAM() {
}

// Initializes the network with a single uncommitted category:
// A-side weights all 1 (fuzzy-ART convention, matches any input) and
// B-side weights all 0 (sentinel for "uncommitted" in updateClassifier).
private void initARAM(int fnumFeatures, int fnumClasses, double fro, double fthreshold) {
    numFeatures = fnumFeatures;
    numClasses = fnumClasses;
    threshold = fthreshold;
    // Fixed: the fro parameter was previously accepted but never stored,
    // so the vigilance passed to the constructor was silently ignored.
    roa = fro;
    weightsA = new double[1][numFeatures];
    Arrays.fill(weightsA[0], 1);
    weightsB = new double[1][numClasses];
    Arrays.fill(weightsB[0], 0);
    numCategories = 1;
}
/**
 * Returns a string describing this classifier.
 * @return a description of the classifier suitable for
 * displaying in the explorer/experimenter gui.
 */
public String globalInfo() {
    // Replaced the placeholder "This is ARAM." with a real description.
    return "WARAM: weighted/voted variant of the ML-ARAM (multi-label "
            + "Adaptive Resonance Associative Map) neuro-fuzzy classifier.";
}
/**
 * Generates the classifier.
 *
 * Lazily initializes the network on first call (features and labels are
 * complement-coded, hence the factor 2), then trains incrementally by
 * feeding every instance through updateClassifier.
 *
 * @param instances set of instances serving as training data
 * @exception Exception if the classifier has not been generated
 * successfully
 */
public void buildClassifier(Instances D) throws Exception {
    // swap attributes to fit MEKA
    testCapabilities(D);
    int L = D.classIndex();
    // Complement coding doubles both the feature and label dimensions.
    int featlength = (D.numAttributes() - L) * 2;
    int numSamples = D.numInstances();
    int classlength = L * 2;
    System.out.println("Using rho=" + roa);
    // numFeatures == -1 marks an uninitialized network.
    if (numFeatures == -1) {
        initARAM(featlength, classlength, roa, threshold);
    } else {
        // Dimensions are fixed at first initialization.
        // NOTE(review): mismatched data silently skips training here —
        // confirm this is intended rather than throwing.
        if (featlength != numFeatures) {
            return;
        }
        if (classlength != numClasses) {
            return;
        }
    }
    // Copy the instances so we don't mess up the original data.
    // Function calls do not deep copy the arguments..
    //Instances m_Instances = new Instances(instances);
    // Train incrementally: one updateClassifier call per instance.
    Enumeration enumInsts = D.enumerateInstances();
    while (enumInsts.hasMoreElements()) {
        Instance instance = (Instance) enumInsts.nextElement();
        updateClassifier(instance);
    }
    System.out.println("Training done, used " + numCategories + " neurons.");
    // Alternatively, you can put the training logic within this method,
    // rather than updateClassifier(...). However, if you omit the
    // updateClassifier(...) method, you should remove
    // UpdateableClassifier from the class declaration above.
}
// ****THIS IS THE WEIGHT UPDATE ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
/**
 * Updates the classifier with the given instance — one fuzzy-ARTMAP style
 * learning step: complement-codes input and labels, activates all
 * categories, then walks down the activation ranking until one category
 * passes both vigilance tests (matchA >= roa on the input side and
 * matchB >= rob on the label side). That category's weights are updated;
 * if resonance happens on the uncommitted (last) category, it is committed
 * and a fresh uncommitted category is appended.
 *
 * @param instance the new training instance to include in the model
 * @exception Exception if the instance could not be incorporated in
 * the model.
 */
public void updateClassifier(Instance instance) throws Exception {
    //called once for each instance.
    int num_classes = (int) (0.5 * numClasses);
    int num_features = (int) (0.5 * numFeatures);
    double[] data = new double[numFeatures];
    double[] labels = new double[numClasses];
    int numChanges = 0;
    if (!instance.classIsMissing()) {
        // Complement-code the features: [x, 1 - x].
        for (int j = 0; j < num_features; j++) {
            data[j] = instance.value(num_classes + j);
            data[j + num_features] = 1 - data[j];
        }
        // Complement-code the labels likewise.
        for (int j = 0; j < num_classes; j++) {
            labels[j] = instance.value(j);
            labels[j + num_classes] = 1 - labels[j];
        }
        // SortPair sorts descending, so cateacti[0] is the best category.
        SortPair[] cateacti = ARTActivateCategories(data);
        java.util.Arrays.sort(cateacti);
        boolean resonance = false;
        int currentSortedIndex = 0;
        int currentCategory = -1;
        double matchA = 0;
        double matchB = 0;
        while (!resonance) {
            currentCategory = cateacti[currentSortedIndex]
                    .getOriginalIndex();
            matchA = ART_Calculate_Match(data, weightsA[currentCategory]);
            // All-zero B weights mark the uncommitted category; it matches
            // any label set by definition.
            if (sumArray(weightsB[currentCategory]) == 0) {
                matchB = 1;
            } else {
                matchB = ART_Calculate_Match(labels,
                        weightsB[currentCategory]);
            }
            if (matchA >= roa && matchB >= rob) {
                if (currentCategory == numCategories - 1) {
                    // Resonance on the uncommitted category: commit it.
                    if (currentSortedIndex == maxNumCategories) {
                        System.out
                                .println("WARNING: The maximum number of categories has been reached.");
                        resonance = true;
                    } else {
                        // Add a new category
                        for (int j = 0; j < data.length; j++) {
                            weightsA[currentCategory][j] = data[j];
                        }
                        for (int j = 0; j < weightsB[currentCategory].length; j++) {
                            weightsB[currentCategory][j] = labels[j];
                        }
                        ARAMm_Add_New_Category();
                        // fprintf(FileID,'Add a new category of %d\n',
                        // network.numCategories);
                        // Increment the number of changes since we added a
                        // new category.
                        numChanges = numChanges + 1;
                        resonance = true;
                    }
                } else {
                    // % Update weights
                    double weightChange = ARAMm_Update_Weights(data,
                            labels, currentCategory);
                    if (weightChange == 1) {
                        numChanges += 1;
                    }
                    resonance = true;
                }
            } else {
                // No resonance: fall through to the next-best category.
                currentSortedIndex += 1;
                resonance = false;
            }
        }
    }
}
//****THIS IS THE CLASSIFICATION ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
//****classifyInstance() uses this method, so implement the
//****nuts-and-bolts of your algorithm here.
/**
 * Calculates the class membership probabilities for the given test
 * instance: complement-codes the input, activates every category, keeps
 * the cluster of categories whose activation is close to the best one,
 * and returns the activation-weighted average of their label weights.
 *
 * @param instance the instance to be classified
 * @return predicted class probability distribution (hard 0/1 labels when
 *         m_userankstoclass is set)
 * @exception Exception if there is a problem generating the prediction
 */
public double[] distributionForInstance(Instance instance) throws Exception {
    int num_classes = (int) (0.5 * numClasses);
    int num_features = (int) (0.5 * numFeatures);
    double[] currentData = new double[numFeatures];
    double[] ranking = new double[num_classes];
    // Complement-code the input features: [x, 1 - x].
    for (int j = 0; j < num_features; j++) {
        currentData[j] = instance.value(num_classes + j);
        currentData[num_features + j] = 1 - currentData[j];
    }
    SortPair[] sortedActivations = ARTActivateCategories(currentData);
    java.util.Arrays.sort(sortedActivations);
    // Activation spread used to decide how many categories get to vote.
    // Fixed: with a single category the original indexed position -1 and
    // threw ArrayIndexOutOfBoundsException.
    // NOTE(review): index numCategories - 2 (not - 1) is preserved from the
    // original — confirm whether the last or second-to-last activation was
    // intended.
    double diff_act = (numCategories > 1)
            ? sortedActivations[0].getValue()
                    - sortedActivations[numCategories - 2].getValue()
            : 0.0;
    int largest_activ = 1;
    double activ_change = 0;
    for (int i = 1; i < numCategories; i++) {
        activ_change = (sortedActivations[0].getValue() - sortedActivations[i]
                .getValue())
                / sortedActivations[0].getValue();
        if (activ_change > threshold * diff_act) {
            break;
        }
        largest_activ = largest_activ + 1;
    }
    // % largest_activ =5;
    double[] best_matches = new double[largest_activ];
    java.util.Arrays.fill(best_matches, 1);
    for (int i = 0; i < largest_activ; i++) {
        // % best_matches(i) = matches(sortedCategories(i));
        best_matches[i] = sortedActivations[i].getValue();
    }
    // % min_mat = min(best_matches);
    // % max_mat = max(best_matches);
    // Normalize the voting weights and accumulate the label rankings.
    double sum_mat = sumArray(best_matches);
    int currentCategory = 0;
    for (int i = 0; i < largest_activ; i++) {
        best_matches[i] = best_matches[i] / sum_mat;
        currentCategory = sortedActivations[i].getOriginalIndex();
        // % Fill return vector with weightB values
        for (int j = 0; j < num_classes; j++) {
            ranking[j] = ranking[j]
                    + best_matches[i] * weightsB[currentCategory][j];
        }
    }
    if (m_userankstoclass) {
        return ARAMm_Ranking2Class(ranking);
    }
    return ranking;
}
/**
 * Converts a real-valued label ranking into a hard 0/1 class assignment by
 * cutting the descending-sorted ranking at the largest gap between
 * consecutive scores; everything above the cut becomes 1.
 */
public double[] ARAMm_Ranking2Class(double[] rankings) {
    int n = rankings.length;
    // Pair each score with its label index and sort descending by score.
    SortPair[] ordered = new SortPair[n];
    for (int i = 0; i < n; i++) {
        ordered[i] = new SortPair(rankings[i], i);
    }
    java.util.Arrays.sort(ordered);
    // Gap between neighbours i-1 and i, tagged with cut position i.
    SortPair[] gaps = new SortPair[n - 1];
    for (int i = 1; i < n; i++) {
        gaps[i - 1] = new SortPair(ordered[i - 1].getValue() - ordered[i].getValue(), i);
    }
    java.util.Arrays.sort(gaps); // largest gap first
    int cut = gaps[0].getOriginalIndex();
    double[] classes = new double[n];
    for (int i = 0; i < cut; i++) {
        classes[ordered[i].getOriginalIndex()] = 1;
    }
    return classes;
}
// Fuzzy-ART choice (activation) function for every category:
//   T_i = |Data ^ w_i| / (alpha + |w_i|)
// where ^ is the component-wise minimum and |.| the L1 sum. Returns one
// SortPair (activation, category index) per category, unsorted.
private SortPair[] ARTActivateCategories(double[] Data) {
    SortPair[] catacti = new SortPair[numCategories];
    // double[] catacti=new double[numCategories];
    double[] matchVector = new double[numFeatures];
    for (int i = 0; i < numCategories; i++) {
        double sumvector = 0;   // |Data ^ w_i|
        double sumweight = 0;   // |w_i|
        for (int j = 0; j < numFeatures; j++) {
            matchVector[j] = ((Data[j] < weightsA[i][j]) ? Data[j]
                    : weightsA[i][j]);
            sumvector += matchVector[j];
            sumweight += weightsA[i][j];
        }
        // alpha is the small "choice" parameter avoiding division by zero.
        catacti[i] = new SortPair(sumvector / (alpha + sumweight), i);
    }
    return catacti;
}
// ****YOU SHOULDN'T NEED TO CHANGE THIS
/**
 * Classifies the given test instance. The instance has to belong to a
 * dataset when it's being classified. Note that a classifier MUST
 * implement either this or distributionForInstance().
 *
 * @param instance the instance to be classified
 * @return the predicted most likely class for the instance or
 * Instance.missingValue() if no prediction is made
 * @exception Exception if an error occurred during the prediction
 */
public double classifyInstance(Instance instance) throws Exception {
    double[] dist = distributionForInstance(instance);
    if (dist == null) {
        throw new Exception("Null distribution predicted");
    }
    switch (instance.classAttribute().type()) {
        case Attribute.NOMINAL:
            // Pick the index of the largest probability mass.
            double max = 0;
            int maxIndex = 0;
            for (int i = 0; i < dist.length; i++) {
                if (dist[i] > max) {
                    maxIndex = i;
                    max = dist[i];
                }
            }
            if (max > 0) {
                return maxIndex;
            } else {
                //return Instance.missingValue();
            }
            // NOTE(review): no break here — when no class has positive mass
            // this falls through to the NUMERIC branch and returns dist[0];
            // confirm this fall-through is intentional.
        case Attribute.NUMERIC:
            return dist[0];
        default:
            return -1;
    }
}
// ****ANY OPTIONS/PARAMETERS GO HERE****
/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
public Enumeration listOptions() {
    Vector options = new Vector(2);
    options.addElement(new Option(
            "\tChange generalization parameter Rho\n", "P", 0, "-P"));
    options.addElement(new Option(
            "\tUse ranking to class function special dev. for ARAM.\n", "K", 0, "-K"));
    return options.elements();
}
//****OPTIONS HERE SHOULD MATCH THOSE ADDED ABOVE****
/**
 * Parses a given list of options. Valid options are:<p>
 *
 * -P <br>
 * Set the vigilance (generalization) parameter rho.<p>
 *
 * -K <br>
 * Use the ARAM-specific ranking-to-class conversion for the output.<p>
 *
 * @param options the list of options as an array of strings
 * @exception Exception if an option is not supported
 */
public void setOptions(String[] options) throws Exception {
    //These are just examples, modify to suit your algorithm
    // boolean k = Utils.getFlag('K', options);
    // boolean d = Utils.getFlag('D', options);
    // if (k && d) {
    // throw new IllegalArgumentException(
    // "Can't use both kernel density estimation and discretization!");
    // }
    // setUseSupervisedDiscretization(d);
    // setUseKernelEstimator(k);
    // -P overrides rho only when present; otherwise the field keeps its value.
    roa = (Utils.getOptionPos("P", options) >= 0) ? Double.parseDouble(Utils.getOption("P", options)) : roa;
    m_userankstoclass = (Utils.getOptionPos("K", options) >= 0);
    super.setOptions(options);
}
//****MORE OPTION PARSING STUFF****
/**
 * Gets the current settings of the classifier.
 *
 * Fixed: previously returned the hard-coded string "-P 0.9 -K" regardless
 * of the actual configuration; now reflects the live field values so that
 * getOptions()/setOptions() round-trip.
 *
 * @return an array of strings suitable for passing to setOptions
 */
public String[] getOptions() {
    java.util.ArrayList<String> options = new java.util.ArrayList<String>();
    options.add("-P");
    options.add(String.valueOf(roa));
    if (m_userankstoclass) {
        options.add("-K");
    }
    return options.toArray(new String[0]);
}
//****ANY INFORMATION LIKE NO. OF UNITS ETC PRINTED HERE
/**
 * Returns a description of the classifier.
 *
 * @return a description of the classifier as a string.
 */
public String toString() {
    // A fuller model dump (per-class distributions etc.) was never
    // implemented; only the classifier name is reported.
    return "ML ARAM classifier";
}
//****MORE GUI RELATED STUFF AND PARAMETER ACCESS METHODS
// /**
// * Returns the tip text for this property
// * @return tip text for this property suitable for
// * displaying in the explorer/experimenter gui
// */
// public String useKernelEstimatorTipText() {
// return "Use a kernel estimator for numeric attributes rather than a "
// +"normal distribution.";
// }
// /**
// * Gets if kernel estimator is being used.
// *
// * @return Value of m_UseKernelEstimatory.
// */
// public boolean getUseKernelEstimator() {
//
// return m_UseKernelEstimator;
// }
//
// /**
// * Sets if kernel estimator is to be used.
// *
// * @param v Value to assign to m_UseKernelEstimatory.
// */
// public void setUseKernelEstimator(boolean v) {
//
// m_UseKernelEstimator = v;
// if (v) {
// setUseSupervisedDiscretization(false);
// }
// }
//
// /**
// * Returns the tip text for this property
// * @return tip text for this property suitable for
// * displaying in the explorer/experimenter gui
// */
// public String useSupervisedDiscretizationTipText() {
// return "Use supervised discretization to convert numeric attributes to nominal "
// +"ones.";
// }
//
// /**
// * Get whether supervised discretization is to be used.
// *
// * @return true if supervised discretization is to be used.
// */
// public boolean getUseSupervisedDiscretization() {
//
// return m_UseDiscretization;
// }
//
// /**
// * Set whether supervised discretization is to be used.
// *
// * @param newblah true if supervised discretization is to be used.
// */
// public void setUseSupervisedDiscretization(boolean newblah) {
//
// m_UseDiscretization = newblah;
// if (newblah) {
// setUseKernelEstimator(false);
// }
// }
/**
 * Fuzzy-ART learning step for the resonating category.
 * (The previous javadoc wrongly described this as the main method.)
 *
 * A-side: moves each prototype component toward the fuzzy intersection
 * with the input (only shrinking, controlled by learningRate).
 * B-side: either accumulates label counts (weightblearnmethod == 0,
 * voting style) or applies the same shrink-toward-minimum rule.
 *
 * @param data     complement-coded input vector
 * @param labels   complement-coded label vector
 * @param category index of the category to update
 * @return 1 if any B-side weight changed, 0 otherwise
 */
private double ARAMm_Update_Weights(double[] data, double[] labels,
        int category) {
    double weightChange = 0;
    for (int i = 0; i < numFeatures; i++) {
        if (data[i] < weightsA[category][i]) {
            weightsA[category][i] = (learningRate * data[i])
                    + (1 - learningRate) * weightsA[category][i];
        }
    }
    for (int i = 0; i < numClasses; i++) {
        if (weightblearnmethod == 0) {
            // Voting: accumulate how often each label co-occurred.
            weightsB[category][i] = labels[i] + weightsB[category][i];
            weightChange = 1;
        } else {
            // %normalise
            if (labels[i] < weightsB[category][i]) {
                weightsB[category][i] = (learningRate * labels[i]) + (1 - learningRate) * weightsB[category][i];
                weightChange = 1;
            }
        }
    }
    return weightChange;
}
/**
 * Fuzzy-ART match function: |Data ^ w| / |Data|, i.e. the fraction of the
 * input preserved by the prototype (^ = component-wise minimum, |.| = L1
 * sum). Returns 0.0 when the lengths disagree or the input sums to zero.
 */
private double ART_Calculate_Match(double[] Data, double[] fweights) {
    if (Data.length != fweights.length) {
        return 0.0;
    }
    double intersection = 0;
    double inputTotal = 0;
    for (int j = 0; j < Data.length; j++) {
        // Component-wise minimum (kept as a comparison, not Math.min, to
        // preserve the original's exact NaN handling).
        double m = (Data[j] < fweights[j]) ? Data[j] : fweights[j];
        intersection += m;
        inputTotal += Data[j];
    }
    return (inputTotal == 0) ? 0.0 : intersection / inputTotal;
}
// Appends a fresh uncommitted category: A-side weights all 1 (matches any
// input under the fuzzy-ART choice function), B-side all 0 (the sentinel
// that updateClassifier recognizes as "uncommitted").
// NOTE(review): assumes the caller has already written the committed
// weights into the previous last slot before invoking this.
private void ARAMm_Add_New_Category() {
    weightsA = Arrays.copyOf(weightsA, numCategories + 1);
    weightsB = Arrays.copyOf(weightsB, numCategories + 1);
    weightsA[numCategories] = new double[numFeatures];
    weightsB[numCategories] = new double[numClasses];
    Arrays.fill(weightsA[numCategories], 1.0);
    Arrays.fill(weightsB[numCategories], 0.0);
    numCategories += 1;
}
/** Returns the sum of all entries of arr. */
private double sumArray(double[] arr) {
    double total = 0;
    for (double v : arr) {
        total += v;
    }
    return total;
}
/**
 * Command-line entry point: runs a MEKA evaluation experiment with a
 * default-configured WARAM classifier.
 *
 * @param argv the options forwarded to Evaluation.runExperiment
 */
public static void main(String [] argv) {
    try {
        Evaluation.runExperiment((MultiLabelClassifier) new WARAM(), argv);
    } catch (Exception e) {
        e.printStackTrace();
        System.err.println(e.getMessage());
    }
}
// ---------------------------------------------------------------------
// Interface stubs: required by the implemented interfaces but not
// supported by this classifier; the auto-generated null/false defaults
// are intentionally left in place.
// ---------------------------------------------------------------------
@Override
public String getModel() {
    // TODO Auto-generated method stub
    return null;
}
@Override
public Map<Integer, Integer> graphType() {
    // TODO Auto-generated method stub
    return null;
}
@Override
public Map<Integer, String> graph() throws Exception {
    // TODO Auto-generated method stub
    return null;
}
@Override
public void setDebug(boolean debug) {
    // TODO Auto-generated method stub
}
@Override
public boolean getDebug() {
    // TODO Auto-generated method stub
    return false;
}
@Override
public String debugTipText() {
    // TODO Auto-generated method stub
    return null;
}
@Override
public Capabilities getCapabilities() {
    // TODO Auto-generated method stub
    return null;
}
@Override
public boolean isThreaded() {
    // TODO Auto-generated method stub
    return false;
}
@Override
public void setThreaded(boolean setv) {
    // TODO Auto-generated method stub
}
@Override
public double[][] distributionForInstanceM(Instances i) throws Exception {
    // TODO Auto-generated method stub
    return null;
}
}
/**
 * Value/index pair whose natural ordering is DESCENDING by value, so that
 * Arrays.sort places the largest value first while remembering where each
 * value originally came from.
 */
class SortPair implements Comparable<SortPair> {
    private final int originalIndex;
    private final double value;

    public SortPair(double value, int originalIndex) {
        this.value = value;
        this.originalIndex = originalIndex;
    }

    /** Descending order: compares the other value against this one. */
    public int compareTo(SortPair other) {
        return Double.compare(other.getValue(), value);
    }

    public int getOriginalIndex() {
        return originalIndex;
    }

    public double getValue() {
        return value;
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/neurofuzzy/WvARAM.java
|
/*
*
*
* Copyright (c) 2016, Fernando Benites
* @author Fernando Benites
*
* Adapted from NaiveBayes.java
*/
package meka.classifiers.multilabel.neurofuzzy;
import java.io.FileWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
import java.util.Random;
import java.util.Vector;
import java.util.Arrays;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.PrintWriter;
import meka.classifiers.multilabel.*;
import weka.classifiers.Classifier;
//import weka.classifiers.Evaluation;
import meka.classifiers.multilabel.Evaluation;
import meka.classifiers.multilabel.neurofuzzy.ARAMNetworkClass;
import meka.classifiers.multitarget.MultiTargetClassifier;
import weka.classifiers.UpdateableClassifier;
import weka.core.*;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
/**
* ****REPLACE THE FOLLOWING WITH SIMILAR INFORMATION.
* Class for a ML-ARAM classifier.
* <p> *
* Valid options are:<p>
*
* -P <br>
* Use a different generalization value.<p>
*
* -K <br>
* Use ML rankings to class.<p>
*
* -V <br>
* Define the number of voters.<p>
*
* @author Fernando Benites (Fernando.Benites@uni.konstanz.de)
* @version $Revision: 1.0 $
*/
public class WvARAM extends ARAMNetworkClass
implements OptionHandler, WeightedInstancesHandler, UpdateableClassifier, Randomizable,
TechnicalInformationHandler, MultiLabelClassifier
{
//**** THIS IS WHERE CLASSIFIER WEIGHTS ETC GO ****
//define stuff like weight matrices, classifier parameters etc.
//e.g., protected double rho_a_bar=0.0;
/**
 * For serialization.
 */
private static final long serialVersionUID = 3824500093693707048L;
// Ensemble members (voters); concrete type chosen by the flags below.
ARAMNetworkClass[] networks=null;
// Number of voters in the ensemble.
int numberofnetworks=5;
// Number of label attributes (D.classIndex()), set in buildClassifier.
int numClasses=0;
// Vigilance parameter rho, propagated to every voter.
double roa=0.9;
// When true, convert the summed ranking to hard 0/1 labels.
boolean m_userankstoclass=false;
// Implementation selection flags (checked in this order in buildClassifier).
boolean fastaram=true;
boolean sparsearam=false;
boolean sparsearamH=false;
boolean sparsearamHT=false;
// When true, train/classify with one worker thread per voter.
boolean tfastaram=true;
// Seed for the per-voter training-order shuffles.
int m_seed = 42;
// One reusable classification worker per voter (threaded path).
DistributionCalc[] dc=null ;
// Number of instances classified so far (debug/progress counter).
long intclass =0;
// Lazily opened writer for the neuron-activity log (see fsna).
PrintWriter fwneurons=null;
boolean saveneuronsactivity=false;
// Path of the neuron-activity log file.
String fsna=null;
protected String activity_report="";

/** Constructs an ensemble with the given number of voters. */
public WvARAM(int fnumberofnetworks) {
    numberofnetworks=fnumberofnetworks;
}

/** Default constructor: 5 voters, threaded fast-ARAM members. */
public WvARAM(){
}
/**
 * Generates the classifier: builds numberofnetworks ARAM voters (concrete
 * type picked by the fastaram/sparsearam* flags), each trained on the same
 * data in a different random order, optionally one thread per voter.
 *
 * @param instances set of instances serving as training data
 * @exception Exception if the classifier has not been generated
 * successfully
 */
public void buildClassifier(Instances D) throws Exception {
    // swap attributes to fit MEKA
    //testCapabilities(D);
    Random r = new Random(m_seed);
    // System.out.println("Version TNG"+tfastaram);
    // Select the ensemble-member implementation from the flags.
    if (fastaram) {
        networks = new ARAMNetworkfast[numberofnetworks];
    } else if (sparsearam) {
        networks = new ARAMNetworkSparse[numberofnetworks];
    } else if (sparsearamH) {
        networks = new ARAMNetworkSparseV[numberofnetworks];
    } else if (sparsearamHT) {
        networks = new ARAMNetworkSparseHT[numberofnetworks];
    } else {
        networks = new ARAMNetwork[numberofnetworks];
    }
    numClasses = D.classIndex();
    // Copy the instances so we don't mess up the original data.
    // Function calls do not deep copy the arguments..
    //Instances m_Instances = new Instances(instances);
    // Use the enumeration of instances to train classifier.
    // Do any sanity checks (e.g., missing attributes etc here
    // before calling updateClassifier for the actual learning
    if (tfastaram) {
        // Threaded path: one BuildClassifier worker per voter; each voter
        // gets its own shuffled presentation order of the training data.
        BuildClassifier[] bc = new BuildClassifier[numberofnetworks];
        for (int i = 0; i < numberofnetworks; i++) {
            List<Integer> list = new ArrayList<Integer>();
            for (int j = 0; j < D.numInstances(); j++) {
                list.add(j);
            }
            java.util.Collections.shuffle(list, r);
            if (fastaram) {
                networks[i] = new ARAMNetworkfast();
            } else if (sparsearam) {
                networks[i] = new ARAMNetworkSparse();
            } else if (sparsearamH) {
                networks[i] = new ARAMNetworkSparseV();
            } else if (sparsearamHT) {
                networks[i] = new ARAMNetworkSparseHT();
            } else {
                networks[i] = new ARAMNetwork();
            }
            networks[i].order = list;
            networks[i].roa = roa;
            bc[i] = new BuildClassifier(networks[i]);
            bc[i].setinstances(D);
            bc[i].start();
            //D.randomize(r);
        }
        // Wait for every worker and collect the trained networks.
        for (int i = 0; i < numberofnetworks; i++) {
            bc[i].join();
            networks[i] = bc[i].m_network;
            networks[i].learningphase = false;
        }
    } else {
        // Sequential path: train voters one after the other, reshuffling
        // the dataset itself between voters.
        for (int i = 0; i < numberofnetworks; i++) {
            if (fastaram) {
                networks[i] = new ARAMNetworkfast();
            } else if (sparsearam) {
                networks[i] = new ARAMNetworkSparse();
            } else if (sparsearamH) {
                networks[i] = new ARAMNetworkSparseV();
            } else if (sparsearamHT) {
                networks[i] = new ARAMNetworkSparseHT();
            } else {
                networks[i] = new ARAMNetwork();
            }
            networks[i].roa = roa;
            networks[i].buildClassifier(D);
            networks[i].learningphase = false;
            D.randomize(r);
        }
    }
    // Classification workers are created lazily in distributionForInstance.
    dc = new DistributionCalc[numberofnetworks];
    // Alternatively, you can put the training logic within this method,
    // rather than updateClassifier(...). However, if you omit the
    // updateClassifier(...) method, you should remove
    // UpdateableClassifier from the class declaration above.
}
//****THIS IS THE CLASSIFICATION ROUTINE. MODIFY TO CHANGE THE ALGORITHM****
//****classifyInstance() uses this method, so implement the
//****nuts-and-bolts of your algorithm here.
/**
 * Calculates the class membership probabilities for the given test
 * instance by summing the label rankings voted by every ensemble member —
 * via reusable DistributionCalc worker threads when tfastaram is set,
 * otherwise sequentially.
 *
 * @param instance the instance to be classified
 * @return predicted class probability distribution (hard 0/1 labels when
 *         m_userankstoclass is set)
 * @exception Exception if there is a problem generating the prediction
 */
public double[] distributionForInstance(Instance instance) throws Exception {
    double[] dist = new double[numClasses];
    // long before_test = System.currentTimeMillis();
    //long after_test=0;
    //long after_test1=0;
    int donetime = 0;
    boolean[] ddone = new boolean[numberofnetworks];
    // Lazily open the neuron-activity log on first use.
    if (saveneuronsactivity && fwneurons == null) {
        try {
            fwneurons = new PrintWriter(new BufferedWriter(new FileWriter(fsna)));
        } catch (IOException e) {
            e.printStackTrace();
            System.exit(1);
            //exception handling left as an exercise for the reader
        }
    }
    if (tfastaram) {
        // System.out.println("fastaram");
        // Hand the instance to every worker, creating workers on first use.
        for (int i = 0; i < numberofnetworks; i++) {
            if (dc[i] == null) {
                networks[i].activity_report = activity_report;
                dc[i] = new DistributionCalc(networks[i]);
                dc[i].id = i;
            }
            dc[i].setinstance(instance);
        }
        // Kick off all workers: wake reusable ones, start fresh ones.
        for (int i = 0; i < numberofnetworks; i++) {
            ddone[i] = false;
            if (dc[i].reuse) {
                dc[i].gathered = false;
                synchronized (dc[i]) {
                    dc[i].doNotify();
                }
            } else {
                dc[i].gathered = false;
                dc[i].start();
            }
            // System.out.println("start "+i+" "+dc[i].do_classify+" "+dc[i].gathered+ " "+intclass);
        }
        // after_test = System.currentTimeMillis();
        // Busy-wait gather loop: poll workers (do_classify == 2 means done),
        // sum each finished worker's distribution exactly once.
        int clsnr = 0;
        int counter = 0;
        while (clsnr < numberofnetworks) {
            for (int i = 0; i < numberofnetworks; i++) {
                synchronized (dc[i]) {
                    if (dc[i].do_classify == 2 && dc[i].gathered != true) {
                        clsnr += 1;
                        // dc[i].suspend();
                        for (int j = 0; j < numClasses; j++) {
                            dist[j] += dc[i].m_dist[j];
                        }
                        dc[i].gathered = true;
                        ddone[i] = true;
                        donetime += counter;
                        if (saveneuronsactivity) {
                            //fwneurons.println(dc[i].m_network.nrinstclassified+":"+i+":"+Arrays.toString(dc[i].m_network.getneuronsactivity())+":"+Arrays.toString(dc[i].m_network.getneuronsactivity()));
                        }
                    }
                }
            }
            if (clsnr == numberofnetworks) {
                break;
            }
            Thread.sleep(1);
            counter += 1;
            // Watchdog: if most workers finished long ago, re-notify any
            // straggler that appears to be asleep without having classified.
            if ((clsnr) / ((float) numberofnetworks) > 0.75 && counter > 20 && counter == 2 * (donetime / ((float) clsnr))) {
                for (int i = 0; i < numberofnetworks; i++) {
                    if (ddone[i]) {
                        continue;
                    }
                    synchronized (dc[i]) {
                        if (dc[i].sleep && dc[i].do_classify != 2) {
                            System.out.println("Error in counter at "+intclass+" clr "+clsnr+" counter "+counter+ "restarting "+i);
                            dc[i].notify();
                        }
                    }
                }
            }
            if (counter % 1000 == 0 && counter > 0) {
            }
            if (this.getDebug()) {
                if (counter % 100 == 0) {
                    System.out.println("Error in counter at "+intclass+" clr "+clsnr+" counter "+counter);
                }
            }
        }
    } else {
        // after_test = System.currentTimeMillis();
        // Sequential path: query each voter in turn and sum the rankings.
        for (int i = 0; i < numberofnetworks; i++) {
            networks[i].activity_report = activity_report;
            double[] tdist = networks[i].distributionForInstance(instance);
            for (int j = 0; j < numClasses; j++) {
                dist[j] += tdist[j];
            }
            if (saveneuronsactivity) {
                /* int[] naia=networks[i].getneuronsactivated();
                // fwneurons.println(networks[i].nrinstclassified-1+":"+i+":"+Arrays.toString(naia)+":"+Arrays.toString(networks[i].getneuronsactivity()));
                for(int it=0;it<naia.length;it++){
                fwneurons.print(naia[it]+"#value:");
                int t1=0;
                for(int k=0;k<networks[i].weightsB[naia[it]].length/2;k++){
                if (networks[i].weightsB[naia[it]][k]!=0){
                if(t1==0){
                fwneurons.print(networks[i].weightsB[naia[it]][k]+"#positions:");
                t1=1;
                }
                fwneurons.print(k+",");
                }
                }
                fwneurons.print("\n");
                //fwneurons.println(naia[it]+":"+Arrays.toString(Arrays.copyOfRange(networks[i].weightsB[naia[it]],0,networks[i].numClasses/2)));
                }*/
            }
        }
    }
    // after_test1 = System.currentTimeMillis();
    // System.out.println("start:"+(after_test-before_test)/1000.0);
    // System.out.println("testing:"+(after_test1-after_test)/1000.0);
    intclass += 1;
    if (this.getDebug()) { if (intclass % 100 == 0) {
        System.out.println(".");
    }}
    if (m_userankstoclass) {
        return ARAMm_Ranking2Class(dist);
    }
    return dist;
}
/**
 * Converts a real-valued label ranking into a hard 0/1 class assignment by
 * cutting the descending-sorted ranking at the largest gap between
 * consecutive scores; everything above the cut becomes 1.
 */
public double[] ARAMm_Ranking2Class(double[] rankings) {
    int n = rankings.length;
    // Pair each score with its label index and sort descending by score.
    SortPair[] ordered = new SortPair[n];
    for (int i = 0; i < n; i++) {
        ordered[i] = new SortPair(rankings[i], i);
    }
    java.util.Arrays.sort(ordered);
    // Gap between neighbours i-1 and i, tagged with cut position i.
    SortPair[] gaps = new SortPair[n - 1];
    for (int i = 1; i < n; i++) {
        gaps[i - 1] = new SortPair(ordered[i - 1].getValue() - ordered[i].getValue(), i);
    }
    java.util.Arrays.sort(gaps); // largest gap first
    int cut = gaps[0].getOriginalIndex();
    double[] classes = new double[n];
    for (int i = 0; i < cut; i++) {
        classes[ordered[i].getOriginalIndex()] = 1;
    }
    return classes;
}
// ****YOU SHOULDN'T NEED TO CHANGE THIS
/**
 * Classifies the given test instance. The instance has to belong to a
 * dataset when it's being classified. Note that a classifier MUST
 * implement either this or distributionForInstance().
 *
 * @param instance the instance to be classified
 * @return the predicted most likely class for the instance or
 * Instance.missingValue() if no prediction is made
 * @exception Exception if an error occurred during the prediction
 */
public double classifyInstance(Instance instance) throws Exception {
    // obtain the (voted) per-class scores first
    double[] dist = distributionForInstance(instance);
    if (dist == null) {
        throw new Exception("Null distribution predicted");
    }
    switch (instance.classAttribute().type()) {
    case Attribute.NOMINAL:
        // arg-max over the distribution
        double max = 0;
        int maxIndex = 0;
        for (int i = 0; i < dist.length; i++) {
            if (dist[i] > max) {
                maxIndex = i;
                max = dist[i];
            }
        }
        if (max > 0) {
            return maxIndex;
        } else {
            //return Instance.missingValue();
        }
        // NOTE(review): with the missing-value return commented out, an
        // all-zero nominal distribution FALLS THROUGH into the NUMERIC
        // case below and returns dist[0] — confirm this is intended.
    case Attribute.NUMERIC:
        return dist[0];
    default:
        return -1;
    }
}
// ****ANY OPTIONS/PARAMETERS GO HERE****
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
/**
 * Returns an enumeration describing the available options.
 * Fixes: "-V" description typo ("Votersr"); options that consume a value
 * (-P, -THR, -V, -NA, -Rt) are now declared with one argument, matching
 * what setOptions(String[]) actually parses; the sparse-variant flags
 * (-Sp, -SpH, -SpHT) parsed by setOptions are now listed too.
 *
 * @return an enumeration of all the available options.
 */
@Override
public Enumeration listOptions() {
    Vector newVector = new Vector();
    newVector.addElement(
        new Option("\tChange generalization parameter Rho\n",
            "P", 1, "-P <value>"));
    newVector.addElement(
        new Option("\tChange threshold to select activated neurons (no-winner-takes-all)\n",
            "THR", 1, "-THR <value>"));
    newVector.addElement(
        new Option("\tUse ranking to class function special dev. for ARAM.\n",
            "K", 0, "-K"));
    newVector.addElement(
        new Option("\tFast ARAM.\n",
            "F", 0, "-F"));
    newVector.addElement(
        new Option("\tSparse ARAM.\n",
            "Sp", 0, "-Sp"));
    newVector.addElement(
        new Option("\tSparse ARAM (variant H).\n",
            "SpH", 0, "-SpH"));
    newVector.addElement(
        new Option("\tSparse ARAM (variant HT).\n",
            "SpHT", 0, "-SpHT"));
    newVector.addElement(
        new Option("\tthreaded ARAM.\n",
            "TF", 0, "-TF"));
    newVector.addElement(new Option("\tVoters\n\t ", "V", 1, "-V <value>"));
    newVector.addElement(new Option("\t Save neurons activity ARAM.\n",
        "NA", 1, "-NA <value>"));
    newVector.addElement(new Option("\t Save neurons activity in network ARAM.\n",
        "Rt", 1, "-Rt <value>"));
    return newVector.elements();
}
//****OPTIONS HERE SHOULD MATCH THOSE ADDED ABOVE****
/**
 * Parses a given list of options. Recognised options:<p>
 *
 * -P &lt;value&gt;: vigilance parameter rho<br>
 * -THR &lt;value&gt;: threshold for selecting activated neurons
 * (no-winner-takes-all)<br>
 * -K: use the ARAM ranking-to-class conversion<br>
 * -V &lt;value&gt;: number of voter networks<br>
 * -F: fast (single-sweep) learning<br>
 * -Sp / -SpH / -SpHT: sparse learning variants<br>
 * -TF: run voters in threads<br>
 * -NA &lt;file&gt;: save per-sample neuron activity to this file<br>
 * -Rt &lt;file&gt;: per-network activity report target<p>
 *
 * Remaining options are handed on to the superclass.
 *
 * @param options the list of options as an array of strings
 * @exception Exception if an option is not supported
 */
public void setOptions(String[] options) throws Exception {
    roa = (Utils.getOptionPos("P",options) >= 0) ? Double.parseDouble(Utils.getOption("P", options)) : roa;
    m_userankstoclass = Utils.getFlag("K",options);
    numberofnetworks = (Utils.getOptionPos("V",options) >= 0) ? Integer.parseInt(Utils.getOption("V", options)) : numberofnetworks;
    threshold = (Utils.getOptionPos("THR",options) >= 0) ? Double.parseDouble(Utils.getOption("THR", options)) : threshold;
    fastaram = Utils.getFlag("F",options);
    sparsearam = Utils.getFlag("Sp",options);
    sparsearamH = Utils.getFlag("SpH",options);
    sparsearamHT = Utils.getFlag("SpHT",options);
    tfastaram = Utils.getFlag("TF",options);
    fsna = Utils.getOption("NA", options);
    activity_report = (Utils.getOptionPos("Rt",options) >= 0) ? Utils.getOption("Rt", options) : "";
    // compare string CONTENT, not references: the original `fsna != ""`
    // is an identity check and does not reliably detect a non-empty value
    if (fsna != null && !fsna.isEmpty()) {
        saveneuronsactivity = true;
    }
    super.setOptions(options);
}
//****MORE OPTION PARSING STUFF****
/**
 * Gets the current settings of the classifier. The emitted array is
 * parseable by setOptions(String[]).
 *
 * @return an array of strings suitable for passing to setOptions
 */
public String [] getOptions() {
    Vector<String> result = new Vector<String>();
    result.add("-P");
    result.add(Double.toString(roa));
    result.add("-THR");
    result.add(Double.toString(threshold));
    if (m_userankstoclass)
        result.add("-K");
    result.add("-V");
    result.add(Integer.toString(numberofnetworks));
    if (fastaram)
        result.add("-F");
    if (sparsearam)
        result.add("-Sp");
    if (sparsearamH)
        result.add("-SpH");
    if (sparsearamHT)
        result.add("-SpHT");
    if (tfastaram)
        result.add("-TF");
    // content-based emptiness check; the original `fsna != ""` compared
    // references only, and `fsna.length() > 0` was redundant with it
    if (fsna != null && !fsna.isEmpty()) {
        result.add("-NA");
        result.add(fsna);
    }
    if (!activity_report.isEmpty()) {
        result.add("-Rt");
        result.add(activity_report);
    }
    return (String[]) result.toArray(new String[result.size()]);
}
//****ANY INFORMATION LIKE NO. OF UNITS ETC PRINTED HERE
/**
 * Returns a description of the classifier.
 *
 * @return a description of the classifier as a string.
 */
public String toString() {
    // short, fixed description; no model internals are printed
    return "ML ARAM classifier";
}
/**
 * Description to display in the GUI.
 *
 * @return the description
 */
public String globalInfo() {
    String summary = "Voted ML-ARAM. " + "For more information see:\n";
    return summary + getTechnicalInformation().toString();
}
/**
 * Returns the citation for the method behind this classifier.
 * Fixed missing spaces at the string-concatenation seams, which
 * previously produced "withthe" and "DataMining" in the booktitle.
 *
 * @return the technical information about this class
 */
@Override
public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "Fernando Benites");
    result.setValue(Field.TITLE, "HARAM: a Hierarchical ARAM neural network for large-scale text classification.");
    result.setValue(Field.BOOKTITLE, "HDM 2015, 3rd International Workshop on High "+
        "Dimensional Data Mining, In conjunction with "+
        "the IEEE International Conference on Data "+
        "Mining (IEEE ICDM 2015), 14 November 2015");
    result.setValue(Field.YEAR, "2015");
    return result;
}
//****MORE GUI RELATED STUFF AND PARAMETER ACCESS METHODS
// /**
// * Returns the tip text for this property
// * @return tip text for this property suitable for
// * displaying in the explorer/experimenter gui
// */
// public String useKernelEstimatorTipText() {
// return "Use a kernel estimator for numeric attributes rather than a "
// +"normal distribution.";
// }
// /**
// * Gets if kernel estimator is being used.
// *
// * @return Value of m_UseKernelEstimatory.
// */
// public boolean getUseKernelEstimator() {
//
// return m_UseKernelEstimator;
// }
//
// /**
// * Sets if kernel estimator is to be used.
// *
// * @param v Value to assign to m_UseKernelEstimatory.
// */
// public void setUseKernelEstimator(boolean v) {
//
// m_UseKernelEstimator = v;
// if (v) {
// setUseSupervisedDiscretization(false);
// }
// }
//
// /**
// * Returns the tip text for this property
// * @return tip text for this property suitable for
// * displaying in the explorer/experimenter gui
// */
// public String useSupervisedDiscretizationTipText() {
// return "Use supervised discretization to convert numeric attributes to nominal "
// +"ones.";
// }
//
// /**
// * Get whether supervised discretization is to be used.
// *
// * @return true if supervised discretization is to be used.
// */
// public boolean getUseSupervisedDiscretization() {
//
// return m_UseDiscretization;
// }
//
// /**
// * Set whether supervised discretization is to be used.
// *
// * @param newblah true if supervised discretization is to be used.
// */
// public void setUseSupervisedDiscretization(boolean newblah) {
//
// m_UseDiscretization = newblah;
// if (newblah) {
// setUseKernelEstimator(false);
// }
// }
/**
 * Incrementally trains every voter network with the given instance.
 *
 * @param instance the training instance
 * @throws Exception if an underlying network fails to update
 */
@Override
public void updateClassifier(Instance instance) throws Exception {
    for (int net = 0; net < numberofnetworks; net++) {
        networks[net].updateClassifier(instance);
    }
}
/** Sets the seed used for shuffling. */
@Override
public void setSeed(int seed) {
    m_seed=seed;
}
/** @return the seed used for shuffling */
@Override
public int getSeed() {
    return m_seed;
}
/** @return whether each voter network runs in its own thread */
public boolean getThreadedVoters(){
    return tfastaram;
}
/** Enables/disables one thread per voter network. */
public void setThreadedVoters(boolean fThreadedVoters){
    tfastaram=fThreadedVoters;
}
/** @return whether fast (single-sweep) learning is enabled */
public boolean getFastLearn(){
    return fastaram;
}
/**
 * Enables fast learning; the representations are mutually exclusive,
 * so enabling it clears all sparse-variant flags (and vice versa).
 */
public void setFastLearn(boolean ffastaram){
    fastaram=ffastaram;
    sparsearam=!ffastaram;
    sparsearamH=!ffastaram;
    sparsearamHT=!ffastaram;
}
/** @return whether the sparse implementation is enabled */
public boolean getSparse(){
    return sparsearam;
}
/** Enables the sparse implementation, clearing the other variants. */
public void setSparse(boolean fsparsearam){
    fastaram=!fsparsearam;
    sparsearam=fsparsearam;
    sparsearamH=!fsparsearam;
    sparsearamHT=!fsparsearam;
}
/** @return whether the sparse "H" variant is enabled */
public boolean getSparseH(){
    return sparsearamH;
}
/** Enables the sparse "HT" variant, clearing the other variants. */
public void setSparseHT(boolean fsparsearam){
    fastaram=!fsparsearam;
    sparsearam=!fsparsearam;
    sparsearamH=!fsparsearam;
    sparsearamHT=fsparsearam;
}
/** @return whether the sparse "HT" variant is enabled */
public boolean getSparseHT(){
    return sparsearamHT;
}
// NOTE(review): unlike setSparse/setSparseHT this does NOT clear the
// sibling flags — confirm whether that asymmetry is intended.
public void setSparseH(boolean fsparsearam){
    sparsearamH=fsparsearam;
}
/** @return the number of voter networks */
public int getVoters(){
    return numberofnetworks;
}
/** Sets the number of voter networks. */
public void setVoters(int fvoter){
    numberofnetworks=fvoter;
}
/** @return the ARAM vigilance parameter rho */
public double getVigilancy(){
    return roa;
}
/** Sets the ARAM vigilance parameter rho. */
public void setVigilancy(double vigilancy){
    roa=vigilancy;
}
/** Sets the neuron-activation threshold (no-winner-takes-all). */
public void setThreshold(double fthreshold){
    threshold=fthreshold;
}
/** @return the neuron-activation threshold */
public double getThreshold(){
    return threshold;
}
/** @return whether per-sample neuron activity is being saved */
public boolean getNeuronsActivity(){
    return saveneuronsactivity;
}
/** Enables/disables saving per-sample neuron activity. */
public void setNeuronsActivity(boolean fsaveneuronsactivity){
    saveneuronsactivity=fsaveneuronsactivity;
}
/** @return the file name neuron activity is written to */
public String getNeuronsActivityFileName(){
    return fsna;
}
/** Sets the file name neuron activity is written to. */
public void setNeuronsActivityFileName(String ffsna){
    fsna=ffsna;
}
/**
 * Shuts down and releases the worker threads used for threaded voting,
 * if threaded voting is enabled.
 */
public void freeNetworks() {
    if (tfastaram) {
        for (int idx = 0; idx < dc.length; idx++) {
            dc[idx].destroy();
            dc[idx] = null;
        }
        dc = null;
    }
}

/**
 * Frees the worker threads and drops the voter networks themselves.
 */
public void destroy() {
    freeNetworks();
    networks = null;
}
/**
 * Main method for testing this class.
 *
 * @param argv the options
 */
public static void main(String[] argv) {
    WvARAM classifier = new WvARAM();
    try {
        Evaluation.runExperiment(classifier, argv);
    } catch (Exception ex) {
        ex.printStackTrace();
        System.err.println(ex.getMessage());
    }
    System.out.println("Done");
}
/**
 * Worker thread that classifies one instance per hand-off with a single
 * voter network. Protocol: the owner assigns work via setinstance(),
 * wakes the thread with doNotify(), and polls do_classify/sleep for the
 * result in m_dist; destroy() wakes the thread with the exit flag set so
 * run() returns. All coordination is synchronized on this thread object.
 */
public class DistributionCalc extends Thread
{
    /** the voter network this worker wraps */
    ARAMNetworkClass m_network =null;
    /** the instance currently assigned for classification */
    Instance m_inst=null;
    /** result of the most recent classification */
    double[] m_dist=null;
    /** 1 = work assigned, 2 = result ready (polled by the owner) */
    int do_classify=0;
    boolean reuse=false;
    /** true while the thread is parked (or about to park) */
    boolean sleep=true;
    boolean sleep2=false;
    /** set by destroy(); makes run() return after being woken */
    boolean doexit=false;
    int id=0;//
    // NOTE(review): 'lock' is allocated but never used; all
    // synchronization uses 'this' — confirm it can be removed.
    private Object lock = null;
    /** cleared when a new result is published, set by the owner on pickup */
    boolean gathered=false;
    public DistributionCalc(ARAMNetworkClass network)
    {
        m_network=network;
        lock=new Object();
    }
    /** Assigns the next instance; the owner must still call doNotify(). */
    public void setinstance(Instance inst)
    {
        m_inst=inst;
        do_classify=1;
        //System.out.println("new instance "+id);
    }
    public void run()
    {
        // classify, publish the result, then park until the owner hands
        // over the next instance (doNotify) or shuts us down (destroy)
        while (true){
            sleep=false;
            //System.out.println("start classify again "+id);
            try {
                m_dist = m_network.distributionForInstance(m_inst);
            } catch (Exception e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
            //System.out.println("check "+id+" "+do_classify+" "+gathered);
            synchronized (this) {
                try {
                    // publish state for the polling owner before parking
                    do_classify=2;
                    reuse=true;
                    sleep=true;
                    gathered=false;
                    sleep2=true;
                    this.wait();
                    if(doexit){
                        return;
                    }
                } catch (InterruptedException e) {
                    // TODO Auto-generated catch block
                    e.printStackTrace();
                }
            }
        }
    }
    /** Wakes the parked worker so it classifies the next assigned instance. */
    public void doNotify() {
        synchronized(this) {
            this.notify();
            sleep2=false;
        }
    }
    /** Wakes the worker with the exit flag set, terminating run(). */
    public void destroy(){
        synchronized(this) {
            doexit=true;
            this.notify();
            sleep2=false;
        }
    }
}
/**
 * Worker thread that classifies a whole dataset with one voter network.
 * Unlike DistributionCalc it runs once to completion — there is no
 * park/notify protocol; the owner simply join()s the thread and reads
 * the per-instance distributions from {@link #results}.
 */
public class DistributionCalcM extends Thread
{
    /** the voter network this worker wraps */
    ARAMNetworkClass m_network =null;
    /** the dataset to classify */
    Instances m_inst=null;
    // NOTE(review): m_dist, do_classify, reuse, sleep, sleep2, doexit,
    // lock and gathered appear to be carried over from DistributionCalc
    // and are never used here — confirm they can be removed.
    double[] m_dist=null;
    /** results[i][j] = score of class j for instance i */
    double[][] results=null;
    int do_classify=0;
    boolean reuse=false;
    boolean sleep=true;
    boolean sleep2=false;
    boolean doexit=false;
    int id=0;//
    private Object lock = null;
    boolean gathered=false;
    public DistributionCalcM(ARAMNetworkClass network)
    {
        m_network=network;
        lock=new Object();
    }
    /** Assigns the dataset to classify before start() is called. */
    public void setinstances(Instances inst)
    {
        m_inst=inst;
        do_classify=1;
        //System.out.println("new instance "+id);
    }
    public void run()
    {
        int L=m_inst.classIndex();
        int N=m_inst.numInstances();
        results=new double[N][L];
        //System.out.println("start classify again "+id);
        try {
            for (int i = 0, c = 0; i < N; i++) {
                //if(m_network.getDebug()) { int t = i*50/m_inst.numInstances(); if(t > c) { System.out.print("#"); c = t; } }
                // No cheating allowed; clear all class information
                // (zero the label attributes of a copy before predicting)
                AbstractInstance x = (AbstractInstance)((AbstractInstance) m_inst.instance(i)).copy();
                for(int v = 0; v < m_inst.classIndex(); v++)
                    x.setValue(v,0.0);
                double y[] = m_network.distributionForInstance(x);
                for(int j=0;j<numClasses;j++){
                    results[i][j]=y[j];
                }
            }
        } catch (Exception e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }
}
/**
 * Worker thread that trains a single voter network on a dataset; the
 * owner starts one per voter and join()s them all.
 */
public class BuildClassifier extends Thread {
    /** the voter network to train */
    ARAMNetworkClass m_network = null;
    /** the training data */
    Instances m_inst = null;
    double[] m_dist = null;

    public BuildClassifier(ARAMNetworkClass network) {
        m_network = network;
    }

    /** Assigns the training data before start() is called. */
    public void setinstances(Instances inst) {
        m_inst = inst;
    }

    public void run() {
        try {
            m_network.buildClassifier(m_inst);
        } catch (Exception ex) {
            ex.printStackTrace();
        }
    }
}
/**
 * Computes distributions for a whole dataset by running one worker
 * thread per voter network and summing their per-instance outputs.
 * Cleaned up: removed the unused loop counter 'c' and the redundant
 * null-then-assign of the worker array.
 *
 * @param inst the instances to classify
 * @return summed scores, indexed [instance][class]
 * @throws Exception if a worker fails or the join is interrupted
 */
@Override
public double[][] distributionForInstanceM(Instances inst) throws Exception {
    int L = numClasses;
    int N = inst.numInstances();
    DistributionCalcM[] workers = new DistributionCalcM[numberofnetworks];
    for (int i = 0; i < numberofnetworks; i++) {
        workers[i] = new DistributionCalcM(networks[i]);
        workers[i].id = i;
        workers[i].setinstances(inst);
        workers[i].start();
    }
    double[][] results = new double[N][L];
    for (int k = 0; k < numberofnetworks; k++) {
        // wait for this voter to finish, then accumulate its votes
        workers[k].join();
        for (int i = 0; i < N; i++) {
            for (int j = 0; j < numClasses; j++) {
                results[i][j] += workers[k].results[i][j];
            }
        }
    }
    return results;
}
@Override
public void setDebug(boolean debug) {
    // NOTE(review): the debug flag is discarded here and getDebug() below
    // is hard-wired to false, so debug printouts guarded by getDebug()
    // elsewhere in this class can never fire — confirm this is intended.
}
@Override
public boolean getDebug() {
    // always reports debugging off (see note on setDebug)
    return false;
}
@Override
public String debugTipText() {
    // no tool tip is provided for the debug property
    return null;
}
@Override
public String getModel() {
    // no textual model representation is exposed
    return "";
}
@Override
public Capabilities getCapabilities() {
    // NOTE(review): returning null may cause an NPE in callers that
    // query capabilities — consider delegating to super.getCapabilities().
    return null;
}
@Override
public boolean isThreaded() {
    // evaluation-level threading is not reported through this accessor
    return false;
}
@Override
public void setThreaded(boolean setv) {
    // ignored; see isThreaded()
}
/** Tool tip for the threshold property (typo fixed: "activited"). */
public String thresholdTipText() {
    return "Set threshold to select activated neurons (no-winner-takes-all)";
}
/** Tool tip for the fast-learn property (typos fixed: "sweap", "trainig"). */
public String fastLearnTipText() {
    return "Only one sweep over the training data";
}
/** Tool tip for the neurons-activity property. */
public String neuronsActivityTipText() {
    return "Save the activity of neurons per sample";
}
/** Tool tip for the neurons-activity file-name property. */
public String neuronsActivityFileNameTipText() {
    return "Filename for saving the activity of neurons per sample";
}
/** Tool tip for the sparse property. */
public String sparseTipText() {
    return "Sparse implementation";
}
// NOTE(review): sparseHTipText and sparseHTTipText return identical
// text — consider distinguishing the two variants for the GUI.
public String sparseHTipText() {
    return "Sparse other implementation";
}
/** Tool tip for the sparse-HT property. */
public String sparseHTTipText() {
    return "Sparse other implementation";
}
/** Tool tip for the seed property. */
public String seedTipText() {
    return "seed when shuffling";
}
/** Tool tip for the threaded property. */
public String threadedTipText() {
    return "Use threads";
}
/** Tool tip for the threaded-voters property. */
public String threadedVotersTipText() {
    return "Use threads on voters";
}
/** Tool tip for the vigilance property. */
public String vigilancyTipText() {
    return "Vigilance parameter of ARAM";
}
/** Tool tip for the voters property. */
public String votersTipText() {
    return "How many voters?";
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget/BCC.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multitarget;
/**
* BCC.java - The Bayesian Classifier Chains (BCC) method.
* Multi-target version of BCC method (directly applicable) -- only the confidence information is different.
* @see meka.classifiers.multilabel.BCC
* @author Jesse Read
* @version June 2012
*/
import java.util.Arrays;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import weka.core.Instance;
import weka.classifiers.trees.J48;
import weka.core.RevisionUtils;
public class BCC extends meka.classifiers.multilabel.BCC implements MultiTargetClassifier {

    /** for serialization. */
    private static final long serialVersionUID = 2395428645144026318L;

    public BCC() {
        // default classifier for the GUI
        this.m_Classifier = new J48();
    }

    /**
     * Description to display in the GUI.
     *
     * @return the description
     */
    @Override
    public String globalInfo() {
        return "The Bayesian Classifier Chains (BCC) method.\n"
            + "Multi-target version of the BCC method (directly applicable).";
    }

    @Override
    protected String defaultClassifierString() {
        // default classifier for the CLI
        return "weka.classifiers.trees.J48";
    }

    @Override
    public double[] distributionForInstance(Instance x) throws Exception {
        int L = x.classIndex();
        // first L slots: chained predictions; last L slots: confidence 1.0
        double[] extended = Arrays.copyOf(super.distributionForInstance(x), L * 2);
        Arrays.fill(extended, L, extended.length, 1.0);
        return extended;
    }

    @Override
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 9117 $");
    }

    public static void main(String args[]) {
        ProblemTransformationMethod.evaluation(new meka.classifiers.multitarget.BCC(), args);
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget/CC.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multitarget;
/**
* CC.java - Multi-target version of Classifier Chains (CC).
* Only the confidence information is different, since multi-target implies a multi-dimensional posterior distribution.
*
* @see meka.classifiers.multilabel.CC
* @author Jesse Read
* @version Jan 2012
*/
import java.util.Arrays;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import weka.core.Instance;
import weka.classifiers.trees.J48;
import weka.core.RevisionUtils;
public class CC extends meka.classifiers.multilabel.CC implements MultiTargetClassifier {

    /** for serialization. */
    private static final long serialVersionUID = 2395428645144026318L;

    public CC() {
        // default classifier for the GUI
        this.m_Classifier = new J48();
    }

    /**
     * Description to display in the GUI.
     *
     * @return the description
     */
    @Override
    public String globalInfo() {
        return "The Classifier Chains (CC) method.\n"
            + "Multi-target version of CC (directly applicable, but the posterior distribution is multidimensional (may help ensemble performance)).";
    }

    @Override
    protected String defaultClassifierString() {
        // default classifier for the CLI
        return "weka.classifiers.trees.J48";
    }

    @Override
    public double[] distributionForInstance(Instance x) throws Exception {
        int L = x.classIndex();
        // first L slots: chained predictions; last L slots: confidence 1.0
        double[] extended = Arrays.copyOf(super.distributionForInstance(x), L * 2);
        Arrays.fill(extended, L, extended.length, 1.0);
        return extended;
    }

    @Override
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 9117 $");
    }

    public static void main(String args[]) {
        ProblemTransformationMethod.evaluation(new meka.classifiers.multitarget.CC(), args);
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget/CCp.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multitarget;
import java.util.Arrays;
import java.util.Random;
import weka.classifiers.AbstractClassifier;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import weka.core.Instance;
import weka.core.Instances;
import meka.core.MLUtils;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* CCp.java - Multitarget CC with probabilistic output.
* <br>
* This version includes probabilistic output in the distributionForInstance, like other MT methods.
* <br>
* i.e.: y[j+L] := P(y[j]|x) (this is usefull when used in an ensemble).
* <br>
* @see CC
* @version March 2012
* @author Jesse Read (jesse@tsc.uc3m.es)
*/
public class CCp extends meka.classifiers.multilabel.CC implements MultiTargetClassifier {

    /** for serialization. */
    private static final long serialVersionUID = 7139310187485439658L;

    /** head of the chain of per-target classifiers */
    protected meka.classifiers.multitarget.CCp.Link root = null;

    /** per-target confidence of the most recent prediction */
    protected double confidences[] = null;

    /**
     * One link of the classifier chain: a classifier for target
     * {@code chain[j]} trained with the targets later in the chain
     * removed from its view.
     */
    protected class Link {

        private meka.classifiers.multitarget.CCp.Link next = null;
        private AbstractClassifier classifier = null;
        public Instances _template = null;
        private int index = -1;
        private int excld[]; // indices of later-chain targets to delete
        private int j = 0; // this link's position within the chain

        /**
         * Builds this link and, recursively, the rest of the chain.
         *
         * @param chain the target ordering
         * @param j this link's position in the chain
         * @param train the full training data
         * @throws Exception if the base classifier cannot be built
         */
        public Link(int chain[], int j, Instances train) throws Exception {
            this.j = j;
            this.index = chain[j];
            // targets later in the chain are excluded from this link's view
            this.excld = Arrays.copyOfRange(chain,j+1,chain.length);
            Arrays.sort(this.excld);
            this.classifier = (AbstractClassifier)AbstractClassifier.forName(getClassifier().getClass().getName(),((AbstractClassifier)getClassifier()).getOptions());
            Instances new_train = new Instances(train);
            if(getDebug()) System.out.print(" "+this.index);
            new_train.setClassIndex(-1);
            // delete excluded attributes, tracking where our class index lands
            int c_index = chain[j];
            for(int i = excld.length-1; i >= 0; i--) {
                new_train.deleteAttributeAt(excld[i]);
                if (excld[i] < this.index)
                    c_index--;
            }
            new_train.setClassIndex(c_index);
            _template = new Instances(new_train,0);
            this.classifier.buildClassifier(new_train);
            new_train = null;
            if(j+1 < chain.length)
                next = new meka.classifiers.multitarget.CCp.Link(chain, ++j, train);
        }

        /**
         * Predicts this link's target, records its confidence, writes the
         * prediction into {@code test}, and passes it down the chain.
         */
        protected void classify(Instance test) throws Exception {
            // copy
            Instance copy = (Instance)test.copy();
            copy.setDataset(null);
            // delete attributes this link was not trained on
            for(int i = excld.length-1; i >= 0; i--) {
                copy.deleteAttributeAt(this.excld[i]);
            }
            //set template
            copy.setDataset(this._template);
            // round earlier-chain predictions to hard values.
            // BUG FIX: the original wrote every rounded value to the fixed
            // index 'j' (the chain depth) instead of rounding attribute k
            // in place, clobbering one attribute and leaving the rest
            // un-rounded.
            for(int k = 0; k < this.j; k++) {
                copy.setValue(k,Math.round(copy.value(k)));
            }
            //set class
            double dist[] = this.classifier.distributionForInstance(copy);
            int max_index = Utils.maxIndex(dist);
            confidences[this.index] = dist[max_index];
            test.setValue(this.index,max_index);
            //carry on
            if (next!=null) next.classify(test);
        }

        @Override
        public String toString() {
            return (next == null) ? String.valueOf(this.index) : String.valueOf(this.index)+">"+next.toString();
        }
    }

    /**
     * Description to display in the GUI.
     *
     * @return the description
     */
    @Override
    public String globalInfo() {
        return
            "CC method with probabilistic output (CCp).\n"
            + "This version includes probabilistic output in the distributionForInstance, like other MT methods.\n"
            + "i.e.: y[j+L] := P(y[j]|x) (this is useful when used in an ensemble).";
    }

    @Override
    public void buildClassifier(Instances D) throws Exception {
        testCapabilities(D);
        int L = D.classIndex();
        prepareChain(L);
        if(getDebug()) System.out.print(":- Chain (");
        root = new meka.classifiers.multitarget.CCp.Link(retrieveChain(),0,D);
        if (getDebug()) System.out.println(" ) -:");
    }

    @Override
    public double[] distributionForInstance(Instance x) throws Exception {
        int L = x.classIndex();
        confidences = new double[L];
        root.classify(x);
        // y[j] = predicted value of target j, y[j+L] = its confidence
        double y[] = new double[L*2];
        for(int j = 0; j < L; j++) {
            y[j] = x.value(j);
            y[j+L] = confidences[j];
        }
        return y;
    }

    @Override
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 9117 $");
    }

    public static void main(String args[]) {
        ProblemTransformationMethod.evaluation(new CCp(), args);
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget/CR.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multitarget;
/**
* CR.java - The Class-Relevance Method.
* (The generalised, multi-target version of the Binary Relevance (BR) method).
* @see BR.java
* @version Jan 2012
* @author Jesse Read (jesse@tsc.uc3m.es)
*/
import weka.classifiers.AbstractClassifier;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import weka.core.Instance;
import weka.core.Instances;
import weka.classifiers.trees.J48;
import meka.core.MLUtils;
import weka.core.RevisionUtils;
import weka.core.Utils;
public class CR extends meka.classifiers.multilabel.BR implements MultiTargetClassifier {

    /** for serialization. */
    private static final long serialVersionUID = 1627371180786293843L;

    /** per-target header templates used to re-shape instances */
    protected Instances m_Templates[] = null; // TEMPLATES

    public CR() {
        // default classifier for the GUI
        this.m_Classifier = new J48();
    }

    @Override
    protected String defaultClassifierString() {
        // default classifier for the CLI
        return "weka.classifiers.trees.J48";
    }

    /**
     * Description to display in the GUI.
     *
     * @return the description
     */
    @Override
    public String globalInfo() {
        return "The Class-Relevance Method.\n"
            + "(The generalised, multi-target version of the Binary Relevance (BR) method).";
    }

    @Override
    public void buildClassifier(Instances D) throws Exception {
        testCapabilities(D);
        int L = D.classIndex();
        if (getDebug()) System.out.print("Creating " + L + " models (" + m_Classifier.getClass().getName() + "): ");
        m_MultiClassifiers = AbstractClassifier.makeCopies(m_Classifier, L);
        m_Templates = new Instances[L];
        for (int j = 0; j < L; j++) {
            // keep only target attribute j, then train one model on it
            m_Templates[j] = MLUtils.keepAttributesAt(new Instances(D), new int[]{j}, L);
            m_Templates[j].setClassIndex(0);
            m_MultiClassifiers[j].buildClassifier(m_Templates[j]);
            if (getDebug()) System.out.print(" " + (m_Templates[j].classAttribute().name()));
            // keep only the (empty) header as the template
            m_Templates[j] = new Instances(m_Templates[j], 0);
        }
    }

    @Override
    public double[] distributionForInstance(Instance x) throws Exception {
        int L = x.classIndex();
        double result[] = new double[L * 2];
        for (int j = 0; j < L; j++) {
            // project the instance onto target j's template
            Instance projected = (Instance) x.copy();
            projected.setDataset(null);
            projected = MLUtils.keepAttributesAt(projected, new int[]{j}, L);
            projected.setDataset(m_Templates[j]);
            double posterior[] = m_MultiClassifiers[j].distributionForInstance(projected);
            result[j] = Utils.maxIndex(posterior);      // predicted value
            result[L + j] = posterior[(int) result[j]]; // its probability
        }
        return result;
    }

    @Override
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 9117 $");
    }

    public static void main(String args[]) {
        ProblemTransformationMethod.evaluation(new CR(), args);
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget/IncrementalMultiTargetClassifier.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* IncrementalMultiTargetClassifier.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.classifiers.multitarget;
import weka.classifiers.UpdateableClassifier;
/**
 * Interface for incremental multi-target classifiers.
 * Combines the multi-target contract with Weka's UpdateableClassifier
 * contract, so implementations can be trained one instance at a time
 * via updateClassifier(Instance).
 *
 * @author Joerg Wicker
 * @version $Revision$
 */
public interface IncrementalMultiTargetClassifier
  extends MultiTargetClassifier, UpdateableClassifier{
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget/MultiTargetClassifier.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multitarget;
import weka.classifiers.Classifier;
import weka.core.OptionHandler;
import meka.classifiers.MultiXClassifier;

/**
 * MultiTargetClassifier.java - A Multi-Target (i.e., Multi-Output / Multi-Dimensional) Classifier.
 * Implementing this interface signals to the Evaluation that we are dealing with multi-target data,
 * and a different evaluation output is given. Training and classification is the same, using the
 * methods <i>buildClassifier(Instances)</i> and <i>distributionForInstance(Instance)</i>, except that
 * the latter only returns the argmax value (i.e., what is to be considered the predicted value).
 * <br>
 * <br>
 * At the moment it is also possible to extend the <code>double[]</code> from <code>distributionForInstance</code>
 * to a vector of <code>L*2</code> doubles instead of <code>L</code> which contain the max.
 * In other words, <code>k</code> in position <code>j</code> and <code>p(y[j]=k)</code> in position <code>j+L</code>.
 * <br>
 * In the future we will make use of <code>double[] distributionForInstance(Instance,int)</code> instead.
 *
 * @author Jesse Read
 * @version January 2015
 */
public interface MultiTargetClassifier extends MultiXClassifier {

	/*
	 * TODO Returns the distribution of the k-th value, for each label.
	 *
	 * @return the multi-target distribution
	 */
	//public double[] distributionForInstance(Instance x, int k);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget/NSR.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multitarget;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import meka.core.PSUtils;
import meka.core.SuperLabelUtils;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.classifiers.trees.J48;
import meka.core.MLUtils;
import meka.core.A;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
 * NSR.java - The Nearest Set Replacement (NSR) method.
* A multi-target version of PS: The nearest sets are used to replace outliers, rather than subsets (as in PS).
* Important Note: currently can only handle 10 values (or fewer) per target variable.
* @see meka.classifiers.multilabel.PS
* @version Jan 2013
* @author Jesse Read
*/
public class NSR extends meka.classifiers.multilabel.PS implements MultiTargetClassifier {

	/** for serialization. */
	private static final long serialVersionUID = 8373228150066785001L;

	public NSR() {
		// default classifier for GUI
		this.m_Classifier = new J48();
	}

	@Override
	protected String defaultClassifierString() {
		// default classifier for CLI
		return "weka.classifiers.trees.J48";
	}

	/**
	 * Description to display in the GUI.
	 *
	 * @return the description
	 */
	@Override
	public String globalInfo() {
		// fixed typo: "Relpacement" -> "Replacement"
		return "The Nearest Set Replacement (NSR) method.\n"+
				"A multi-target version of PS: The nearest sets are used to replace outliers, rather than subsets (as in PS).";
	}

	@Override
	public Capabilities getCapabilities() {
		Capabilities result;
		result = super.getCapabilities();
		result.setMinimumNumberInstances(1);
		return result;
	}

	/**
	 * Builds the classifier by transforming the multi-target problem into a
	 * single-label problem over (pruned) label combinations. If too few
	 * distinct class values remain for the current pruning level, the pruning
	 * parameter is relaxed and the build is retried until P reaches 0.
	 *
	 * @param D the training data
	 * @throws Exception if no classifier can be constructed even with P == 0
	 */
	@Override
	public void buildClassifier(Instances D) throws Exception {
		testCapabilities(D);

		int L = D.classIndex();

		try {
			m_Classifier.buildClassifier(convertInstances(D,L));
		} catch(Exception e) {
			if (m_P > 0) {
				m_P--;
				System.err.println("Not enough distinct class values, trying again with P = "+m_P+" ...");
				buildClassifier(D);
			}
			else
				// preserve the underlying failure as the cause (was dropped before)
				throw new Exception("Failed to construct a classifier.", e);
		}
	}

	/**
	 * Predicts values for each target by taking the argmax label combination,
	 * and derives per-target confidences by accumulating the probability mass
	 * voted for each value across all combinations.
	 *
	 * @param x the instance to classify
	 * @return a vector of length 2L: values in [0,L), confidences in [L,2L)
	 * @throws Exception if classification fails
	 */
	@Override
	public double[] distributionForInstance(Instance x) throws Exception {

		int L = x.classIndex();

		// Convert to the single-label representation used at training time,
		// where the class attribute takes values in {comb_1,comb_2,...,comb_k}.
		Instance x_sl = PSUtils.convertInstance(x,L,m_InstancesTemplate);

		double w[] = m_Classifier.distributionForInstance(x_sl);			// w[i] = p(comb_i)
		int max_j = Utils.maxIndex(w);										// index of the most likely combination
		String y_max = m_InstancesTemplate.classAttribute().value(max_j);	// e.g. "0+3+0+0+1+2+0+0"

		// First L entries: decoded target values; last L entries: confidences.
		double y[] = Arrays.copyOf(A.toDoubleArray(MLUtils.decodeValue(y_max)),L*2);

		// votes[j] accumulates, per target j, the probability mass for each value
		@SuppressWarnings("unchecked")
		HashMap<Double,Double> votes[] = new HashMap[L];
		for(int j = 0; j < L; j++) {
			votes[j] = new HashMap<Double,Double>();
		}

		for(int i = 0; i < w.length; i++) {
			double y_i[] = A.toDoubleArray(MLUtils.decodeValue(m_InstancesTemplate.classAttribute().value(i)));
			for(int j = 0; j < y_i.length; j++) {
				votes[j].put(y_i[j] , votes[j].containsKey(y_i[j]) ? votes[j].get(y_i[j]) + w[i] : w[i]);
			}
		}

		// confidence = the largest accumulated vote for each target
		for(int j = 0; j < L; j++) {
			y[j+L] = votes[j].size() > 0 ? Collections.max(votes[j].values()) : 0.0;
		}

		return y;
	}

	// TODO: use PSUtils
	/**
	 * Convert a single-label distribution into a multi-label 0/1 vector:
	 * any label set receiving non-zero mass switches its labels on.
	 */
	public double[] convertDistribution(double y_sl[], int L) {
		double y_ml[] = new double[L];
		for(int i = 0; i < y_sl.length; i++) {
			if(y_sl[i] > 0.0) {
				double d[] = MLUtils.fromBitString(m_InstancesTemplate.classAttribute().value(i));
				for(int j = 0; j < d.length; j++) {
					if(d[j] > 0.0)
						y_ml[j] = 1.0;
				}
			}
		}
		return y_ml;
	}

	// TODO: use SuperLabelUtils
	/**
	 * GetTopNSubsets - return the top N subsets which differ from y by a single class value,
	 * ranked by the frequency stored in masterCombinations (most frequent first).
	 */
	public static String[] getTopNSubsets(String y, final HashMap<String,Integer> masterCombinations, int N) {
		String y_bits[] = y.split("\\+");
		ArrayList<String> Y = new ArrayList<String>();
		for(String y_ : masterCombinations.keySet()) {
			if(MLUtils.bitDifference(y_bits,y_.split("\\+")) <= 1) {
				Y.add(y_);
			}
		}
		// Sort by count, descending. The original comparator duplicated the '>'
		// test and could never return 1, violating the Comparator contract;
		// Integer.compare with swapped arguments yields the intended ordering.
		Collections.sort(Y, new Comparator<String>() {
			@Override
			public int compare(String s1, String s2) {
				// @note this is just done by the count, @todo: could add further conditions
				return Integer.compare(masterCombinations.get(s2), masterCombinations.get(s1));
			}
		});
		String Y_strings[] = Y.toArray(new String[Y.size()]);
		return Arrays.copyOf(Y_strings,Math.min(N,Y_strings.length));
	}

	// TODO use PSUtils
	/**
	 * Convert a multi-target dataset into a single-label dataset whose class
	 * attribute enumerates the (pruned) distinct label combinations. Pruned
	 * instances are optionally replaced by their m_N nearest surviving sets,
	 * with the instance weight shared among the replacements.
	 *
	 * @param D the multi-target training data
	 * @param L the number of target attributes
	 * @return the transformed single-label dataset
	 * @throws Exception if the transformation fails
	 */
	public Instances convertInstances(Instances D, int L) throws Exception {

		//Gather combinations
		HashMap<String,Integer> distinctCombinations = MLUtils.classCombinationCounts(D);
		if(getDebug())
			System.out.println("Found "+distinctCombinations.size()+" unique combinations");

		//Prune combinations
		MLUtils.pruneCountHashMap(distinctCombinations,m_P);
		if(getDebug())
			System.out.println("Pruned to "+distinctCombinations.size()+" with P="+m_P);

		// Remove all class attributes
		Instances D_ = MLUtils.deleteAttributesAt(new Instances(D),MLUtils.gen_indices(L));
		// Add a new class attribute enumerating the surviving combinations
		D_.insertAttributeAt(new Attribute("CLASS", new ArrayList<String>(distinctCombinations.keySet())),0);
		D_.setClassIndex(0);

		//Add class values
		for (int i = 0; i < D.numInstances(); i++) {
			String y = MLUtils.encodeValue(MLUtils.toIntArray(D.instance(i),L));
			if(distinctCombinations.containsKey(y)) {
				// the combination survived pruning: keep it as-is
				D_.instance(i).setClassValue(y);
			}
			// decomp: replace a pruned combination with its nearest surviving sets
			else if(m_N > 0) {
				String d_subsets[] = SuperLabelUtils.getTopNSubsets(y, distinctCombinations, m_N);
				for (String s : d_subsets) {
					Instance copy = (Instance)(D_.instance(i)).copy();
					copy.setClassValue(s);
					copy.setWeight(1.0 / d_subsets.length);
					D_.add(copy);
				}
			}
		}

		// instances whose combination was pruned and not replaced are dropped
		D_.deleteWithMissingClass();

		// keep the header of new dataset for classification
		m_InstancesTemplate = new Instances(D_, 0);

		if (getDebug())
			System.out.println(""+D_);

		return D_;
	}

	/** Split an encoded label combination, e.g. "0+3+1", into its parts. */
	public static String[] decodeValue(String a) {
		return a.split("\\+");
	}

	@Override
	public String getRevision() {
		return RevisionUtils.extract("$Revision: 9117 $");
	}

	public static void main(String args[]) {
		ProblemTransformationMethod.evaluation(new meka.classifiers.multitarget.NSR(), args);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget/RAkELd.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multitarget;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import meka.core.A;
import meka.core.F;
import meka.core.OptionUtils;
import meka.core.PSUtils;
import meka.core.SuperLabelUtils;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.core.Drawable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Vector;
/**
* RAkELd - Multi-target Version of RAkELd.
* Extends NSR just like the multi-label version extends PS.
* @see meka.classifiers.multilabel.RAkELd
* @author Jesse Read
* @version June 2016
*/
public class RAkELd extends NSR {

	/** for serialization. */
	private static final long serialVersionUID = -6208388889440497990L;

	/** one single-label classifier per disjoint label partition */
	protected Classifier m_Classifiers[] = null;

	/** header template of each partition's transformed dataset */
	protected Instances m_InstancesTemplates[] = null;

	int m_K = 3;
	int m_M = 10;

	/** kMap[m] = the original label indices covered by partition m */
	protected int kMap[][] = null;

	protected int vMap[][][] = null; // TODO use this to speed things up

	/**
	 * Description to display in the GUI.
	 *
	 * @return the description
	 */
	@Override
	public String globalInfo() {
		return "Takes RAndom partition of labELs; like RAkEL but labelsets are disjoint / non-overlapping subsets.";
	}

	@Override
	public void buildClassifier(Instances D) throws Exception {
		/*
		NOTE: This is a slow way of doing things at the moment, making use of multitarget.SCC functionality,
		even though multilabel.RAkELd is not a meta multi-label classifier.
		*/
		int L = D.classIndex();
		Random r = new Random(m_S);

		// Number of partitions needed to cover all L labels with subsets of size
		// m_K. BUGFIX: 'L / m_K' was integer division, which truncated BEFORE
		// Math.ceil and undercounted partitions (e.g. L=5, K=3 gave 1 instead
		// of 2); the division must be carried out in floating point.
		int num = (int) Math.ceil(L / (double) m_K);
		kMap = SuperLabelUtils.generatePartition(A.make_sequence(L),num,r,true);
		m_M = kMap.length;
		vMap = new int[m_M][][];
		m_Classifiers = AbstractClassifier.makeCopies(m_Classifier,m_M);
		m_InstancesTemplates = new Instances[m_M];

		if (getDebug())
			System.out.println("Building "+m_M+" models of "+m_K+" partitions:");

		D = SuperLabelUtils.SLTransformation(D, kMap, m_P, m_N);

		for(int i = 0; i < m_M; i++) {
			// keep only the i-th super-label as the class of this sub-dataset
			Instances D_i = F.keepLabels(D,D.classIndex(),new int[]{i});
			D_i.setClassIndex(0);

			if (getDebug())
				System.out.println("\tbuilding model "+(i+1)+"/"+m_M+": "+Arrays.toString(kMap[i]));

			m_Classifiers[i].buildClassifier(D_i);
			m_InstancesTemplates[i] = new Instances(D_i,0);
		}
	}

	/**
	 * Classify by letting each partition's model vote on the values of the
	 * labels it covers, then converting the votes into a distribution.
	 *
	 * @param x the instance to classify
	 * @return the combined multi-target distribution
	 * @throws Exception if classification fails
	 */
	@Override
	public double[] distributionForInstance(Instance x) throws Exception {

		int L = x.classIndex();

		// votes[j] maps a value of target j to the number of models voting for it
		@SuppressWarnings("unchecked")
		HashMap<Integer,Double> votes[] = new HashMap[L];
		for(int j = 0; j < L; j++) {
			votes[j] = new HashMap<Integer,Double>();
		}

		for(int m = 0; m < m_M; m++) {

			// Transform instance into the m-th model's super-label space
			Instance x_m = PSUtils.convertInstance(x, L, m_InstancesTemplates[m]);
			x_m.setDataset(m_InstancesTemplates[m]);

			// Get a meta classification and decode it back into per-target values
			int yp_j = (int)m_Classifiers[m].classifyInstance(x_m);
			int values[] = SuperLabelUtils.decodeValue(m_InstancesTemplates[m].classAttribute().value(yp_j));
			int k_indices[] = SuperLabelUtils.decodeClass(m_InstancesTemplates[m].classAttribute().name());

			// Vote with classification
			for(int j_k = 0; j_k < k_indices.length; j_k++) {
				int j = kMap[m][j_k]; // original target index
				Double score = votes[j].get(values[j_k]);
				votes[j].put(values[j_k],(score == null) ? 1. : score + 1.);
			}
		}

		return SuperLabelUtils.convertVotesToDistributionForInstance(votes);
	}

	@Override
	public String toString() {
		if (kMap == null)
			return "No model built yet";
		StringBuilder s = new StringBuilder("{");
		for(int k = 0; k < m_M; k++) {
			s.append(Arrays.toString(kMap[k]));
		}
		return s.append("}").toString();
	}

	/**
	 * Returns a string that describes a graph representing
	 * the object. The string should be in XMLBIF ver.
	 * 0.3 format if the graph is a BayesNet, otherwise
	 * it should be in dotty format.
	 *
	 * @return the graph described by a string (label index as key)
	 * @throws Exception if the graph can't be computed
	 */
	@Override
	public Map<Integer,String> graph() throws Exception {
		Map<Integer,String> result;

		result = new HashMap<Integer,String>();
		for (int i = 0; i < m_Classifiers.length; i++) {
			if (m_Classifiers[i] != null) {
				if (m_Classifiers[i] instanceof Drawable) {
					result.put(i, ((Drawable) m_Classifiers[i]).graph());
				}
			}
		}

		return result;
	}

	/**
	 * GetK - Get the k parameter (size of partitions).
	 */
	public int getK() {
		return m_K;
	}

	/**
	 * SetK - Sets the k parameter (size of partitions)
	 */
	public void setK(int k) {
		m_K = k;
	}

	public String kTipText() {
		return "The number of labels in each partition -- should be 1 <= k < (L/2) where L is the total number of labels.";
	}

	@Override
	public Enumeration listOptions() {
		Vector result = new Vector();
		result.addElement(new Option("\t"+kTipText(), "k", 1, "-k <num>"));
		OptionUtils.add(result, super.listOptions());
		return OptionUtils.toEnumeration(result);
	}

	@Override
	public void setOptions(String[] options) throws Exception {
		setK(OptionUtils.parse(options, 'k', 3));
		super.setOptions(options);
	}

	@Override
	public String [] getOptions() {
		List<String> result = new ArrayList<>();
		OptionUtils.add(result, 'k', getK());
		OptionUtils.add(result, super.getOptions());
		return OptionUtils.toArray(result);
	}

	@Override
	public String getRevision() {
		return RevisionUtils.extract("$Revision: 9117 $");
	}

	public static void main(String args[]) {
		ProblemTransformationMethod.evaluation(new RAkELd(), args);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget/SCC.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multitarget;
import meka.classifiers.multilabel.Evaluation;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import meka.classifiers.multilabel.MultiTargetCapable;
import meka.core.*;
import meka.filters.multilabel.SuperNodeFilter;
import weka.classifiers.Classifier;
import weka.core.*;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import java.util.*;
/**
* SCC.java - Super Class Classifier (aka Super Node Classifier).
* The output space is manipulated into super classes (based on label dependence), upon which a multi-target base classifier is applied.
* This is related to the RAkELd-type classifiers for multi-label classification.
*
* @author Jesse Read
* @version June 2012
*/
public class SCC extends NSR implements Randomizable, MultiTargetClassifier, TechnicalInformationHandler {

	/** the super-node filter built by the most recent trainClassifier call */
	private SuperNodeFilter f = new SuperNodeFilter();

	/** number of internal-validation refinement iterations */
	private int m_Iv = 0;

	/** number of simulated-annealing iterations */
	private int m_I = 1000;

	/* TODO make external options */
	private static final int i_SPLIT = 67;
	private static final String i_ErrFn = "Exact match";

	private Random rand = null;

	public SCC() {
		// default classifier for GUI
		this.m_Classifier = new CC();
	}

	@Override
	protected String defaultClassifierString() {
		// default classifier for CLI
		return "meka.classifiers.multitarget.CC";
	}

	/**
	 * Description to display in the GUI.
	 *
	 * @return the description
	 */
	@Override
	public String globalInfo() {
		return
				"Super Class Classifier (SCC).\n"
				+ "The output space is manipulated into super classes (based on label dependence; and pruning and nearest-subset-replacement like NSR), upon which a multi-target base classifier is applied.\n"
				+ "For example, a super class based on two labels might take values in {[0,3],[0,0],[1,2]}.\n"
				+ "For more information see:\n"
				+ getTechnicalInformation().toString();
	}

	@Override
	public TechnicalInformation getTechnicalInformation() {
		TechnicalInformation result;

		result = new TechnicalInformation(Type.INPROCEEDINGS);
		result.setValue(Field.AUTHOR, "Jesse Read, Concha Blieza, Pedro Larranaga");
		result.setValue(Field.TITLE, "Multi-Dimensional Classification with Super-Classes");
		result.setValue(Field.JOURNAL, "IEEE Transactions on Knowledge and Data Engineering");
		result.setValue(Field.YEAR, "2013");

		return result;
	}

	private double rating(int partition[][], double M[][]) {
		return rating(partition,M,0.0);
	}

	/**
	 * Rating - Return a score for the super-class 'partition' using the pairwise info in 'M'
	 * @param partition super-class partition, e.g., [[0,3],[2],[1,4]]
	 * @param M pairwise information \propto M[j][k]
	 * @param CRITICAL a critical value to use
	 *
	 * CRITICAL = 2.706;
	 * CRITICAL = 6.251;
	 * @Note: For now, assume 3 DOF (multi-label classification)
	 * @todo set CRITICAL into M, then this can be a generic function
	 */
	private double rating(int partition[][], double M[][], double CRITICAL) {

		int L = M.length;
		boolean T[][] = new boolean[L][L];		// T[j][k] = labels j,k share a super-class
		double sumTogether = 0.0, sumApart = 0.0;

		// Mark every pair of labels that appears together in some super-class.
		// (An unused sum array 'S' was removed here.)
		for(int i = 0; i < partition.length; i++) {
			Arrays.sort(partition[i]);
			int n = partition[i].length;
			for(int j = 0; j < n; j++) {
				for(int k = j+1; k < n; k++) {
					T[partition[i][j]][partition[i][k]] = true;
				}
			}
		}

		// Reward together-pairs and penalise apart-pairs, both relative to CRITICAL.
		for(int j = 0; j < L; j++) {
			for(int k = j+1; k < L; k++) {
				if (T[j][k])
					sumTogether += (M[j][k] - CRITICAL);
				else
					sumApart += (M[j][k] - CRITICAL);
			}
		}

		return sumTogether - sumApart;
	}

	/**
	 * MutateCombinations - move one randomly-chosen label either into another
	 * existing super-class or into a new singleton super-class; shrinks the
	 * partition if the source set becomes empty.
	 */
	private int[][] mutateCombinations(int partition[][], Random r) {

		int from = r.nextInt(partition.length);
		int i = r.nextInt(partition[from].length);
		int to = r.nextInt(partition.length);

		if (to == from) {
			// split the chosen label off into a brand-new singleton set
			partition = Arrays.copyOf(partition,partition.length+1);
			partition[partition.length-1] = new int[]{partition[from][i]};
			// (a dead assignment 'to = partition.length + 1' -- never read, and
			// out of bounds had it been used -- was removed here)
			partition[from] = A.delete(partition[from],i);
		}
		else {
			// move the chosen label into an existing other set
			partition[to] = A.append(partition[to],partition[from][i]);
			partition[from] = A.delete(partition[from],i);
		}

		// if the source set is now empty, replace it with the last set and shrink
		if (partition[from].length <= 0) {
			partition[from] = partition[partition.length-1];
			partition = Arrays.copyOf(partition,partition.length-1);
		}

		return partition;
	}

	/**
	 * Train classifier <code>h</code>, on dataset <code>D</code>, under super-class partition <code>partition</code>.
	 * <br>
	 * NOTE(review): despite the parameter, this builds <code>m_Classifier</code>, not
	 * <code>h</code>; all current callers pass <code>m_Classifier</code> -- confirm before relying on <code>h</code>.
	 */
	public void trainClassifier(Classifier h, Instances D, int partition[][]) throws Exception {
		f = new SuperNodeFilter();
		f.setIndices(partition);
		// negative P/N mean: draw a random value in [0, |P|) / [0, |N|)
		f.setP(m_P >= 0 ? m_P : rand.nextInt(Math.abs(m_P)));
		f.setN(m_N >= 0 ? m_N : rand.nextInt(Math.abs(m_N)));
		Instances D_ = f.process(D);
		if (getDebug()) {
			int N = D.numInstances();
			int U = MLUtils.numberOfUniqueCombinations(D);
			System.out.println("PS("+f.getP()+","+m_N+") reduced: "+N+" -> "+D_.numInstances()+" / "+U+" -> "+ MLUtils.numberOfUniqueCombinations(D_));
		}
		m_InstancesTemplate = D_;
		m_Classifier.buildClassifier(D_); // build on the processed batch
	}

	/**
	 * Test classifier h, on dataset D, under super-class partition 'partition'.
	 * Trains the model first (via trainClassifier), then evaluates on D_test.
	 * <br>
	 * TODO should be able to use something out of meka.classifiers.Evaluation instead of all this ...
	 */
	public Result testClassifier(Classifier h, Instances D_train, Instances D_test, int partition[][]) throws Exception {

		trainClassifier(m_Classifier,D_train,partition);

		Result result = Evaluation.testClassifier((ProblemTransformationMethod)h, D_test);

		if (h instanceof MultiTargetClassifier || Evaluation.isMT(D_test)) {
			result.setInfo("Type","MT");
		}
		else if (h instanceof ProblemTransformationMethod) {
			result.setInfo("Threshold", MLEvalUtils.getThreshold(result.predictions, D_train, "PCut1"));
			result.setInfo("Type","ML");
		}

		result.setValue("N_train",D_train.numInstances());
		result.setValue("N_test",D_test.numInstances());
		result.setValue("LCard_train",MLUtils.labelCardinality(D_train));
		result.setValue("LCard_test",MLUtils.labelCardinality(D_test));

		result.setInfo("Classifier_name",h.getClass().getName());
		result.setInfo("Classifier_info",h.toString());
		result.setInfo("Dataset_name",MLUtils.getDatasetName(D_test));

		result.output = Result.getStats(result,"1");
		return result;
	}

	/**
	 * Builds SCC: splits off an internal validation set, estimates pairwise
	 * label dependence (LEAD-style), searches for a good super-class partition
	 * by simulated annealing (optionally refined by internal evaluation), then
	 * trains the final model on all of D under the chosen partition.
	 *
	 * @param D the training data
	 * @throws Exception if the base classifier is not multi-target capable
	 */
	@Override
	public void buildClassifier(Instances D) throws Exception {

		int L = D.classIndex();
		rand = new Random(m_S);

		if (!(m_Classifier instanceof MultiTargetClassifier) && !(m_Classifier instanceof MultiTargetCapable)) {
			throw new Exception("[Error] The base classifier must be multi-target capable, i.e., from meka.classifiers.multitarget.");
		}

		// 0. SPLIT INTO TRAIN AND VALIDATION SET/S
		Instances D_r = new Instances(D);
		D_r.randomize(rand);
		Instances D_train = new Instances(D_r,0,D_r.numInstances()*i_SPLIT/100);
		Instances D_test = new Instances(D_r,D_train.numInstances(),D_r.numInstances()-D_train.numInstances());

		// 1. BUILD BR or EBR
		if (getDebug()) System.out.print("1. BUILD & Evaluate BR: ");
		CR cr = new CR();
		cr.setClassifier(((ProblemTransformationMethod)m_Classifier).getClassifier()); // assume PT
		Result result_1 = Evaluation.evaluateModel((ProblemTransformationMethod)cr,D_train,D_test,"PCut1","5");
		double acc1 = (Double)result_1.getMeasurement(i_ErrFn);
		if (getDebug()) System.out.println(" "+acc1);

		int partition[][] = SuperLabelUtils.generatePartition(A.make_sequence(L), rand);

		// 2. SELECT / MODIFY INDICES (using LEAD technique)
		if (getDebug()) System.out.println("2. GET ERR-CHI-SQUARED MATRIX: ");
		double MER[][] = StatUtils.condDepMatrix(D_test, result_1);
		if (getDebug()) System.out.println(MatrixUtils.toString(MER));

		/*
		 * 3. SIMULATED ANNEALING
		 * Always accept if best, progressively less likely accept otherwise.
		 */
		if (getDebug()) System.out.println("3. COMBINE NODES TO FIND THE BEST COMBINATION ACCORDING TO CHI");
		double w = rating(partition,MER);
		if (getDebug()) System.out.println("@0 : "+SuperLabelUtils.toString(partition)+ "\t("+w+")");

		for(int i = 0; i < m_I; i++) {
			int partition_[][] = mutateCombinations(MatrixUtils.deep_copy(partition),rand);
			double w_ = rating(partition_,MER); // this is really p_MER(partition_)
			if (w_ > w) {
				// ACCEPT
				partition = partition_;
				w = w_;
				if (getDebug()) System.out.println("@"+i+" : "+SuperLabelUtils.toString(partition)+ "\t("+w+")");
			}
			else {
				// MAYBE ACCEPT (with probability decreasing over time and distance)
				double diff = Math.abs(w_-w);
				double p = (2.*(1. - sigma(diff*i/1000.)));
				if (p > rand.nextDouble()) {
					// OK, ACCEPT NOW
					if (getDebug()) System.out.println("@"+i+" : "+SuperLabelUtils.toString(partition_)+ "\t("+w_+")*");
					partition = partition_;
					w = w_;
				}
			}
		}

		/*
		 * METHOD 2
		 * refine the set we started with above, with a few iterations.
		 * we mutate a set, and accept whenever the classification performance is GREATER
		 */
		if (m_Iv > 0) {
			if (getDebug()) System.out.println("4. REFINING THE INITIAL SET WITH SOME OLD-FASHIONED INTERNAL EVAL");
			// Build & evaluate the classifier with the latest partition
			result_1 = testClassifier((ProblemTransformationMethod)m_Classifier,D_train,D_test,partition);
			w = (Double)result_1.getMeasurement(i_ErrFn);
			if (getDebug()) System.out.println("@0 : "+SuperLabelUtils.toString(partition)+ "\t("+w+")");
			for(int i = 0; i < m_Iv; i++) {
				int partition_[][] = mutateCombinations(MatrixUtils.deep_copy(partition),rand);
				// Build with the mutated partition and evaluate on D_test.
				// (testClassifier itself (re)trains the model, so the extra
				// trainClassifier call the original made here -- which trained
				// on the OLD partition only to be immediately overwritten --
				// has been removed.)
				Result result_2 = testClassifier((ProblemTransformationMethod)m_Classifier,D_train,D_test,partition_);
				double w_ = (Double)result_2.getMeasurement(i_ErrFn);
				if (w_ > w) {
					w = w_;
					partition = partition_;
					if (getDebug()) System.out.println("@"+(i+1)+"' : "+SuperLabelUtils.toString(partition)+ "\t("+w+")");
				}
			}
		}

		// 4. TRAIN THE FINAL MODEL ON ALL OF D, UNDER THE CHOSEN PARTITION
		if (getDebug()) System.out.println("4. TRAIN "+SuperLabelUtils.toString(partition));
		trainClassifier(m_Classifier,D,partition);

		// 5. MOVE ON ...
	}

	/**
	 * Classify by predicting in the super-class space and decoding each
	 * super-node's prediction back into the original target attributes.
	 *
	 * @param x the instance to classify
	 * @return a vector of length 2L: values in [0,L), confidences in [L,2L)
	 * @throws Exception if classification fails
	 */
	@Override
	public double[] distributionForInstance(Instance x) throws Exception {

		int L = x.classIndex();
		double y[] = new double[L*2];

		// Convert (x,y) to the super-class representation (x_,y_)
		int L_ = m_InstancesTemplate.classIndex(); // == L-NUM
		Instance x_ = MLUtils.setTemplate(x,f.getTemplate(),m_InstancesTemplate);

		// Get a classification y_ = h(x_)
		double y_[] = null;
		try {
			y_ = ((ProblemTransformationMethod)m_Classifier).distributionForInstance(x_);
		} catch(Exception e) {
			// best-effort: fall back to the all-zero prediction rather than crash
			System.err.println("EXCEPTION !!! setting to "+Arrays.toString(y_));
			return y;
		}

		// For each super node, decode values and confidences into the original targets
		for(int j = 0; j < L_; j++) {
			int idxs[] = SuperNodeFilter.decodeClasses(m_InstancesTemplate.attribute(j).name());			// e.g. 3,4 (partition)
			String vals[] = SuperNodeFilter.decodeValue(m_InstancesTemplate.attribute(j).value((int)Math.round(y_[j]))); // e.g. 1,0 (classes)
			for(int i = 0; i < idxs.length; i++) {
				y[idxs[i]] = x.dataset().attribute(idxs[i]).indexOfValue(vals[i]);	// y_j = v
				y[idxs[i]+L] = y_[j+L_];	// P(Y_j = v), hence, MUST be a multi-target classifier
			}
		}

		return y;
	}

	public void setI(int i) {
		m_I = i;
	}

	public int getI() {
		return m_I;
	}

	public void setIv(int v) {
		m_Iv = v;
	}

	public int getIv() {
		return m_Iv;
	}

	public static void main(String args[]) {
		ProblemTransformationMethod.evaluation(new SCC(), args);
	}

	/**
	 * Sigmoid / Logistic function
	 */
	public static final double sigma(double a) {
		return 1.0/(1.0+Math.exp(-a));
	}

	@Override
	public Enumeration listOptions() {
		Vector result = new Vector();
		result.addElement(new Option("\tSets the number of simulated annealing iterations\n\tdefault: 1000", "I", 1, "-I <value>"));
		result.addElement(new Option("\tSets the number of internal-validation iterations\n\tdefault: 0", "V", 1, "-V <value>"));
		OptionUtils.add(result, super.listOptions());
		return OptionUtils.toEnumeration(result);
	}

	@Override
	public void setOptions(String[] options) throws Exception {
		setI(OptionUtils.parse(options, 'I', 1000));
		setIv(OptionUtils.parse(options, 'V', 0));
		super.setOptions(options);
	}

	@Override
	public String [] getOptions() {
		List<String> result = new ArrayList<>();
		OptionUtils.add(result, 'I', getI());
		OptionUtils.add(result, 'V', getIv());
		OptionUtils.add(result, super.getOptions());
		return OptionUtils.toArray(result);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget/incremental/CRUpdateable.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multitarget.incremental;
import meka.classifiers.incremental.IncrementalEvaluation;
import meka.classifiers.multitarget.CR;
import meka.classifiers.multitarget.IncrementalMultiTargetClassifier;
import meka.core.MLUtils;
import weka.classifiers.UpdateableClassifier;
import weka.classifiers.trees.HoeffdingTree;
import weka.core.Instance;
/**
* CRUpdateable.java - Updateable CR. Must be run with an UpdateableClassifier base classifier.
*
* @see CR
* @author Joerg Wicker
* @version December, 2017
*/
public class CRUpdateable extends CR implements IncrementalMultiTargetClassifier {

	/** for serialization. */
	private static final long serialVersionUID = 6705611077773512052L;

	@Override
	public String globalInfo() {
		return "Updateable CR\nMust be run with an Updateable base classifier.";
	}

	public CRUpdateable() {
		// default classifier for GUI
		this.m_Classifier = new HoeffdingTree();
	}

	@Override
	protected String defaultClassifierString() {
		// default classifier for CLI
		return "weka.classifiers.trees.HoeffdingTree";
	}

	/**
	 * Incrementally updates every per-target model with the new instance.
	 *
	 * @param x the incoming (multi-target) instance
	 * @throws Exception if any base model's update fails
	 */
	@Override
	public void updateClassifier(final Instance x) throws Exception {

		final int numTargets = x.classIndex();

		if (this.getDebug()) {
			System.out.print("-: Updating " + numTargets + " models");
		}

		for (int target = 0; target < numTargets; target++) {
			// Isolate the current target: detach the dataset reference, keep
			// only attribute 'target' among the class attributes, then bind the
			// result to the single-target template built at training time.
			Instance single = (Instance) x.copy();
			single.setDataset(null);
			single = MLUtils.keepAttributesAt(single, new int[] { target }, numTargets);
			single.setDataset(this.m_InstancesTemplates[target]);

			((UpdateableClassifier) this.m_MultiClassifiers[target]).updateClassifier(single);
		}

		if (this.getDebug()) {
			System.out.println(":- ");
		}
	}

	public static void main(final String args[]) {
		IncrementalEvaluation.runExperiment(new CRUpdateable(), args);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget/meta/BaggingMT.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multitarget.meta;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import meka.classifiers.multilabel.meta.BaggingML;
import meka.classifiers.multitarget.CC;
import meka.classifiers.multitarget.MultiTargetClassifier;
import meka.core.MLUtils;
import meka.core.SuperLabelUtils;
import weka.core.Instance;
import weka.core.RevisionUtils;
import java.util.HashMap;
/**
* BaggingMT.java - The Multi-Target Version of BaggingML.
* It takes votes using the confidence outputs of the base classifier.
* @see meka.classifiers.multilabel.meta.BaggingML
* @author Jesse Read
* @version March 2012
*/
public class BaggingMT extends BaggingML implements MultiTargetClassifier {

	/** for serialization. */
	private static final long serialVersionUID = -8107887827513707843L;

	/**
	 * Default constructor; installs CC as base classifier for the GUI.
	 */
	public BaggingMT() {
		// default classifier for GUI
		this.m_Classifier = new CC();
	}

	/**
	 * Returns the class name of the default base classifier for the CLI.
	 *
	 * @return the fully qualified classifier class name
	 */
	@Override
	protected String defaultClassifierString() {
		// default classifier for CLI
		return "meka.classifiers.multitarget.CC";
	}

	/**
	 * Description to display in the GUI.
	 *
	 * @return the description
	 */
	@Override
	public String globalInfo() {
		return "Multi-Target Version of BaggingML\n" + "It takes votes using the confidence outputs of the base classifier.";
	}

	/**
	 * Combines the ensemble members' predictions by accumulating, per target,
	 * the confidence mass each predicted class value receives across members.
	 *
	 * @param x the instance to predict
	 * @return the combined distribution over the targets
	 * @throws Exception if a base classifier fails to predict
	 */
	@Override
	public double[] distributionForInstance(Instance x) throws Exception {
		final int L = x.classIndex();
		HashMap<Integer,Double> votes[] = new HashMap[L];
		for (int j = 0; j < L; j++) {
			votes[j] = new HashMap<Integer,Double>();
		}
		for (int m = 0; m < m_NumIterations; m++) {
			double prediction[] = ((ProblemTransformationMethod) m_Classifiers[m]).distributionForInstance(x);
			// votes[j] = votes[j] + P(j|x) @TODO: only if prediction.length > L
			for (int j = 0; j < L; j++) {
				int value = (int) prediction[j];
				double confidence = prediction[j + L];
				Double sum = votes[j].get(value);
				votes[j].put(value, (sum == null) ? confidence : sum + confidence);
			}
		}
		return SuperLabelUtils.convertVotesToDistributionForInstance(votes);
	}

	@Override
	public String getRevision() {
		return RevisionUtils.extract("$Revision: 9117 $");
	}

	public static void main(String args[]) {
		ProblemTransformationMethod.evaluation(new BaggingMT(), args);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget/meta/EnsembleMT.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.classifiers.multitarget.meta;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import meka.classifiers.multilabel.meta.EnsembleML;
import meka.classifiers.multitarget.CC;
import meka.classifiers.multitarget.MultiTargetClassifier;
import meka.core.MLUtils;
import weka.core.Instance;
import weka.core.RevisionUtils;
import java.util.HashMap;
/**
* The Multi-Target Version of EnsembleML.
* It takes votes using the confidence outputs of the base classifier.
* @see meka.classifiers.multilabel.meta.EnsembleML
* @author Jesse Read
* @version Sepetember 2012
*/
public class EnsembleMT extends EnsembleML implements MultiTargetClassifier {

	/** for serialization. */
	private static final long serialVersionUID = 1213045324147680550L;

	/**
	 * Default constructor; installs CC as base classifier for the GUI.
	 */
	public EnsembleMT() {
		// default classifier for GUI
		this.m_Classifier = new CC();
	}

	/**
	 * Returns the class name of the default base classifier for the CLI.
	 *
	 * @return the fully qualified classifier class name
	 */
	@Override
	protected String defaultClassifierString() {
		// default classifier for CLI
		return "meka.classifiers.multitarget.CC";
	}

	/**
	 * Description to display in the GUI.
	 *
	 * @return the description
	 */
	@Override
	public String globalInfo() {
		return
				"The Multi-Target Version of EnsembleML.\n"
				+ "It takes votes using the confidence outputs of the base classifier.";
	}

	/**
	 * Predicts the instance by letting every ensemble member vote with its
	 * confidence, per target; the class value with the largest accumulated
	 * confidence wins.
	 *
	 * @param x the instance to predict
	 * @return the predicted class value for each target
	 * @throws Exception if a base classifier fails to predict
	 */
	@Override
	public double[] distributionForInstance(Instance x) throws Exception {
		final int L = x.classIndex();
		HashMap<Integer,Double> votes[] = new HashMap[L];
		for (int j = 0; j < L; j++) {
			votes[j] = new HashMap<Integer,Double>();
		}
		for (int m = 0; m < m_NumIterations; m++) {
			double prediction[] = ((ProblemTransformationMethod) m_Classifiers[m]).distributionForInstance(x);
			// votes[j] = votes[j] + P(j|x) @TODO: only if prediction.length > L
			for (int j = 0; j < L; j++) {
				int value = (int) prediction[j];
				double confidence = prediction[j + L];
				Double sum = votes[j].get(value);
				votes[j].put(value, (sum == null) ? confidence : sum + confidence);
			}
		}
		double y[] = new double[L];
		for (int j = 0; j < L; j++) {
			// take the class value with the maximum accumulated weight
			y[j] = (Integer) MLUtils.maxItem(votes[j]);
		}
		return y;
	}

	@Override
	public String getRevision() {
		return RevisionUtils.extract("$Revision: 9117 $");
	}

	public static void main(String args[]) {
		ProblemTransformationMethod.evaluation(new EnsembleMT(), args);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget/meta/FilteredClassifier.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* FilteredClassifier.java
* Copyright (C) 2017 University of Waikato, Hamilton, NZ
*/
package meka.classifiers.multitarget.meta;
import meka.classifiers.multilabel.ProblemTransformationMethod;
import meka.classifiers.multitarget.CC;
import meka.classifiers.multitarget.MultiTargetClassifier;
import weka.classifiers.Classifier;
/**
* Allows the application of a filter in conjunction with a multi-target classifier.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class FilteredClassifier
		extends meka.classifiers.multilabel.meta.FilteredClassifier
		implements MultiTargetClassifier {

	private static final long serialVersionUID = 6632813009466375365L;

	/**
	 * Default constructor.
	 *
	 * Turns off the check for a modified class attribute and installs CC as
	 * the default base classifier.
	 */
	public FilteredClassifier() {
		super();
		setDoNotCheckForModifiedClassAttribute(true);
		m_Classifier = new CC();
	}

	/**
	 * String describing the default classifier.
	 *
	 * @return the fully qualified classifier class name
	 */
	protected String defaultClassifierString() {
		return CC.class.getName();
	}

	/**
	 * Sets the base learner; only multi-target classifiers are accepted.
	 *
	 * @param newClassifier the classifier to use.
	 * @throws IllegalArgumentException if the classifier is not a MultiTargetClassifier
	 */
	@Override
	public void setClassifier(Classifier newClassifier) {
		if (!(newClassifier instanceof MultiTargetClassifier)) {
			throw new IllegalArgumentException("Classifier must be a " + MultiTargetClassifier.class.getName() + "!");
		}
		super.setClassifier(newClassifier);
	}

	public static void main(String args[]) {
		ProblemTransformationMethod.evaluation(new FilteredClassifier(), args);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multitarget/meta/MultiSearch.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* MultiSearch.java
* Copyright (C) 2008-2017 University of Waikato, Hamilton, New Zealand
*/
package meka.classifiers.multitarget.meta;
import meka.classifiers.AbstractMultiSearch;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.classifiers.multitarget.MultiTargetClassifier;
import meka.classifiers.multitarget.RAkELd;
import weka.classifiers.Classifier;
import weka.core.RevisionUtils;
import weka.core.SerializedObject;
import weka.core.setupgenerator.AbstractParameter;
import weka.core.setupgenerator.MathParameter;
/**
<!-- globalinfo-start -->
* Performs a search of an arbitrary number of parameters of a classifier and chooses the best combination found.<br>
* The properties being explored are totally up to the user.<br>
* <br>
* E.g., if you have a FilteredClassifier selected as base classifier, sporting a PLSFilter and you want to explore the number of PLS components, then your property will be made up of the following components:<br>
* - filter: referring to the FilteredClassifier's property (= PLSFilter)<br>
* - numComponents: the actual property of the PLSFilter that we want to modify<br>
* And assembled, the property looks like this:<br>
* filter.numComponents<br>
* <br>
* <br>
* The best classifier setup can be accessed after the buildClassifier call via the getBestClassifier method.<br>
* <br>
* The trace of setups evaluated can be accessed after the buildClassifier call as well, using the following methods:<br>
* - getTrace()<br>
* - getTraceSize()<br>
* - getTraceValue(int)<br>
* - getTraceFolds(int)<br>
* - getTraceClassifierAsCli(int)<br>
* - getTraceParameterSettings(int)<br>
* <br>
* Using the weka.core.setupgenerator.ParameterGroup parameter, it is possible to group dependent parameters. In this case, all top-level parameters must be of type weka.core.setupgenerator.ParameterGroup.
* <br><br>
<!-- globalinfo-end -->
*
<!-- options-start -->
* Valid options are: <p>
*
* <pre> -E <ACC|JIDX|HSCORE|EM|JDIST|HLOSS|ZOLOSS|HARSCORE|OE|RLOSS|AVGPREC|LOGLOSSL|LOGLOSSD|F1MICRO|F1MACROEX|F1MACROLBL|AUPRC|AUROC|LCARD|LDIST>
* Determines the parameter used for evaluation:
* ACC = Accuracy
* JIDX = Jaccard index
* HSCORE = Hamming score
* EM = Exact match
* JDIST = Jaccard distance
* HLOSS = Hamming loss
* ZOLOSS = ZeroOne loss
* HARSCORE = Harmonic score
* OE = One error
* RLOSS = Rank loss
* AVGPREC = Avg precision
* LOGLOSSL = Log Loss (lim. L)
* LOGLOSSD = Log Loss (lim. D)
* F1MICRO = F1 (micro averaged)
* F1MACROEX = F1 (macro averaged by example)
* F1MACROLBL = F1 (macro averaged by label)
* AUPRC = AUPRC (macro averaged)
* AUROC = AUROC (macro averaged)
* LCARD = Label cardinality (predicted)
* LDIST = Levenshtein distance
* (default: ACC)</pre>
*
* <pre> -search "<classname options>"
* A property search setup.
* </pre>
*
* <pre> -algorithm "<classname options>"
* A search algorithm.
* </pre>
*
* <pre> -log-file <filename>
* The log file to log the messages to.
* (default: none)</pre>
*
* <pre> -S <num>
* Random number seed.
* (default 1)</pre>
*
* <pre> -W
* Full name of base classifier.
* (default: meka.classifiers.multitarget.RAkELd)</pre>
*
* <pre> -output-debug-info
* If set, classifier is run in debug mode and
* may output additional info to the console</pre>
*
* <pre> -do-not-check-capabilities
* If set, classifier capabilities are not checked before classifier is built
* (use with caution).</pre>
*
* <pre> -num-decimal-places
* The number of decimal places for the output of numbers in the model (default 2).</pre>
*
* <pre> -batch-size
* The desired batch size for batch prediction (default 100).</pre>
*
* <pre>
* Options specific to classifier meka.classifiers.multitarget.RAkELd:
* </pre>
*
* <pre> -k <num>
* The number of labels in each partition -- should be 1 <= k < (L/2) where L is the total number of labels.</pre>
*
* <pre> -P <value>
* Sets the pruning value, defining an infrequent labelset as one which occurs <= P times in the data (P = 0 defaults to LC).
* default: 0 (LC)</pre>
*
* <pre> -N <value>
* Sets the (maximum) number of frequent labelsets to subsample from the infrequent labelsets.
* default: 0 (none)
* n N = n
* -n N = n, or 0 if LCard(D) >= 2
* n-m N = random(n,m)</pre>
*
* <pre> -S <value>
* The seed value for randomization
* default: 0</pre>
*
* <pre> -W
* Full name of base classifier.
* (default: weka.classifiers.trees.J48)</pre>
*
* <pre> -output-debug-info
* If set, classifier is run in debug mode and
* may output additional info to the console</pre>
*
* <pre> -do-not-check-capabilities
* If set, classifier capabilities are not checked before classifier is built
* (use with caution).</pre>
*
* <pre> -num-decimal-places
* The number of decimal places for the output of numbers in the model (default 2).</pre>
*
* <pre> -batch-size
* The desired batch size for batch prediction (default 100).</pre>
*
* <pre>
* Options specific to classifier weka.classifiers.trees.J48:
* </pre>
*
* <pre> -U
* Use unpruned tree.</pre>
*
* <pre> -O
* Do not collapse tree.</pre>
*
* <pre> -C <pruning confidence>
* Set confidence threshold for pruning.
* (default 0.25)</pre>
*
* <pre> -M <minimum number of instances>
* Set minimum number of instances per leaf.
* (default 2)</pre>
*
* <pre> -R
* Use reduced error pruning.</pre>
*
* <pre> -N <number of folds>
* Set number of folds for reduced error
* pruning. One fold is used as pruning set.
* (default 3)</pre>
*
* <pre> -B
* Use binary splits only.</pre>
*
* <pre> -S
* Do not perform subtree raising.</pre>
*
* <pre> -L
* Do not clean up after the tree has been built.</pre>
*
* <pre> -A
* Laplace smoothing for predicted probabilities.</pre>
*
* <pre> -J
* Do not use MDL correction for info gain on numeric attributes.</pre>
*
* <pre> -Q <seed>
* Seed for random data shuffling (default 1).</pre>
*
* <pre> -doNotMakeSplitPointActualValue
* Do not make split point actual value.</pre>
*
* <pre> -output-debug-info
* If set, classifier is run in debug mode and
* may output additional info to the console</pre>
*
* <pre> -do-not-check-capabilities
* If set, classifier capabilities are not checked before classifier is built
* (use with caution).</pre>
*
* <pre> -num-decimal-places
* The number of decimal places for the output of numbers in the model (default 2).</pre>
*
* <pre> -batch-size
* The desired batch size for batch prediction (default 100).</pre>
*
<!-- options-end -->
*
* @author fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision: 4521 $
*/
public class MultiSearch
		extends AbstractMultiSearch
		implements MultiLabelClassifier, MultiTargetClassifier {

	/** for serialization. */
	private static final long serialVersionUID = -5129316523575906233L;

	/**
	 * Returns the default classifier to use.
	 *
	 * @return the default classifier
	 */
	protected Classifier defaultClassifier() {
		return new RAkELd();
	}

	/**
	 * Returns the default search parameters: one math parameter exploring the
	 * "K" property from 1 to 3 in steps of 1.
	 *
	 * @return the parameters
	 */
	protected AbstractParameter[] defaultSearchParameters() {
		MathParameter param = new MathParameter();
		param.setProperty("K");
		param.setMin(1);
		param.setMax(3);
		param.setStep(1);
		param.setBase(10);
		param.setExpression("I");
		AbstractParameter[] result = new AbstractParameter[]{param};
		try {
			// return a deep copy so callers cannot modify the defaults
			result = (AbstractParameter[]) new SerializedObject(result).getObject();
		}
		catch (Exception e) {
			result = new AbstractParameter[0];
			System.err.println("Failed to create copy of default parameters!");
			e.printStackTrace();
		}
		return result;
	}

	/**
	 * Sets the base learner; only multi-target classifiers are accepted.
	 *
	 * @param newClassifier the classifier to use.
	 * @throws IllegalStateException if the classifier is not a MultiTargetClassifier
	 */
	@Override
	public void setClassifier(Classifier newClassifier) {
		if (!(newClassifier instanceof MultiTargetClassifier)) {
			throw new IllegalStateException(
					"Base classifier must implement " + MultiTargetClassifier.class.getName()
							+ ", provided: " + newClassifier.getClass().getName());
		}
		super.setClassifier(newClassifier);
	}

	/**
	 * Returns the revision string.
	 *
	 * @return the revision
	 */
	@Override
	public String getRevision() {
		return RevisionUtils.extract("$Revision: 4521 $");
	}

	/**
	 * Main method for running this classifier from commandline.
	 *
	 * @param args the options
	 */
	public static void main(String[] args) {
		runClassifier(new MultiSearch(), args);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/A.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import weka.core.Utils;
import java.util.*;
/**
* A.java - Handy array operations
* @version April 2014
* @author Jesse Read
*/
public abstract class A {

	/** Set - write value 'k' at index 'j' of 'a' (in place) and return 'a'. */
	public static double[] set(double a[], int j, double k) {
		a[j] = k;
		return a;
	}

	/** Join 'a' and 'b' together into a new array 'c': [1,2],[3] -> [1,2,3]. */
	public static final int[] join(int a[], int b[]) {
		int c[] = new int[a.length + b.length];
		int i = 0;
		for (int j = 0; j < a.length; j++, i++) {
			c[i] = a[j];
		}
		for (int j = 0; j < b.length; j++, i++) {
			c[i] = b[j];
		}
		return c;
	}

	/** Reverse - return a reversed copy of 'a': [1,2,3] -> [3,2,1]. */
	public static final int[] reverse(int a[]) {
		int c[] = new int[a.length];
		for (int i = a.length - 1, j = 0; i >= 0; i--, j++) {
			c[j] = a[i];
		}
		return c;
	}

	/** Sort - return a sorted copy of 'a': [1,3,2] -> [1,2,3]. */
	public static final int[] sort(int a[]) {
		int c[] = Arrays.copyOf(a, a.length);
		Utils.sort(c); // @todo: Arrays.sort ?
		return c;
	}

	/** ToString - Return a double[] as a nice String (2 digits after the decimal point). */
	public static String toString(double v[]) {
		return toString(v, 2);
	}

	/** ToString - Return a double[] as a nice String (formated to 'adp' digits after the decimal point). */
	public static String toString(double v[], int adp) {
		if (adp <= 0)
			return toString(toIntArray(v));
		int w = adp + 2;
		StringBuilder sb = new StringBuilder("[ ");
		for (int k = 0; k < v.length; k++) {
			sb.append(String.format("%" + w + "." + adp + "f ", v[k]));
		}
		sb.append("]");
		return sb.toString();
	}

	/** ToString - Return an int[] as a nice String. */
	public static String toString(int v[]) {
		StringBuilder sb = new StringBuilder("[ ");
		for (int k = 0; k < v.length; k++) {
			sb.append(v[k]);
			sb.append(" ");
		}
		sb.append("]");
		return sb.toString();
	}

	/** ToString - Return an int[] as a nice String, each value padded to width 'w'. */
	public static String toString(int v[], int w) {
		StringBuilder sb = new StringBuilder("[ ");
		for (int k = 0; k < v.length; k++) {
			sb.append(String.format("%" + w + "d ", v[k]));
		}
		sb.append("]");
		return sb.toString();
	}

	/** Multiply a vector with a constant; returns a new array. */
	public static double[] multiply(double a[], double w) {
		double c[] = new double[a.length];
		for (int j = 0; j < a.length; j++) {
			c[j] = a[j] * w;
		}
		return c;
	}

	/** Multiply two vectors (element-wise) together; returns a new array. */
	public static double[] multiply(double a[], double w[]) {
		double c[] = new double[a.length];
		for (int j = 0; j < a.length; j++) {
			c[j] = a[j] * w[j];
		}
		return c;
	}

	/** Product - multiply all the elements of 'v' together (1.0 for an empty array). */
	public static final double product(double v[]) {
		double p = 1.0;
		for (double d : v) {
			p *= d;
		}
		return p;
	}

	/** Sum - add all the elements of 'v' together (0.0 for an empty array). */
	public static double sum(double v[]) {
		double p = 0.0;
		for (double d : v) {
			p += d;
		}
		return p;
	}

	/** Sum - add all the elements of 'v' together (0 for an empty array). */
	public static int sum(int v[]) {
		int p = 0;
		for (int d : v) {
			p += d;
		}
		return p;
	}

	/** Normalize (to sum to 1); returns a new array. */
	public static double[] norm(double a[]) {
		double s = A.sum(a);
		return A.multiply(a, 1 / s);
	}

	/** Minimum - the smallest value in 'a' (Integer.MAX_VALUE for an empty array). */
	public static int min(int a[]) {
		int min = Integer.MAX_VALUE;
		for (int a_ : a) {
			if (a_ < min)
				min = a_;
		}
		return min;
	}

	/** Max - the largest value in 'a' (Double.NEGATIVE_INFINITY for an empty array). */
	public static double max(double a[]) {
		// NOTE: previously initialized with Double.MIN_VALUE, which is the
		// smallest *positive* double, so arrays of all-negative values
		// returned a wrong maximum. NEGATIVE_INFINITY is the correct identity.
		double max = Double.NEGATIVE_INFINITY;
		for (double a_ : a) {
			if (a_ > max)
				max = a_;
		}
		return max;
	}

	/** Max - the largest value in 'a' (Integer.MIN_VALUE for an empty array). */
	public static int max(int a[]) {
		int max = Integer.MIN_VALUE;
		for (int a_ : a) {
			if (a_ > max)
				max = a_;
		}
		return max;
	}

	/** Argmax - the index of the largest value in 'a' (first index on ties). */
	public static int argmax(double a[]) {
		double max = a[0];
		int arg = 0;
		for (int i = 1; i < a.length; i++) {
			if (a[i] > max) {
				max = a[i];
				arg = i;
			}
		}
		return arg;
	}

	/** Mode - the most frequently occurring value in 'a' (earliest-reaching value on ties). */
	public static int mode(int a[]) {
		int max = 0;
		int count = 0;
		HashMap<Integer,Integer> d = new HashMap<Integer,Integer>();
		for (int v : a) {
			Integer nn = d.get(v);
			int n = (nn == null) ? 1 : nn + 1;
			d.put(v, n);
			if (count < n) {
				count = n;
				max = v;
			}
		}
		return max;
	}

	/** Append value 'v' to 'array[]'; returns a new, longer array. */
	public static int[] append(int array[], final int v) {
		int n = array.length;
		array = Arrays.copyOf(array, n + 1);
		array[n] = v;
		return array;
	}

	/**
	 * Delete index 'i' from 'array[]'.
	 * NOTE: does not preserve order -- the last element is moved into slot 'i'
	 * before truncating, and the input array is mutated in the process.
	 */
	public static int[] delete(int array[], final int i) {
		int n = array.length;
		array[i] = array[n - 1];
		array = Arrays.copyOf(array, n - 1);
		return array;
	}

	/** Delete indices 'i[]' from 'array[]' (applies single-index delete repeatedly). @note: slow! */
	public static int[] delete(int array[], final int i[]) {
		for (int i_ : i) {
			array = delete(array, i_);
		}
		return array;
	}

	/** Select/get the elements 'indices[]' from 'array[]'; returns a new array. */
	public static int[] select(int array[], final int indices[]) {
		int selection[] = new int[indices.length];
		for (int i = 0; i < indices.length; i++) {
			selection[i] = array[indices[i]];
		}
		return selection;
	}

	/** Swap the 'j'th and 'k'th indices in 'array[]' (in place) and return it. */
	public static int[] swap(int array[], int j, int k) {
		int temp = array[j];
		array[j] = array[k];
		array[k] = temp;
		return array;
	}

	/** Randomly swap two (distinct) elements in 'array[]' (in place). */
	public static int[] swap(int array[], Random r) {
		if (array.length == 1)
			// cannot swap if only one element !
			return array;
		int a = r.nextInt(array.length);
		int b = r.nextInt(array.length - 1);
		// when a == b, swap with the last element instead to keep them distinct
		return swap(array, a, (a == b) ? array.length - 1 : b);
	}

	/** Sample a PMF - select i with probabilitiy w[i] (w must be normalised). */
	public static int samplePMF(double w[], Random r) {
		double u = r.nextDouble();
		double sum = w[0];
		int i = 0;
		while (sum < u) {
			i++;
			sum += w[i];
		}
		return i;
	}

	/** Make Sequence - Given L, generate and return new int[]{0,1,2,3,...,L-1}. */
	public static final int[] make_sequence(int L) {
		int ind[] = new int[L];
		for (int i = 0; i < L; i++) {
			ind[i] = i;
		}
		return ind;
	}

	/** Make Sequence - Generate and return new int[]{start,start+1,...,end-2,end-1}. */
	public static final int[] make_sequence(int start, int end) {
		int array[] = new int[end - start];
		for (int j = start, i = 0; j < end; j++, i++) {
			array[i] = j;
		}
		return array;
	}

	/**
	 * Invert - take the compliment of indices up to length L, e.g., if indices = [3,5,6], then invert(indices,7) = [1,2,4,7].
	 * @param indices indices
	 * @param L supposed length of the compliment
	 * @return the compliment (inverted list)
	 */
	public static final int[] invert(int indices[], int L) {
		int sindices[] = Arrays.copyOf(indices, indices.length);
		Arrays.sort(sindices);
		int inverted[] = new int[L - sindices.length];
		for (int j = 0, i = 0; j < L; j++) {
			// keep j only if it is NOT among the given indices
			if (Arrays.binarySearch(sindices, j) < 0) {
				inverted[i++] = j;
			}
		}
		return inverted;
	}

	/** Shuffle 'array' (in place, Fisher-Yates) given Random 'r'. */
	public static final void shuffle(int array[], Random r) {
		//TODO: use this (but beware some results may change):
		//Collections.shuffle(Arrays.asList(array), r);
		//return array;
		//MLUtils.randomize(array,r);
		for (int i = array.length - 1; i > 0; i--) {
			int index = r.nextInt(i + 1);
			int temp = array[index];
			array[index] = array[i];
			array[i] = temp;
		}
	}

	/**
	 * ToPrimitive - cast Integer[] to int[].
	 */
	public static int[] toPrimitive(Integer a[]) {
		int b[] = new int[a.length];
		for (int i = 0; i < a.length; i++) {
			b[i] = a[i];
		}
		return b;
	}

	/**
	 * ToPrimitive - cast Double[] to double[].
	 */
	public static double[] toPrimitive(Double a[]) {
		double b[] = new double[a.length];
		for (int i = 0; i < a.length; i++) {
			b[i] = a[i];
		}
		return b;
	}

	/**
	 * ToPrimitive - cast List&lt;Integer&gt; to int[].
	 */
	public static int[] toPrimitive(List<Integer> list) {
		int[] a = new int[list.size()];
		Iterator<Integer> iter = list.iterator();
		for (int i = 0; iter.hasNext(); i++) {
			a[i] = iter.next();
		}
		return a;
	}

	/**
	 * ToDoubleArray - cast int[] to double[].
	 */
	public static final double[] toDoubleArray(int z[]) {
		double y[] = new double[z.length];
		for (int j = 0; j < z.length; j++) {
			y[j] = (double) z[j];
		}
		return y;
	}

	/**
	 * Convert eg "12" to [1,2].
	 * see also MLUtils.toIntArray(String s) <br>
	 * */
	public static final int[] toIntArray(String s) {
		int[] array = new int[s.length()];
		for (int i = 0; i < s.length(); i++) {
			array[i] = Character.digit(s.charAt(i), 10);
		}
		return array;
	}

	/** Convert eg ["1","2"] to [1,2] */
	public static final int[] toIntArray(String s[]) {
		int u[] = new int[s.length];
		for (int j = 0; j < s.length; j++) {
			u[j] = Integer.parseInt(s[j]);
		}
		return u;
	}

	/**
	 * Convert an array of doubles to ints by truncation.
	 * see also toIntArray(z,t)
	 */
	public static final int[] toIntArray(double z[]) {
		int u[] = new int[z.length];
		for (int j = 0; j < z.length; j++) {
			u[j] = (int) z[j];
		}
		return u;
	}

	/**
	 * Convert an array of doubles to ints through a threshold 't' (1 iff z[j] >= t).
	 * see M.threshold
	 */
	public static final int[] toIntArray(double z[], double t) {
		int u[] = new int[z.length];
		for (int j = 0; j < z.length; j++) {
			u[j] = (z[j] >= t) ? 1 : 0;
		}
		return u;
	}

	/**
	 * Convert integer to binary string (double representation) of L digits.
	 * e.g., toDoubleArray(3,4) = [0., 0., 1., 1.]
	 */
	public static final double[] toDoubleArray(int i, int L) {
		double u[] = new double[L];
		if (i == 0)
			return u;
		String s = Integer.toBinaryString(i);
		// Right-align the binary digits so the least-significant bit lands at
		// u[L-1], matching the documented example toDoubleArray(3,4) =
		// [0., 0., 1., 1.]. (The previous left-aligned version produced
		// [1., 1., 0., 0.], contradicting its own documentation.)
		int offset = L - s.length();
		for (int j = 0; j < s.length(); j++) {
			u[offset + j] = (double) Integer.parseInt(String.valueOf(s.charAt(j)));
		}
		return u;
	}

	/**
	 * Do some tests.
	 */
	public static void main(String args[]) {
		int a[] = new int[] {1, 3, 4, 8, -4, -3, 4, 4, 4, 10, -3, -3, -3, -3, -3};
		System.out.println("mode: " + A.mode(a));
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/CCUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import meka.classifiers.multilabel.CC;
import weka.classifiers.Classifier;
import weka.core.Instance;
import weka.core.Instances;
/**
* CCUtils.java - Handy Utils for working with Classifier Chains (and Trees and Graphs)
* @author Jesse Read
* @version June 2014
*/
public abstract class CCUtils {

	/**
	 * BuildCC - Given a base classifier 'g', build a new CC classifier on data D, given chain order 'chain'.
	 *
	 * @param chain the chain order to use
	 * @param D the training data
	 * @param g the base classifier
	 * @return the trained classifier chain
	 * @throws Exception if building the chain fails
	 */
	public static CC buildCC(int chain[], Instances D, Classifier g) throws Exception {
		// a new classifier chain
		CC chainClassifier = new CC();
		// build this chain
		chainClassifier.prepareChain(chain);
		chainClassifier.setClassifier(g);
		// train on a copy, so the caller's data is left untouched
		chainClassifier.buildClassifier(new Instances(D));
		return chainClassifier;
	}

	/**
	 * SetPath - set 'path[]' into the first L attributes of Instance 'xy'.
	 *
	 * @param xy an Example (x,y)
	 * @param path a label vector
	 */
	public static void setPath(Instance xy, double path[]) {
		int L = xy.classIndex(); // = path.length
		for (int j = 0; j < L; j++) {
			// write the rounded label value into attribute j
			xy.setValue(j, (int) Math.round(path[j]));
		}
	}

	/**
	 * LinkTransform - prepare 'D' for training at a node 'j' of the chain, by excluding 'exl'.
	 *
	 * @param D dataset
	 * @param j index of the label of this node
	 * @param exl indices of labels which are NOT parents of j
	 * @return the transformed dataset (which can be used as a template)
	 */
	public static Instances linkTransform(Instances D, int j, int exl[]) {
		Instances transformed = new Instances(D);
		transformed.setClassIndex(-1);
		// delete the excluded attributes back-to-front so earlier indices
		// stay valid, and track where index 'j' ends up
		int classIndex = j;
		for (int i = exl.length - 1; i >= 0; i--) {
			transformed.deleteAttributeAt(exl[i]);
			if (exl[i] < classIndex) {
				classIndex--;
			}
		}
		transformed.setClassIndex(classIndex);
		return transformed;
	}

	/**
	 * LinkTransform - prepare 'x' for testing at a node 'j' of the chain, by excluding 'excl'.
	 *
	 * @param x instance
	 * @param excl indices of labels which are NOT parents of j
	 * @param _D the dataset template to use
	 * @return the transformed instance
	 */
	public static Instance linkTransformation(Instance x, int excl[], Instances _D) {
		// work on a copy, detached from its dataset
		Instance copy = (Instance) x.copy();
		copy.setDataset(null);
		// delete attributes we don't need (back-to-front, so indices stay valid)
		for (int i = excl.length - 1; i >= 0; i--) {
			copy.deleteAttributeAt(excl[i]);
		}
		// attach the template
		copy.setDataset(_D);
		return copy;
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/ExceptionUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* ExceptionUtils.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.core;
import com.googlecode.jfilechooserbookmarks.core.Utils;
import java.io.PrintWriter;
import java.io.StringWriter;
/**
 * Helper class for throwables and exceptions.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public class ExceptionUtils {

	/**
	 * Returns the stacktrace of the throwable as string.
	 *
	 * @param t the throwable to get the stacktrace for
	 * @return the stacktrace
	 */
	public static String throwableToString(Throwable t) {
		return throwableToString(t, -1);
	}

	/**
	 * Returns the stacktrace of the throwable as string.
	 *
	 * @param t the throwable to get the stacktrace for
	 * @param maxLines the maximum number of lines to print, &lt;= 0 for all
	 * @return the stacktrace
	 */
	public static String throwableToString(Throwable t, int maxLines) {
		StringWriter writer = new StringWriter();
		t.printStackTrace(new PrintWriter(writer));
		if (maxLines > 0) {
			// clamp to the actual number of lines — previously a maxLines larger
			// than the stacktrace caused an ArrayIndexOutOfBoundsException
			String[] lines = writer.toString().split("\n");
			int numLines = Math.min(maxLines, lines.length);
			StringBuilder result = new StringBuilder();
			for (int i = 0; i < numLines; i++) {
				if (i > 0)
					result.append("\n");
				result.append(lines[i]);
			}
			return result.toString();
		}
		return writer.toString();
	}

	/**
	 * Outputs the stacktrace along with the message on stderr and returns a
	 * combination of both of them as string.
	 *
	 * @param source the object that generated the exception, can be null
	 * @param msg the message for the exception
	 * @param t the exception
	 * @return the full error message (message + stacktrace)
	 */
	public static String handleException(Object source, String msg, Throwable t) {
		return handleException(source, msg, t, false);
	}

	/**
	 * Generates a string from the stacktrace along with the message and returns
	 * that. Depending on the silent flag, this string is also output on stderr.
	 *
	 * @param source the object that generated the exception, can be null
	 * @param msg the message for the exception, can be null
	 * @param t the exception
	 * @param silent if true then the generated message is not forwarded
	 * to the source's logger
	 * @return the full error message (message + stacktrace)
	 */
	public static String handleException(Object source, String msg, Throwable t, boolean silent) {
		// use the local throwableToString for consistency within this class
		// (previously delegated to the third-party jfilechooserbookmarks Utils);
		// guard against a null message, which previously NPE'd on trim()
		String result = (msg == null ? "" : msg.trim()) + "\n" + throwableToString(t);
		if (!silent) {
			if (source != null)
				System.err.println(source.getClass().getName());
			System.err.println(result);
		}
		return result;
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/F.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import weka.core.Instance;
import weka.core.Instances;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Remove;
import weka.filters.unsupervised.attribute.Reorder;
/**
 * F.java - transform/Filter Operations on instances.
 * Transform 'D' and 'x' in many different ways wrt the labels.
 * Note that there are similar/related functions in MLUtils.java. In the future these should be combined together.
 * @see MLUtils
 * @author Jesse Read
 */
public abstract class F {

	/**
	 * meka2mulan - Move L label attributes from the beginning to end of attribute space of an Instances.
	 * Necessary because MULAN assumes label attributes are at the end, not the beginning.
	 * (the extra time for this process is not counted in the running-time analysis of published work).
	 * Note: modifies D in place and returns it.
	 */
	public static final Instances meka2mulan(Instances D, int L) {
		for (int j = 0; j < L; j++) {
			// append a copy of the current first attribute at the end ...
			D.insertAttributeAt(D.attribute(0).copy(D.attribute(0).name() + "-"), D.numAttributes());
			// ... copy over its values ...
			for (int i = 0; i < D.numInstances(); i++) {
				D.instance(i).setValue(D.numAttributes() - 1, D.instance(i).value(0));
			}
			// ... then drop the original front attribute
			D.deleteAttributeAt(0);
		}
		return D;
	}

	/**
	 * meka2mulan - Move L label attributes from the beginning to end of attribute space of an Instance.
	 * Necessary because MULAN assumes label attributes are at the end, not the beginning.
	 * (the extra time for this process is not counted in the running-time analysis of published work).
	 * Note: detaches x from its dataset and modifies it in place.
	 */
	public static final Instance meka2mulan(Instance x, int L) {
		x.setDataset(null);
		for (int j = 0; j < L; j++) {
			// rotate: append a slot at the end, drop the front slot
			x.insertAttributeAt(x.numAttributes());
			x.deleteAttributeAt(0);
		}
		return x;
	}

	/**
	 * mulan2meka - Move label attributes from the End to the Beginning of attribute space (MULAN format to MEKA format).
	 * Note: can use e.g.: java weka.filters.unsupervised.attribute.Reorder -i thyroid.arff -R 30-last,1-29"
	 * See also: F.reorderLabels(D,s)
	 */
	public static final Instances mulan2meka(Instances D, int L) {
		int d = D.numAttributes();
		for (int j = 0; j < L; j++) {
			// insert a copy of the current last attribute at the front;
			// after insertion the old attribute (d-1) sits at index d
			D.insertAttributeAt(D.attribute(d - 1).copy(D.attribute(d - 1).name() + "-"), 0);
			for (int i = 0; i < D.numInstances(); i++) {
				D.instance(i).setValue(0, D.instance(i).value(d));
			}
			D.deleteAttributeAt(d);
		}
		return D;
	}

	/**
	 * ReorderLabels - reorder the first s.length label attributes according to s[].
	 * Fixed: the original appended s[0] on every iteration (instead of s[j]) and
	 * discarded the filtered dataset (the reassignment of the local parameter had
	 * no effect for callers), making the method a no-op. The reordered copy is
	 * now returned; existing statement-style callers still compile unchanged.
	 * @param s new indices order (supposing that it contains the first s.length indices;
	 *          assumed to be 1-based as required by weka's Reorder — TODO confirm with callers)
	 * @return the reordered dataset (D itself is not modified)
	 */
	public static Instances reorderLabels(Instances D, int s[]) throws Exception {
		int L = s.length;
		StringBuilder range = new StringBuilder();
		for (int j = 0; j < L; j++) {
			range.append(s[j]).append(",");  // was s[0]: repeated the first index L times
		}
		range.append(L + 1).append("-last");
		Reorder f = new Reorder();
		f.setAttributeIndices(range.toString());
		f.setInputFormat(D);
		return Filter.useFilter(D, f);
	}

	/**
	 * Remove Indices - Remove attribute indices 'indices' from 'D'.
	 * @param D Dataset
	 * @param indices attribute indices to remove/keep
	 * @param inv if true, then keep 'indices'
	 * @return New dataset with 'indices' removed.
	 */
	public static Instances remove(Instances D, int indices[], boolean inv) throws Exception {
		Remove remove = new Remove();
		remove.setAttributeIndicesArray(indices);
		remove.setInvertSelection(inv);
		remove.setInputFormat(D);
		return Filter.useFilter(D, remove);
	}

	/**
	 * Remove Indices - Remove ALL labels (assume they are the first L attributes) from D.
	 * @param D Dataset
	 * @param L number of labels
	 * @return New dataset with labels removed.
	 */
	public static Instances removeLabels(Instances D, int L) throws Exception {
		Remove remove = new Remove();
		remove.setAttributeIndices("1-" + L);
		remove.setInputFormat(D);
		return Filter.useFilter(D, remove);
	}

	/**
	 * Remove Indices - Remove some labels (assume they are the first L attributes) from D.
	 * @param D Dataset
	 * @param L number of labels
	 * @param j indices of labels to keep
	 * @return New dataset with labels removed.
	 */
	public static Instances keepLabels(Instances D, int L, int j[]) throws Exception {
		// invert: remove everything among the first L attributes that is NOT in j
		int to_remove[] = A.invert(j, L);
		return remove(D, to_remove, false);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/FileFormatSupporter.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* FileFormatSupporter.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.core;
/**
 * For classes that support file formats.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public interface FileFormatSupporter {

	/**
	 * Returns the format description.
	 *
	 * @return the file format
	 */
	String getFormatDescription();

	/**
	 * Returns the format extension(s).
	 *
	 * @return the extension(s) (incl dot)
	 */
	String[] getFormatExtensions();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/FileUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* FileUtils.java
* Copyright (C) 2009-2015 University of Waikato, Hamilton, New Zealand
*/
package meka.core;
import java.io.*;
/**
* Utility class for I/O related actions.
*
* @author fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class FileUtils {
/**
* Returns the number of directories that this file object contains.
* E.g.: /home/blah/some/where.txt will return 3. /blah.txt returns 0.
*
* @param file the file
*/
public static int getDirectoryDepth(File file) {
int result;
result = 0;
if (!file.isDirectory())
file = file.getParentFile();
while (file.getParentFile() != null) {
result++;
file = file.getParentFile();
}
return result;
}
/**
* Creates a partial filename for the given file, based on how many parent
* directories should be included. Examples:
* <pre>
* createPartialFilename(new File("/home/some/where/file.txt"), -1)
* = /home/some/where/file.txt
* createPartialFilename(new File("/home/some/where/file.txt"), 0)
* = file.txt
* createPartialFilename(new File("/home/some/where/file.txt"), 1)
* = where/file.txt
* createPartialFilename(new File("/home/some/where/file.txt"), 2)
* = some/where/file.txt
* </pre>
*
* @param file the file to create the partial filename for
* @param numParentDirs the number of parent directories to include in
* the partial name, -1 returns the absolute
* filename
* @return the generated filename
*/
public static String createPartialFilename(File file, int numParentDirs) {
String result;
File parent;
int i;
if (numParentDirs == -1) {
result = file.getAbsolutePath();
}
else {
result = file.getName();
parent = file;
for (i = 0; (i < numParentDirs) && (parent.getParentFile() != null); i++) {
parent = parent.getParentFile();
result = parent.getName() + File.separator + result;
}
}
return result;
}
/**
* Closes the stream, if possible, suppressing any exception.
*
* @param is the stream to close
*/
public static void closeQuietly(InputStream is) {
if (is != null) {
try {
is.close();
}
catch (Exception e) {
// ignored
}
}
}
/**
* Closes the stream, if possible, suppressing any exception.
*
* @param os the stream to close
*/
public static void closeQuietly(OutputStream os) {
if (os != null) {
try {
os.flush();
}
catch (Exception e) {
// ignored
}
try {
os.close();
}
catch (Exception e) {
// ignored
}
}
}
/**
* Closes the reader, if possible, suppressing any exception.
*
* @param reader the reader to close
*/
public static void closeQuietly(Reader reader) {
if (reader != null) {
try {
reader.close();
}
catch (Exception e) {
// ignored
}
}
}
/**
* Closes the writer, if possible, suppressing any exception.
*
* @param writer the writer to close
*/
public static void closeQuietly(Writer writer) {
if (writer != null) {
try {
writer.flush();
}
catch (Exception e) {
// ignored
}
try {
writer.close();
}
catch (Exception e) {
// ignored
}
}
}
/**
* Returns the extension of the file, if any.
*
* @param file the file to get the extension from
* @return the extension (no dot), null if none available
*/
public static String getExtension(File file) {
if (file.getName().contains("."))
return file.getName().substring(file.getName().lastIndexOf(".") + 1);
else
return null;
}
/**
* Returns the extension of the file, if any.
*
* @param filename the file to get the extension from
* @return the extension (no dot), null if none available
*/
public static String getExtension(String filename) {
return getExtension(new File(filename));
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/LabelSet.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Set;
/**
 * LabelSet - A fast sparse labelset representation.
 * e.g. [0,2,3] -- the indices of relevant labels.
 * Also usable as a Comparator ordering labelsets by size.
 * @author Jesse Read
 * @version March 2014
 */
public class LabelSet implements Comparator, Serializable {

	/** for serialization. */
	private static final long serialVersionUID = -6089833712444497991L;

	/** indices of relevant labels, e.g., [1,5,7]; kept sorted ascending (binarySearch relies on this). */
	public int indices[];

	public LabelSet() {
	}

	/**
	 * A new LabelSet, given a list of SORTED indices.
	 */
	public LabelSet(int indices[]) {
		// Assume they are sorted
		this.indices = indices;
	}

	/**
	 * A new LabelSet, indicating sort=true if indices they need to be sorted (i.e., are NOT sorted).
	 */
	public LabelSet(int indices[], boolean sort) {
		this.indices = indices;
		if (sort == true)
			Arrays.sort(this.indices);
	}

	// NOTE(review): despite the "not sorted" assumption elsewhere, this does not
	// sort the list; callers are expected to pass sorted indices — confirm.
	public LabelSet(List<Integer> list) {
		Integer[] array = list.toArray(new Integer[0]);
		this.indices = A.toPrimitive(array);
	}

	public LabelSet(Set<Integer> set) {
		Integer[] array = set.toArray(new Integer[0]);
		this.indices = A.toPrimitive(array);
	}

	/**
	 * Whether label index j is contained in this set (requires sorted indices).
	 */
	public final boolean contains(int j) {
		return Arrays.binarySearch(indices, j) >= 0;
	}

	/**
	 * Whether ALL label indices in js are contained in this set.
	 */
	public final boolean contains(int js[]) {
		for (int j : js) {
			if (!contains(j))
				return false;
		}
		return true;
	}

	@Override
	public final int hashCode() {
		return Arrays.hashCode(indices);
	}

	/**
	 * Equality on the sorted index arrays.
	 * Fixed: returns false (instead of throwing ClassCastException) for null or
	 * non-LabelSet arguments, per the Object.equals contract.
	 */
	@Override
	public boolean equals(Object o) {
		if (this == o)
			return true;
		if (!(o instanceof LabelSet))
			return false;
		LabelSet l2 = (LabelSet) o;
		// equivalent to the length check + elementwise loop
		return Arrays.equals(indices, l2.indices);
	}

	/**
	 * Order labelsets by size only (smaller sets first).
	 * @todo return based on map, if we have access to one, else just length
	 */
	@Override
	public int compare(Object o1, Object o2) {
		LabelSet l1 = (LabelSet) o1;
		LabelSet l2 = (LabelSet) o2;
		if (l2.indices.length > l1.indices.length)
			return -1;
		else if (l2.indices.length < l1.indices.length)
			return 1;
		return 0;
	}

	public final int subsetof(LabelSet y) {
		return subset(this.indices, y.indices);
	}

	/**
	 * Subset - returns the number of matched elements (|y1|, possibly 0 for an
	 * empty y1) if y1 \subsetof y2, and -1 otherwise. Both arrays must be sorted.
	 */
	public final static int subset(int y1[], int y2[]) {
		int j = 0;
		int k = 0;
		// two-pointer scan over both sorted arrays
		while (j < y1.length) {
			if (k >= y2.length)
				return -1;           // ran out of y2 with y1 elements unmatched
			if (y1[j] == y2[k]) {
				j++;
				k++;
			}
			else if (y1[j] > y2[k]) {
				k++;                 // skip smaller y2 element
			}
			else {
				return -1;           // y1[j] missing from y2
			}
		}
		return j;
	}

	/**
	 * Remove the elements of l2 from this set, in place.
	 */
	public void minus(LabelSet l2) {
		this.indices = minus(this.indices, l2.indices);
	}

	/**
	 * Minus aka Set Difference, e.g., [3,4,7,9] \ [3,7] = [4,9].
	 * Both arrays must be sorted; returns a new sorted array.
	 */
	public static int[] minus(int y1[], int y2[]) {
		int keep[] = new int[Math.max(y1.length, y2.length)];
		int i = 0, j = 0, k = 0;
		// merge-style scan: keep y1 elements not present in y2
		while (j < y1.length && k < y2.length) {
			if (y1[j] == y2[k]) {
				j++;
				k++;
			}
			else if (y1[j] < y2[k]) {
				keep[i++] = y1[j];
				j++;
			}
			else {
				k++;
			}
		}
		// remaining y1 elements can't be in y2 anymore
		while (j < y1.length) {
			keep[i++] = y1[j++];
		}
		return Arrays.copyOf(keep, i);
	}

	/**
	 * The distance between two labelsets.
	 * NOTE(review): unimplemented stub — always returns 0 (longest common
	 * sub-sequence was presumably intended).
	 */
	public int distance(LabelSet o) {
		return 0;
	}

	public LabelSet deep_copy() {
		return new LabelSet(Arrays.copyOf(this.indices, this.indices.length));
	}

	public String toString() {
		return Arrays.toString(indices);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/LabelSetComparator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import java.util.HashMap;
public class LabelSetComparator extends LabelSet {

	/** usage counts per labelset; used to break ties between equal-sized sets. */
	HashMap<LabelSet, Integer> c = null;

	public LabelSetComparator(HashMap<LabelSet, Integer> c) {
		this.c = c;
	}

	/**
	 * Returns a negative integer, zero, or a positive integer as the first
	 * argument is less than, equal to, or greater than the second: ordered
	 * primarily by labelset size (ascending), secondarily by count (ascending).
	 */
	@Override
	public int compare(Object obj1, Object obj2) {
		LabelSet l1 = (LabelSet) obj1;
		LabelSet l2 = (LabelSet) obj2;
		// primary key: number of labels in the set
		int bySize = Integer.compare(l1.indices.length, l2.indices.length);
		if (bySize != 0)
			return bySize;
		// secondary key: occurrence count
		// NOTE(review): assumes both sets are present in 'c'; an absent key
		// unboxes null and throws NullPointerException — confirm callers.
		return Integer.compare(this.c.get(l1), this.c.get(l2));
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/LabelVector.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import java.util.Comparator;
import meka.core.A;
import java.util.HashMap;
import java.util.Arrays;
/**
 * LabelVector - Multi-target compatible vector.
 * e.g. [0,3,2,1] for L=4.
 * @author Jesse Read
 * @version March 2014
 */
public class LabelVector extends LabelSet {

	/** for serialization. */
	private static final long serialVersionUID = -6089833712552497991L;

	/** the label values, one per target (dense; unlike LabelSet's sparse 'indices', which stays null here). */
	public int values[];

	public LabelVector(int values[]) {
		this.values = values;
	}

	/**
	 * Equality on the full value vector.
	 * Fixed: returns false (instead of throwing ClassCastException) for null or
	 * non-LabelVector arguments, per the Object.equals contract.
	 * NOTE(review): hashCode() is inherited (final) from LabelSet and hashes the
	 * null 'indices' array, so every LabelVector hashes identically — legal but
	 * degenerate for hash-based collections; cannot be fixed here.
	 */
	@Override
	public boolean equals(Object o) {
		if (this == o)
			return true;
		if (!(o instanceof LabelVector))
			return false;
		int v2[] = ((LabelVector) o).values;
		if (values.length != v2.length) {
			System.err.println("[Error] different sized vectors!");
			return false;
		}
		for (int i = 0; i < values.length; i++) {
			if (values[i] != v2[i]) {
				return false;
			}
		}
		return true;
	}

	/**
	 * Compare by total sum of values (multi-label suitable only):
	 * the vector with the larger sum sorts first.
	 */
	@Override
	public int compare(Object obj1, Object obj2) {
		int v1[] = ((LabelVector) obj1).values;
		int v2[] = ((LabelVector) obj2).values;
		if (A.sum(v1) > A.sum(v2)) {
			return -1;
		}
		else if (A.sum(v1) < A.sum(v2)) {
			return 1;
		}
		return 0;
	}

	public String toString() {
		return Arrays.toString(values);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/LatexUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* LatexUtils.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.core;
import gnu.trove.list.array.TCharArrayList;
import java.util.ArrayList;
import java.util.List;
/**
* Helper functions around LaTeX.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class LatexUtils {
/**
* The characters to escape.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public enum Characters {
BACKSLASH,
PERCENTAGE,
UNDERSCORE,
DOLLAR,
AMPERSAND,
CARET
}
/**
* Ensures that the string is LaTeX compliant.
*
* @param input the string to process
* @return the compliant string
*/
public static String escape(String input) {
return escape(input, Characters.values());
}
/**
* Ensures that the string is LaTeX compliant.
*
* @param input the string to process
* @param characters the characters to escape
* @return the compliant string
*/
public static String escape(String input, Characters[] characters) {
String result;
TCharArrayList chars;
List<String> escaped;
chars = new TCharArrayList();
escaped = new ArrayList<>();
for (Characters ch: characters) {
switch (ch) {
case AMPERSAND:
chars.add('&');
escaped.add("\\&");
break;
case BACKSLASH:
chars.add('\\');
escaped.add("\\textbackslash ");
break;
case DOLLAR:
chars.add('$');
escaped.add("\\$");
break;
case UNDERSCORE:
chars.add('_');
escaped.add("\\_");
break;
case CARET:
chars.add('^');
escaped.add("$^\\wedge$");
break;
case PERCENTAGE:
chars.add('%');
escaped.add("\\%");
break;
default:
throw new IllegalStateException("Unhandled character: " + ch);
}
}
result = OptionUtils.backQuoteChars(input, chars.toArray(), escaped.toArray(new String[escaped.size()]));
return result;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/MLEvalUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import weka.core.*;
import java.util.*;
/**
* MLEvalUtils - Utility functions for Evaluation.
* @see meka.core.Metrics
* @author Jesse Read
* @version March 2014
*/
public abstract class MLEvalUtils {
/**
 * GetThreshold - Get a threshold from a Threshold OPtion string 'top'.
 * @param Y label space; for calculating a threshold with PCut
 * @param D training data; for calculating a threshold with PCut
 * @param top Threshold OPtion (either "PCut1", "PCutL" or a real value e.g. "0.5" or L real values e.g. "[0.1, 0.2, 0.8]" for L = 3
 */
public static String getThreshold(ArrayList<double[]> Y, Instances D, String top) throws Exception {
	// Proportional Cut: one general threshold across all labels
	if (top.equals("PCut1") || top.equals("c"))
		return String.valueOf(ThresholdUtils.calibrateThreshold(Y, MLUtils.labelCardinality(D)));
	// Proportional Cut: one threshold per label
	if (top.equals("PCutL") || top.equals("C"))
		return Arrays.toString(ThresholdUtils.calibrateThresholds(Y, MLUtils.labelCardinalities(D)));
	// otherwise 'top' is taken verbatim as a threshold spec,
	// e.g. "0.5" or "[0.1,...,0.3]" (no validation is performed)
	return top;
}
/**
 * GetMLStats - Given predictions and corresponding true values and a threshold string, retreive statistics.
 * @param Rpred predictions (may be real-valued confidences)
 * @param Y corresponding true values
 * @param t a threshold string, e.g. "0.387"
 * @param vop the verbosity option, e.g. "5"
 * @return the evaluation statistics
 */
public static HashMap<String,Object> getMLStats(double Rpred[][], int Y[][], String t, String vop) {
	// parse the threshold spec into one threshold per label, then delegate
	int L = Y[0].length;
	double thresholds[] = ThresholdUtils.thresholdStringToArray(t, L);
	return getMLStats(Rpred, Y, thresholds, vop);
}
/**
 * GetMLStats - Given predictions and corresponding true values and a threshold string, retreive statistics.
 * The returned map is insertion-ordered (LinkedHashMap); higher verbosity levels
 * append progressively more metrics.
 * @param Rpred predictions (may be double-valued confidences in the multi-label case)
 * @param Y corresponding true values
 * @param t a vector of thresholds, e.g. [0.1,0.1,0.1] or [0.1,0.5,0.4,0.001]
 * @param vop the verbosity option, e.g. "5" (parsed to an int; default 1)
 * @return the evaluation statistics, keyed by human-readable metric name
 */
public static HashMap<String,Object> getMLStats(double Rpred[][], int Y[][], double t[], String vop) {
	int N = Y.length;                          // number of test instances
	int L = Y[0].length;                       // number of labels
	int V = MLUtils.getIntegerOption(vop,1); // default 1
	// binarize the real-valued confidences using the per-label thresholds
	int Ypred[][] = ThresholdUtils.threshold(Rpred,t);
	HashMap<String,Object> results = new LinkedHashMap<String,Object>();
	// --- verbosity level 1: core example-based metrics ---
	results.put("Number of test instances (N)" ,(int)N);
	results.put("Accuracy" ,Metrics.P_Accuracy(Y,Ypred));
	// same computation as "Accuracy" — presumably intentional, as Jaccard index
	// and example-based accuracy coincide in multi-label evaluation
	results.put("Jaccard index" ,Metrics.P_Accuracy(Y,Ypred));
	results.put("Hamming score" ,Metrics.P_Hamming(Y,Ypred));
	results.put("Exact match" ,Metrics.P_ExactMatch(Y,Ypred));
	if (V > 1) {
		// --- verbosity level 2: loss variants and ranking metrics
		// (ranking metrics use the raw confidences Rpred, not Ypred) ---
		results.put("Jaccard distance" ,Metrics.L_JaccardDist(Y,Ypred));
		results.put("Hamming loss" ,Metrics.L_Hamming(Y,Ypred));
		results.put("ZeroOne loss" ,Metrics.L_ZeroOne(Y,Ypred));
		results.put("Harmonic score" ,Metrics.P_Harmonic(Y,Ypred));
		results.put("One error" ,Metrics.L_OneError(Y,Rpred));
		results.put("Rank loss" ,Metrics.L_RankLoss(Y,Rpred));
		results.put("Avg precision" ,Metrics.P_AveragePrecision(Y,Rpred));
		results.put("Log Loss (lim. L)" ,Metrics.L_LogLossL(Y,Rpred));
		results.put("Log Loss (lim. D)" ,Metrics.L_LogLossD(Y,Rpred));
		if (V > 3) {
			// micro/macro averaged precision & recall
			results.put("Micro Precision" ,Metrics.P_PrecisionMicro(Y,Ypred));
			results.put("Micro Recall" ,Metrics.P_RecallMicro(Y,Ypred));
			results.put("Macro Precision" ,Metrics.P_PrecisionMacro(Y,Ypred));
			results.put("Macro Recall" ,Metrics.P_RecallMacro(Y,Ypred));
		}
		results.put("F1 (micro averaged)" ,Metrics.P_FmicroAvg(Y,Ypred));
		results.put("F1 (macro averaged by example)" ,Metrics.P_FmacroAvgD(Y,Ypred));
		results.put("F1 (macro averaged by label)" ,Metrics.P_FmacroAvgL(Y,Ypred));
		results.put("AUPRC (macro averaged)" ,Metrics.P_macroAUPRC(Y,Rpred));
		results.put("AUROC (macro averaged)" ,Metrics.P_macroAUROC(Y,Rpred));
		// This will not be displayed to text output, rather as a graph
		results.put("Curve Data" ,Metrics.curveData(Y,Rpred));
		results.put("Macro Curve Data" ,Metrics.curveDataMacroAveraged(Y,Rpred));
		results.put("Micro Curve Data" ,Metrics.curveDataMicroAveraged(Y,Rpred));
		if (V > 2) {
			// --- verbosity level 3: per-label breakdowns ---
			results.put("Label indices " ,A.make_sequence(L));
			double HL[] = new double[L];
			double HA[] = new double[L];
			double Pr[] = new double[L];
			double Re[] = new double[L];
			for(int j = 0; j < L; j++) {
				HL[j] = Metrics.P_Hamming(Y,Ypred,j);
				HA[j] = Metrics.P_Harmonic(Y,Ypred,j);
				Pr[j] = Metrics.P_Precision(Y,Ypred,j);
				Re[j] = Metrics.P_Recall(Y,Ypred,j);
			}
			results.put("Accuracy (per label)" ,HL);
			if (V > 3) {
				results.put("Harmonic (per label)" ,HA);
				results.put("Precision (per label)" ,Pr);
				results.put("Recall (per label)" ,Re);
			}
		}
		if (V > 2) {
			// --- verbosity level 3: prediction-quality diagnostics ---
			results.put("Empty labelvectors (predicted)" ,MLUtils.emptyVectors(Ypred));
			results.put("Label cardinality (predicted)" ,MLUtils.labelCardinality(Ypred));
			results.put("Levenshtein distance", Metrics.L_LevenshteinDistance(Y, Ypred));
			if (V > 3) {
				// Label cardinality
				results.put("Label cardinality (difference)" ,MLUtils.labelCardinality(Y)-MLUtils.labelCardinality(Ypred));
				double diff_LC[] = new double[L];
				double true_LC[] = new double[L];
				double pred_LC[] = new double[L];
				for(int j = 0; j < L; j++) {
					diff_LC[j] = MLUtils.labelCardinality(Y,j) - MLUtils.labelCardinality(Ypred,j);
					true_LC[j] = MLUtils.labelCardinality(Y,j);
					pred_LC[j] = MLUtils.labelCardinality(Ypred,j);
				}
				results.put("avg. relevance (test set)" ,true_LC);
				results.put("avg. relevance (predicted) " ,pred_LC);
				results.put("avg. relevance (difference) " ,diff_LC);
			}
		}
	}
	return results;
}
/**
 * GetMTStats - Given multi-target predictions and corresponding true values, retreive evaluation statistics.
 * The returned map is insertion-ordered (LinkedHashMap).
 * @param Rpred predictions (real-valued; rounded to the nearest integer target value)
 * @param Y corresponding true values
 * @param vop the verbosity option, e.g. "5" (parsed to an int; default 1)
 * @return the evaluation statistics, keyed by human-readable metric name
 */
public static HashMap<String,Object> getMTStats(double Rpred[][], int Y[][], String vop) {
	// just a question of rounding for now, could use A.toIntArray(..)
	int Ypred[][] = ThresholdUtils.round(Rpred);
	int N = Y.length;      // number of test instances
	int L = Y[0].length;   // number of target attributes
	int V = MLUtils.getIntegerOption(vop,1); // default 1
	HashMap<String,Object> output = new LinkedHashMap<String,Object>();
	// --- verbosity level 1: core metrics ---
	output.put("N(test)" ,(double)N);
	output.put("L" ,(double)L);
	output.put("Hamming score" ,Metrics.P_Hamming(Y,Ypred));
	output.put("Exact match" ,Metrics.P_ExactMatch(Y,Ypred));
	if (V > 1) {
		// --- verbosity level 2: loss variants ---
		output.put("Hamming loss" ,Metrics.L_Hamming(Y,Ypred));
		output.put("ZeroOne loss" ,Metrics.L_ZeroOne(Y,Ypred));
	}
	if (V > 2) {
		// --- verbosity level 3: per-label breakdown ---
		output.put("Levenshtein distance", Metrics.L_LevenshteinDistance(Y, Ypred));
		double HL[] = new double[L];
		for(int j = 0; j < L; j++) {
			HL[j] = Metrics.P_Hamming(Y,Ypred,j);
		}
		output.put("Label indices " ,A.make_sequence(L));
		output.put("Accuracy (per label)" ,HL);
	}
	if (V > 3) {
		// currently no additional level-4 metrics
		//output.put("Levenshtein distance", Metrics.L_LevenshteinDistance(Y, Ypred));
	}
	return output;
}
/**
 * Combine Predictions - Combine together various results (for example, from cross-validation)
 * into one, simply by appending predictions and true values together, and averaging together their 'vals'.
 * @param folds an array of Results (must be non-empty)
 * @return a combined Result
 */
public static Result combinePredictions(Result folds[]) {
	Result r = new Result();
	// set info
	r.info = folds[0].info;
	// append all predictions and true values
	for(int f = 0; f < folds.length; f++) {
		r.predictions.addAll(folds[f].predictions);
		r.actuals.addAll(folds[f].actuals);
	}
	// NOTE(review): this ALIASES fold 0's map rather than copying it — the
	// put() of averaged values below therefore overwrites folds[0].vals as a
	// side effect; presumably a defensive copy was intended (fixing requires
	// knowledge of the Result.vals type not visible here — confirm).
	r.vals = folds[0].vals;
	// average all vals
	for(String metric : folds[0].vals.keySet()) {
		// only Double-valued metrics are averaged; other value types are
		// left as fold 0's value via the assignment above
		if (folds[0].vals.get(metric) instanceof Double) {
			double values[] = new double[folds.length];
			for(int i = 0; i < folds.length; i++) {
				values[i] = (Double)folds[i].vals.get(metric);
			}
			r.vals.put(metric,Utils.mean(values));
		}
	}
	return r;
}
/**
 * AverageResults - Create a Result with the average of an array of Results by taking the average +/- standard deviation.
 * Handles Double, Integer and double[] entries in 'output'; numeric entries become "mean +/- sd" strings.
 * @param folds array of Results (e.g., from CV-validation); must be non-empty and share keys/info
 * @return A result reporting the average of these folds (its Type info is set to "CV").
 */
@Deprecated
public static Result averageResults(Result folds[]) {
Result r = new Result();
// info (should be the same across folds).
r.info = folds[0].info;
// for output .. average each metric according to its runtime type
for(String metric : folds[0].output.keySet()) {
if (folds[0].output.get(metric) instanceof Double) {
double values[] = new double[folds.length];
for(int i = 0; i < folds.length; i++) {
values[i] = (Double)folds[i].output.get(metric);
}
// format: "mean +/- standard deviation"
String avg_sd = Utils.doubleToString(Utils.mean(values),5,3)+" +/- "+Utils.doubleToString(Math.sqrt(Utils.variance(values)),5,3);
r.output.put(metric,avg_sd);
}
else if (folds[0].output.get(metric) instanceof Integer) {
// TODO combine with previous clause
double values[] = new double[folds.length];
for(int i = 0; i < folds.length; i++) {
values[i] = (Integer)folds[i].output.get(metric);
}
String avg_sd = Utils.doubleToString(Utils.mean(values),5,3)+" +/- "+Utils.doubleToString(Math.sqrt(Utils.variance(values)),5,3);
r.output.put(metric,avg_sd);
}
else if (folds[0].output.get(metric) instanceof double[]) {
// element-wise average of the arrays across folds (no sd reported)
double avg[] = new double[((double[])folds[0].output.get(metric)).length];
for(int i = 0; i < folds.length; i++) {
for(int j = 0; j < avg.length; j++) {
avg[j] = avg[j] + ((double[])folds[i].output.get(metric))[j] * 1./folds.length;
}
}
r.output.put(metric,avg);
}
/*
else if (folds[0].output.get(metric) instanceof int[]) {
int avg[] = new int[((int[])folds[0].output.get(metric)).length];
for(int i = 0; i < folds.length; i++) {
for(int j = 0; j < avg.length; j++) {
avg[j] = avg[j] + ((int[])folds[i].output.get(metric))[j];
}
}
for(int j = 0; j < avg.length; j++) {
avg[j] = avg[j] / avg.length;
}
r.output.put(metric,avg);
}
*/
}
// and now for 'vals' .. (Double entries only)
for(String metric : folds[0].vals.keySet()) {
if (folds[0].vals.get(metric) instanceof Double) {
double values[] = new double[folds.length];
for(int i = 0; i < folds.length; i++) {
values[i] = (Double)folds[i].vals.get(metric);
}
String avg_sd = Utils.doubleToString(Utils.mean(values),5,3)+" +/- "+Utils.doubleToString(Math.sqrt(Utils.variance(values)),5,3);
r.vals.put(metric,avg_sd);
}
}
// incremental (MLi) experiments additionally report the per-window series
if (r.getInfo("Type").equalsIgnoreCase("MLi")) {
// Also display across time ...
r.output.put("Window indices" ,A.make_sequence(folds.length));
for(String metric : folds[0].output.keySet()) {
if (folds[0].output.get(metric) instanceof Double) {
double values[] = new double[folds.length];
for(int i = 0; i < folds.length; i++) {
values[i] = (Double)folds[i].output.get(metric);
}
r.output.put(""+metric+" per window",values);
}
else if (folds[0].output.get(metric) instanceof Integer) {
int values[] = new int[folds.length];
for(int i = 0; i < folds.length; i++) {
values[i] = (Integer)folds[i].output.get(metric);
}
r.output.put(""+metric+" per window",values);
}
}
}
r.setInfo("Type","CV");
return r;
}
/**
 * Main - can use this function for writing tests during development.
 * Currently a no-op.
 * @param args command line arguments (ignored)
 */
public static void main(String args[]) {
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/MLUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Utils;
import weka.core.converters.ConverterUtils.DataSource;
import weka.classifiers.evaluation.NominalPrediction;
import weka.classifiers.evaluation.Prediction;
import java.io.*;
import java.util.*;
/**
* MLUtils - Helpful functions for dealing with multi-labelled data.
* Note that there are some similar/related methods in F.java.
* @see MLUtils
* @author Jesse Read
* @version March 2013
*/
public abstract class MLUtils {
/**
 * GetDataSetOptions - Look for options in the 'relationName' in format 'dataset-name: options'.
 * @param instances dataset whose relation name may embed options after a ':'
 * @return The dataset options found (split on spaces), or an empty array if there is no ':'
 */
public static final String[] getDatasetOptions(Instances instances) {
	final String relation = instances.relationName();
	final int colon = relation.indexOf(':');
	return (colon > 0) ? relation.substring(colon + 1).split(" ") : new String[]{};
}
/**
 * GetDataSetName - Look for name in the 'relationName' in format 'dataset-name: options'.
 * Convenience wrapper around getRelationName(String).
 * @param instances dataset whose relation name encodes the dataset name
 * @return The dataset name (text before the first ':')
 */
public static final String getDatasetName(Instances instances) {
return getRelationName(instances.relationName());
}
/**
 * GetRelationName - get, e.g., 'Music' from 'Music: -C 6'.
 * @param name dataset name, possibly with ':'-separated options appended
 * @return the part before the first ':', or the whole string if there is none
 */
public static final String getRelationName(String name) {
	final int colon = name.indexOf(':');
	return (colon > 0) ? name.substring(0, colon) : name;
}
/**
 * GetShortMethodName - get, e.g., 'BR' from 'meka.classifiers.multilabel.BR'.
 * Robust to method strings with no trailing options (no space) -- the original
 * threw StringIndexOutOfBoundsException in that case -- and avoids the
 * redundant 'new String(...)' copy.
 * @param method long method name, optionally followed by options after a space
 * @return short (unqualified) method name
 */
public static final String getShortMethodName(String method) {
	int space = method.indexOf(' ');
	// strip any options that follow the class name
	String name = (space >= 0) ? method.substring(0, space) : method;
	// strip the package qualifier (lastIndexOf returns -1 if none, so +1 is safe)
	return name.substring(name.lastIndexOf('.') + 1);
}
/**
 * DEPRECATED - use A.make_sequence(L) instead.
 * Returns the identity index sequence [0,1,...,L-1].
 */
@Deprecated
public static final int[] gen_indices(int L) {
return A.make_sequence(L);
}
/**
 * DEPRECATED - use A.shuffle(array,r) instead.
 * Shuffles 'array' in place using random source 'r'.
 */
@Deprecated
public static final void randomize(int array[], Random r) {
A.shuffle(array,r);
}
/**
 * Instance with L labels to double[] of length L.
 * Rounds each label value to the nearest whole number.
 */
public static final double[] toDoubleArray(Instance x, int L) {
	final double[] labels = new double[L];
	for(int j = 0; j < L; j++)
		labels[j] = Math.round(x.value(j));
	return labels;
}
/**
 * Instance with L labels to double[] of length L, where L = x.classIndex().
 * Rounds each label value to the nearest whole number.
 */
public static final double[] toDoubleArray(Instance x) {
	return toDoubleArray(x, x.classIndex());
}
/**
 * ToBitString - returns a String representation of x = [0,0,1,0,1,0,0,0], e.g., "000101000".
 * Each of the first L values is rounded to 0/1 and appended.
 * NOTE: It may be better to use a sparse representation for some applications.
 */
public static final String toBitString(Instance x, int L) {
	final StringBuilder bits = new StringBuilder(L);
	for(int j = 0; j < L; j++)
		bits.append((int)Math.round(x.value(j)));
	return bits.toString();
}
/**
 * ToBitString - returns a String representation of i[], each entry appended verbatim.
 */
public static final String toBitString(int i[]) {
	final StringBuilder bits = new StringBuilder(i.length);
	for(int k = 0; k < i.length; k++)
		bits.append(i[k]);
	return bits.toString();
}
/**
 * ToBitString - returns a String representation of d[], each entry rounded to the nearest int.
 */
public static final String toBitString(double d[]) {
	final StringBuilder bits = new StringBuilder(d.length);
	for(int k = 0; k < d.length; k++)
		bits.append((int)Math.round(d[k]));
	return bits.toString();
}
/**
 * FromBitString - returns a double[] representation of s, e.g., "101" to [1.0,0.0,1.0].
 * Each character is interpreted as a decimal digit (no validation).
 */
public static final double[] fromBitString(String s) {
	final double[] out = new double[s.length()];
	for(int i = 0; i < out.length; i++)
		out[i] = s.charAt(i) - '0';   // same mapping as char2int, inlined
	return out;
}
/** ToIntArray - Return an int[] from a String, e.g., "[0,1,2,0]" to [0,1,2,0]. */
public static final int[] toIntArray(String s) {
	final String trimmed = s.trim();
	if (trimmed.length() <= 2)   // nothing between brackets, i.e. "[]"
		return new int[]{};
	return toIntArray(trimmed.substring(1, trimmed.length() - 1).split(","));
}
/** ToIntArray - Return an int[] from a String[], e.g., ["0","1","2","3"] to [0,1,2,3]. */
public static final int[] toIntArray(String s[]) {
	final int[] out = new int[s.length];
	for(int i = 0; i < out.length; i++)
		out[i] = Integer.parseInt(s[i].trim());
	return out;
}
/**
 * Convert to Weka (multi-target) Predictions.
 * Builds one two-class NominalPrediction per label: actual y[i], distribution [1-p[i], p[i]].
 * Note: currently only multi-label.
 */
public static ArrayList<Prediction> toWekaPredictions(int y[], double p[]) {
	final ArrayList<Prediction> preds = new ArrayList<Prediction>(y.length);
	for(int i = 0; i < y.length; i++)
		preds.add(new NominalPrediction((double)y[i], new double[]{1. - p[i], p[i]}));
	return preds;
}
/**
 * To Sub Indices Set - return the indices out of 'sub_indices', in x, whose values are greater than 0.
 */
public static final List toSubIndicesSet(Instance x, int sub_indices[]) {
	final List<Integer> relevant = new ArrayList<Integer>();
	for(int idx : sub_indices)
		if (x.value(idx) > 0.)
			relevant.add(idx);
	return relevant;
}
/**
 * To Indices Set - return the indices in x[], whose values are greater than t,
 * e.g., [0.3,0.0,0.5,0.8],0.4 to {2,3}.
 */
public static final List toIndicesSet(double x[], double t) {
	final List<Integer> indices = new ArrayList<Integer>();
	for(int i = 0; i < x.length; i++)
		if (x[i] > t)
			indices.add(i);
	return indices;
}
/**
 * To Indices Set - return the indices in x[], whose values are greater than 0,
 * e.g., [0,0,1,1] to {2,3}.
 */
public static final List toIndicesSet(int x[]) {
	final List<Integer> indices = new ArrayList<Integer>();
	for(int i = 0; i < x.length; i++)
		if (x[i] > 0)
			indices.add(i);
	return indices;
}
/**
 * To Indices Set - return the label indices j &lt; L in x whose values are greater than 0.
 */
public static final List<Integer> toIndicesSet(Instance x, int L) {
	final List<Integer> relevant = new ArrayList<Integer>();
	for(int j = 0; j < L; j++)
		if (x.value(j) > 0.)
			relevant.add(j);
	return relevant;
}
/**
 * To Sparse Int Array - the indices j &lt; L with x.value(j) &gt; 0, as a primitive int[].
 * Delegates to toIndicesSet(Instance,int) and A.toPrimitive.
 * Only returns indices greater than 0 (not necessarily multi-target generic!)
 */
public static final int[] toSparseIntArray(Instance x, int L) {
return A.toPrimitive(toIndicesSet(x,L));
}
/**
 * From Sparse String - parse a comma-separated list of label indices into an int[].
 * NOTE(review): despite the original description, this returns the parsed indices
 * themselves, NOT a binary indicator vector; and surrounding brackets are not
 * stripped, so an input literally of the form "[1,34,73]" would fail to parse --
 * TODO confirm the expected input format ("1,34,73"?) against callers.
 */
public static final int[] fromSparseString(String s) {
return toIntArray(s.split(","));
}
/**
 * ToIntArray - raw instance to int[] representation.
 * The first L values of x are rounded to int; missing values are encoded as -1.
 * @param x instance
 * @param L number of leading (label) attributes to extract
 * @return int[] of length L
 */
public static final int[] toIntArray(Instance x, int L) {
int y[] = new int[L];
for(int j = 0; j < L; j++) {
// added the following if-statement to change missing values to -1
if(x.isMissing(j)){
y[j] = -1;
} else{
y[j] = (int)Math.round(x.value(j));
}
}
return y;
}
// @see also M.threshold(z,t)
@Deprecated
/** Use A.toIntArray(z,t) instead. Thresholds z[] at t into a 0/1 int[]. */
public static final int[] toIntArray(double z[], double t) {
return A.toIntArray(z,t);
}
/**
 * To Double Array - Convert something like "[1.0,2.0]" to [1.0,2.0].
 * FIX: guard against "[]" (and shorter) inputs, mirroring toIntArray(String);
 * the original would attempt to parse the empty string and throw.
 */
public static final double[] toDoubleArray(String s) {
	final String trimmed = s.trim();
	if (trimmed.length() <= 2)   // i.e., "[]": no entries
		return new double[]{};
	return toDoubleArray(trimmed.substring(1, trimmed.length() - 1).split(","));
}
/** To Double Array - Convert something like ["1.0","2.0"] to [1.0,2.0]. */
public static final double[] toDoubleArray(String s[]) {
	final double[] out = new double[s.length];
	for(int i = 0; i < out.length; i++)
		out[i] = Double.parseDouble(s[i].trim());
	return out;
}
/**
 * LabelCardinality - return the label cardinality of dataset D.
 * Uses the first D.classIndex() attributes as labels.
 */
public static final double labelCardinality(Instances D) {
return labelCardinality(D,D.classIndex());
}
/**
 * LabelCardinality - return the label cardinality of dataset D of L labels
 * (average number of relevant labels per instance; missing label values contribute 0).
 */
public static final double labelCardinality(Instances D, int L) {
	final double N = (double)D.numInstances();
	double total = 0.0;
	for(int i = 0; i < D.numInstances(); i++) {
		Instance x = D.instance(i);
		for(int j = 0; j < L; j++) {
			if (!x.isMissing(j))
				total += x.value(j);
		}
	}
	return total / N;
}
/**
 * LabelCardinality - return the average number of times the j-th label is relevant in label data Y.
 */
public static final double labelCardinality(int Y[][], int j) {
	double total = 0.0;
	for(int[] row : Y)
		total += row[j];
	return total / (double)Y.length;
}
/**
 * LabelCardinality - return the label cardinality of label data Y
 * (average number of relevant labels per example).
 * TODO: move to Metrics.java ? / Use M.sum(Y)/N
 */
public static final double labelCardinality(int Y[][]) {
	double total = 0.0;
	for(int[] row : Y)
		for(int v : row)
			total += v;
	return total / (double)Y.length;
}
/**
 * LabelCardinalities - return the frequency of each label of dataset D,
 * averaged over the instances where that label is not missing.
 * NOTE(review): if a label is missing for every instance, count stays 0 and
 * lc[j] becomes 0/0 = NaN -- TODO confirm whether callers can hit this.
 * @param D dataset with the first classIndex() attributes as labels
 * @return per-label relevance frequency, length L
 */
public static final double[] labelCardinalities(Instances D) {
int L = D.classIndex();
double lc[] = new double[L];
for(int j = 0; j < L; j++) {
int count = 0;
for(int i = 0; i < D.numInstances(); i++) {
// skip missing values; only non-missing instances contribute to the average
if(!D.instance(i).isMissing(j)){
lc[j] += D.instance(i).value(j);
count ++;
}
}
lc[j] /= count; //D.numInstances();
}
return lc;
}
/**
 * LabelCardinalities - return the frequency of each label over the label vectors Y,
 * skipping missing values (encoded as negative, e.g. -1 from toIntArray(Instance,int)).
 * FIX: the original guarded with 'lc[j] <= 0', which stopped counting a label as
 * soon as its running sum became positive; the intended missing-value check is on
 * the entry itself (y[j] >= 0).
 * @param Y list of label vectors, all of equal length
 * @return per-label relevance frequency, length L
 */
public static final double[] labelCardinalities(ArrayList<int[]> Y) {
	int L = ((int[]) Y.get(0)).length;
	double lc[] = new double[L];
	int[] count = new int[L];
	for(int y[] : Y) {
		for(int j = 0; j < L; j++) {
			// only non-missing entries contribute to the average
			if (y[j] >= 0) {
				lc[j] += y[j];
				count[j]++;
			}
		}
	}
	for(int j = 0; j < L; j++) {
		lc[j] /= count[j];
	}
	return lc;
}
/**
 * EmptyVectors - fraction of empty label vectors (sum(y[i]) == 0) in Y.
 */
public static final double emptyVectors(int Y[][]) {
	final int N = Y.length;
	int empties = 0;
	for(int[] row : Y) {
		int rowSum = 0;           // inlined Utils.sum(row)
		for(int v : row)
			rowSum += v;
		if (rowSum <= 0)
			empties++;
	}
	return (double)empties / (double)N;
}
/**
 * MostCommonCombination - Most common label combination in D,
 * using the first D.classIndex() attributes as labels.
 */
public static final String mostCommonCombination(Instances D) {
return mostCommonCombination(D,D.classIndex());
}
/**
 * MostCommonCombination - Most common label combination in D (of L labels),
 * as a bit string (see toBitString).
 * FIX: the original started counts at 0 on first sight and compared against the
 * max *before* incrementing, so it could report a combination that was not
 * actually the most frequent (e.g. depending on occurrence order / ties).
 * This version counts every occurrence and tracks the running maximum exactly.
 * Ties are broken in favour of the combination that reaches the count first.
 * @param D dataset
 * @param L number of labels
 * @return bit-string of the most frequent combination (null if D is empty)
 */
public static final String mostCommonCombination(Instances D, int L) {
	HashMap<String,Integer> counts = new HashMap<String,Integer>(D.numInstances());
	String best = null;
	int best_c = 0;
	for(int i = 0; i < D.numInstances(); i++) {
		String y = MLUtils.toBitString(D.instance(i), L);
		Integer c = counts.get(y);
		int c_new = (c == null) ? 1 : c + 1;
		counts.put(y, c_new);
		if (c_new > best_c) {
			best_c = c_new;
			best = y;
		}
	}
	return best;
}
// the number of positions at which the two strings differ (suitable for binary strings;
// assumes s2 is at least as long as s1)
public static final int bitDifference(String s1, String s2) {
	int diff = 0;
	for(int i = 0; i < s1.length(); i++)
		diff += (s1.charAt(i) == s2.charAt(i)) ? 0 : 1;
	return diff;
}
// number of positions at which the two String arrays differ (element-wise equals)
public static final int bitDifference(String y1[], String y2[]) {
	int diff = 0;
	for(int i = 0; i < y1.length; i++)
		diff += y1[i].equals(y2[i]) ? 0 : 1;
	return diff;
}
// number of positions at which the two int arrays differ
public static final int bitDifference(int y1[], int y2[]) {
	int diff = 0;
	for(int i = 0; i < y1.length; i++)
		diff += (y1[i] == y2[i]) ? 0 : 1;
	return diff;
}
// BitCount. Count relevant labels: the digit-sum of a bit string like "0110".
public static final int bitCount(String s) {
	int total = 0;
	for(char c : s.toCharArray())
		total += c - '0';   // same mapping as char2int, inlined
	return total;
}
// eg '0' ==> 0 ; maps a digit character to its numeric value (no validation of non-digits)
public static final int char2int(char c) {
return (int)(c - '0');
}
/**
 * CountCombinations - return a mapping of each distinct label combination and its count.
 * NOTE: A sparse representation would be much better for many applications, i.e., instead
 * of using toBitString(...), use toSparseRepresentation(...) instead.
 * @param D dataset
 * @param L number of labels
 * @return a HashMap where a String representation of each label combination is associated
 *         with an Integer count, e.g., "00010010",3
 */
public static final HashMap<String,Integer> countCombinations(Instances D, int L) {
	final HashMap<String,Integer> counts = new HashMap<String,Integer>();
	for (int i = 0, n = D.numInstances(); i < n; i++) {
		final String key = MLUtils.toBitString(D.instance(i), L);
		final Integer seen = counts.get(key);
		counts.put(key, (seen == null) ? 1 : seen + 1);
	}
	return counts;
}
/**
 * CountCombinations in a sparse way. Delegates to PSUtils.countCombinationsSparse.
 * @see MLUtils#countCombinations(Instances,int)
 * @param D dataset
 * @param L number of labels
 * @return a HashMap where each distinct LabelSet is associated with an Integer count
 */
public static final HashMap<LabelSet,Integer> countCombinationsSparse(Instances D, int L) {
return PSUtils.countCombinationsSparse(D,L);
}
/**
 * ClassCombinationCounts - multi-target version of countCombinations(...).
 * NOTE: uses the encodeValue(...) function which does NOT consider sparse data.
 * TODO: use LabelVector instead of Strings
 * @param D dataset
 * @return a HashMap where a String representation of each class combination is associated
 *         with an Integer count, e.g. "0+2+2+3+2",5
 */
public static final HashMap<String,Integer> classCombinationCounts(Instances D) {
	final int L = D.classIndex();
	final HashMap<String,Integer> counts = new HashMap<String,Integer>();
	for (int i = 0, n = D.numInstances(); i < n; i++) {
		final String key = encodeValue(toIntArray(D.instance(i), L));
		final Integer seen = counts.get(key);
		counts.put(key, (seen == null) ? 1 : seen + 1);
	}
	return counts;
}
/**
 * Encode Value.
 * [0,3,2] -&gt; "0+3+2"
 * Deprecated - Use LabelSet or LabelVector
 */
@Deprecated
public static String encodeValue(int s[]) {
	final StringBuilder sb = new StringBuilder();
	for(int i = 0; i < s.length; i++) {
		if (i > 0)
			sb.append('+');
		sb.append(s[i]);
	}
	return sb.toString();
}
/**
 * Decode Value.
 * "0+3+2" -&gt; [0,3,2] (inverse of encodeValue; delegates to toIntArray)
 * Deprecated - Use LabelSet or LabelVector
 */
@Deprecated
public static int[] decodeValue(String a) {
return toIntArray(a.split("\\+"));
}
/**
 * maxItem - argmax function for a HashMap: argmax_k map.get(k).
 * FIX: the original initialised the running max to 0.0, so a non-empty map whose
 * values were all negative returned null; it also looked each value up twice.
 * Ties are broken in favour of later-iterated keys (as before, via &gt;=).
 * @return the key with the largest value, or null for an empty map
 */
public static final Object maxItem(HashMap<?,Double> map) {
	Object max_k = null;
	double max_v = Double.NEGATIVE_INFINITY;
	for (Map.Entry<?,Double> e : map.entrySet()) {
		if (e.getValue() >= max_v) {
			max_k = e.getKey();
			max_v = e.getValue();
		}
	}
	return max_k;
}
/**
 * argmax - argmax function for a HashMap with Integer values.
 * NOTE: same as maxItem, but for Integer (TODO: do something more clever than this).
 * FIX: as with maxItem, the original's 0.0 initial max returned null when all
 * values were negative; values are now read once per entry.
 * @return the key with the largest value, or null for an empty map
 */
public static final Object argmax(HashMap<?,Integer> map) {
	Object max_k = null;
	double max_v = Double.NEGATIVE_INFINITY;
	for (Map.Entry<?,Integer> e : map.entrySet()) {
		if (e.getValue() >= max_v) {
			max_k = e.getKey();
			max_v = e.getValue();
		}
	}
	return max_k;
}
/** Get the number of unique label combinations in a dataset (via classCombinationCounts). */
public static final int numberOfUniqueCombinations(Instances D) {
HashMap<String,Integer> hm = classCombinationCounts(D);
return hm.size();
}
//
// ***NOTE*** Some of the following methods are present in F.java in the form of removeLabels/keepLabels, but using the Remove() filter. This appears much faster for large Instances, so for BR.java it has been changed over. It still needs to be changed over for CR.java and NSR.java. Then these methods can probably be deleted.
//
/**
 * Delete attributes from an instance 'x' indexed by 'indicesToRemove[]'.
 * NOTE: sorts 'indicesToRemove' in place (the caller's array is reordered) and
 * mutates 'x' itself; deletion proceeds from the highest index down so earlier
 * deletions do not shift the remaining indices.
 * @param x instance (modified in place)
 * @param indicesToRemove array of attribute indices (sorted in place)
 * @return the modified instance (same object as 'x')
 */
public static final Instance deleteAttributesAt(Instance x, int indicesToRemove[]) {//, boolean keep) {
Arrays.sort(indicesToRemove);
for(int j = indicesToRemove.length-1; j >= 0; j--) {
x.deleteAttributeAt(indicesToRemove[j]);
}
return x;
}
/**
 * Delete all attributes from an instance 'x' <i>except</i> those indexed by 'indicesToRemove[]', up to the 'lim'-th attribute.
 * For example, lim = 10, indicesToRemove[] = {4,5}, keeps indices 4,5,10,11,12,...
 * Implemented by inverting the index set (A.invert) and delegating to deleteAttributesAt.
 * @param x instance (modified in place)
 * @param indicesToRemove array of attribute indices to KEEP below 'lim'
 * @param lim excluding; attributes from 'lim' onward are always kept
 * @return the modified instance
 */
public static final Instance keepAttributesAt(Instance x, int indicesToRemove[], int lim){
return deleteAttributesAt(x, A.invert(indicesToRemove, lim));
}
/**
 * Delete attributes from a dataset 'D' indexed by 'indicesToRemove[]'.
 * NOTE: sorts 'indicesToRemove' in place and mutates 'D'; deletion proceeds from
 * the highest index down so earlier deletions do not shift the remaining indices.
 * @param D dataset (modified in place)
 * @param indicesToRemove array of attribute indices (sorted in place)
 * @return the modified dataset (same object as 'D')
 */
public static final Instances deleteAttributesAt(Instances D, int indicesToRemove[]) {//, boolean keep) {
Arrays.sort(indicesToRemove);
for(int j = indicesToRemove.length-1; j >= 0; j--) {
D.deleteAttributeAt(indicesToRemove[j]);
}
return D;
}
/**
 * Delete all attributes from a dataset 'D' <i>except</i> those indexed by 'indicesToRemove[]', up to the 'lim'-th attribute.
 * For example, lim = 10, indicesToRemove[] = {4,5}, keeps indices 4,5,10,11,12,...
 * Implemented by inverting the index set (A.invert) and delegating to deleteAttributesAt.
 * @param D dataset (modified in place)
 * @param indicesToRemove array of attribute indices to KEEP below 'lim'
 * @param lim excluding; attributes from 'lim' onward are always kept
 * @return the modified dataset
 */
public static final Instances keepAttributesAt(Instances D, int indicesToRemove[], int lim){
return deleteAttributesAt(D, A.invert(indicesToRemove, lim));
}
/**
 * SetTemplate - copy x, trim its leading label attributes down to the template's
 * label count, and attach it to 'instancesTemplate'.
 * Assumes x.classIndex() &gt;= instancesTemplate.classIndex() and that the
 * remaining attributes line up with the template.
 */
public static final Instance setTemplate(Instance x, Instances instancesTemplate) {
int L = x.classIndex();
int L_t = instancesTemplate.classIndex();
// work on a copy, detached from its dataset so attributes can be deleted
x = (Instance)x.copy();
x.setDataset(null);
// delete the (L - L_t) surplus leading label attributes
for (int i = L_t; i < L; i++)
x.deleteAttributeAt(0);
x.setDataset(instancesTemplate);
return x;
}
/**
 * SetTemplate - returns a copy of x_template, set with x's attributes, and set to dataset D_template (of which x_template is a template of this).
 * This function is very useful when Weka throws a strange IndexOutOfBounds exception for setTemplate(x,Template).
 * @param x source instance (its feature values, from index L_y on, are copied)
 * @param x_template instance whose shape (and copy) forms the result
 * @param D_template dataset the result is attached to; its classIndex() gives both the copy offset and the labels set to missing
 */
public static final Instance setTemplate(Instance x, Instance x_template, Instances D_template) {
Instance x_ = (Instance)x_template.copy();
int L_y = x.classIndex();
int L_z = D_template.classIndex();
// copy over x space
MLUtils.copyValues(x_,x,L_y,L_z);
// set class values to missing
MLUtils.setLabelsMissing(x_,L_z);
// set dataset
x_.setDataset(D_template);
return x_;
}
/**
 * CopyValues - Set x_dest[j+offset] = x_src[i+from], copying all attributes of
 * x_src from index 'from' onward into x_dest starting at 'offset'.
 * Mutates and returns x_dest.
 */
public static final Instance copyValues(Instance x_dest, Instance x_src, int from, int offset) {
	final int d = x_src.numAttributes();
	for(int i = from; i < d; i++)
		x_dest.setValue(i - from + offset, x_src.value(i));
	return x_dest;
}
/**
 * CopyValues - Set x_dest[i++] = x_src[j] for all j in indices[].
 * Mutates and returns x_dest.
 */
public static final Instance copyValues(Instance x_dest, Instance x_src, int indices[]) {
	int pos = 0;
	for(int srcIdx : indices)
		x_dest.setValue(pos++, x_src.value(srcIdx));
	return x_dest;
}
/**
 * SetValues - set the attribute values in Instance x (having L labels) to z[],
 * i.e. x[L+a] = z[a]. Mutates and returns x.
 * TODO: call copyValues instead
 */
public static final Instance setValues(Instance x, double z[], int L) {
	int pos = L;
	for(double v : z)
		x.setValue(pos++, v);
	return x;
}
/**
 * Render matrix M as a text block "M = [ ... ]", one row per line,
 * entries formatted via Utils.doubleToString(.,8,2).
 */
public static String printAsTextMatrix(double M[][]) {
	final StringBuilder sb = new StringBuilder("M = [\n");
	for(int r = 0; r < M.length; r++) {
		for(int c = 0; c < M[r].length; c++)
			sb.append(Utils.doubleToString(M[r][c], 8, 2));
		if (r < M.length - 1)
			sb.append("\n");
	}
	sb.append("\n]");
	return sb.toString();
}
/**
 * PruneCountHashMap - remove entries in hm = {(label,count)} where 'count' is no more than 'p'.
 * Mutates 'hm' in place.
 * Rewritten as a single pass over the values() view: Iterator.remove() deletes the
 * backing mapping, so the original's intermediate removal list (and its pointless
 * clear()/null) is unnecessary.
 * @param hm label-to-count map (modified in place)
 * @param p prune threshold; entries with count &lt;= p are dropped
 */
public static void pruneCountHashMap(HashMap<?,Integer> hm, int p) {
	for (Iterator<Integer> it = hm.values().iterator(); it.hasNext(); ) {
		if (it.next() <= p)
			it.remove();
	}
}
/**
 * Remove entries in hm = {(label,count)} whose count, as a fraction of N, is no more than p.
 * Assumes no hm.get(.) &gt; N. Mutates and returns 'hm'.
 * Rewritten as a single pass over the values() view (Iterator.remove() deletes the
 * backing mapping), replacing the original's intermediate removal list.
 * @param hm label-to-count map (modified in place)
 * @param p fraction threshold; entries with count/N &lt;= p are dropped
 * @param N total count to normalise by
 * @return the same (pruned) map
 */
public static HashMap<?,Integer> pruneCountHashMapBasedAsAFractionOf(HashMap<?,Integer> hm, double p, int N) {
	for (Iterator<Integer> it = hm.values().iterator(); it.hasNext(); ) {
		if ((double)it.next() / (double)N <= p)
			it.remove();
	}
	return hm;
}
/**
 * SetLabelsMissing - Set all labels in D to missing. Mutates and returns D.
 */
public static Instances setLabelsMissing(Instances D) {
	final int L = D.classIndex();
	final int N = D.numInstances();
	for(int i = 0; i < N; i++) {
		Instance x = D.instance(i);
		for(int j = 0; j < L; j++)
			x.setMissing(j);
	}
	return D;
}
/**
 * SetLabelsMissing - Set all labels in x to missing (labels = the first classIndex() attributes).
 * Mutates and returns x.
 */
public static Instance setLabelsMissing(Instance x) {
return setLabelsMissing(x,x.classIndex());
}
/**
 * SetLabelsMissing - Set all (L) labels in x to missing. Mutates and returns x.
 */
public static Instance setLabelsMissing(Instance x, int L) {
	for(int j = L - 1; j >= 0; j--)
		x.setMissing(j);
	return x;
}
/**
 * Stack two Instances together row-wise: a copy of D1 with all rows of D2 appended.
 * Neither input is modified.
 */
public static final Instances combineInstances(Instances D1, Instances D2) {
	final Instances merged = new Instances(D1);
	final int n = D2.numInstances();
	for(int i = 0; i < n; i++)
		merged.add(D2.instance(i));
	return merged;
}
/**
 * One-line summary of a dataset: instance count, label count, and label attribute names.
 */
public static final String toDebugString(Instances D) {
	final int L = D.classIndex();
	final StringBuilder sb = new StringBuilder();
	sb.append("D=").append(D.numInstances());
	sb.append(" L=").append(L).append(" {");
	for(int j = 0; j < L; j++)
		sb.append(D.attribute(j).name()).append(" ");
	sb.append("}");
	return sb.toString();
}
/**
 * One-line summary of an instance: its label values and the first (up to) 10 feature values.
 * FIX: the original indexed attributes L..L+9 unconditionally and could throw
 * ArrayIndexOutOfBoundsException on datasets with fewer than 10 feature attributes;
 * the feature window is now clamped to x.numAttributes().
 */
public static final String toDebugString(Instance x) {
	int L = x.classIndex();
	StringBuilder sb = new StringBuilder();
	sb.append("y = [");
	for(int j = 0; j < L; j++) {
		sb.append(x.value(j)+" ");
	}
	sb.append("], x = [");
	// clamp the preview window to the actual number of attributes
	int end = Math.min(L + 10, x.numAttributes());
	for(int j = L; j < end; j++) {
		sb.append(x.value(j)+" ");
	}
	sb.append(" ... ]");
	return sb.toString();
}
/** Unbox an Integer[] into a primitive int[] of the same length and order. */
public static int[] toPrimitive(Integer a[]) {
	final int[] out = new int[a.length];
	int i = 0;
	for(Integer v : a)
		out[i++] = v;
	return out;
}
/**
 * ToBinaryString - use to go through all 'L' binary combinations: l rendered in
 * binary, zero-padded on the left to (at least) width L.
 * Rewritten with a StringBuilder -- the original's repeated "0"+sb string
 * concatenation was quadratic in the padding length.
 * @see A#toDoubleArray(int, int)
 * @param l the number to permute
 * @param L number of labels (minimum width of the result)
 */
public static final String toBinaryString(int l, int L) {
	StringBuilder sb = new StringBuilder(Integer.toBinaryString(l));
	while (sb.length() < L) {
		sb.insert(0, '0');
	}
	return sb.toString();
}
/**
 * Recursive helper for permute(String): appends to 'perm' every permutation formed
 * by 'prefix' followed by some ordering of the characters of 'rest'.
 */
private static void permute(String prefix, String rest, ArrayList<String> perm) {
	if (rest.length() <= 1) {
		// base case: zero or one character left, permutation complete
		perm.add(prefix + rest);
		return;
	}
	for (int i = 0; i < rest.length(); i++) {
		// pick character i, recurse on the remainder
		String remainder = rest.substring(0, i) + rest.substring(i + 1);
		permute(prefix + rest.charAt(i), remainder, perm);
	}
}
/**
 * Permute -- e.g., permute("AB") returns ["AB","BA"].
 * Generates all character permutations of s via the recursive private helper;
 * result size is s.length() factorial (duplicate characters yield duplicate entries).
 */
public static String[] permute(String s) {
ArrayList<String> a = new ArrayList<String>();
permute("", s, a);
return a.toArray(new String[0]);
}
/**
 * Print out a HashMap nicely: one line per key, the key padded to width 31,
 * followed by a formatted value. Values of unrecognised types are skipped
 * (key printed with no value).
 * @param map HashMap
 * @param dp decimal point precision (-1 for no limitation)
 * @return String representation of map
 */
public static String hashMapToString(HashMap<?,?> map, int dp) {
StringBuilder sb = new StringBuilder();
for (Object k : map.keySet()) {
sb.append(Utils.padRight(k.toString(),31));
Object obj = map.get(k);
//sb.append(" : ");
// format by runtime type: Double, double[], int[] or String
if (obj instanceof Double) {
sb.append(Utils.doubleToString((Double)obj,5,dp));
}
else if (obj instanceof double[]) {
sb.append(A.toString((double[])obj,dp));
}
else if (obj instanceof int[]) {
sb.append(A.toString((int[])obj,dp+2));
}
else if (obj instanceof String) {
String s = (String) obj;
// multi-line strings get their own surrounding newlines
if (s.contains("\n"))
sb.append("\n");
sb.append(obj);
if (s.contains("\n"))
sb.append("\n");
}
else {
// don't append if we don't know what it is!
//sb.append(obj);
}
sb.append('\n');
}
return sb.toString();
}
/** Print out a HashMap nicely with unlimited decimal precision (dp = -1). */
public static String hashMapToString(HashMap<?,?> map) {
return hashMapToString(map,-1);
}
/**
 * GetIntegerOption - parse 'op' to an integer if we can, else use default 'def'.
 * Any parse failure (including a null 'op') logs a warning to stderr and falls
 * back to the default.
 * @param op string to parse
 * @param def fallback value
 * @return parsed integer, or 'def' on failure
 */
public static int getIntegerOption(String op, int def) {
try {
return Integer.parseInt(op);
} catch(Exception e) {
System.err.println("[Warning] Failed to parse "+op+" to integer number; using default of "+def);
return def;
}
}
/** Clear Labels -- set the value of all label attributes (indices 0..classIndex()-1) to 0.0. */
public static void clearLabels(Instance x) {
	final int numLabels = x.classIndex();
	for(int j = 0; j < numLabels; j++)
		x.setValue(j, 0.0);
}
/**
 * GetXfromD - Extract the feature (non-label) attributes as a double X[][] from Instances D.
 * Labels occupy the first classIndex() attributes; features follow.
 * TODO: getXfromInstances would be a better name.
 */
public static double[][] getXfromD(Instances D) {
	final int N = D.numInstances();
	final int L = D.classIndex();
	final int d = D.numAttributes() - L;
	final double[][] X = new double[N][d];
	for(int i = 0; i < N; i++) {
		Instance x = D.instance(i);
		for(int k = 0; k < d; k++)
			X[i][k] = x.value(k + L);
	}
	return X;
}
/**
 * GetYfromD - Extract the labels (the first classIndex() attributes) as a double Y[][] from Instances D.
 * TODO: getYfromInstances would be a better name.
 */
public static double[][] getYfromD(Instances D) {
	final int L = D.classIndex();
	final int N = D.numInstances();
	final double[][] Y = new double[N][L];
	for(int i = 0; i < N; i++) {
		Instance x = D.instance(i);
		for(int k = 0; k < L; k++)
			Y[i][k] = x.value(k);
	}
	return Y;
}
/**
 * GetxfromInstance - Extract the feature attributes (everything from classIndex() on)
 * as a double x[] from an Instance.
 */
public static double[] getxfromInstance(Instance xy) {
int L = xy.classIndex();
double xy_[] = xy.toDoubleArray();
// features are the tail of the full value array, after the L labels
return Arrays.copyOfRange(xy_,L,xy_.length);
}
/**
 * ReplaceZasAttributes - data Z[][] will be the new attributes in D.
 * Deletes all existing feature attributes from D (keeping the L labels),
 * then appends Z's columns as the new features. Mutates D.
 * @param D dataset (of N instances), modified in place
 * @param Z attribute space (of N rows, H columns)
 * @param L number of classes / labels.
 */
public static Instances replaceZasAttributes(Instances D, double Z[][], int L) {
// temporarily neutralise the class index so attributes can be deleted
D.setClassIndex(0);
int m = D.numAttributes()-L;
// repeatedly delete the first feature attribute (at index L) until only labels remain
for(int j = 0; j < m; j++) {
D.deleteAttributeAt(L);
}
return addZtoD(D, Z, L);
}
/**
 * ReplaceZasClasses - data Z[][] will be the new class labels in D.
 * Deletes the first L attributes (the old labels) and inserts binary label
 * attributes derived from Z (threshold 0.5). Mutates D.
 * @param D dataset (of N instances), modified in place
 * @param Z new label space (of N rows); column count becomes the new label count
 * @param L number of existing leading label attributes to remove
 */
public static Instances replaceZasClasses(Instances D, double Z[][], int L) {
// clear the class index so the leading label attributes can be deleted
D.setClassIndex(-1);
for(int j = 0; j < L; j++) {
D.deleteAttributeAt(0);
}
return insertZintoD(D, Z);
}
/**
 * InsertZintoD - Insert data Z[][] into Instances D as leading binary label attributes.
 * Each new nominal attribute "cj" has values {"0","1"}; Z entries are binarised at 0.5.
 * Sets D's class index to the number of inserted labels. Mutates and returns D.
 * NOTE: Assumes binary labels!
 * @see #addZtoD(Instances, double[][], int)
 */
private static Instances insertZintoD(Instances D, double Z[][]) {
int L = Z[0].length;
// add attributes
for(int j = 0; j < L; j++) {
D.insertAttributeAt(new Attribute("c"+j,Arrays.asList(new String[]{"0","1"})),j);
}
// add values Z[0]...Z[N] to D
// (note that if D.numInstances() < Z.length, only some are added)
for(int j = 0; j < L; j++) {
for(int i = 0; i < D.numInstances(); i++) {
// binarise at threshold 0.5
D.instance(i).setValue(j,Z[i][j] > 0.5 ? 1.0 : 0.0);
}
}
D.setClassIndex(L);
return D;
}
/**
 * AddZtoD - Add attribute space Z[N][H] (N rows of H columns) to Instances D, which should have N rows also.
 * Inserts H numeric attributes "A0".."A{H-1}" starting at index L, fills them from Z,
 * and sets the class index to L. Mutates and returns D.
 * @param D dataset (of N instances), modified in place
 * @param Z attribute space (of N rows, H columns)
 * @param L column to add Z from in D
 */
private static Instances addZtoD(Instances D, double Z[][], int L) {
int H = Z[0].length;
int N = D.numInstances();
// add attributes
for(int a = 0; a < H; a++) {
D.insertAttributeAt(new Attribute("A"+a),L+a);
}
// add values Z[0]...Z[N] to D
for(int a = 0; a < H; a++) {
for(int i = 0; i < N; i++) {
D.instance(i).setValue(L+a,Z[i][a]);
}
}
D.setClassIndex(L);
return D;
}
/**
 * Get K - get the number of distinct values observed for each of the L labels in D.
 * @param D a dataset
 * @return a vector of size L: K_1,...,K_L
 */
public int[] getK(Instances D) {
	final int L = D.classIndex();
	final int[] K = new int[L];
	for(int j = 0; j < L; j++) {
		// collect the distinct (truncated-to-int) values of label j
		HashSet<Integer> distinct = new HashSet<Integer>();
		for(Instance x : D)
			distinct.add((int)x.value(j));
		K[j] = distinct.size();
	}
	return K;
}
/**
 * Load Object - load the Object stored in 'filename'.
 * FIX: the original leaked the FileInputStream if the ObjectInputStream
 * constructor or readObject threw; the underlying stream is now always
 * closed via try/finally (closing it also releases the ObjectInputStream).
 * @param filename path of the serialised object
 * @return the deserialised object
 * @throws Exception on I/O or deserialisation failure
 */
public static final Object loadObject(String filename) throws Exception {
	FileInputStream streamIn = new FileInputStream(filename);
	try {
		ObjectInputStream objectinputstream = new ObjectInputStream(streamIn);
		return objectinputstream.readObject();
	} finally {
		streamIn.close();
	}
}
/**
 * Save Object - save 'object' into file 'filename' via Java serialisation.
 * FIX: the original leaked the FileOutputStream if the ObjectOutputStream
 * constructor or writeObject threw; the underlying stream is now always
 * closed via try/finally.
 * @param object object to serialise (must be Serializable)
 * @param filename destination path (overwritten if it exists)
 * @throws Exception on I/O or serialisation failure
 */
public static final void saveObject(Object object, String filename) throws Exception {
	FileOutputStream fout = new FileOutputStream(filename);
	try {
		ObjectOutputStream oos = new ObjectOutputStream(fout);
		oos.writeObject(object);
		oos.flush();
		oos.close();
	} finally {
		fout.close();
	}
}
/**
 * Fixes the relation name by adding the "-C" attribute to it if necessary.
 * Convenience overload using 0 class attributes.
 *
 * @param data the dataset to fix (relation name modified in place)
 */
public static void fixRelationName(Instances data) {
fixRelationName(data, 0);
}
/**
* Fixes the relation name by adding the "-C" attribute to it if necessary.
*
* @param data the dataset to fix
* @param numClassAtts the number of class attributes (0 for none, >0 for attributes at start, <0 for attributes at end)
*/
public static void fixRelationName(Instances data, int numClassAtts) {
if (data.relationName().indexOf(":") == -1)
data.setRelationName(data.relationName() + ": -C " + numClassAtts);
}
	/**
	 * Prepares the class index of the data.
	 * The number of class attributes is parsed out of the '-C'/'-c' option that
	 * MEKA stores in the dataset's @relation name (e.g. 'name: -C 3').
	 *
	 * @param data the data to prepare
	 * @throws Exception if preparation fails
	 */
	public static void prepareData(Instances data) throws Exception {
		String doptions[] = null;
		try {
			// the @relation name encodes command-line style options
			doptions = MLUtils.getDatasetOptions(data);
		}
		catch(Exception e) {
			throw new Exception("[Error] Failed to Get Options from @Relation Name", e);
		}
		try {
			// accept either '-C' or '-c' for the number of class attributes
			int c = (Utils.getOptionPos('C', doptions) >= 0) ? Integer.parseInt(Utils.getOption('C',doptions)) : Integer.parseInt(Utils.getOption('c',doptions));
			// if negative, then invert
			if ( c < 0) {
				c = -c;
				// NOTE(review): F.mulan2meka appears to produce a converted copy that is
				// assigned only to the local variable 'data'; the caller's Instances keeps
				// its original attribute order and class index in this branch -- confirm
				// this is intended before relying on the c<0 path.
				data = F.mulan2meka(data,c);
			}
			// set c
			data.setClassIndex(c);
		}
		catch (Exception e) {
			throw new Exception(
					"Failed to parse options stored in relation name; expected format for relation name:\n"
							+ "  'name: options'\n"
							+ "But found:\n"
							+ "  '" + data.relationName() + "'\n"
							+ "Format example:\n"
							+ "  'Example_Dataset: -C 3 -split-percentage 50'\n"
							+ "'-C 3' specifies the number of target attributes to be 3. See tutorial for more information.",
							e);
		}
	}
/**
* Attempts to determine the number of classes/class index from the
* specified file. In case of ARFF files, only the header will get loaded.
*
* @param file the file to inspect
* @return the class index of the file, Integer.MAX_VALUE in case of error
*/
public static int peekClassIndex(File file) {
int result;
DataSource source;
Instances structure;
result = Integer.MAX_VALUE;
try {
source = new DataSource(file.getAbsolutePath());
structure = source.getStructure();
prepareData(structure);
result = structure.classIndex();
}
catch (Exception e) {
// ignored
}
return result;
}
/**
* For retrieving some dataset statistics on the command line.
* Note: -L, -d does not work for Mulan format (labels at the end)
*/
public static final void main (String args[]) throws Exception {
/*
* If we are given an argument, load a file and extract some info and exit.
*/
if (args.length > 0) {
//System.out.println("loading ...");
Instances D = new Instances(new BufferedReader(new FileReader(args[0])));
int N = D.numInstances();
int L = Integer.parseInt(Utils.getOption('C',MLUtils.getDatasetOptions(D)));
D.setClassIndex(L);
switch(args[1].charAt(0)) {
case 'L' : System.out.println(L); // return the number of labels of D
break;
case 'N' : System.out.println(D.numInstances()); // return the number of Instances of D
break;
case 'd' : System.out.println(D.numAttributes()-L); // reurns the number of (non-label) attributes of D
break;
case 'A' : System.out.println(D.numAttributes()); // returns the number of ALL attributes of D
break;
case 'l' : System.out.println(MLUtils.labelCardinality(D)); // reurns the label cardinalities
break;
case 'P' : System.out.println(Arrays.toString(MLUtils.labelCardinalities(D))); // reurns the label cardinalities
break;
case 'C' : System.out.println(hashMapToString(MLUtils.countCombinations(D,L))); // counts
break;
case 'p' : System.out.println("collecting ...");
HashMap<LabelSet,Integer> hm = PSUtils.countCombinationsSparse(D,L);
System.out.println("pruning ...");
//MLUtils.pruneCountHashMap(hm,1);
//System.out.println(""+hm);
System.out.println("writing ...");
saveObject(hm, "hm-NEW.serialized");
break;
default : System.out.println(MLUtils.getDatasetName(D)); // returns the name of D
break;
}
return;
}
/*
* Else, just do some tests ...
*/
else {
// NEED THIS FOR SOME SCRIPTS
/*
String p[] = permute(args[0]);
int i = 0;
for(String s: p) {
System.out.println(""+(i++)+" "+s);
}
*/
//System.out.println(""+Arrays.toString(invert(new int[]{1,2},6)));
//System.out.println(""+Arrays.toString(invert(new int[]{0,2},6)));
//System.out.println(""+Arrays.toString(invert(new int[]{5,2},6)));
return;
}
}
/**
* Transforms the predictions into a ranking array. The ranking array can have multiple
* entries at the same rank, e.g., if the predictions are
* [0.0, 0.75, 0.5, 0.5, 0.25, 1.0 ]
* the result would be
* [5, 1, 3, 3, 4, 0]
*
* @param predictions The predictions array.
* @return The ranking of the predictions with equal ranks.
*/
public static final int[] predictionsToRanking(double[] predictions) {
int[] res = new int[predictions.length];
for (int i = 0; i < predictions.length ; i++ ) {
int countHigherOrEqual = 0;
for (int j = 0; j < predictions.length ; j++ ) {
if (predictions[i] <= predictions[j]) {
countHigherOrEqual ++;
}
}
res[i] = countHigherOrEqual -1;
}
return res;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/MatrixUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* MatrixUtils.java
* Copyright (C) 2015 University of Mainz, Germany (to/from instances)
*/
package meka.core;
import weka.core.DenseInstance;
import weka.core.Instances;
import weka.core.matrix.Matrix;
import java.util.Arrays;
import java.util.Random;
/**
* Utility functions relating to matrices.
*
* @author Jesse Read (jesse@tsc.uc3m.es)
* @author Joerg Wicker (wicker@uni-mainz.de)
* @version $Revision$
*/
public class MatrixUtils {

	/**
	 * Helper method that transforms an Instances object to a Matrix object.
	 *
	 * @param inst The Instances to transform.
	 * @return The resulting Matrix object.
	 */
	public static Matrix instancesToMatrix(Instances inst){
		double[][] darr = new double[inst.numInstances()][inst.numAttributes()];
		for (int i = 0; i < inst.numAttributes(); i++) {
			// attributeToDoubleArray(i) builds a fresh column copy each call,
			// so call it once per attribute instead of once per cell
			double[] col = inst.attributeToDoubleArray(i);
			for (int j = 0; j < col.length; j++) {
				darr[j][i] = col[j];
			}
		}
		return new Matrix(darr);
	}

	/**
	 * Helper method that transforms a Matrix object to an Instances object.
	 *
	 * @param mat The Matrix to transform.
	 * @param patternInst the Instances template to use
	 * @return The resulting Instances object.
	 */
	public static Instances matrixToInstances(Matrix mat, Instances patternInst){
		Instances result = new Instances(patternInst);
		for (int i = 0; i < mat.getRowDimension(); i++) {
			double[] row = mat.getArray()[i];
			DenseInstance denseInst = new DenseInstance(1.0, row);
			result.add(denseInst);
		}
		return result;
	}

	/**
	 * GetCol - return the k-th column of M (as a vector).
	 */
	public static double[] getCol(double[][] M, int k) {
		double[] col_k = new double[M.length];
		for (int i = 0; i < M.length; i++) {
			col_k[i] = M[i][k];
		}
		return col_k;
	}

	/**
	 * GetCol - return the k-th column of M (as a vector).
	 */
	public static int[] getCol(int[][] M, int k) {
		int[] col_k = new int[M.length];
		for (int i = 0; i < M.length; i++) {
			col_k[i] = M[i][k];
		}
		return col_k;
	}

	/** Prepend a bias value of 1.0 to vector x. */
	public static double[] addBias(double[] x) {
		final double[] x2 = new double[x.length+1];
		x2[0] = 1.0;
		for(int j = 0; j < x.length; j++) {
			x2[j+1] = x[j];
		}
		return x2;
	}

	/** Prepend a bias column of 1.0s to matrix M. */
	public static double[][] addBias(double[][] M) {
		final double[][] C = new double[M.length][M[0].length+1];
		for (int i = 0; i < M.length; i++) {
			C[i][0] = 1.0;
			for(int j = 0; j < M[i].length; j++) {
				C[i][j+1] = M[i][j];
			}
		}
		return C;
	}

	/** Prepend a bias column of 1.0s to Jama matrix M. */
	public static Jama.Matrix addBias(Jama.Matrix M) {
		double[][] M_ = M.getArray();
		final double[][] C = new double[M_.length][M_[0].length+1];
		for (int i = 0; i < M_.length; i++) {
			C[i][0] = 1.0;
			for(int j = 0; j < M_[i].length; j++) {
				C[i][j+1] = M_[i][j];
			}
		}
		return new Jama.Matrix(C);
	}

	/** Drop the leading bias value from vector x. */
	public static double[] removeBias(double[] x) {
		final double[] x2 = new double[x.length-1];
		for(int j = 1; j < x.length; j++) {
			x2[j-1] = x[j];
		}
		return x2;
	}

	/** Drop the leading bias column from Jama matrix M. */
	public static Jama.Matrix removeBias(Jama.Matrix M) {
		return new Jama.Matrix(removeBias(M.getArray()));
	}

	/** Drop the leading bias column from matrix M. */
	public static double[][] removeBias(double[][] M) {
		final double[][] C = new double[M.length][M[0].length-1];
		for (int i = 0; i < M.length; i++) {
			for(int j = 1; j < M[i].length; j++) {
				C[i][j-1] = M[i][j];
			}
		}
		return C;
	}

	/**
	 * Multiply - multiply each value in A[][] by constant K.
	 */
	public static double[][] multiply(final double[][] A, double K) {
		final double[][] C = new double[A.length][A[0].length];
		for (int i = 0; i < A.length; i++) {
			for(int j = 0; j < A[i].length; j++) {
				C[i][j] = A[i][j] * K;
			}
		}
		return C;
	}

	public static String toString(Jama.Matrix M) {
		return toString(M.getArray());
	}

	/**
	 * ToString - return a String representation (to adp decimal places).
	 */
	public static String toString(double M_[][], int adp) {
		StringBuilder sb = new StringBuilder();
		for(int j = 0; j < M_.length; j++) {
			for(int k = 0; k < M_[j].length; k++) {
				//sb.append(Utils.doubleToString(v[k],w,adp));
				double d = M_[j][k];
				String num = String.format("%6.2f", d);
				if (adp == 0) // cheap override
					num = String.format("%2.0f", d);
				sb.append(num);
			}
			sb.append("\n");
		}
		return sb.toString();
	}

	/**
	 * ToString - return a String representation.
	 */
	public static String toString(double M_[][]) {
		return toString(M_,2);
	}

	/**
	 * ToString - return a String representation.
	 */
	public static String toString(int M_[][]) {
		StringBuilder sb = new StringBuilder();
		for(int j = 0; j < M_.length; j++) {
			for(int k = 0; k < M_[j].length; k++) {
				String num = String.format("%5d", M_[j][k]);
				sb.append(num);
			}
			sb.append("\n");
		}
		return sb.toString();
	}

	/**
	 * ToString - return a String representation of 'M', in Matlab format, called 'name'.
	 */
	public static String toString(double M[][], String name) {
		StringBuilder sb = new StringBuilder(name+" = [\n");
		for(int j = 0; j < M.length; j++) {
			for(int k = 0; k < M[j].length; k++) {
				sb.append(String.format("%6.2f ", M[j][k]));
			}
			sb.append(";\n");
		}
		sb.append("]");
		return sb.toString();
	}

	/**
	 * Threshold - apply threshold t to matrix P[][]: entries strictly above t become 1, else 0.
	 */
	public static final double[][] threshold(double P[][], double t) {
		double X[][] = new double[P.length][P[0].length];
		for(int i = 0; i < P.length; i++) {
			for(int j = 0; j < P[i].length; j++) {
				X[i][j] = (P[i][j] > t) ? 1. : 0.;
			}
		}
		return X;
	}

	/**
	 * Flatten - turn Matrix [0 1; 2 3] into vector [0 1 2 3].
	 */
	public static int[] flatten(int M[][]) {
		int v[] = new int[M.length * M[0].length];
		int k = 0;
		for(int i = 0; i < M.length; i++) {
			for(int j = 0; j < M[i].length; j++) {
				v[k++] = M[i][j];
			}
		}
		return v;
	}

	/**
	 * Flatten - turn Matrix [0. 1.; 2. 3.] into vector [0. 1. 2. 3.].
	 */
	public static double[] flatten(double M[][]) {
		double v[] = new double[M.length * M[0].length];
		int k = 0;
		for(int i = 0; i < M.length; i++) {
			for(int j = 0; j < M[i].length; j++) {
				v[k++] = M[i][j];
			}
		}
		return v;
	}

	/** Element-wise A - B (dimensions must agree). */
	public static double[][] subtract(double[][] A, double[][] B) {
		double[][] C = new double[A.length][A[0].length];
		for (int i = 0; i < A.length; i++) {
			for (int j = 0; j < A[i].length; j++ ) {
				C[i][j] = A[i][j] - B[i][j];
			}
		}
		return C;
	}

	/**
	 * absolute value (element-wise)
	 */
	public static double[][] abs(double[][] A) {
		double[][] C = new double[A.length][A[0].length];
		for (int i = 0; i < A.length; i++) {
			for (int j = 0; j < A[i].length; j++ ) {
				C[i][j] = Math.abs(A[i][j]);
			}
		}
		return C;
	}

	/**
	 * Sum of all entries in M.
	 * NOTE(review): despite the name 'SS' (squared sum), this sums the raw entries,
	 * not their squares; behaviour kept as-is since callers may rely on it -- confirm
	 * before changing.
	 */
	public static double SS(double M[][]) {
		double sum = 0;
		for (int i = 0; i < M.length; i++) {
			for (int j = 0; j < M[i].length; j++) {
				sum += M[i][j];
			}
		}
		return sum;
	}

	/**
	 * Sigmoid / Logistic function
	 */
	public static final double sigma(double a) {
		return 1.0/(1.0+Math.exp(-a));
	}

	/**
	 * Sigmoid function applied to vector
	 */
	public static final double[] sigma(double v[]) {
		double u[] = new double[v.length];
		for(int j = 0; j < v.length; j++) {
			u[j] = sigma(v[j]);
		}
		return u;
	}

	/**
	 * Sigmoid function applied to matrix (2D array)
	 */
	public static final double[][] sigma(double A[][]) {
		double X[][] = new double[A.length][A[0].length];
		for(int i = 0; i < A.length; i++) {
			for(int j = 0; j < A[i].length; j++) {
				X[i][j] = sigma(A[i][j]);
			}
		}
		return X;
	}

	/**
	 * Sigmoid function applied to Matrix
	 */
	public static final Jama.Matrix sigma(Jama.Matrix A) {
		return new Jama.Matrix(sigma(A.getArray()));
	}

	/**
	 * Derivative of the sigmoid function applied to scalar
	 */
	public static final double dsigma(double a) {
		double s = sigma(a);
		return s * (1. - s);
	}

	/**
	 * Derivative of the sigmoid function applied to vector
	 */
	public static final double[] dsigma(double v[]) {
		double u[] = new double[v.length];
		for(int j = 0; j < v.length; j++) {
			u[j] = dsigma(v[j]);
		}
		return u;
	}

	/**
	 * Derivative of the sigmoid function applied to Matrix
	 */
	public static final double[][] dsigma(double A[][]) {
		double X[][] = new double[A.length][A[0].length];
		for(int i = 0; i < A.length; i++) {
			for(int j = 0; j < A[i].length; j++) {
				X[i][j] = dsigma(A[i][j]);
			}
		}
		return X;
	}

	/**
	 * Derivative of the sigmoid function applied to Jama Matrix
	 */
	public static final Jama.Matrix dsigma(Jama.Matrix A) {
		double A_[][] = A.getArray();
		double X[][] = new double[A_.length][A_[0].length];
		for(int i = 0; i < A_.length; i++) {
			for(int j = 0; j < A_[i].length; j++) {
				X[i][j] = dsigma(A_[i][j]);
			}
		}
		return new Jama.Matrix(X);
	}

	/**
	 * Deep Copy - Make a deep copy of M[][].
	 */
	public static int[][] deep_copy(int M[][]) {
		int[][] C = new int[M.length][];
		for(int i = 0; i < C.length; i++) {
			C[i] = Arrays.copyOf(M[i], M[i].length);
		}
		return C;
	}

	/**
	 * Ones - return a vector full of 1s.
	 */
	public static double[] ones(int L) {
		double m[] = new double[L];
		Arrays.fill(m,1.);
		return m;
	}

	/**
	 * Sum - sum this matrix (row-wise: s[j] is the sum of row j).
	 */
	public static int[] sum(int M[][]) {
		int s[] = new int[M.length];
		for(int j = 0; j < M.length; j++) {
			for(int k = 0; k < M[j].length; k++) {
				s[j] += M[j][k];
			}
		}
		return s;
	}

	/**
	 * Sum - sum this matrix (row-wise: s[j] is the sum of row j).
	 */
	public static double[] sum(double M[][]) {
		double s[] = new double[M.length];
		for(int j = 0; j < M.length; j++) {
			for(int k = 0; k < M[j].length; k++) {
				s[j] += M[j][k];
			}
		}
		return s;
	}

	/** Fill row k of M with the value val (in place). */
	public static final void fillRow(double M[][], int k, double val) {
		for(int j = 0; j < M[k].length; j++) {
			M[k][j] = val;
		}
	}

	/** Fill column k of M with the value val (in place). */
	public static final void fillCol(double M[][], int k, double val) {
		for(int i = 0; i < M.length; i++) {
			M[i][k] = val;
		}
	}

	/** A 2D array of Gaussian random numbers */
	public static double[][] randn(int rows, int cols, Random r) {
		double X[][] = new double[rows][cols];
		for(int i = 0; i < rows; i++) {
			for(int j = 0; j < cols; j++) {
				X[i][j] = r.nextGaussian();
			}
		}
		return X;
	}

	/** A matrix of Gaussian random numbers */
	public static Jama.Matrix randomn(int nrows, int ncols, Random r) {
		return new Jama.Matrix(randn(nrows, ncols, r));
	}

	/** Copy a 2D array */
	public static final double[][] copy(double P[][]) {
		double X[][] = new double[P.length][P[0].length];
		for(int i = 0; i < P.length; i++) {
			for(int j = 0; j < P[i].length; j++) {
				X[i][j] = P[i][j];
			}
		}
		return X;
	}

	/**
	 * sample from matrix: entry is 1 with probability P[i][j], else 0
	 */
	public static final double[][] threshold(double P[][], Random r) {
		double X[][] = new double[P.length][P[0].length];
		for(int i = 0; i < P.length; i++) {
			for(int j = 0; j < P[i].length; j++) {
				X[i][j] = (P[i][j] > r.nextDouble()) ? 1.0 : 0.0;
			}
		}
		return X;
	}

	/**
	 * threshold function applied to vector
	 */
	public static final double[] threshold(double v[], double t) {
		double u[] = new double[v.length];
		for(int j = 0; j < v.length; j++) {
			u[j] = (v[j] > t) ? 1. : 0.;
		}
		return u;
	}

	/**
	 * threshold function applied to a Jama Matrix
	 */
	public static final Jama.Matrix threshold(Jama.Matrix M, double t) {
		return new Jama.Matrix(threshold(M.getArray(), t));
	}

	/** A' * B */
	public static double[][] transposeMultiply(double[][] A, double[][] B) {
		double[][] At = getTranspose(A);
		return multiply(At, B);
	}

	/** A * B' */
	public static double[][] multiplyTranspose(double[][] A, double[][] B) {
		double[][] Bt = getTranspose(B);
		return multiply(A, Bt);
	}

	/**
	 * Multiply - multiply matrices A and B together.
	 * @throws IllegalArgumentException if the inner dimensions do not agree
	 */
	public static double[][] multiply(final double[][] A, final double[][] B) {
		int aRows = A.length;
		int aCols = A[0].length;
		int bRows = B.length;
		int bCols = B[0].length;
		if (aCols != bRows) // no can do
			throw new IllegalArgumentException(" A.cols ("+aCols+") != B.rows ("+bRows+") ");
		double C[][] = new double[aRows][bCols];
		// i-k-j loop order for cache-friendly row-major access
		for (int i = 0; i < aRows; i++) {
			for (int k = 0; k < aCols; k++) {
				for(int j = 0; j < bCols; j++) {
					C[i][j] += A[i][k] * B[k][j];
				}
			}
		}
		return C;
	}

	/**
	 * Multiply - multiply vectors a and b together (element-wise).
	 */
	public static double[] multiply(final double[] a, final double[] b) throws Exception {
		Jama.Matrix a_ = new Jama.Matrix(a,1);
		Jama.Matrix b_ = new Jama.Matrix(b,1);
		Jama.Matrix c_ = a_.arrayTimes(b_);
		return c_.getArray()[0];
	}

	/** Transpose of M. */
	public static double[][] getTranspose(double[][] M) {
		double[][] C = new double[M[0].length][];
		for (int i = 0; i < M[0].length; i++) {
			C[i] = getCol(M, i);
		}
		return C;
	}

	/** Element-wise A + B (dimensions must agree). */
	public static double[][] add(double[][] A, double[][] B) {
		double[][] C = new double[A.length][A[0].length];
		for (int i = 0; i < A.length; i++) {
			for (int j = 0; j < A[i].length; j++ ) {
				C[i][j] = A[i][j] + B[i][j];
			}
		}
		return C;
	}

	/** Add scalar v to every entry of A. */
	public static double[][] add(double[][] A, double v) {
		double[][] C = new double[A.length][A[0].length];
		for (int i = 0; i < A.length; i++) {
			for (int j = 0; j < A[i].length; j++ ) {
				C[i][j] = A[i][j] + v;
			}
		}
		return C;
	}

	/** Sum of squared component differences between two vectors. */
	public static double squaredError(double[] vector1, double[] vector2) {
		double squaredError = 0;
		for (int i = 0; i < vector1.length; i++) {
			squaredError += (vector1[i] - vector2[i]) * (vector1[i] - vector2[i]);
		}
		return squaredError;
	}

	/** Mean (over rows) of the squared error between two batches of vectors. */
	public static double meanSquaredError(double[][] vectorBatch1, double[][] vectorBatch2) {
		double error = 0;
		for (int i = 0; i < vectorBatch1.length; i++) {
			error += squaredError(vectorBatch1[i], vectorBatch2[i]);
		}
		return error / vectorBatch1.length;
	}

	/** dot product of two vectors */
	public static double dot(double v[], double u[]) {
		double sum = 0.0;
		for(int i = 0; i < v.length; i++) {
			sum += (v[i] * u[i]);
		}
		return sum;
	}

	/**
	 * Sample - Returns Matrix C where each value C[j][k] is 1 with probability M[j][k] and 0 otherwise.
	 * (assume each value is in [0,1])
	 */
	public static double[][] sample(double M[][], Random r) {
		return threshold(M, r);
	}

	/**
	 * Sample - Returns vector c where each value c[j] is 1 with probability v[j] and 0 otherwise.
	 * (assume each value is in [0,1])
	 */
	public static double[] sample(double v[], Random r) {
		return threshold(new double[][]{v}, r)[0];
	}

	/**
	 * Sample - Returns Matrix C where each value C[j][k] is 1 with probability M[j][k] and 0 otherwise.
	 * (assume each value is in [0,1])
	 */
	public static Jama.Matrix sample(Jama.Matrix M, Random r) {
		return new Jama.Matrix(sample(M.getArray(),r));
	}

	/** Pretty-print M_ via Jama (width 5, 3 decimal places). */
	public static void printMatrix(double M_[][]) {
		Jama.Matrix M = new Jama.Matrix(M_);
		M.print(5,3);
	}

	public static void printDim(Jama.Matrix M) {
		printDim(M.getArray());
	}

	/** Print the dimensions of M to stdout. */
	public static void printDim(double M[][]) {
		System.out.println(""+M.length+" x "+M[0].length+" (rows x cols)");
	}

	public static String getDim(double M[][]) {
		return ""+M.length+" x "+M[0].length+" (rows x cols)";
	}

	public static String getDim(Jama.Matrix M) {
		return getDim(M.getArray());
	}

	/**
	 * returns argmax_{j,k} M[j][k] as {j,k} ({-1,-1} if M has no entries).
	 */
	public static int[] maxIndices(double M[][]) {
		// start from negative infinity: the previous Double.MIN_VALUE is the smallest
		// *positive* double, which made this return {-1,-1} for all-negative matrices
		double max = Double.NEGATIVE_INFINITY;
		int i_max = -1;
		int j_max = -1;
		for(int i = 0; i < M.length; i++) {
			for(int j = 0; j < M[i].length; j++) {
				if (M[i][j] > max) {
					max = M[i][j];
					i_max = i;
					j_max = j;
				}
			}
		}
		return new int[]{i_max,j_max};
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/Metrics.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import java.util.ArrayList;
import java.util.List;
import weka.classifiers.evaluation.ThresholdCurve;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Utils;
/**
* Metrics.java - Evaluation Metrics.
* <p>L_ are loss/error measures (less is better)</p>
* <p>P_ are payoff/accuracy measures (higher is better).</p>
* For more on the evaluation and threshold selection implemented here, see
* <br> Jesse Read, <i>Scalable Multi-label Classification</i>. PhD Thesis, University of Waikato, Hamilton, New Zealand (2010).
* @author Jesse Read (jesse@tsc.uc3m.es)
* @version Feb 2013
*/
public abstract class Metrics {
/**
* Helper function for missing values in the labels. Transforms a double array to an
* int array.
*
* @return the new array
*/
public static int[] toIntArray(final double[] doubles) {
int[] res = new int[doubles.length];
for (int i = 0; i < doubles.length; i++) {
res[i] = (int) doubles[i];
}
return res;
}
/**
* Helper function for missing values in the labels. Simply checks if all
* real labels are missing.
*
* @return If all labels are missing
*/
public static boolean allMissing(final int[] real) {
for (int i = 0; i < real.length; i++) {
if (real[i] != -1) {
return false;
}
}
return true;
}
/**
* Helper function for missing values in the labels. Simply returns number of
* real missing labels.
*
* @return Number of missing labels.
*/
public static int numberOfMissingLabels(final int[] real) {
int missing = 0;
for (int i = 0; i < real.length; i++) {
if (real[i] == -1) {
missing++;
}
}
return missing;
}
/**
* Helper function for missing values in the labels and missing predictions
* (i.e., from abstaining classifiers). Aligns the predictions
* with the real labels, discarding labels and predictions that are missing.
*
* @param real The real values from the data
* @param pred The predicted values from the classifiers
* @return Aligned predicted and real labels.
*/
public static int[][] align(final int[] real, final int[] pred) {
int missing = numberOfMissingLabels(real);
int[] _real = new int[real.length - missing];
int[] _pred = new int[real.length - missing];
int offset = 0;
for (int i = 0; i < real.length; i++) {
if (real[i] == -1 || pred[i] == -1) {
offset++;
continue;
}
_real[i - offset] = real[i];
_pred[i - offset] = pred[i];
}
int[][] res = new int[2][0];
res[0] = _real;
res[1] = _pred;
return res;
}
/**
* Helper function for missing values in the labels and missing predictions
* (i.e., from abstaining classifiers). Aligns the predictions
* with the real labels, discarding labels and predictions that are missing.
*
* @param real The real values from the data
* @param pred The predicted values from the classifiers
* @return Aligned predicted and real labels.
*/
public static double[][] align(final int[] real, final double[] pred) {
int missing = numberOfMissingLabels(real);
double[] _real = new double[real.length - missing];
double[] _pred = new double[real.length - missing];
int offset = 0;
for (int i = 0; i < real.length; i++) {
if (real[i] == -1 || pred[i] == -1.0 || Double.isNaN(pred[i])) {
offset++;
continue;
}
_real[i - offset] = real[i];
_pred[i - offset] = pred[i];
}
double[][] res = new double[2][0];
res[0] = _real;
res[1] = _pred;
return res;
}
	/** Exact Match, i.e., 1 - [0/1 Loss]. */
	public static double P_ExactMatch(final int Y[][], final int Ypred[][]) {
		// works with missing
		// complement of the per-example 0/1 loss
		return 1. - L_ZeroOne(Y, Ypred);
	}
/** 0/1 Loss. */
public static double L_ZeroOne(final int y[], final int ypred[]) {
// works with missing
int[][] aligned = align(y, ypred);
int[] yAligned = aligned[0];
int[] ypredAligned = aligned[1];
int L = yAligned.length;
for (int j = 0; j < L; j++) {
if (yAligned[j] != ypredAligned[j]) {
return 1.;
}
}
return 0.;
}
/** 0/1 Loss. */
public static double L_ZeroOne(final int Y[][], final int Ypred[][]) {
// works with missing
int allMissings = 0;
int N = Y.length;
double loss = 0.0;
for (int i = 0; i < N; i++) {
if (allMissing(Y[i])) {
allMissings++;
continue;
}
double curLoss = L_ZeroOne(Y[i], Ypred[i]);
if (Double.isNaN(curLoss)) {
allMissings++;
continue;
}
loss += curLoss;
}
return loss / (N - allMissings);
}
/** Hamming loss. */
public static double L_Hamming(final int y[], final int ypred[]) {
// works with missing
int[][] aligned = align(y, ypred);
int[] yAligned = aligned[0];
int[] ypredAligned = aligned[1];
int L = yAligned.length;
if (L == 0) {
return Double.NaN;
}
double loss = 0.0;
for (int j = 0; j < L; j++) {
if (yAligned[j] != ypredAligned[j]) {
loss++;
}
}
return loss / L;
}
/** Hamming loss. */
public static double L_Hamming(final int Y[][], final int Ypred[][]) {
// works with missing
int N = Y.length;
int allMissings = 0;
double loss = 0.0;
for (int i = 0; i < N; i++) {
if (allMissing(Y[i])) {
allMissings++;
continue;
}
double curLoss = L_Hamming(Y[i], Ypred[i]);
if (Double.isNaN(curLoss)) {
allMissings++;
continue;
}
loss += curLoss;
}
return loss / (N - allMissings);
}
	/** Hamming score aka label accuracy. */
	public static double P_Hamming(final int Y[][], final int Ypred[][]) {
		// works with missing
		// complement of the Hamming loss
		return 1. - L_Hamming(Y, Ypred);
	}
	/** Hamming score aka label accuracy. */
	public static double P_Hamming(final int Y[][], final int Ypred[][], final int j) {
		// works with missing
		// extract the j-th label column of truth and prediction
		int y_j[] = MatrixUtils.getCol(Y, j);
		int ypred_j[] = MatrixUtils.getCol(Ypred, j);
		// NOTE(review): L_Hamming aligns its arguments itself, so this pre-alignment
		// looks redundant (though harmless) -- confirm before removing.
		int[][] aligned = align(y_j, ypred_j);
		int[] y_jAligned = aligned[0];
		int[] ypred_jAligned = aligned[1];
		return 1. - L_Hamming(y_jAligned, ypred_jAligned);
	}
/** Harmonic Accuracy. Multi-label only. */
public static double P_Harmonic(final int y[], final int ypred[]) {
// works with missing
int[][] aligned = align(y, ypred);
int[] yAligned = aligned[0];
int[] ypredAligned = aligned[1];
int L = yAligned.length;
double acc[] = new double[2];
double N[] = new double[2];
for (int j = 0; j < L; j++) {
N[yAligned[j]]++;
if (yAligned[j] == ypredAligned[j]) {
acc[yAligned[j]]++;
}
}
for (int v = 0; v < 2; v++) {
acc[v] = acc[v] / N[v];
}
return 2. / ((1. / acc[0]) + (1. / acc[1]));
}
	/** Harmonic Accuracy -- for the j-th label. Multi-label only. */
	public static double P_Harmonic(final int Y[][], final int Ypred[][], final int j) {
		// works with missing
		// extract the j-th label column of truth and prediction
		int y_j[] = MatrixUtils.getCol(Y, j);
		int ypred_j[] = MatrixUtils.getCol(Ypred, j);
		return P_Harmonic(y_j, ypred_j);
	}
/** Harmonic Accuracy -- average over all labels. Multi-label only. */
public static double P_Harmonic(final int Y[][], final int Ypred[][]) {
// works with missing
int allMissings = 0;
int N = Y.length;
double loss = 0.0;
for (int i = 0; i < N; i++) {
if (allMissing(Y[i])) {
allMissings++;
continue;
}
double curLoss = P_Harmonic(Y[i], Ypred[i]);
if (Double.isNaN(curLoss)) {
allMissings++;
continue;
}
loss += curLoss;
}
return loss / (N - allMissings);
}
/** Jaccard Index -- often simply called multi-label 'accuracy'. Multi-label only. */
public static double P_Accuracy(final int y[], final int ypred[]) {
// works with missing
int[][] aligned = align(y, ypred);
int[] yAligned = aligned[0];
int[] ypredAligned = aligned[1];
int L = yAligned.length;
int set_union = 0;
int set_inter = 0;
for (int j = 0; j < L; j++) {
if (yAligned[j] == 1 || ypredAligned[j] == 1) {
set_union++;
}
if (yAligned[j] == 1 && ypredAligned[j] == 1) {
set_inter++;
}
}
// = intersection / union; (or, if both sets are empty, then = 1.)
return (set_union > 0) ? (double) set_inter / (double) set_union : 1.0;
}
/** Jaccard Index -- often simply called multi-label 'accuracy'. Multi-label only. */
public static double P_Accuracy(final int Y[][], final int Ypred[][]) {
// works with missing
int allMissings = 0;
int N = Y.length;
double accuracy = 0.0;
for (int i = 0; i < Y.length; i++) {
if (allMissing(Y[i])) {
allMissings++;
continue;
}
accuracy += P_Accuracy(Y[i], Ypred[i]);
}
return accuracy / (N - allMissings);
}
	/** Jaccard Index -- often simply called multi-label 'accuracy'. Multi-label only. */
	public static double P_JaccardIndex(final int Y[][], final int Ypred[][]) {
		// works with missing
		// alias for P_Accuracy
		return P_Accuracy(Y, Ypred);
	}
	/** Jaccard Distance -- the loss version of Jaccard Index */
	public static double L_JaccardDist(final int Y[][], final int Ypred[][]) {
		// works with missing
		// complement of the Jaccard Index
		return 1. - P_Accuracy(Y, Ypred);
	}
/**
* L_LogLoss - the log loss between real-valued confidence rpred and true prediction y.
* @param y label
* @param rpred prediction (confidence)
* @param C limit (maximum loss of log(C))
* @return Log loss
*/
public static double L_LogLoss(final double y, final double rpred, final double C) {
if (y == -1) {
return 0.0;
}
// base 2 ?
double ans = Math.min(Utils.eq(y, rpred) ? 0.0 : -((y * Math.log(rpred)) + ((1.0 - y) * Math.log(1.0 - rpred))), C);
return (Double.isNaN(ans) ? 0.0 : ans);
}
/**
* L_LogLoss - the log loss between real-valued confidences Rpred and true predictions
* Y with a maximum penalty based on the number of labels L [Important Note: Earlier
* versions of Meka only normalised by N, and not N*L as here].
*/
public static double L_LogLossL(final int Y[][], final double Rpred[][]) {
int N = Y.length;
int L = Y[0].length;
int missing = 0;
for (int i = 0; i < Y.length; i++) {
if (allMissing(Y[i])) {
N--;
}
for (int j = 0; j < Y[i].length; j++) {
if (Y[i][j] == -1) {
missing++;
}
}
}
if (N == 0) {
return Double.NaN;
}
return L_LogLoss(Y, Rpred, Math.log(L)) / (((double) N * (double) L) - missing);
}
/**
* L_LogLoss - the log loss between real-valued confidences Rpred and true predictions
* Y with a maximum penalty based on the number of examples D [Important Note: Earlier
* versions of Meka only normalised by N, and not N*L as here].
*/
public static double L_LogLossD(final int Y[][], final double Rpred[][]) {
int N = Y.length;
int L = Y[0].length;
int missing = 0;
for (int i = 0; i < Y.length; i++) {
if (allMissing(Y[i])) {
N--;
}
for (int j = 0; j < Y[i].length; j++) {
if (Y[i][j] == -1) {
missing++;
}
}
}
if (N == 0) {
return Double.NaN;
}
return L_LogLoss(Y, Rpred, Math.log(N)) / (((double) N * (double) L) - missing);
}
/**
* L_LogLoss - the log loss between real-valued confidences Rpred and true predictions Y with a maximum penalty C [Important Note: Earlier versions of Meka only normalised by N, and not N*L as here].
*/
public static double L_LogLoss(final int Y[][], final double Rpred[][], final double C) {
double loss = 0.0;
for (int i = 0; i < Y.length; i++) {
if (allMissing(Y[i])) {
continue;
}
for (int j = 0; j < Y[i].length; j++) {
if (Y[i][j] == -1) {
continue;
}
loss += L_LogLoss(Y[i][j], Rpred[i][j], C);
}
}
return loss;
}
/**
* P_TruePositives - 1 and supposed to be 1 (the intersection, i.e., logical AND).
*/
public static double P_TruePositives(final int y[], final int ypred[]) {
// works with missing
// return Utils.sum(A.AND(y,ypred));
int s = 0;
for (int j = 0; j < y.length; j++) {
if (ypred[j] == 1 && y[j] == 1) {
s++;
}
}
return s;
}
/**
* P_FalsePositives - 1 but supposed to be 0 (the length of y \ ypred).
*/
public static double P_FalsePositives(final int y[], final int ypred[]) {
// works with missing
int s = 0;
for (int j = 0; j < y.length; j++) {
if (ypred[j] == 1 && y[j] == 0) {
s++;
}
}
return s;
}
/**
 * P_TrueNegatives - the number of labels that are 0 in both the true and the
 * predicted vector. Missing entries (-1) never match either test.
 */
public static double P_TrueNegatives(final int y[], final int ypred[]) {
    int count = 0;
    for (int j = 0; j < y.length; j++) {
        if (y[j] == 0 && ypred[j] == 0) {
            count++;
        }
    }
    return count;
}
/**
 * P_FalseNegatives - the number of labels predicted 0 but supposed to be 1
 * (the length of ypred \ y). Missing entries (-1) never match either test.
 */
public static double P_FalseNegatives(final int y[], final int ypred[]) {
    int count = 0;
    for (int j = 0; j < y.length; j++) {
        if (y[j] == 1 && ypred[j] == 0) {
            count++;
        }
    }
    return count;
}
/**
 * P_Precision - (retrieved AND relevant) / retrieved.
 * Returns NaN when every true label is missing, and 0 when nothing at all was
 * retrieved (avoids a 0/0 division).
 */
public static double P_Precision(final int y[], final int ypred[]) {
    if (allMissing(y)) {
        return Double.NaN;
    }
    final double tp = P_TruePositives(y, ypred);
    final double fp = P_FalsePositives(y, ypred);
    return (tp + fp == 0.0) ? 0.0 : tp / (tp + fp);
}
/**
 * P_Recall - (retrieved AND relevant) / relevant.
 * Returns NaN when every true label is missing, and 0 when there is nothing
 * relevant and nothing was found (avoids a 0/0 division).
 */
public static double P_Recall(final int y[], final int ypred[]) {
    if (allMissing(y)) {
        return Double.NaN;
    }
    final double tp = P_TruePositives(y, ypred);
    final double fn = P_FalseNegatives(y, ypred);
    return (tp + fn == 0.0) ? 0.0 : tp / (tp + fn);
}
/**
 * F1 - the F1 measure (harmonic mean of precision and recall) for two sets.
 * Propagates NaN when either component is undefined (all labels missing);
 * returns 0 when both precision and recall are 0.
 */
public static double F1(final int s1[], final int s2[]) {
    final double prec = P_Precision(s1, s2);
    final double rec = P_Recall(s1, s2);
    if (Double.isNaN(prec) || Double.isNaN(rec)) {
        return Double.NaN;
    }
    return (prec == 0.0 && rec == 0.0) ? 0.0 : 2. * prec * rec / (prec + rec);
}
/*
public static double P_Recall(int Y[][], int YPred[][]) {
return P_Recall(flatten(Y),flatten(YPRed));
}
*/
/**
 * P_PrecisionMacro - precision macro-averaged over labels: per-label precision
 * ((retrieved AND relevant) / retrieved), averaged over every label that has
 * at least one observed value. Returns NaN when all labels are fully missing.
 */
public static double P_PrecisionMacro(final int Y[][], final int Ypred[][]) {
    final int numLabels = Y[0].length;
    double sum = 0.0;
    int usable = 0;     // labels with at least one observed value
    for (int j = 0; j < numLabels; j++) {
        final int[] truth = MatrixUtils.getCol(Y, j);
        if (allMissing(truth)) {
            continue;
        }
        usable++;
        final int[][] aligned = align(truth, MatrixUtils.getCol(Ypred, j));
        final double prec = P_Precision(aligned[0], aligned[1]);
        // NaN per-label precision is skipped in the sum (but still counted above,
        // matching the historical behaviour)
        if (!Double.isNaN(prec)) {
            sum += prec;
        }
    }
    return (usable == 0) ? Double.NaN : sum / usable;
}
/**
 * P_RecallMacro - recall macro-averaged over labels: per-label recall
 * ((retrieved AND relevant) / relevant), averaged over every label that has
 * at least one observed value. Returns NaN when all labels are fully missing.
 */
public static double P_RecallMacro(final int Y[][], final int Ypred[][]) {
    final int numLabels = Y[0].length;
    double sum = 0.0;
    int usable = 0;     // labels with at least one observed value
    for (int j = 0; j < numLabels; j++) {
        final int[] truth = MatrixUtils.getCol(Y, j);
        if (allMissing(truth)) {
            continue;
        }
        usable++;
        final int[][] aligned = align(truth, MatrixUtils.getCol(Ypred, j));
        final double rec = P_Recall(aligned[0], aligned[1]);
        // NaN per-label recall is skipped in the sum (but still counted above,
        // matching the historical behaviour)
        if (!Double.isNaN(rec)) {
            sum += rec;
        }
    }
    return (usable == 0) ? Double.NaN : sum / usable;
}
/**
 * P_PrecisionMicro - micro-averaged precision: all labels are flattened into a
 * single long vector before computing (retrieved AND relevant) / retrieved.
 */
public static double P_PrecisionMicro(final int Y[][], final int Ypred[][]) {
    final int[] truth = MatrixUtils.flatten(Y);
    final int[] pred = MatrixUtils.flatten(Ypred);
    return P_Precision(truth, pred);
}
/**
 * P_RecallMicro - micro-averaged recall: all labels are flattened into a
 * single long vector before computing (retrieved AND relevant) / relevant.
 */
public static double P_RecallMicro(final int Y[][], final int Ypred[][]) {
    final int[] truth = MatrixUtils.flatten(Y);
    final int[] pred = MatrixUtils.flatten(Ypred);
    return P_Recall(truth, pred);
}
/**
 * P_Precision - (retrieved AND relevant) / retrieved, computed for the single
 * label column j.
 */
public static double P_Precision(final int Y[][], final int Ypred[][], final int j) {
    final int[] truth = MatrixUtils.getCol(Y, j);
    final int[] pred = MatrixUtils.getCol(Ypred, j);
    return P_Precision(truth, pred);
}
/**
 * P_Recall - (retrieved AND relevant) / relevant, computed for the single
 * label column j.
 */
public static double P_Recall(final int Y[][], final int Ypred[][], final int j) {
    final int[] truth = MatrixUtils.getCol(Y, j);
    final int[] pred = MatrixUtils.getCol(Ypred, j);
    return P_Recall(truth, pred);
}
/**
 * P_FmicroAvg - Micro Averaged F-measure: F1 computed as if all labels in the
 * dataset formed one single long vector.
 */
public static double P_FmicroAvg(final int Y[][], final int Ypred[][]) {
    final int[] truth = MatrixUtils.flatten(Y);
    final int[] pred = MatrixUtils.flatten(Ypred);
    return F1(truth, pred);
}
/**
 * F-Measure Macro Averaged by L - the 'standard' macro average: F1 is computed
 * per label and averaged over the labels that have at least one observed
 * value. A label with no true positives contributes an F1 of 0 (avoids 0/0).
 * Returns NaN when every label is fully missing.
 */
public static double P_FmacroAvgL(final int Y[][], final int Ypred[][]) {
    final int numLabels = Y[0].length;
    final double[] f1PerLabel = new double[numLabels];
    int usable = 0;     // labels with at least one observed value
    for (int j = 0; j < numLabels; j++) {
        final int[] truth = MatrixUtils.getCol(Y, j);
        if (allMissing(truth)) {
            continue;
        }
        usable++;
        final int[] pred = MatrixUtils.getCol(Ypred, j);
        final double tp = P_TruePositives(truth, pred);
        if (tp > 0) {
            final double prec = tp / (tp + P_FalsePositives(truth, pred));
            final double rec = tp / (tp + P_FalseNegatives(truth, pred));
            f1PerLabel[j] = 2 * ((prec * rec) / (prec + rec));
        }
        // tp == 0 leaves f1PerLabel[j] at its default of 0.0
    }
    return (usable == 0) ? Double.NaN : A.sum(f1PerLabel) / usable;
}
/**
 * F-Measure Averaged by D - F1 macro-averaged over examples (rows); the
 * Jaccard index is also averaged this way. Rows in which every label is
 * missing are skipped; returns NaN when all rows are skipped.
 */
public static double P_FmacroAvgD(final int Y[][], final int Ypred[][]) {
    double sum = 0.0;
    int usable = 0;     // rows with at least one observed label
    for (int i = 0; i < Y.length; i++) {
        if (!allMissing(Y[i])) {
            sum += F1(Y[i], Ypred[i]);
            usable++;
        }
    }
    return (usable == 0) ? Double.NaN : sum / usable;
}
/**
 * L_OneError - the fraction of examples whose top-ranked (highest-confidence)
 * label is not actually relevant. Rows in which every label is missing are
 * skipped; returns NaN when all rows are skipped.
 */
public static double L_OneError(final int Y[][], final double Rpred[][]) {
    int errors = 0;
    int usable = 0;     // rows with at least one observed label
    for (int i = 0; i < Y.length; i++) {
        if (allMissing(Y[i])) {
            continue;
        }
        usable++;
        // the single top-ranked prediction should be one of the true labels
        if (Y[i][Utils.maxIndex(Rpred[i])] == 0) {
            errors++;
        }
    }
    return (usable == 0) ? Double.NaN : (double) errors / (double) usable;
}
/**
 * P_AveragePrecision - average precision, averaged over all examples.
 * Rows in which every label is missing are skipped.
 *
 * @param Y     the true 0/1 label matrix (-1 for missing)
 * @param Rpred the predicted confidences
 * @return the mean average precision; NaN if every row is all-missing
 */
public static double P_AveragePrecision(final int Y[][], final double Rpred[][]) {
    // works with missing
    int N = Y.length;
    double loss = 0.0;
    for (int i = 0; i < Y.length; i++) {
        if (allMissing(Y[i])) {
            N--;
            continue;
        }
        loss += P_AveragePrecision(Y[i], Rpred[i]);
    }
    // Explicit guard for consistency with the other aggregate metrics in this
    // class (previously this relied on 0.0/0 implicitly evaluating to NaN).
    if (N == 0) {
        return Double.NaN;
    }
    return loss / N;
}
/**
 * Converts the confidences in the prediction array into a ranking array and
 * delegates to {@link #P_AveragePrecision(int[], int[])}.
 *
 * @param y The real label values of an instance.
 * @param rpred The predicted confidences for the labels.
 * @return the calculated average precision for an instance.
 */
public static double P_AveragePrecision(final int y[], final double rpred[]) {
    final double[][] aligned = align(y, rpred);
    final int[] ranking = MLUtils.predictionsToRanking(aligned[1]);
    return P_AveragePrecision(toIntArray(aligned[0]), ranking);
}
/**
 * Average Precision - for each relevant label, the fraction of relevant
 * labels among all labels ranked at or before it, averaged over the relevant
 * labels. Returns 1.0 when there are no relevant labels at all.
 * @param y 0/1 labels [0, 0, 1 ] (true labels)
 * @param r ranking position [1, 2, 0 ]
 * @return Average Precision
 */
public static double P_AveragePrecision(final int y[], final int r[]) {
    // works with missing
    final List<Integer> relevant = new ArrayList<Integer>();
    for (int j = 0; j < y.length; j++) {
        if (y[j] == 1) {
            relevant.add(j);
        }
    }
    if (relevant.isEmpty()) {
        return 1.0;
    }
    double total = 0.0;
    for (final int j : relevant) {
        // count the relevant labels ranked at or before position r[j] ...
        int before = 0;
        for (final int k : relevant) {
            if (r[k] <= r[j]) {
                before++;
            }
        }
        // ... divided by the (1-based) position of 'j'
        total += before / (1.0 + r[j]);
    }
    return total / relevant.size();
}
//////////////////////////////////////////////////////////////////////////
/**
 * L_RankLoss - ranking loss averaged over all examples.
 * Rows in which every label is missing are skipped.
 *
 * @param Y     the true 0/1 label matrix (-1 for missing)
 * @param Rpred the predicted confidences
 * @return the mean ranking loss; NaN if every row is all-missing
 */
public static double L_RankLoss(final int Y[][], final double Rpred[][]) {
    // works with missing
    int N = Y.length;
    double loss = 0.0;
    for (int i = 0; i < Y.length; i++) {
        if (allMissing(Y[i])) {
            N--;
            continue;
        }
        loss += L_RankLoss(Y[i], Rpred[i]);
    }
    // Explicit guard for consistency with the other aggregate metrics in this
    // class (previously this relied on 0.0/0 implicitly evaluating to NaN).
    if (N == 0) {
        return Double.NaN;
    }
    return loss / N;
}
/**
 * L_RankLoss - converts the confidences into a ranking (label indices sorted
 * by ascending confidence, via Utils.sort) and delegates to
 * {@link #L_RankLoss(int[], int[])}.
 */
public static double L_RankLoss(int y[], double rpred[]) {
    // works with missing
    final double[][] aligned = align(y, rpred);
    final int[] truth = toIntArray(aligned[0]);
    final int[] ranking = Utils.sort(aligned[1]);
    return L_RankLoss(truth, ranking);
}
/**
 * Rank Loss - the average fraction of (relevant, irrelevant) label pairs that
 * are not correctly ordered, i.e., where the relevant label appears earlier in
 * the ranking than the irrelevant one.
 * Thanks to Noureddine Yacine NAIR BENREKIA for providing bug fix for this.
 * @param y 0/1 labels [0, 0, 1 ]
 * @param r ranking position [1, 2, 0 ]
 * @return Ranking Loss
 */
public static double L_RankLoss(final int y[], final int r[]) {
    final ArrayList<Integer> relevant = new ArrayList<Integer>();
    final ArrayList<Integer> irrelevant = new ArrayList<Integer>();
    for (int j = 0; j < y.length; j++) {
        (y[j] == 1 ? relevant : irrelevant).add(j);
    }
    // loss is defined as 0 when one of the two sets is empty
    if (relevant.isEmpty() || irrelevant.isEmpty()) {
        return 0.0;
    }
    int misordered = 0;
    for (final int rel : relevant) {
        for (final int irr : irrelevant) {
            if (position(rel, r) < position(irr, r)) {
                misordered++;
            }
        }
    }
    return (double) misordered / (double) (relevant.size() * irrelevant.size());
}
/**
 * Returns the index within the ranking array r at which label 'index' occurs.
 * Assumes the label is present in r (otherwise this would run off the end).
 */
private static int position(final int index, final int r[]) {
    int pos = 0;
    while (r[pos] != index) {
        pos++;
    }
    return pos;
}
/**
 * Helper function, returns macro AUROC (roc = true) or macro RPC (roc = false).
 * Labels whose true column is entirely missing are excluded from the average.
 *
 * @param Y   the true 0/1 label matrix (-1 for missing)
 * @param P   the predicted confidences
 * @param roc true for the ROC area, false for the PRC area
 * @return the macro-averaged area; NaN if every label is fully missing
 */
private static double getMacro(final int Y[][], final double P[][], final boolean roc) {
    // works with missing
    final int numLabels = Y[0].length;
    double AUC[] = new double[numLabels];
    int L = numLabels;      // number of labels actually evaluated
    for (int j = 0; j < numLabels; j++) {
        if (allMissing(MatrixUtils.getCol(Y, j))) {
            L--;
            continue;
        }
        ThresholdCurve curve = new ThresholdCurve();
        double[][] aligned = align(MatrixUtils.getCol(Y, j), MatrixUtils.getCol(P, j));
        Instances result = curve.getCurve(MLUtils.toWekaPredictions(toIntArray(aligned[0]), aligned[1]));
        AUC[j] = roc ? ThresholdCurve.getROCArea(result) : ThresholdCurve.getPRCArea(result);
    }
    if (L == 0) {
        return Double.NaN;
    }
    // BUGFIX: Utils.mean(AUC) divided by the full array length, so skipped
    // (all-missing) labels wrongly dragged the average down with their 0.0
    // entries. Average only over the labels actually evaluated.
    return Utils.sum(AUC) / L;
}
/**
 * Calculate macro-averaged AUPRC: the Area Under the Precision-Recall curve,
 * computed per label and averaged over the labels (see getMacro).
 */
public static double P_macroAUPRC(final int Y[][], final double P[][]) {
    return getMacro(Y, P, false);
}
/**
 * Calculate macro-averaged AUROC: the Area Under the ROC curve, computed per
 * label and averaged over the labels (see getMacro).
 */
public static double P_macroAUROC(final int Y[][], final double P[][]) {
    return getMacro(Y, P, true);
}
/**
 * Get Data for Plotting PR and ROC curves, micro-averaged: all labels are
 * flattened into one long prediction vector before building the curve.
 */
public static Instances curveDataMicroAveraged(final int Y[][], final double P[][]) {
    // works with missing
    final double[][] aligned = align(MatrixUtils.flatten(Y), MatrixUtils.flatten(P));
    return new ThresholdCurve().getCurve(MLUtils.toWekaPredictions(toIntArray(aligned[0]), aligned[1]));
}
/**
 * Get Data for Plotting PR and ROC curves, macro-averaged: for each threshold
 * t in [0,1), the per-label curve points closest to t are averaged. Labels
 * whose curve data is null (e.g., the true column was entirely missing) are
 * excluded from the average.
 * Note: 'Threshold' contains the probability threshold that gives rise to the
 * previous performance values.
 *
 * @throws IllegalArgumentException if no label produced any curve data
 */
public static Instances curveDataMacroAveraged(final int Y[][], final double P[][]) {
    Instances curveData[] = curveData(Y, P);
    int L = curveData.length;       // number of labels with usable curve data
    int noNullIndex = -1;
    for (int i = 0; i < curveData.length; i++) {
        if (curveData[i] == null) {
            L--;
        } else if (noNullIndex == -1) {
            // first label that actually has curve data; it supplies the
            // header and a template instance below
            noNullIndex = i;
        }
    }
    // BUGFIX: previously fell through to curveData[-1] and threw an obscure
    // ArrayIndexOutOfBoundsException when every label's curve data was null.
    if (noNullIndex == -1) {
        throw new IllegalArgumentException("Cannot macro-average: no label produced any curve data.");
    }
    Instances avgCurve = new Instances(curveData[noNullIndex], 0);
    int D = avgCurve.numAttributes();
    for (double t = 0.0; t < 1.; t += 0.01) {
        Instance x = (Instance) curveData[noNullIndex].instance(0).copy();
        boolean firstloop = true;
        // BUGFIX: iterate over ALL labels; the old bound 'L' skipped trailing
        // labels whenever an earlier label had null curve data. 'L' is only
        // the divisor (the number of labels contributing to the average).
        for (int j = 0; j < curveData.length; j++) {
            // if there are only missing values in a column, curveData[j] is null
            if (curveData[j] == null) {
                continue;
            }
            int i = ThresholdCurve.getThresholdInstance(curveData[j], t);
            if (firstloop) {
                // reset: start the average from this label's (weighted) point
                for (int a = 0; a < D; a++) {
                    x.setValue(a, curveData[j].instance(i).value(a) * 1. / L);
                }
                firstloop = false;
            } else {
                // add this label's (weighted) point
                for (int a = 0; a < D; a++) {
                    double v = x.value(a);
                    x.setValue(a, v + curveData[j].instance(i).value(a) * 1. / L);
                }
            }
        }
        avgCurve.add(x);
    }
    return avgCurve;
}
/**
 * Get Data for Plotting PR and ROC curves for a single label vector.
 */
public static Instances curveData(int y[], double p[]) {
    // works with missing
    final double[][] aligned = align(y, p);
    return new ThresholdCurve().getCurve(MLUtils.toWekaPredictions(toIntArray(aligned[0]), aligned[1]));
}
/**
 * Get Data for Plotting PR and ROC curves, one Instances object per label.
 */
public static Instances[] curveData(final int Y[][], final double P[][]) {
    // works with missing
    final int numLabels = Y[0].length;
    final Instances[] perLabel = new Instances[numLabels];
    for (int j = 0; j < numLabels; j++) {
        perLabel[j] = curveData(MatrixUtils.getCol(Y, j), MatrixUtils.getCol(P, j));
    }
    return perLabel;
}
/**
 * Levenshtein Distance averaged over all examples; rows in which every label
 * is missing are skipped (NaN when all rows are skipped). Multi-target
 * compatible.
 */
public static double L_LevenshteinDistance(final int Y[][], final int P[][]) {
    double total = 0.;
    int usable = 0;     // rows with at least one observed label
    for (int i = 0; i < Y.length; i++) {
        if (!allMissing(Y[i])) {
            total += L_LevenshteinDistance(Y[i], P[i]);
            usable++;
        }
    }
    return (usable == 0) ? Double.NaN : total / usable;
}
/**
 * Levenshtein Distance divided by the number of observed (non-missing) labels.
 * Multi-target compatible.
 *
 * @return the normalised distance; NaN if every label is missing
 */
public static double L_LevenshteinDistance(final int y[], final int p[]) {
    int L = y.length - numberOfMissingLabels(y);
    // Guard against division by zero when all labels are missing; the matrix
    // version skips such rows before calling, but direct callers may not.
    if (L == 0) {
        return Double.NaN;
    }
    int[][] aligned = align(y, p);
    int[] alignedy = aligned[0];
    int[] alignedp = aligned[1];
    return (getLevenshteinDistance(alignedy, alignedp) / (double) L);
}
/*
 * Levenshtein Distance.
 * Given true labels y, and pRedicted labels r.
 * based on http://www.merriampark.com/ldjava.htm
 *
 * Classic two-row dynamic-programming edit distance: only the previous ('p')
 * and current ('d') cost rows are kept, and the shorter sequence is used as
 * the row dimension to minimise memory.
 */
private static int getLevenshteinDistance(int y[], int r[]) {
    int n = y.length;
    int m = r.length;
    // the distance to/from an empty sequence is the other sequence's length
    if (n == 0) {
        return m;
    } else if (m == 0) {
        return n;
    }
    // swap so that 'y' is the shorter array, keeping the DP rows small
    if (n > m) {
        final int[] tmp = y;
        y = r;
        r = tmp;
        n = m;
        m = r.length;
    }
    int p[] = new int[n + 1]; // previous cost row
    int d[] = new int[n + 1]; // current cost row
    int _d[]; // scratch used for swapping the two rows
    int i;
    int j;
    int r_j; // j-th element of r, hoisted out of the inner loop
    int cost;
    // base row: transforming from the empty prefix costs i insertions
    for (i = 0; i <= n; i++) {
        p[i] = i;
    }
    for (j = 1; j <= m; j++) {
        r_j = r[j - 1];
        d[0] = j;
        for (i = 1; i <= n; i++) {
            cost = y[i - 1] == r_j ? 0 : 1;
            // minimum of: delete (d[i-1]+1), insert (p[i]+1), substitute/match (p[i-1]+cost)
            d[i] = Math.min(Math.min(d[i - 1] + 1, p[i] + 1), p[i - 1] + cost);
        }
        // roll the rows: current becomes previous for the next iteration
        _d = p;
        p = d;
        d = _d;
    }
    // after the final swap, 'p' holds the last computed row
    return p[n];
}
/**
 * Log Likelihood of the 0/1 label vector y under independent per-label
 * Bernoulli probabilities p (multi-label only).
 */
public double P_LogLikelihood(final int y[], final double p[]) {
    double logLik = 0.0;
    for (int j = 0; j < y.length; j++) {
        // independence assumption: per-label Bernoulli likelihood
        final double bernoulli = Math.pow(p[j], y[j]) * Math.pow(1. - p[j], 1 - y[j]);
        logLik += Math.log(bernoulli);
    }
    return logLik;
}
/**
 * MSE - per-label squared errors, combined via A.product (behaviour kept
 * as-is; note the product, not the mean, despite the name).
 */
public double L_MSE(final int y[], final double p[]) {
    final int numLabels = y.length;
    final double[] sqErr = new double[numLabels];
    for (int j = 0; j < numLabels; j++) {
        // independence assumption
        sqErr[j] = Math.pow(p[j] - y[j], 2);
    }
    return A.product(sqErr);
}
/**
 * MAE - per-label absolute errors, combined via A.product (behaviour kept
 * as-is; note the product, not the mean, despite the name).
 */
public double L_MAE(final int y[], final double p[]) {
    final int numLabels = y.length;
    final double[] absErr = new double[numLabels];
    for (int j = 0; j < numLabels; j++) {
        // independence assumption
        absErr[j] = Math.abs(p[j] - y[j]);
    }
    return A.product(absErr);
}
/** Product
public double P_Product(int Y[][], double P[][]) {
int N = Y.length;
double s = 1.;
for(int i = 0; i < N; i++) {
s *= L_MAE(Y[i],P[i]);
}
return s;
}*/
/** Log Sum
public double P_LogSum(int Y[][], double P[][]) {
int N = Y.length;
double s = 0.;
for(int i = 0; i < N; i++) {
s += Math.log(A.product(L_MAE(Y[i],P[i])));
}
return s;
}*/
/** Avg Sum
public double P_Avg_Sum(int Y[][], double P[][]) {
int N = Y.length;
double s = 0.;
for(int i = 0; i < N; i++) {
s += L_MAE(Y[i],P[i]);
}
return s / N;
}*/
/**
 * Do some tests.
 *
 * Runs a few of the metrics on a small hand-made fixture (4 examples,
 * 15 labels) and prints the results; the expected F-macro-by-D value is
 * given in the output string.
 */
public static void main(final String args[]) {
    // true labels: 4 examples over 15 labels (set notation in the comments below)
    int Y[][] = new int[][] {
    // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4
    { 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 },
    // {1,2},
    // {3,4,5},
    // {6},
    // {7}
    };
    // predicted confidences for the same 4 examples
    double P[][] = new double[][] {
    // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4
    { 0, 0.7, 0.8, 0.9, 0, 0, 0, 0, 0, 0.7, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0.6, 0.7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0.8, 0, 0, 0, 0, 0, 0.8, 0, 0 }, { 0, 0.7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, };
    // thresholded 0/1 predictions corresponding to P
    int Ypred[][] = new int[][] {
    // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4
    { 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0 }, { 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0 }, { 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    // {1,2,3,9},
    // {3,4},
    // {6,12},
    // {1}
    };
    System.out.println("0.533333333... = " + P_FmacroAvgD(Y, Ypred));
    System.out.println("LD = " + L_LevenshteinDistance(Y, Ypred));
    System.out.println("MA = \n" + curveDataMacroAveraged(Y, P));
    // System.out.println("\nMi = \n"+curveDataMicroAveraged(Y,P));
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/MultiLabelDrawable.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import java.util.Map;
/**
 * Interface for classes that generate graphs per label.
 *
 * @author fracpete
 */
public interface MultiLabelDrawable {

    /** graph-type constant: the object cannot be drawn. */
    int NOT_DRAWABLE = 0;

    /** graph-type constant: a tree (dotty format). */
    int TREE = 1;

    /** graph-type constant: a Bayesian network (XMLBIF ver. 0.3 format). */
    int BayesNet = 2;

    /** graph-type constant: a Newick tree. */
    int Newick = 3;

    /**
     * Returns the type of graph representing
     * the object.
     *
     * @return the type of graph representing the object (label index as key)
     */
    Map<Integer, Integer> graphType();

    /**
     * Returns a string that describes a graph representing
     * the object. The string should be in XMLBIF ver.
     * 0.3 format if the graph is a BayesNet, otherwise
     * it should be in dotty format.
     *
     * @return the graph described by a string (label index as key)
     * @throws Exception if the graph can't be computed
     */
    Map<Integer, String> graph() throws Exception;
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/OS.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* OS.java
* Copyright (C) 2011-2015 University of Waikato, Hamilton, New Zealand
*/
package meka.core;
/**
 * Helper class for operating system related stuff.
 *
 * All checks are lazily evaluated from system properties and cached in
 * static fields; the accessors are synchronized to make the lazy
 * initialization thread-safe.
 *
 * @author fracpete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public class OS {

    /** whether the OS is Windows (null until first queried). */
    protected static Boolean m_IsWindows;

    /** whether the OS is Mac (null until first queried). */
    protected static Boolean m_IsMac;

    /** whether the OS is Linux (null until first queried). */
    protected static Boolean m_IsLinux;

    /** whether the OS is Android (null until first queried). */
    protected static Boolean m_IsAndroid;

    /**
     * Checks whether the operating system is Windows.
     *
     * @return true if the OS is Windows flavor
     */
    public static synchronized boolean isWindows() {
        if (m_IsWindows == null)
            m_IsWindows = System.getProperty("os.name").toLowerCase().contains("windows");
        return m_IsWindows;
    }

    /**
     * Checks whether the operating system is Mac.
     *
     * @return true if the OS is Mac flavor
     */
    public static synchronized boolean isMac() {
        if (m_IsMac == null)
            m_IsMac = System.getProperty("os.name").toLowerCase().startsWith("mac os");
        return m_IsMac;
    }

    /**
     * Checks whether the operating system is Linux (but not Android).
     *
     * @return true if the OS is Linux flavor (but not Android)
     */
    public static synchronized boolean isLinux() {
        // NOTE: removed the unused local variable "String os;" that was
        // declared here but never read.
        if (m_IsLinux == null)
            m_IsLinux = System.getProperty("os.name").toLowerCase().startsWith("linux") && !isAndroid();
        return m_IsLinux;
    }

    /**
     * Checks whether the operating system is Android.
     *
     * @return true if the OS is Android flavor
     */
    public static synchronized boolean isAndroid() {
        if (m_IsAndroid == null) {
            m_IsAndroid = System.getProperty("java.vm.vendor").toLowerCase().contains("android")
                || System.getProperty("java.vendor").toLowerCase().contains("android")
                || System.getProperty("java.vendor.url").toLowerCase().contains("android");
        }
        return m_IsAndroid;
    }

    /**
     * Returns the "bitness", ie 32 or 64 bit of the underlying OS.
     *
     * @return the number of bits
     * @throws IllegalStateException if 'os.arch' cannot be interpreted
     */
    public static synchronized int getBitness() {
        String arch = System.getProperty("os.arch");
        if (arch.endsWith("86"))
            return 32;
        else if (arch.endsWith("64"))
            return 64;
        else
            throw new IllegalStateException("Cannot interpret 'os.arch' for bitness: " + arch);
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/ObjectUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* Utils.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.core;
import weka.core.SerializedObject;
import java.io.Serializable;
import java.util.List;
/**
 * Helper class for object-related operations.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public class ObjectUtils {

    /**
     * Flattens the list into a single, long string. The separator string gets
     * added between the objects, but not after the last one.
     *
     * @param lines the lines to flatten
     * @param sep the separator
     * @return the generated string
     */
    public static String flatten(List<?> lines, String sep) {
        // List<?> instead of the raw List: accepts any list type without
        // unchecked warnings at the call sites (backward compatible).
        return flatten(lines.toArray(new Object[lines.size()]), sep);
    }

    /**
     * Flattens the array into a single, long string. The separator string gets
     * added between the objects, but not after the last one. Uses the "toString()"
     * method of the objects to turn them into a string.
     *
     * @param lines the lines to flatten
     * @param sep the separator
     * @return the generated string
     */
    public static String flatten(Object[] lines, String sep) {
        StringBuilder result = new StringBuilder();
        for (int i = 0; i < lines.length; i++) {
            if (i > 0)
                result.append(sep);
            result.append(lines[i].toString());
        }
        return result.toString();
    }

    /**
     * Creates a deep copy of the given object (must be serializable!). Returns
     * null in case of an error.
     *
     * @param o the object to copy
     * @return the deep copy, or null if serialization failed
     */
    public static Object deepCopy(Object o) {
        Object result;
        SerializedObject so;
        try {
            so = new SerializedObject((Serializable) o);
            result = so.getObject();
        }
        catch (Exception e) {
            // best-effort: report and signal failure via null rather than throwing
            System.err.println("Failed to serialize " + o.getClass().getName() + ":");
            e.printStackTrace();
            result = null;
        }
        return result;
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/OptionUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* OptionUtils.java
* Copyright (C) 2015-2018 University of Waikato, Hamilton, NZ
*/
package meka.core;
import weka.core.InheritanceUtils;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Utils;
import java.io.File;
import java.lang.reflect.Array;
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
/**
* Helper class for option parsing.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class OptionUtils {
/**
 * Parses an int option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static int parse(String[] options, char option, int defValue) throws Exception {
    return parse(options, String.valueOf(option), defValue);
}

/**
 * Parses an int option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static int parse(String[] options, String option, int defValue) throws Exception {
    String value = Utils.getOption(option, options);
    return value.isEmpty() ? defValue : Integer.parseInt(value);
}
/**
 * Parses a long option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static long parse(String[] options, char option, long defValue) throws Exception {
    return parse(options, String.valueOf(option), defValue);
}

/**
 * Parses a long option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static long parse(String[] options, String option, long defValue) throws Exception {
    String value = Utils.getOption(option, options);
    return value.isEmpty() ? defValue : Long.parseLong(value);
}
/**
 * Parses a float option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static float parse(String[] options, char option, float defValue) throws Exception {
    return parse(options, String.valueOf(option), defValue);
}

/**
 * Parses a float option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static float parse(String[] options, String option, float defValue) throws Exception {
    String value = Utils.getOption(option, options);
    return value.isEmpty() ? defValue : Float.parseFloat(value);
}
/**
 * Parses a double option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static double parse(String[] options, char option, double defValue) throws Exception {
    return parse(options, String.valueOf(option), defValue);
}

/**
 * Parses a double option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static double parse(String[] options, String option, double defValue) throws Exception {
    String value = Utils.getOption(option, options);
    return value.isEmpty() ? defValue : Double.parseDouble(value);
}
/**
 * Parses a String option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static String parse(String[] options, char option, String defValue) throws Exception {
    return parse(options, String.valueOf(option), defValue);
}

/**
 * Parses a String option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static String parse(String[] options, String option, String defValue) throws Exception {
    String value = Utils.getOption(option, options);
    return value.isEmpty() ? defValue : value;
}
/**
 * Parses a File option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static File parse(String[] options, char option, File defValue) throws Exception {
    return parse(options, String.valueOf(option), defValue);
}

/**
 * Parses a File option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static File parse(String[] options, String option, File defValue) throws Exception {
    String value = Utils.getOption(option, options);
    return value.isEmpty() ? defValue : new File(value);
}
/**
 * Parses an OptionHandler option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static OptionHandler parse(String[] options, char option, OptionHandler defValue) throws Exception {
    return parse(options, String.valueOf(option), defValue);
}

/**
 * Parses an OptionHandler option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static OptionHandler parse(String[] options, String option, OptionHandler defValue) throws Exception {
    String value = Utils.getOption(option, options);
    return value.isEmpty() ? defValue : OptionUtils.fromCommandLine(OptionHandler.class, value);
}
/**
 * Parses an array option, returning all the occurrences of the option as a string array.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @return all values found for the option (possibly empty)
 * @throws Exception if parsing of value fails
 */
public static String[] parse(String[] options, char option) throws Exception {
	return parse(options, String.valueOf(option));
}

/**
 * Parses an array option, returning all the occurrences of the option as a string array.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @return all values found for the option (possibly empty)
 * @throws Exception if parsing of value fails
 */
public static String[] parse(String[] options, String option) throws Exception {
	List<String> collected = new ArrayList<>();
	// Utils.getOption consumes one occurrence per call, so loop until none remain
	while (Utils.getOptionPos(option, options) > -1)
		collected.add(Utils.getOption(option, options));
	return collected.toArray(new String[0]);
}
/**
 * Parses an array option, returning all the occurrences of the option converted
 * to the specified type.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param cls the class type to use (an OptionHandler or a type with a String constructor)
 * @return the parsed values
 * @throws Exception if parsing of value fails
 */
public static <T> T[] parse(String[] options, char option, Class<T> cls) throws Exception {
	return parse(options, String.valueOf(option), cls);
}
/**
 * Parses a boolean option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static boolean parse(String[] options, char option, boolean defValue) throws Exception {
	return parse(options, "" + option, defValue);
}

/**
 * Parses a boolean option, uses default if option is missing.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param defValue the default value
 * @return the parsed value (or default value if option not present)
 * @throws Exception if parsing of value fails
 */
public static boolean parse(String[] options, String option, boolean defValue) throws Exception {
	String value = Utils.getOption(option, options);
	if (value.isEmpty())
		return defValue;
	else
		return Boolean.parseBoolean(value);
}
/**
 * Parses an array option, returning all the occurrences of the option converted
 * to the specified type.
 *
 * @param options the option array to use
 * @param option the option to look for in the options array (no leading dash)
 * @param cls the class type to use: either an OptionHandler implementation
 *            (instantiated from its commandline) or a class with a constructor
 *            that takes a single String
 * @return the parsed values; elements that failed to instantiate are left null
 * @throws Exception if parsing of value fails
 */
public static <T> T[] parse(String[] options, String option, Class<T> cls) throws Exception {
	// gather all occurrences of the option
	List<String> list = new ArrayList<>();
	while (Utils.getOptionPos(option, options) > -1)
		list.add(Utils.getOption(option, options));
	@SuppressWarnings("unchecked")
	T[] result = (T[]) Array.newInstance(cls, list.size());
	// OptionHandler? instantiate each element from its commandline representation
	if (InheritanceUtils.hasInterface(OptionHandler.class, cls)) {
		for (int i = 0; i < list.size(); i++) {
			try {
				Array.set(result, i, OptionUtils.fromCommandLine(cls, list.get(i)));
			} catch (Exception e) {
				System.err.println("Failed to instantiate class '" + cls.getName() + "' with command-line: " + list.get(i));
			}
		}
	}
	else {
		// BUGFIX: Class.getConstructor never returns null -- it throws
		// NoSuchMethodException instead, so the original "constr == null" check
		// was dead code; translate the exception into the intended error.
		Constructor<T> constr;
		try {
			constr = cls.getConstructor(String.class);
		} catch (NoSuchMethodException e) {
			throw new IllegalArgumentException("Class '" + cls.getName() + "' does not have a constructor that takes a String!", e);
		}
		// convert each string value to the target type
		for (int i = 0; i < list.size(); i++) {
			try {
				Array.set(result, i, constr.newInstance(list.get(i)));
			} catch (Exception e) {
				System.err.println("Failed to instantiate class '" + cls.getName() + "' with string value: " + list.get(i));
			}
		}
	}
	return result;
}
/**
 * Appends the int value to the options as "-option value".
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value
 */
public static void add(List<String> options, char option, int value) {
	add(options, String.valueOf(option), value);
}

/**
 * Appends the int value to the options as "-option value".
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value
 */
public static void add(List<String> options, String option, int value) {
	options.add("-" + option);
	options.add(Integer.toString(value));
}
/**
 * Appends the long value to the options as "-option value".
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value
 */
public static void add(List<String> options, char option, long value) {
	add(options, String.valueOf(option), value);
}

/**
 * Appends the long value to the options as "-option value".
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value
 */
public static void add(List<String> options, String option, long value) {
	options.add("-" + option);
	options.add(Long.toString(value));
}
/**
 * Appends the float value to the options as "-option value".
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value
 */
public static void add(List<String> options, char option, float value) {
	add(options, String.valueOf(option), value);
}

/**
 * Appends the float value to the options as "-option value".
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value
 */
public static void add(List<String> options, String option, float value) {
	options.add("-" + option);
	options.add(Float.toString(value));
}
/**
 * Appends the double value to the options as "-option value".
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value
 */
public static void add(List<String> options, char option, double value) {
	add(options, String.valueOf(option), value);
}

/**
 * Appends the double value to the options as "-option value".
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value
 */
public static void add(List<String> options, String option, double value) {
	options.add("-" + option);
	options.add(Double.toString(value));
}
/**
 * Appends the String value to the options as "-option value".
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value
 */
public static void add(List<String> options, char option, String value) {
	add(options, String.valueOf(option), value);
}

/**
 * Appends the String value to the options as "-option value".
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value (a null value is appended as the string "null")
 */
public static void add(List<String> options, String option, String value) {
	options.add("-" + option);
	options.add(String.valueOf(value));
}
/**
 * Appends the boolean flag (only if true) to the options.
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value
 */
public static void add(List<String> options, char option, boolean value) {
	add(options, String.valueOf(option), value);
}

/**
 * Appends the boolean flag (only if true) to the options.
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value
 */
public static void add(List<String> options, String option, boolean value) {
	if (value)
		options.add("-" + option);
}
/**
 * Appends the File value to the options as "-option value".
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value
 */
public static void add(List<String> options, char option, File value) {
	add(options, String.valueOf(option), value);
}

/**
 * Appends the File value to the options as "-option value".
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value (a null value is appended as the string "null")
 */
public static void add(List<String> options, String option, File value) {
	options.add("-" + option);
	options.add(String.valueOf(value));
}
/**
 * Appends the OptionHandler (as its commandline) to the options.
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value
 */
public static void add(List<String> options, char option, OptionHandler value) {
	add(options, String.valueOf(option), value);
}

/**
 * Appends the OptionHandler (as its commandline) to the options.
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the current value
 */
public static void add(List<String> options, String option, OptionHandler value) {
	options.add("-" + option);
	options.add(String.valueOf(Utils.toCommandLine(value)));
}
/**
 * Appends each element of the given array to the options, one "-option value"
 * pair per element.
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the array whose elements get appended
 */
public static void add(List<String> options, char option, Object value) {
	add(options, String.valueOf(option), value);
}

/**
 * Appends each element of the given array to the options, one "-option value"
 * pair per element; OptionHandler elements are rendered as their commandline.
 *
 * @param options the current list of options to extend
 * @param option the option (without the leading dash)
 * @param value the array whose elements get appended
 * @throws IllegalArgumentException if value is not an array
 */
public static void add(List<String> options, String option, Object value) {
	if (!value.getClass().isArray())
		throw new IllegalArgumentException("Value is not an array!");
	int len = Array.getLength(value);
	for (int i = 0; i < len; i++) {
		Object element = Array.get(value, i);
		if (element instanceof OptionHandler) {
			add(options, option, (OptionHandler) element);
		}
		else {
			options.add("-" + option);
			options.add(String.valueOf(element));
		}
	}
}
/**
 * Appends all the "super" options to the current list.
 *
 * @param options the current options
 * @param superOptions the "super" options to append
 */
public static void add(List<String> options, String[] superOptions) {
	for (String superOption : superOptions)
		options.add(superOption);
}
/**
 * Adds an Option describing a flag (an option without a value) to the list.
 *
 * @param options the options to extend
 * @param text the description
 * @param flag the flag (no dash)
 */
public static void addFlag(Vector options, String text, char flag) {
	addFlag(options, text, String.valueOf(flag));
}

/**
 * Adds an Option describing a flag (an option without a value) to the list.
 *
 * @param options the options to extend
 * @param text the description
 * @param flag the flag (no dash)
 */
public static void addFlag(Vector options, String text, String flag) {
	options.add(new Option("\t" + text, flag, 0, "-" + flag));
}
/**
 * Adds an Option describing a value-taking option (with its default) to the list.
 *
 * @param options the options to extend
 * @param text the description
 * @param defValue the default value to mention in the description
 * @param option the option (no dash)
 */
public static void addOption(Vector options, String text, String defValue, char option) {
	addOption(options, text, defValue, String.valueOf(option));
}

/**
 * Adds an Option describing a value-taking option (with its default) to the list.
 *
 * @param options the options to extend
 * @param text the description
 * @param defValue the default value to mention in the description
 * @param option the option (no dash)
 */
public static void addOption(Vector options, String text, String defValue, String option) {
	options.add(new Option("\t" + text + "\n\t(default: " + defValue + ")", option, 0, "-" + option + " <value>"));
}
/**
 * Appends the option descriptions of the super class to the current ones.
 *
 * @param current the current option descriptions to extend
 * @param superOptions the "super" descriptions
 */
public static void add(Vector current, Enumeration superOptions) {
	while (superOptions.hasMoreElements())
		current.add(superOptions.nextElement());
}
/**
 * Turns the list of options into a string array.
 *
 * @param options the list of options to convert
 * @return the generated array
 */
public static String[] toArray(List<String> options) {
	return options.toArray(new String[0]);
}
/**
 * Exposes the option descriptions as an enumeration.
 *
 * @param options the descriptions
 * @return an enumeration over the descriptions
 */
public static Enumeration toEnumeration(Vector options) {
	return options.elements();
}
/**
 * Turns a commandline string (classname followed by options) into an object.
 *
 * @param cls the class that the commandline is expected to be
 * @param cmdline the commandline to parse
 * @return the instantiated object
 * @throws Exception if parsing or instantiation fails
 */
public static <T> T fromCommandLine(Class<T> cls, String cmdline) throws Exception {
	String[] split = Utils.splitOptions(cmdline);
	String classname = split[0];
	// blank out the classname so the remainder can be passed as options
	split[0] = "";
	return (T) Utils.forName(cls, classname, split);
}
/**
 * Generates the commandline string (classname plus options) for the object.
 *
 * @param obj the object to generate the commandline for
 * @return the commandline
 * @see Utils#toCommandLine(Object)
 */
public static String toCommandLine(Object obj) {
	return Utils.toCommandLine(obj);
}
/**
 * Converts the specified characters into their string equivalents, i.e.,
 * replaces every occurrence of find[i] with replace[i], processing the
 * find/replace pairs in order.
 *
 * @param string the string to process (may be null)
 * @param find the characters to replace
 * @param replace the replacement strings, parallel to {@code find}
 * @return the converted string (null if the input was null)
 * @see #unbackQuoteChars(String, String[], char[])
 */
public static String backQuoteChars(String string, char[] find, String[] replace) {
	if (string == null)
		return string;
	for (int i = 0; i < find.length; i++) {
		// String.replace substitutes every occurrence in one pass, matching the
		// original hand-rolled scan while avoiding its repeated substring copies;
		// the indexOf guard preserves the original's behavior of only touching
		// replace[i] when find[i] actually occurs
		if (string.indexOf(find[i]) != -1)
			string = string.replace(String.valueOf(find[i]), replace[i]);
	}
	return string;
}
/**
 * Converts carriage returns and new lines in a string into \r and \n, and
 * back-quotes backslashes, single quotes, tabs and double quotes.
 *
 * @param string the string to process (may be null)
 * @return the converted string
 * @see #unbackQuoteChars(String)
 */
public static String backQuoteChars(String string) {
	final char[] specials = {'\\', '\'', '\t', '\n', '\r', '"'};
	final String[] escaped = {"\\\\", "\\'", "\\t", "\\n", "\\r", "\\\""};
	return backQuoteChars(string, specials, escaped);
}
/**
 * The inverse operation of backQuoteChars(): converts the specified escape
 * strings back into their character representations. At each step the earliest
 * occurrence of any of the 'find' strings is replaced; on a tie, the sequence
 * listed first in 'find' wins.
 *
 * @param string the string to process (may be null)
 * @param find the strings to find
 * @param replace the character equivalents of the strings, parallel to {@code find}
 * @return the converted string (null if the input was null)
 * @see #backQuoteChars(String, char[], String[])
 */
public static String unbackQuoteChars(String string, String[] find, char[] replace) {
	if (string == null)
		return null;
	StringBuilder result = new StringBuilder();
	String remainder = string;
	while (remainder.length() > 0) {
		// locate the earliest occurrence of any escape sequence
		int bestIdx = -1;
		int bestPos = remainder.length();
		for (int i = 0; i < find.length; i++) {
			int pos = remainder.indexOf(find[i]);
			if (pos > -1 && pos < bestPos) {
				bestIdx = i;
				bestPos = pos;
			}
		}
		if (bestIdx == -1) {
			// nothing left to replace
			result.append(remainder);
			break;
		}
		result.append(remainder, 0, bestPos);
		result.append(replace[bestIdx]);
		remainder = remainder.substring(bestPos + find[bestIdx].length());
	}
	return result.toString();
}
/**
 * The inverse operation of backQuoteChars(): converts back-quoted carriage
 * returns and new lines back to '\r' and '\n', and un-quotes backslashes,
 * single quotes, tabs and double quotes.
 *
 * @param string the string to process (may be null)
 * @return the converted string
 * @see #backQuoteChars(String)
 */
public static String unbackQuoteChars(String string) {
	final String[] escaped = {"\\\\", "\\'", "\\t", "\\n", "\\r", "\\\""};
	final char[] specials = {'\\', '\'', '\t', '\n', '\r', '"'};
	return unbackQuoteChars(string, escaped, specials);
}
/**
 * Creates a shallow copy of the option handler by round-tripping it through
 * its commandline representation (only option state is preserved).
 *
 * @param obj the object to copy
 * @return the copy, or null if the input was null or copying failed
 */
public static OptionHandler shallowCopy(OptionHandler obj) {
	if (obj == null) {
		System.err.println("Cannot create shallow copy of null object!");
		return null;
	}
	try {
		String cmdline = toCommandLine(obj);
		return fromCommandLine(OptionHandler.class, cmdline);
	}
	catch (Exception e) {
		System.err.println("Failed to create shallow copy of " + obj.getClass().getName() + ":");
		e.printStackTrace();
		return null;
	}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/PSUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Utils;
import java.util.*;
/**
* PSUtils.java - Handy Utils for working with Pruned Sets.
* Essentially, we have a <code>P</code> parameter for pruning and an <code>N</code> parameter for reintroduction.
* @author Jesse Read
* @version June 2014
*/
public abstract class PSUtils {
/**
 * Sum Counts - sums all the count values stored in 'map'.
 *
 * @param map labelset-to-count mapping
 * @return the total of all counts
 */
public static int sumCounts(HashMap<LabelSet,Integer> map) {
	int total = 0;
	for (int count : map.values())
		total += count;
	return total;
}
/**
 * Count Subsets - returns the number of labelsets in 'Y' of which 'ysub' is a subset.
 *
 * @param ysub the candidate subset
 * @param Y the collection of labelsets to check against
 * @return the number of matches
 */
public static int countSubsets(LabelSet ysub, Set<LabelSet> Y) {
	int matches = 0;
	for (LabelSet candidate : Y) {
		if (ysub.subsetof(candidate) > 0)
			matches++;
	}
	return matches;
}
/**
 * Get Subsets - collects all elements of 'set' that are subsets of 'y'.
 *
 * @param y the labelset to cover
 * @param set the candidate labelsets
 * @return the subsets of y found in 'set'
 */
public static Set<LabelSet> getSubsets(LabelSet y, Set<LabelSet> set) {
	Set<LabelSet> subsets = new HashSet<LabelSet>();
	for (LabelSet candidate : set) {
		if (LabelSet.subset(candidate.indices, y.indices) > 0)
			subsets.add(candidate);
	}
	return subsets;
}
/**
 * Get Sorted Subsets - collects all elements of 'set' that are subsets of 'y',
 * sorted according to 'cmp'.
 *
 * @param y the labelset to cover
 * @param set the candidate labelsets
 * @param cmp the comparator defining the sort order
 * @return the subsets of y found in 'set', sorted by cmp
 */
public static SortedSet<LabelSet> getSortedSubsets(LabelSet y, Set<LabelSet> set, Comparator cmp) {
	SortedSet<LabelSet> subsets = new TreeSet<LabelSet>(cmp);
	for (LabelSet candidate : set) {
		if (LabelSet.subset(candidate.indices, y.indices) > 0)
			subsets.add(candidate);
	}
	return subsets;
}
/**
 * Get Sorted Subsets - collects all subsets of 'y' among the keys of 'map',
 * sorted according to length and the counts stored in 'map'.
 *
 * @param y the labelset to cover
 * @param map labelset-to-count mapping supplying candidates and sort order
 * @return the sorted subsets
 */
public static SortedSet<LabelSet> getSortedSubsets(LabelSet y, HashMap<LabelSet,Integer> map) {
	LabelSetComparator cmp = new LabelSetComparator(map);
	return getSortedSubsets(y, map.keySet(), cmp);
}
/**
 * Cover - covers 'y' completely (or as best as possible) with sets from 'map'.
 *
 * @param y a LabelSet, e.g., [0,2,7]
 * @param map a map of LabelSets to counts, e.g., {[0,2,7]:39,...}
 * @return the sets covering y (or just y itself, if it has a positive count in the map)
 */
public static LabelSet[] cover(LabelSet y, HashMap<LabelSet,Integer> map) {
	Integer count = map.get(y);
	if (count != null && count >= 1)
		return new LabelSet[]{y};
	// y itself is not (or no longer) in the map: greedily cover it with known subsets
	Comparator cmp = new LabelSetComparator(map);
	SortedSet<LabelSet> candidates = getSortedSubsets(y, map.keySet(), cmp);
	Set<LabelSet> chosen = cover(y, candidates, cmp);
	return chosen.toArray(new LabelSet[0]);
}
/**
 * Greedily covers 'y' with sets drawn from 'S': repeatedly takes the greatest
 * remaining candidate under 'cmp' (S.last()), removes its labels from the
 * still-uncovered part of y, and re-filters the candidates, until y is covered
 * or no candidates remain.
 */
public static Set<LabelSet> cover(LabelSet y, SortedSet<LabelSet> S, Comparator cmp) {
	LabelSet remaining = y.deep_copy();
	Set<LabelSet> chosen = new HashSet<LabelSet>();
	while (S.size() > 0 && remaining.indices.length > 0) {
		LabelSet best = S.last();
		chosen.add(best);
		remaining.minus(best);
		// keep only candidates that are still subsets of what's left to cover
		S = getSortedSubsets(remaining, S, cmp);
	}
	return chosen;
}
/**
 * GetAllSubsets - gets all subsets of 'y' found among the keys of 'map'.
 *
 * @param y a labelset, e.g., [0,2,7]
 * @param map a map of labelsets to counts, e.g., {[0,2]:39, [2,7]:5, [2,9]:24,...}
 * @return the LabelSets to decompose y into, e.g., [[0,2],[2,7]]; just {y} if y
 *         itself has a positive count in the map
 */
public static LabelSet[] getAllSubsets(LabelSet y, HashMap<LabelSet,Integer> map) {
	Integer count = map.get(y);
	if (count != null && count >= 1) {
		// y itself is frequent enough: no decomposition needed
		return new LabelSet[]{y};
	}
	SortedSet<LabelSet> subsets = getSortedSubsets(y, map.keySet(), new LabelSetComparator(map));
	return subsets.toArray(new LabelSet[0]);
}
/**
 * GetTopNSubsets - like cover(y,map), but only returns the top 'n' subsets.
 *
 * @param y a labelset, e.g., [0,2,7]
 * @param map a map of labelsets to counts, e.g., {[0,2]:39, [2,7]:5, [2,9]:24,...}
 * @param n the number of sets to take
 * @return the (at most n) LabelSets to decompose y into, e.g., [[0,2],[2,7]]
 */
public static LabelSet[] getTopNSubsets(LabelSet y, HashMap<LabelSet,Integer> map, int n) {
	LabelSet[] all = getAllSubsets(y, map);
	int from = Math.max(0, all.length - n);
	return Arrays.copyOfRange(all, from, all.length);
}
/**
 * Returns (at most) the top 'n' subsets of 'y' found in 'map', as a sorted set.
 *
 * @param y the labelset to decompose
 * @param map labelset-to-count mapping supplying candidates
 * @param n the maximum number of subsets to return
 * @return up to n subsets of y
 */
public static SortedSet<LabelSet> getTopNSubsetsAsSet(LabelSet y, HashMap<LabelSet,Integer> map, int n) {
	SortedSet<LabelSet> allSets = getSortedSubsets(y, map);
	SortedSet<LabelSet> topSets = new TreeSet<LabelSet>();
	int taken = 0;
	for (LabelSet s : allSets) {
		topSets.add(s);
		// BUGFIX: was '++n_ > n', which let n+1 elements through (inconsistent
		// with getTopNSubsets, which takes exactly n)
		if (++taken >= n)
			break;
	}
	return topSets;
}
/**
 * Returns the single best subset of 'y' according to 'map'.
 *
 * @param y the labelset to decompose
 * @param map labelset-to-count mapping supplying candidates
 * @return the top subset
 */
public static LabelSet getTopSubset(LabelSet y, HashMap<LabelSet,Integer> map) {
	return getTopNSubsets(y, map, 1)[0];
}
/**
 * CountCombinationsSparseSubset - like CountCombinationsSparse, but only
 * considers the label columns listed in 'indices[]' of 'D'.
 *
 * @param D dataset
 * @param indices indices we are interested in
 * @return a HashMap associating a LabelSet representation of each label
 *         combination with its count, e.g., [3,7,14]:3
 */
public static HashMap<LabelSet,Integer> countCombinationsSparseSubset(Instances D, int indices[]) {
	HashMap<LabelSet,Integer> map = new HashMap<LabelSet,Integer>();
	for (int i = 0; i < D.numInstances(); i++) {
		LabelSet combo = new LabelSet(MLUtils.toSubIndicesSet(D.instance(i), indices));
		Integer seen = map.get(combo);
		map.put(combo, seen == null ? 1 : seen + 1);
	}
	return map;
}
/**
 * CountCombinationsSparse - returns a mapping of each distinct label combination
 * in 'D' to the number of instances exhibiting it.
 *
 * @param D dataset
 * @param L number of labels
 * @return a HashMap associating a LabelSet representation of each label
 *         combination with its count, e.g., [3,7,14]:3
 */
public static final HashMap<LabelSet,Integer> countCombinationsSparse(Instances D, int L) {
	HashMap<LabelSet,Integer> map = new HashMap<LabelSet,Integer>();
	for (int i = 0; i < D.numInstances(); i++) {
		LabelSet y = new LabelSet(MLUtils.toSparseIntArray(D.instance(i), L));
		map.put(y, map.containsKey(y) ? map.get(y) + 1 : 1);
	}
	return map;
}
/**
 * Used by convertDistribution(p,L): turns a labelset string (e.g. "[1,3]") into
 * a 0/1 indicator array of length L. A string of length &lt;= 2 (e.g. "[]") is
 * treated as the empty labelset.
 */
@Deprecated
private static final double[] toDoubleArray(String labelSet, int L) {
	double[] y = new double[L];
	if (labelSet.length() > 2) {
		for (int j : MLUtils.toIntArray(labelSet))
			y[j] = 1.;
	}
	return y;
}
/**
 * Convert Distribution - given the posterior across label combinations, returns
 * a 0/1 distribution across individual labels, based on the single most likely
 * combination (argmax of p).
 *
 * @param p the posterior of the super classes (combinations), e.g., P([1,3],[2]) = [1,0]
 * @param L the number of labels
 * @param iTemplate template whose class attribute values name the combinations
 * @return the indicator distribution across labels, e.g., P(1,2,3) = [1,0,1]
 */
@Deprecated
public static double[] convertDistribution(double p[], int L, Instances iTemplate) {
	double[] y = new double[L];
	int best = Utils.maxIndex(p);
	double[] indicator = toDoubleArray(iTemplate.classAttribute().value(best), L);
	for (int j = 0; j < indicator.length; j++) {
		if (indicator[j] > 0.0)
			y[j] = 1.0;
	}
	return y;
}
/**
 * Convert Distribution - given the posterior across combinations, returns the
 * distribution across labels by summing, for each label, the mass of every
 * combination containing it.
 *
 * @param p the posterior of the super classes (combinations), e.g., P([1,3],[2]) = [0.3,0.7]
 * @param L the number of labels, e.g., L = 3
 * @param meta_labels the combination each super-class index stands for, e.g., [13] to [1,3]
 * @return the distribution across labels, e.g., P(1,2,3) = [0.3,0.7,0.3]
 */
public static double[] convertDistribution(double p[], int L, LabelSet meta_labels[]) {
	double[] y = new double[L];
	for (int i = 0; i < p.length; i++) {
		// spread this combination's mass over each label it contains
		for (int j : meta_labels[i].indices)
			y[j] += p[i];
	}
	return y;
}
/**
 * Builds a labelset map for the given dataset.
 * NOTE(review): this looks like an unfinished stub -- the size is hard-coded to 4,
 * the dataset 'T' is never consulted, and every array slot is left null.
 * Confirm intent before relying on this method.
 */
public static final LabelSet[] makeLabelSetMap(Instances T) {
	int L_ = 4;
	return new LabelSet[L_];
}
// @todo name convertDistribution ?
/**
 * Recombination - given the posterior across combinations, returns a 0/1
 * distribution across labels based on the single most likely combination.
 *
 * @param p the posterior of the super classes (combinations), e.g., P([1,3],[2]) = [1,0]
 * @param L the number of labels
 * @param map the combination each super-class index stands for
 * @return the indicator distribution across labels, e.g., P(1,2,3) = [1,0,1]
 */
public static final double[] recombination(double p[], int L, LabelSet map[]) {
	double[] y = new double[L];
	LabelSet winner = map[Utils.maxIndex(p)];
	for (int j : winner.indices)
		y[j] = 1.0;
	return y;
}
// @todo name convertDistribution ?
/**
 * Recombination (thresholded variant) - given the posterior across combinations,
 * returns the distribution across labels by summing, for each label, the mass of
 * every combination containing it; combinations are decoded from the template's
 * class attribute values.
 *
 * @param p the posterior of the super classes (combinations), e.g., P([1,3],[2]) = [0.3,0.7]
 * @param L the number of labels
 * @param iTemplate template whose class attribute values name the combinations
 * @return the distribution across labels, e.g., P(1,2,3) = [0.3,0.7,0.3]
 */
public static final double[] recombination_t(double p[], int L, Instances iTemplate) {
	double[] y = new double[L];
	for (int k = 0; k < p.length; k++) {
		// e.g., "[1,3,5]" -> [1,3,5]
		int[] labels = MLUtils.toIntArray(iTemplate.classAttribute().value(k));
		for (int j : labels)
			y[j] += p[k];
	}
	return y;
}
/**
 * Recombination (thresholded variant) - as recombination_t(p,L,iTemplate), but
 * with the combinations supplied directly as LabelSets.
 *
 * @param p the posterior of the super classes (combinations)
 * @param L the number of labels
 * @param map the combination each super-class index stands for
 * @return the distribution across labels
 */
public static final double[] recombination_t(double p[], int L, LabelSet map[]) {
	double[] y = new double[L];
	for (int k = 0; k < p.length; k++) {
		for (int j : map[k].indices)
			y[j] += p[k];
	}
	return y;
}
/**
 * Converts a multi-label instance into a multi-class instance, according to a template.
 * Copies x, removes its first L attributes, inserts a single attribute at position 0
 * in their place, and attaches the template dataset. (In MEKA datasets the first L
 * attributes are the labels; the inserted attribute becomes the multi-class class
 * attribute placeholder -- its value is set by the caller.)
 *
 * @param x the multi-label instance to convert (not modified)
 * @param L the number of leading (label) attributes to remove
 * @param template the dataset supplying the new multi-class structure
 * @return the converted copy
 */
public static Instance convertInstance(Instance x, int L, Instances template) {
	Instance x_ = (Instance) x.copy();
	// detach from its dataset so attributes can be deleted/inserted freely
	x_.setDataset(null);
	for (int i = 0; i < L; i++)
		x_.deleteAttributeAt(0);
	x_.insertAttributeAt(0);
	x_.setDataset(template);
	return x_;
}
/** Label-combination (LC) transformation of D, using the class index as the label count. */
public static Instances LCTransformation(Instances D) {
	return LCTransformation(D, D.classIndex());
}

/** Label-combination (LC) transformation of D: PS with no pruning (P=0, N=0). */
public static Instances LCTransformation(Instances D, int L) {
	return PSTransformation(D, L, "Class", 0, 0);
}
/** PS transformation of D with pruning P and restoration N, using the class index as the label count. */
public static Instances PSTransformation(Instances D, int P, int N) {
	return PSTransformation(D, D.classIndex(), "Class", P, N);
}

/** PS transformation of D with L labels, pruning P and restoration N, class attribute named "Class". */
public static Instances PSTransformation(Instances D, int L, int P, int N) {
	return PSTransformation(D, L, "Class", P, N);
}
/**
 * Transform instances into a multi-class (pruned-sets) representation: each distinct
 * label combination remaining after pruning becomes one value of a new single class
 * attribute. Combinations pruned away are either decomposed into up to n frequent
 * subsets (duplicating the instance, one copy per subset) or dropped.
 *
 * @param D original dataset (not modified; a copy is transformed)
 * @param L number of labels in the original dataset
 * @param cname class name for the new dataset (may want to encode the list of indices here for RAkEL-like methods)
 * @param p pruning value: combinations with a count below p are pruned
 * @param n restoration value: how many subsets to decompose pruned combinations into
 * @return transformed dataset
 */
public static Instances PSTransformation(Instances D, int L, String cname, int p, int n) {
	D = new Instances(D);
	// Gather combinations
	HashMap<LabelSet,Integer> distinctCombinations = PSUtils.countCombinationsSparse(D,L);
	// Prune combinations
	if (p > 0)
		MLUtils.pruneCountHashMap(distinctCombinations,p);
	// Check there are > 2
	if (distinctCombinations.size() <= 1 && p > 0) {
		// pruning removed (nearly) everything -- retry with a weaker threshold
		System.err.println("[Warning] You did too much pruning, setting P = P-1");
		return PSTransformation(D,L,cname,p-1,n);
	}
	// Create class attribute
	ArrayList<String> ClassValues = new ArrayList<String>();
	for(LabelSet y : distinctCombinations.keySet())
		ClassValues.add(y.toString());
	Attribute C = new Attribute(cname, ClassValues);
	// Insert new special attribute (which has all possible combinations of labels)
	D.insertAttributeAt(C,L);
	D.setClassIndex(L);
	// Add class values
	// NB: N is fixed before the loop, so instances appended below (decomposition
	// duplicates) are not themselves re-processed
	int N = D.numInstances();
	for (int i = 0; i < N; i++) {
		Instance x = D.instance(i);
		LabelSet y = new LabelSet(MLUtils.toSparseIntArray(x,L));
		String y_string = y.toString();
		// add it
		if(ClassValues.contains(y_string)) //if its class value exists
			x.setClassValue(y_string);
		// decomp: the combination was pruned away; map it to its top-n frequent subsets
		else if(n > 0) {
			LabelSet d_subsets[] = PSUtils.getTopNSubsets(y,distinctCombinations,n);
			if (d_subsets.length > 0) {
				// fast: reuse the original instance for the first subset
				x.setClassValue(d_subsets[0].toString());
				// additional: one duplicate per extra subset
				if (d_subsets.length > 1) {
					for(int s_i = 1; s_i < d_subsets.length; s_i++) {
						Instance x_ = (Instance)(x).copy();
						x_.setClassValue(d_subsets[s_i].toString());
						D.add(x_);
					}
				}
			}
			else {
				// no usable subsets: drop the instance (deleted below)
				x.setClassMissing();
			}
		}
	}
	// remove with missing class (covers both n == 0 and no-subset cases)
	D.deleteWithMissingClass();
	try {
		D = F.removeLabels(D,L);
	} catch(Exception e) {
		// should never happen
	}
	D.setClassIndex(0);
	return D;
}
/*
* This method was used before tighter MOA integration (in Feb 2016).
* This method could probably be eliminated if doing so does not cause any problems.
public static Instance[] PSTransformation(Instance x, int L, HashMap<LabelSet,Integer> map, int n) {
int y_[] = MLUtils.toSparseIntArray(x,L);
if (y_.length <= 0)
// there can be no transformation if there are no labels!
return new Instance[0];
LabelSet y = new LabelSet(y_);
if (map.get(y) != null) {
Instance x_subsets[] = new Instance[1];
x_subsets[0] = convertInstance(x,L,x.dataset());
x_subsets[0].setClassValue(y.toString());
return x_subsets;
}
else {
LabelSet d_subsets[] = PSUtils.getTopNSubsets(y,map,n);
Instance x_subsets[] = new Instance[d_subsets.length];
Instance x_template = convertInstance(x,L,x.dataset());
for(int i = 1; i < d_subsets.length; i++) {
x_subsets[i] = (Instance)(x_template).copy();
x_subsets[i].setClassValue(d_subsets[i].toString());
}
return x_subsets;
}
}
*/
/**
 * Transform one instance into its multi-class representation: an array of
 * (possibly several) single-label instances.
 *
 * @param x instance to transform
 * @param L number of labels in the instance
 * @param map a map of labelsets to their frequencies (from the training data)
 * @param n restoration value: how many subsets to decompose an unseen labelset into
 * @param iTemplate template of the transformed (multi-class) dataset
 * @return the transformed instances (empty if x carries no labels)
 */
public static Instance[] PSTransformation(Instance x, int L, HashMap<LabelSet,Integer> map, int n, Instances iTemplate) {
	int y_[] = MLUtils.toSparseIntArray(x,L);
	if (y_.length <= 0)
		// There can be no transformation if there are no labels!
		return new Instance[0];
	LabelSet y = new LabelSet(y_);
	if (map.get(y) != null) {
		// The labelset already exists in the map (was observed in the training set)
		Instance x_subsets[] = new Instance[1];
		x_subsets[0] = convertInstance(x,L,iTemplate);
		x_subsets[0].setClassValue(y.toString()); // problem here! (original author note -- verify)
		return x_subsets;
	}
	else {
		// The labelset has not been seen before; use the map to construct some instances that fit
		LabelSet d_subsets[] = PSUtils.getTopNSubsets(y,map,n);
		Instance x_subsets[] = new Instance[d_subsets.length];
		Instance x_template = convertInstance(x,L,iTemplate);
		// BUGFIX: the loop previously started at i = 1, leaving x_subsets[0] null
		for(int i = 0; i < d_subsets.length; i++) {
			x_subsets[i] = (Instance)(x_template).copy();
			x_subsets[i].setClassValue(d_subsets[i].toString());
		}
		return x_subsets;
	}
}
/**
 * SLTransformation - Transform instances into a single-label (multi-class) representation.
 *
 * @param D original dataset
 * @param L number of labels in that dataset
 * @param cname class name for the new dataset (may want to encode the list of indices here for RAkEL-like methods)
 * @param p pruning value (labelsets occurring p times or fewer are pruned)
 * @param n restoration value (how many subsets to decompose a pruned/unseen labelset into)
 * @return transformed dataset
 */
public static Instances SLTransformation(Instances D, int L, String cname, int p, int n) {
	// Work on a copy; the caller's dataset must not be modified
	D = new Instances(D);
	// Gather combinations
	HashMap<LabelSet,Integer> distinctCombinations = PSUtils.countCombinationsSparse(D,L);
	// Prune combinations
	if (p > 0)
		MLUtils.pruneCountHashMap(distinctCombinations,p);
	// Check there are > 1 combinations left ...
	if (distinctCombinations.size() <= 1 && p > 0) {
		// ... or try again with less pruning if not
		System.err.println("[Warning] You did too much pruning, setting P = P-1");
		// BUGFIX: this retry previously called PSTransformation(..), silently
		// switching transformations; retry this same transformation instead.
		return SLTransformation(D,L,cname,p-1,n);
	}
	// Create class attribute holding all surviving label combinations
	ArrayList<String> ClassValues = new ArrayList<String>();
	for(LabelSet y : distinctCombinations.keySet())
		ClassValues.add(y.toString());
	Attribute C = new Attribute(cname, ClassValues);
	// Insert new special attribute (which has all possible combinations of labels)
	D.insertAttributeAt(C,L);
	D.setClassIndex(L);
	// Add class values
	int N = D.numInstances();
	for (int i = 0; i < N; i++) {
		Instance x = D.instance(i);
		LabelSet y = new LabelSet(MLUtils.toSparseIntArray(x,L));
		String y_string = y.toString();
		if (ClassValues.contains(y_string)) {
			// The exact labelset exists as a class value
			x.setClassValue(y_string);
		}
		else if (n > 0) {
			// Decompose into the top-n known subsets
			LabelSet d_subsets[] = PSUtils.getTopNSubsets(y,distinctCombinations,n);
			if (d_subsets.length > 0) {
				// First subset replaces this instance's class ...
				x.setClassValue(d_subsets[0].toString());
				// ... any additional subsets are added as copies
				for(int s_i = 1; s_i < d_subsets.length; s_i++) {
					Instance x_ = (Instance)(x).copy();
					x_.setClassValue(d_subsets[s_i].toString());
					D.add(x_);
				}
			}
			else {
				x.setClassMissing();
			}
		}
	}
	// Remove instances whose labelset could not be mapped to any class value
	D.deleteWithMissingClass();
	try {
		D = F.removeLabels(D,L);
	} catch(Exception e) {
		// should never happen: L is a valid label range for D by construction
	}
	D.setClassIndex(0);
	return D;
}
/**
 * Given N labelsets 'sparseY', use a count 'map' to convert them into another set of labelsets.
 * NOTE(review): unimplemented stub -- always returns null (the javadoc was left unfinished
 * by the original author); callers must not rely on this method.
 */
public static final LabelSet[] convert(LabelSet[] sparseY, HashMap<LabelSet,Integer> map) {
return null;
}
/**
 * SaveMap - Save the HashMap 'map' to the file 'filename' (via Java object serialization).
 * @param filename the file to write to
 * @param map the labelset-frequency map to serialize
 * @throws Exception if the object cannot be written
 */
public static final void saveMap(String filename, HashMap<LabelSet,Integer> map) throws Exception {
MLUtils.saveObject(map,filename);
}
/**
 * LoadMap - Load the HashMap stored in 'filename' (the counterpart of saveMap).
 * @param filename the file to read from
 * @return the deserialized labelset-frequency map
 * @throws Exception if the object cannot be read
 */
public static HashMap<LabelSet,Integer> loadMap(String filename) throws Exception {
// unchecked cast: the file is assumed to have been written by saveMap(..)
return (HashMap<LabelSet,Integer>) MLUtils.loadObject(filename);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/Project.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Project.java
* Copyright (C) 2015 FracPete (fracpete at gmail dot com)
*
*/
package meka.core;
import java.io.File;
/**
* Helper class related to the project and it's "home" directory.
*
* @author fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class Project {

	/** the windows directory. */
	public final static String DIR_WINDOWS = "mekafiles";

	/** the unix directory. */
	public final static String DIR_UNIX = ".meka";

	/**
	 * Returns the "home" directory of Meka, where to store the config files.
	 *
	 * @return the directory (a sub-directory of the user's home directory)
	 */
	public static File getHome() {
		String subdir = OS.isWindows() ? DIR_WINDOWS : DIR_UNIX;
		return new File(System.getProperty("user.home") + File.separator + subdir);
	}

	/**
	 * Adds the home directory to the file.
	 *
	 * @param file the file without path
	 * @return the expanded path
	 */
	public static File expandFile(String file) {
		String base = getHome().getAbsolutePath();
		return new File(base + File.separator + file);
	}

	/**
	 * Makes sure that the project's home directory is present.
	 *
	 * @return true if home directory present (or successfully created)
	 */
	public static boolean initialize() {
		File home = getHome();
		if (home.exists())
			return home.isDirectory();
		return home.mkdirs();
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/PropsUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* PropsUtils.java
* Copyright (C) 2014-2015 University of Waikato, Hamilton, New Zealand
*/
package meka.core;
import java.io.File;
import java.io.StringWriter;
import java.net.URL;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Properties;
import weka.core.Utils;
/**
* Utility class for props files.
*
* @author fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class PropsUtils {

	/** whether to output some debug information. */
	public static boolean DEBUG = false;

	/**
	 * Reads properties that inherit from three locations. Properties are first
	 * defined in the system resource location (i.e. in the CLASSPATH). These
	 * default properties must exist. Properties optionally defined in the user
	 * properties location (WekaPackageManager.PROPERTIES_DIR) override default
	 * settings. Properties defined in the current directory (optional) override
	 * all these settings.
	 *
	 * @param props the location of the props file that should be loaded.
	 * e.g.: "weka/core/Utils.props".
	 * @return the Properties
	 * @throws Exception if an error occurs reading the properties files.
	 * @see Utils#readProperties(String)
	 */
	public static Properties read(String props) throws Exception {
		Properties result;
		result = Utils.readProperties(props);
		if (DEBUG)
			System.out.println("start<PropsUtils.read: " + props + ">\n" + toString(result, null) + "end<PropsUtils.read: " + props + ">\n");
		return result;
	}

	/**
	 * Appends the URL of the props file to 'result' if the file exists.
	 * Helper for {@link #find(String)}; the same lookup used to be duplicated
	 * three times (user home, meka home, current dir).
	 *
	 * @param result the URL list to append to
	 * @param propsFile the candidate props file
	 */
	private static void addIfPresent(List<URL> result, File propsFile) {
		if (DEBUG) {
			System.out.println("- propsFile: " + propsFile);
			System.out.println("- propsFile exists: " + propsFile.exists());
		}
		if (propsFile.exists()) {
			try {
				// File.toURI().toURL() replaces the deprecated File.toURL()
				result.add(propsFile.toURI().toURL());
			}
			catch (Exception e) {
				System.err.println("Failed to turn '" + propsFile + "' into URL:");
				e.printStackTrace();
			}
		}
	}

	/**
	 * Locates the properties file in the current classpath, the user's home
	 * directory, the meka home directory and the current directory.
	 *
	 * @param props the props file to locate
	 * @return the URLs where the props file was found
	 */
	public static URL[] find(String props) {
		List<URL> result;
		Enumeration<URL> urls;
		String propsName;
		URL url;

		if (DEBUG)
			System.out.println("start<PropsUtils.find: " + props + ">");

		result = new ArrayList<URL>();
		propsName = new File(props).getName();
		if (DEBUG)
			System.out.println("- propsName: " + propsName);

		// 1. system resources (CLASSPATH)
		try {
			if (DEBUG)
				System.out.println("1. system resources: ");
			urls = ClassLoader.getSystemResources(props);
			while (urls.hasMoreElements()) {
				url = urls.nextElement();
				if (DEBUG)
					System.out.println("- " + url);
				result.add(url);
			}
		}
		catch (Exception e) {
			System.err.println("Failed to obtain systems resources (URLs) for: " + props);
		}

		// 2. user home directory
		if (DEBUG)
			System.out.println("2. home dir: " + System.getProperty("user.home"));
		addIfPresent(result, new File(System.getProperty("user.home") + File.separator + propsName));

		// 3. meka home directory
		if (DEBUG)
			System.out.println("3. meka home dir: " + Project.getHome());
		addIfPresent(result, new File(Project.getHome() + File.separator + propsName));

		// 4. current directory
		if (DEBUG)
			System.out.println("4. current dir: " + System.getProperty("user.dir"));
		addIfPresent(result, new File(System.getProperty("user.dir") + File.separator + propsName));

		if (DEBUG)
			System.out.println("end<PropsUtils.find: " + props + ">");

		return result.toArray(new URL[result.size()]);
	}

	/**
	 * Collapses all the inherited and current properties into a single Properties
	 * object and returns it.
	 *
	 * @param props the properties to collapse
	 * @return the collapsed version of this Properties object
	 */
	public static Properties collapse(Properties props) {
		Properties result;
		Enumeration<String> keys;
		String key;

		result = new Properties();
		// propertyNames() walks the full default-chain, unlike keys()
		keys = (Enumeration<String>) props.propertyNames();
		while (keys.hasMoreElements()) {
			key = keys.nextElement();
			result.setProperty(key, props.getProperty(key));
		}

		return result;
	}

	/**
	 * Outputs the properties as they would be written to a file.
	 *
	 * @param props the properties to turn into a string
	 * @param comment the comment to output
	 * @return the generated output or null in case of an error
	 */
	public static String toString(Properties props, String comment) {
		String result;
		StringWriter writer;

		result = null;
		try {
			writer = new StringWriter();
			collapse(props).store(writer, comment);
			writer.flush();
			writer.close();
			result = writer.toString();
		}
		catch (Exception e) {
			result = null;
			System.err.println("Failed to turn props into string: " + props);
			e.printStackTrace();
		}

		return result;
	}

	/**
	 * Prints the usage of this class from the commandline to stdout.
	 */
	protected static void printUsage() {
		System.out.println("Usage: " + PropsUtils.class.getName() + " <read|find> <props>");
		System.out.println("Use uppercase of read/find to enable debug output");
		System.out.println();
		System.out.println("Examples:");
		System.out.println("- read");
		System.out.println(" " + PropsUtils.class.getName() + " read meka/gui/goe/MekaEditors.props");
		System.out.println("- find");
		System.out.println(" " + PropsUtils.class.getName() + " find meka/gui/goe/MekaEditors.props");
		System.out.println();
	}

	/**
	 * Allows some basic operations on properties files:
	 * <ul>
	 * <li>read &lt;props&gt;- reads the specified props file and outputs it,
	 * e.g., "read meka/gui/goe/MekaEditors.props"
	 * <li>find &lt;props&gt;- finds all occurrences of the specified props
	 * file and outputs them, e.g., "find meka/gui/goe/MekaEditors.props"
	 * </ul>
	 */
	public static void main(String[] args) throws Exception {
		if (args.length == 2) {
			if (args[0].equalsIgnoreCase("read")) {
				if (args[0].equals("READ"))
					DEBUG = true;
				Properties props = read(args[1]);
				System.out.println(toString(props, null));
			}
			else if (args[0].equalsIgnoreCase("find")) {
				if (args[0].equals("FIND"))
					DEBUG = true;
				URL[] urls = find(args[1]);
				for (URL url: urls)
					System.out.println(url);
			}
			else {
				printUsage();
			}
		}
		else {
			printUsage();
		}
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/Result.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import weka.core.Instance;
import weka.core.DenseInstance;
import weka.core.Instances;
import weka.core.Attribute;
import weka.core.Utils;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.PrintWriter;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Set;
/**
* Result - Stores predictions alongside true labels, for evaluation.
* For more on the evaluation and threshold selection implemented here; see:
* <p>
* Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank. <i>Classifier Chains for Multi-label Classification</i>. Machine Learning Journal. Springer (2011).<br>
* Jesse Read, <i>Scalable Multi-label Classification</i>. PhD Thesis, University of Waikato, Hamilton, New Zealand (2010).<br>
* </p>
* @author Jesse Read
* @version March 2012 - Multi-target Compatible
*/
public class Result implements Serializable {

private static final long serialVersionUID = 1L;

/** The number of label (target) variables in the problem */
public int L = 0;

// Per-instance prediction confidences (each entry has length L)
public ArrayList<double[]> predictions = null;
// TODO, store in sparse fashion with either LabelSet or LabelVector
// Per-instance true label values (each entry has length L)
public ArrayList<int[]> actuals = null;

public HashMap<String,String> info = new LinkedHashMap<String,String>(); // stores general dataset/classifier info
public HashMap<String,Object> output = new LinkedHashMap<String,Object>();// stores predictive evaluation statistics
public HashMap<String,Object> vals = new LinkedHashMap<String,Object>(); // stores non-predictive evaluation stats
public HashMap<String,String> model = new LinkedHashMap<String,String>(); // stores the model itself

/** Creates an empty Result (L = 0). */
public Result() {
predictions = new ArrayList<double[]>();
actuals = new ArrayList<int[]>();
}

/** Creates an empty Result for L labels. */
public Result(int L) {
predictions = new ArrayList<double[]>();
actuals = new ArrayList<int[]>();
this.L = L;
}

/** Creates an empty Result for L labels, pre-sized for N instances. */
public Result(int N, int L) {
predictions = new ArrayList<double[]>(N);
actuals = new ArrayList<int[]>(N);
this.L = L;
}

/** The number of value-prediction pairs stored in this Result */
public int size() {
return predictions.size();
}

/**
 * Provides a nice textual output of all evaluation information.
 * @return String representation
 */
@Override
public String toString() {
StringBuilder resultString = new StringBuilder();
if (info.containsKey("Verbosity")) {
int V = MLUtils.getIntegerOption(info.get("Verbosity"),1);
if ( V > 4) {
resultString.append("== Individual Errors\n\n");
// output everything (verbosity above 4 dumps per-instance predictions)
resultString.append(Result.getResultAsString(this,V-5) + "\n\n");
}
}
// output the stats in general
if (model.size() > 0)
resultString.append("== Model info\n\n" + MLUtils.hashMapToString(model));
resultString.append("== Evaluation Info\n\n" + MLUtils.hashMapToString(info));
resultString.append("\n\n== Predictive Performance\n\n" + MLUtils.hashMapToString(output,3));
String note = "";
if (info.containsKey("Type") && info.get("Type").endsWith("CV")) {
note = " (averaged across folds)";
}
resultString.append("\n\n== Additional Measurements"+note+"\n\n" + MLUtils.hashMapToString(vals,3));
resultString.append("\n\n");
return resultString.toString();
}

/**
 * AddResult - Add an entry.
 * @param pred predictions
 * @param real an instance containing the true label values
 */
public void addResult(double pred[], Instance real) {
predictions.add(pred);
actuals.add(MLUtils.toIntArray(real,pred.length));
}

/**
 * RowActual - Retrieve the true values for the i-th instance.
 */
public int[] rowTrue(int i) {
return actuals.get(i);
}

/**
 * RowConfidence - Retrieve the prediction confidences for the i-th instance.
 */
public double[] rowConfidence(int i) {
return predictions.get(i);
}

/**
 * RowPrediction - Retrieve the predicted values for the i-th instance according to threshold t.
 */
public int[] rowPrediction(int i, double t) {
return A.toIntArray(rowConfidence(i), t);
}

/**
 * RowPrediction - Retrieve the predicted values for the i-th instance according to pre-calibrated/chosen threshold.
 */
public int[] rowPrediction(int i) {
String t = info.get("Threshold");
if (t != null) {
// For multi-label data, should know about a threshold first
return ThresholdUtils.threshold(rowConfidence(i), t);
}
else {
// Probably multi-target data (no threshold allowed)
return A.toIntArray(rowConfidence(i));
}
}

/**
 * ColConfidence - Retrieve the prediction confidences for the j-th label (column).
 * Similar to M.getCol(Y,j)
 */
public double[] colConfidence(int j) {
double y[] = new double[predictions.size()];
for(int i = 0; i < predictions.size(); i++) {
y[i] = rowConfidence(i)[j];
}
return y;
}

/**
 * AllPredictions - Retrieve all prediction confidences in an N * L matrix (2d array; one row per instance).
 */
public double[][] allPredictions() {
double Y[][] = new double[predictions.size()][];
for(int i = 0; i < predictions.size(); i++) {
Y[i] = rowConfidence(i);
}
return Y;
}

/**
 * AllPredictions - Retrieve all predictions (according to threshold t) in an N * L matrix.
 */
public int[][] allPredictions(double t) {
int Y[][] = new int[predictions.size()][];
for(int i = 0; i < predictions.size(); i++) {
Y[i] = rowPrediction(i,t);
}
return Y;
}

/**
 * AllTrueValues - Retrieve all true values in an N x L matrix (one row per instance).
 */
public int[][] allTrueValues() {
int Y[][] = new int[actuals.size()][];
for(int i = 0; i < actuals.size(); i++) {
Y[i] = rowTrue(i);
}
return Y;
}

/*
 * AddValue.
 * Add v to an existing metric value.
public void addValue(String metric, double v) {
Double freq = (Double)vals.get(metric);
vals.put(metric,(freq == null) ? v : freq + v);
}
*/

/**
 * Return the set of metrics for which measurements are available.
 */
public Set<String> availableMetrics() {
return output.keySet();
}

/**
 * Set the measurement for metric 'metric'.
 */
public void setMeasurement(String metric, Object stat) { output.put(metric,stat); }

/**
 * Retrieve the measurement for metric 'metric'.
 */
public Object getMeasurement(String metric) { return output.get(metric); }

/**
 * SetValue.
 * Add an evaluation metric and a value for it.
 */
public void setValue(String metric, double v) {
vals.put(metric,v);
}

/**
 * GetValue.
 * Retrieve the value for metric 'metric'
 */
public Object getValue(String metric) { return vals.get(metric); }

/**
 * SetInfo.
 * Set a String value to an information category.
 */
public void setInfo(String cat, String val) {
info.put(cat,val);
}

/**
 * GetInfo.
 * Get the String value of category 'cat'.
 */
public String getInfo(String cat) {
return info.get(cat);
}

/**
 * Set a model string.
 */
public void setModel(String key, String val) {
model.put(key, val);
}

/**
 * Get the model value.
 */
public String getModel(String key) {
return model.get(key);
}

// ********************************************************************************************************
//                     STATIC METHODS
// ********************************************************************************************************
//

/**
 * GetStats.
 * Return the evaluation statistics given predictions and real values stored in r.
 * In the multi-label case, a Threshold category must exist, containing a string defining the type of threshold we want to use/calibrate.
 */
public static HashMap<String,Object> getStats(Result r, String vop) {
if (r.getInfo("Type").startsWith("MT"))
return MLEvalUtils.getMTStats(r.allPredictions(),r.allTrueValues(), vop);
else
return MLEvalUtils.getMLStats(r.allPredictions(), r.allTrueValues(), r.getInfo("Threshold"), vop);
}

/**
 * GetResultAsString - print out each prediction in a Result along with its true labelset.
 */
public static String getResultAsString(Result s) {
return getResultAsString(s,3);
}

/**
 * WriteResultToFile -- write a Result 'result' out in plain text format to file 'fname'.
 * @param result Result
 * @param fname file name
 */
public static void writeResultToFile(Result result, String fname) throws Exception {
PrintWriter outer = new PrintWriter(new BufferedWriter(new FileWriter(fname)));
outer.write(result.toString());
outer.close();
}

/**
 * Convert a list of metric measurements into an Instances object
 * (one attribute per Double-valued metric, one instance per measurement map).
 * @param metrics a list of metric measurements (e.g., one HashMap per fold)
 * @return Instances
 */
public static Instances getResultsAsInstances(ArrayList<HashMap<String,Object>> metrics) {
HashMap<String,Object> o_master = metrics.get(0);
// only Double-valued metrics become attributes; other measurement types are skipped
ArrayList<Attribute> attInfo = new ArrayList<Attribute>();
for (String key : o_master.keySet()) {
if (o_master.get(key) instanceof Double) {
//System.out.println("key="+key);
attInfo.add(new Attribute(key));
}
}
Instances resultInstances = new Instances("Results",attInfo,metrics.size());
for (HashMap<String,Object> o : metrics) {
Instance rx = new DenseInstance(attInfo.size());
for (Attribute att : attInfo) {
String name = att.name();
rx.setValue(att,(double)o.get(name));
}
resultInstances.add(rx);
}
//System.out.println(""+resultInstances);
return resultInstances;
}

/**
 * Convert predictions into Instances (and true values).
 * The first L attributes (for L labels) hold the true values, and the next L attributes hold the predictions.
 * @param result A Result
 * @return Instances containing true values and predictions.
 */
public static Instances getPredictionsAsInstances(Result result) {
ArrayList<Attribute> attInfo = new ArrayList<Attribute>();
for(int j = 0; j < result.L; j++) {
attInfo.add(new Attribute("Y"+String.valueOf(j)));
}
for(int j = 0; j < result.L; j++) {
attInfo.add(new Attribute("P"+String.valueOf(j)));
}
double Y_pred[][] = result.allPredictions();
int Y_true[][] = result.allTrueValues();
Instances resultInstances = new Instances("Predictions",attInfo,Y_pred.length);
for(int i = 0; i < Y_pred.length; i++) {
Instance rx = new DenseInstance(attInfo.size());
rx.setDataset(resultInstances);
for(int j = 0; j < Y_true[i].length; j++) {
rx.setValue(j,(double)Y_true[i][j]);
}
for(int j = 0; j < Y_pred[i].length; j++) {
rx.setValue(j+result.L,Y_pred[i][j]);
}
resultInstances.add(rx);
}
return resultInstances;
}

/**
 * GetResultAsString - print out each prediction in a Result (to a certain number of decimal points) along with its true labelset.
 * @param result the Result to render
 * @param adp number of decimal places for confidences; 0 switches to a compact labelset format (multi-label only)
 */
public static String getResultAsString(Result result, int adp) {
StringBuilder sb = new StringBuilder();
double N = (double)result.predictions.size();
sb.append("|==== PREDICTIONS (N="+N+") =====>\n");
for(int i = 0; i < N; i++) {
sb.append("|");
sb.append(Utils.doubleToString((i+1),5,0));
sb.append(" ");
//System.out.println(""+result.info.get("Threshold"));
//System.out.println("|"+A.toString(result.rowPrediction(i)));
//System.out.println("|"+MLUtils.toIndicesSet(result.rowPrediction(i)));
if (adp == 0 && !result.getInfo("Type").equalsIgnoreCase("MT")) {
// compact form: print true and predicted labelsets as index sets
LabelSet y = new LabelSet(MLUtils.toIndicesSet(result.actuals.get(i)));
sb.append(y).append(" ");
LabelSet ypred = new LabelSet(MLUtils.toIndicesSet(result.rowPrediction(i)));
sb.append(ypred).append("\n");
}
else {
sb.append(A.toString(result.actuals.get(i))).append(" ");
sb.append(A.toString(result.predictions.get(i),adp)).append("\n");
}
}
sb.append("|==============================<\n");
return sb.toString();
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/StatUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.Evaluation;
import meka.classifiers.multilabel.MultiLabelClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.functions.SMO;
import weka.core.Instance;
import weka.core.Instances;
import java.util.HashMap;
import java.util.Random;
/**
* StatUtils - Helpful statistical functions.
* @author Jesse Read (jesse@tsc.uc3m.es)
* @version March 2013 - Multi-target Compatible
*/
public abstract class StatUtils {
//
// EMPIRICAL DISTRIBUTIONS
//
/**
 * P - Empirical prior.
 * @param Y label matrix
 * @param x label values
 * @return [P(Y_1==x[1]), P(Y_2==x[2]), ..., P(Y_L==x[L])]
 */
public static double[] P(double Y[][], int x[]) {
	// Delegate to the indexed variant over all label indices 0..L-1.
	return P(Y, MLUtils.gen_indices(x.length), x);
}
/**
 * P - Empirical prior.
 * @param Y label matrix
 * @param x label values
 * @param j label indices
 * @return [P(Y_j[1]==x[1]), P(Y_j[2]==x[2]), ..., P(Y_j[L]==x[L])]
 */
public static double[] P(double Y[][], int j[], int x[]) {
	double result[] = new double[j.length];
	for (int i = 0; i < result.length; i++)
		result[i] = p(Y, j[i], x[i]);
	return result;
}
/**
 * p - Empirical prior.
 * In the multi-label case, k in {0,1}
 * @param Y label matrix
 * @param j label index
 * @param k label value
 * @return P(Y_j==k) in Y (never exactly zero, due to the epsilon below).
 */
public static double p(double Y[][], int j, int k) {
int N = Y.length;
// start at a small epsilon rather than 0 so the returned probability is never
// exactly zero (avoids log(0)/division-by-zero in downstream computations)
double p = 0.0001;
for(int i = 0; i < N; i++) {
if ((int)Math.round(Y[i][j]) == k) {
p += 1.0;
}
}
return p/N;
}
/**
 * p - Empirical prior.
 * In the multi-label case, j_ in {0,1}
 * @param D Instances
 * @param j label index
 * @param j_ label value
 * @return P(Y_j==j_) in D. Delegates to the matrix version via MLUtils.getYfromD.
 */
public static double p(Instances D, int j, int j_) {
return p(MLUtils.getYfromD(D),j,j_);
}
/**
 * P - Empirical joint.
 * Multi-target friendly.
 * @param Y label matrix
 * @param j 1st label index
 * @param v 1st label value
 * @param k 2nd label index
 * @param w 2nd label value
 * @return P(Y_j = v, Y_k = w) in Y (floored above zero by the epsilon below).
 */
public static double P(double Y[][], int j, int v, int k, int w) {
int N = Y.length;
// epsilon start: keeps the joint probability strictly positive
double p = 0.0001;
for(int i = 0; i < N; i++) {
if (((int)Math.round(Y[i][j]) == v) && ((int)Math.round(Y[i][k]) == w))
p += 1.0;
}
return p/N;
}
/**
 * P - Empirical joint.
 * Multi-target friendly.
 * @param D Instances
 * @param j 1st label index
 * @param v 1st label value
 * @param k 2nd label index
 * @param w 2nd label value
 * @return P(Y_j = v, Y_k = w) in D. Delegates to the matrix version via MLUtils.getYfromD.
 */
public static double P(Instances D, int j, int v, int k, int w) {
return P(MLUtils.getYfromD(D),j,v,k,w);
}
/**
 * Delta(x_1,x_2,x_3 = v_1,v_2,v_3) for j = 1,2,3, k = 1,2,3.
 * Returns true iff, for every position idx, instance x takes (rounded) value
 * values[idx] at attribute index indices[idx].
 */
private static boolean match(Instance x, int indices[], int values[]) {
	for (int idx = 0; idx < indices.length; idx++) {
		int observed = (int) Math.round(x.value(indices[idx]));
		if (observed != values[idx])
			return false;
	}
	return true;
}
/**
 * P - Empirical joint.
 * Multi-target friendly.
 * @param D Instances
 * @param j label indices, e.g., 1,2,3
 * @param v label values, e.g., 0,0,1
 * @return P(x_1,x_2,x_3 = v_1,v_2,v_3) for j = 1,2,3 in D, floored at 0.0001
 * so it is never exactly zero.
 */
public static double P(Instances D, int j[], int v[]) {
int N = D.numInstances();
int n = 0;
for (Instance x : D) {
if (match(x,j,v))
n++;
}
return Math.max(0.0001,(double)n/N);
}
/**
 * jPMF - Joint PMF.
 * @param D Instances
 * @param j 1st label index
 * @param k 2nd label index
 * @return the joint PMF of the j-th and k-th labels in D
 * (JOINT[v_j][v_k] = empirical P(Y_j = v_j, Y_k = v_k); entries sum to 1).
 */
public static double[][] jPMF(Instances D, int j, int k) {
double JOINT[][] = new double[D.attribute(j).numValues()][D.attribute(k).numValues()];
int N = D.numInstances();
for(int i = 0; i < N; i++) {
int v_j = (int)Math.round(D.instance(i).value(j));
int v_k = (int)Math.round(D.instance(i).value(k));
JOINT[v_j][v_k] += (1.0 / (double)N);
}
return JOINT;
}
/**
 * Joint Distribution.
 * @param D Instances
 * @param j 1st label index
 * @param k 2nd label index
 * @param l 3rd label index
 * @return the joint PMF of the j-th, k-th and l-th labels in D.
 */
public static double[][][] jPMF(Instances D, int j, int k, int l) {
double JOINT[][][] = new double[D.attribute(j).numValues()][D.attribute(k).numValues()][D.attribute(l).numValues()];
int N = D.numInstances();
for(int i = 0; i < N; i++) {
int v_j = (int)Math.round(D.instance(i).value(j));
int v_k = (int)Math.round(D.instance(i).value(k));
int v_l = (int)Math.round(D.instance(i).value(l));
JOINT[v_j][v_k][v_l] += (1.0 / (double)N);
}
return JOINT;
}
/**
 * GetP - Get a pairwise empirical joint-probability matrix P[][] from dataset D.
 * Only the diagonal and the upper triangle are filled:
 * P[j][j] = P(y_j = 1), and P[j][k] = P(y_j = 1, y_k = 1) for k &gt; j.
 * <br>
 * NOTE multi-label only
 */
public static double[][] getP(Instances D) {
double N = (double)D.numInstances();
int L = D.classIndex();
double P[][] = new double[L][L];
for(int j = 0; j < L; j++) {
P[j][j] = p(D,j,1);
for(int k = j+1; k < L; k++) {
P[j][k] = P(D,j,1,k,1);
}
}
return P;
}
/**
 * GetApproxC - A fast version of getC(D), based on frequent sets.
 * Actually, if we don't prune, this is not even approximate -- it is the real empirical C.
 * Only the diagonal and upper triangle of C are filled:
 * C[j][j] counts instances with y_j = 1, C[j][k] (k &gt; j) counts co-occurrences.
 */
public static int[][] getApproxC(Instances D) {
int N = D.numInstances();
int L = D.classIndex();
int C[][] = new int[L][L];

// @todo, can prune here to make even faster by pruning this.
HashMap<LabelSet,Integer> map = MLUtils.countCombinationsSparse(D,L);

// Each distinct labelset y occurs 'c' times; every pair of indices in y
// therefore contributes 'c' co-occurrences.
for (LabelSet y : map.keySet()) {
int c = map.get(y);
for(int j = 0; j < y.indices.length; j++) {
int j_ = y.indices[j];
C[j_][j_] += c;
for(int k = j+1; k < y.indices.length; k++) {
int k_ = y.indices[k];
C[j_][k_] += c;
}
}
}

return C;
}
/**
 * GetApproxP - A fast version of getP(D), based on frequent sets.
 * Actually, if we don't prune, this is not even approximate -- it is the real empirical P.
 *
 * BUGFIX: the previous implementation iterated subset-internal positions as if
 * they were label indices, tested y.contains(j) where y.contains(k) was intended,
 * and ignored each labelset's frequency count entirely. It now derives the
 * pairwise counts from getApproxC(D) and normalises them via getP(C,N) -- which
 * is exactly what the old in-line @todo ("use getP(C,N) instead") proposed.
 *
 * @param D the dataset
 * @return P[][] where P[j][j] = P(y_j = 1) and P[j][k] (k &gt; j) = P(y_j = 1, y_k = 1),
 *         each floored at 0.0001
 */
public static double[][] getApproxP(Instances D) {
	int N = D.numInstances();
	return getP(getApproxC(D), N);
}
/**
 * GetP - Normalise a pairwise count matrix C (as produced by getC / getApproxC)
 * into a probability matrix, flooring each entry at 0.0001 so no probability is
 * exactly zero. Only the diagonal and upper triangle are filled.
 * @param C pairwise count matrix
 * @param N number of instances the counts were taken from
 * @return P[][] where P[j][j] = P(y_j = 1) and P[j][k] (k &gt; j) = P(y_j = 1, y_k = 1)
 */
public static double[][] getP(int C[][], int N) {
int L = C.length;
double P[][] = new double[L][L];
for(int j = 0; j < L; j++) {
P[j][j] = Math.max(C[j][j]/(double)N,0.0001);
for(int k = j+1; k < L; k++) {
P[j][k] = Math.max(C[j][k]/(double)N,0.0001);
}
}
return P;
}
/**
 * GetC - Get pairwise co-ocurrence counts from the training data D.
 * <br>
 * NOTE multi-label only -- assumes label values are in {0,1}; TODO confirm
 * before using on multi-target data.
 * @return C[][] where C[j][k] is the number of times where Y[i][j] = 1 and y[i][k] = 1 over all i = 1,...,N
 */
public static int[][] getC(Instances D) {
int L = D.classIndex();
int N = D.numInstances();
int C[][] = new int[L][L];
for(int i = 0; i < N; i++) {
for(int j = 0; j < L; j++) {
C[j][j] += (int)D.instance(i).value(j); // C[j==1] ++
for(int k = j+1; k < L; k++) {
C[j][k] += (D.instance(i).value(j) + D.instance(i).value(k) >= 2.0) ? 1 : 0; // C[j==1,k==1] ++
}
}
}
return C;
}
/**
 * I - Mutual Information I(y_j;y_k) -- count version, multi-label only.
 * <br>
 * NOTE(review): this formula never reads the joint count C[j][k]; it is built
 * only from the marginals N_j, N_k and N, so it is unclear how it can equal
 * mutual information in general -- confirm against the intended derivation.
 * Also p_5/p_6/p_7 can be non-positive (e.g. when N_j + N_k &gt; N), making the
 * logs NaN/undefined -- presumably only used on sparse label data; verify.
 * @param C count matrix (diagonal: marginal counts; upper triangle: joint counts)
 * @param j j-th label index
 * @param k k-th label index
 * @param Ncount number of instances in the training set
 * @return I(Y_j;Y_k)
 */
public static double I(int C[][], int j, int k, int Ncount) {
	double N = (double)Ncount;
	// floor marginals at 0.0001 to avoid log(0)
	double N_j = Math.max(C[j][j],0.0001);
	double N_k = Math.max(C[k][k],0.0001);
	double p_5 = (N - N_j);           // count of y_j == 0
	double p_6 = (N - N_k);           // count of y_k == 0
	double p_7 = (N - (N_j + N_k));
	return 1.0 / N * (
		- p_5 * Math.log( p_5 )
		- p_6 * Math.log( p_6 )
		+ p_7 * Math.log( p_7 )
		+ N * Math.log( N )
	);
}
/**
 * H - Conditional Entropy H(y_j|y_k), estimated from co-occurrence counts.
 * multi-label only
 * @param C count matrix (diagonal: marginal counts; upper triangle: joint counts)
 * @param j j-th label index
 * @param k k-th label index
 * @param Ncount number of instances in the training set
 * @return H(Y_j|Y_k)
 */
public static double H(int C[][], int j, int k, int Ncount) {
	double total = (double) Ncount;
	// floor raw counts at 0.0001 to avoid log(0)
	double nj = Math.max(C[j][j], 0.0001);
	double nk = Math.max(C[k][k], 0.0001);
	double njk = Math.max(C[j][k], 0.0001);
	// derived cell counts of the 2x2 contingency table
	double n00 = total + njk - (nj + nk);   // y_j=0, y_k=0
	double n01 = nk - njk;                  // y_j=0, y_k=1
	double n10 = nj - njk;                  // y_j=1, y_k=0
	double notJ = total - nj;               // marginal count of y_j=0
	double sum = n00 * Math.log(n00)
	           + n01 * Math.log(n01)
	           + n10 * Math.log(n10)
	           + njk * Math.log(njk)
	           - notJ * Math.log(notJ)
	           - nj * Math.log(nj);
	return -sum / total;
}
/*
* I - Mutual Information -- fast version, must calcualte P[][] = getP(D) first.
* multi-label only
* <br>
* TODO -- check this
* @return I(Y_j;Y_k)
public static double I(double P[][], int j, int k) {
double p_j = P[j][j];
double p_k = P[j][k];
double p_jk = P[j][k];
return p_jk * Math.log ( p_jk / ( p_j * p_k) );
}
*/
/**
 * I - Build the full pairwise Mutual Information matrix from P[][] = getP(D).
 * The result is symmetric: M[j][k] = M[k][j] = I(P,j,k); diagonal stays 0.
 * @see #I(double[][], int, int)
 * @return I[][]
 */
public static double[][] I(double P[][]) {
	int L = P.length;
	double M[][] = new double[L][L];
	for (int a = 0; a < L; a++) {
		for (int b = a + 1; b < L; b++) {
			double mi = I(P, a, b);
			M[a][b] = mi;
			M[b][a] = mi;
		}
	}
	return M;
}
/**
 * I - Mutual Information.
 * <br>
 * NOTE binary only (two-term form over the joint p(1,1) and its complement).
 * @param P probability matrix from getP(..): diagonal = marginals, upper triangle = joints
 * @param j j-th label index
 * @param k k-th label index
 * @return I(Y_j;Y_k) in dataset D.
 */
public static double I(double P[][], int j, int k) {
	double pj = P[j][j];
	double pk = P[k][k];
	double pjk = P[j][k];
	// contribution of the (1,1) cell ...
	double onTerm = pjk * Math.log(pjk / (pj * pk));
	// ... plus the lumped complement of that cell
	double offTerm = (1. - pjk) * Math.log((1. - pjk) / ((1. - pj) * (1. - pk)));
	return onTerm + offTerm;
}
/**
 * I - Mutual Information.
 * <br>
 * NOTE Multi-target friendly (does not assume binary labels).
 * <br>
 * NOTE a bit slow
 * @param D dataset
 * @param j j-th target attribute index
 * @param k k-th target attribute index
 * @return I(Y_j;Y_k) in dataset D.
 */
public static double I(Instances D, int j, int k) {
	double I = 0.0;
	// sum over every value pair (x,y) of the two nominal target attributes
	for(int x = 0; x < D.attribute(j).numValues(); x++) {
		double p_x = p(D,j,x);
		for(int y = 0; y < D.attribute(k).numValues(); y++) {
			double p_y = p(D,k,y);
			double p_xy = P(D,j,x,k,y);
			// standard MI term; NOTE(review): if p_xy can be exactly 0 this adds
			// 0 * log(0) = NaN -- confirm that p()/P() floor their estimates.
			I += p_xy * Math.log ( p_xy / ( p_x * p_y) );
		}
	}
	return I;
}
/**
 * I - Get an Unconditional Depndency Matrix.
 * (Works for both ML and MT data).
 * @param D dataset
 * @param L number of labels
 * @return a L*L matrix representing Unconditional Depndence (upper triangle filled).
 */
public static double[][] I(Instances D, int L) {
	double M[][] = new double[L][L];
	// fill the upper triangle only; the lower triangle stays 0.0
	for (int a = 0; a < L; a++) {
		for (int b = a + 1; b < L; b++) {
			M[a][b] = I(D, a, b);
		}
	}
	return M;
}
/**
 * Critical values used for the Chi^2 test at significance level P = 0.10.
 * Indexed by degrees of freedom: CRITICAL[dof] for dof in 1..4; index 0 is a 0.0 placeholder.
 */
public static final double CRITICAL[] = new double[]{0.,2.706, 4.605, 6.251, 7.779}; // P == 0.10
/**
 * Chi^2 - Do the chi-squared test on the j-th and k-th labels in Y.
 * <br>
 * NOTE multi-label only! @TODO Use enumerateValues() !!!
 * <br>
 * If they are correlated, this means unconditional dependence!
 * <br>
 * NOTE(review): the statistic is accumulated over probabilities rather than
 * raw counts (classical chi^2 uses observed/expected counts), i.e. the result
 * is the count-based statistic divided by N -- confirm this scaling is
 * intended wherever the value is compared against CRITICAL.
 * @param Y label data
 * @param j j-th label index
 * @param k k-th label index
 * @return The chi-square statistic for labels j and k in Y.
 */
public static double chi2 (Instances Y,int j,int k) {
	// H_0 : p(Y_j,Y_k) = p(Y_j)p(Y_k)
	double chi2 = 0.0;
	// iterate over the four cells of the 2x2 (binary) contingency table
	for(int j_ = 0; j_ < 2; j_++) {
		for(int k_ = 0; k_ < 2; k_++) {
			double E = p(Y,j,j_) * p(Y,k,k_); // Expected value P(Y_j = j_)P(Y_k = k_)
			double O = P(Y,j,j_,k,k_); // Observed value P(Y_j = j_, Y_k = k_)
			chi2 += ( ((O - E) * (O - E)) / E );
		}
	}
	return chi2;
}
/**
 * Chi^2 - Do the chi-squared test on all pairs of labels.
 * @see #chi2(Instances, int, int)
 * @param D dataset
 * @return The chi-square statistic matrix X (upper triangle filled)
 */
public static double[][] chi2 (Instances D) {
	int L = D.classIndex();
	double X[][] = new double[L][L];
	for (int a = 0; a < L; a++) {
		for (int b = a + 1; b < L; b++) {
			X[a][b] = chi2(D, a, b);
		}
	}
	return X;
}
/**
 * Chi^2 - Chi-squared test.
 * If they are correlated, this means unconditional dependence!
 * <br>
 * NOTE(review): DoF is taken as K-1 (K = number of slices / error types) and
 * used to index CRITICAL, so K must be at most CRITICAL.length (5) -- verify
 * callers (condDepMatrix passes K = 3, i.e. DoF = 2).
 * @param M measured joint P(Y_1,Y_2)
 * @param Exp expect joint P(Y_1)P(Y_2) given null hypothesis
 * @return The chi-square statistic for labels j and k in Y; offset by the critical
 *         value, so positive entries are significant at P = 0.10.
 */
public static double[][] chi2 (double M[][][], double Exp[][][]) {
	int K = M.length;
	int L = M[0].length;
	int DoF = K - 1;
	double V[][] = new double[L][L];
	// accumulate the chi^2 statistic over all K slices; upper triangle only
	for(int i = 0; i < K; i++) {
		for(int j = 0; j < L; j++) {
			for(int k = j+1; k < L; k++) {
				double J = M[i][j][k]; // actual (joint) p(e==e)
				double E = Exp[i][j][k]; // expected (prior*prior)
				V[j][k] += ( ((J - E) * (J - E)) / E );
			}
		}
	}
	//System.out.println(weka.core.M.toString((double[][])V));
	// offset by the critical value: positive entries indicate significance
	double p = CRITICAL[DoF];
	for(int j = 0; j < L; j++) {
		for(int k = j+1; k < L; k++) {
			V[j][k] -= p;
		}
	}
	return V;
}
/**
 * MargDepMatrix - Get an Unconditional Depndency Matrix.
 * <br>
 * Fixed: the fast conditional-entropy branch was guarded by the same
 * <code>op.equals("H")</code> as the regular one and therefore unreachable;
 * it is now selected with <code>"Hf"</code> (mirroring "Ib"/"Ibf").
 * @param D dataset
 * @param op how we will measure the dependency:
 *           "C" co-occurrence probabilities; "I" mutual information (multi-target capable);
 *           "Ib" binary MI; "Ibf" fast binary MI; "H" conditional entropy;
 *           "Hf" fast conditional entropy; "X" chi-squared; "F" relative frequencies.
 * @return a L*L matrix representing Unconditional Depndence.
 */
public static double[][] margDepMatrix(Instances D, String op) {
	int L = D.classIndex();
	int N = D.numInstances();
	// Simple Co-occurence counts, normalized
	if (op.equals("C")) {
		return getP(getApproxC(D), N);
	}
	// Mutual information -- complete / multi-target capable
	if (op.equals("I")) {
		return I(D,L);
	}
	// Mutual information -- binary (multi-label) approximation
	if (op.equals("Ib")) {
		return I(getP(getC(D), N));
	}
	// Mutual information -- fast binary (multi-label) approximation
	if (op.equals("Ibf")) {
		return I(getP(getApproxC(D), N));
	}
	// Conditional entropy -- binary (multi-label)
	if (op.equals("H")) {
		return H(getC(D), N);
	}
	// Conditional entropy -- fast binary (multi-label) approximation
	if (op.equals("Hf")) {
		return H(getApproxC(D), N);
	}
	// Chi-squared
	if (op.equals("X")) {
		return chi2(D);
	}
	// Frequencies (cheap)
	if (op.equals("F")) {
		return F(D);
	}
	System.err.println("No operation found; Using empty!");
	return new double[L][L];
}
/**
 * I - Get a Mutual Information Matrix (upper triangle) from a count matrix.
 * @param C pairwise co-occurrence counts
 * @param N number of instances
 * @return M with M[j][k] = I(C,j,k,N) for k &gt; j
 */
public static double[][] I(int C[][], int N) {
	int L = C.length;
	double M[][] = new double[L][L];
	for (int a = 0; a < L; a++) {
		for (int b = a + 1; b < L; b++) {
			M[a][b] = I(C, a, b, N);
		}
	}
	return M;
}
/**
 * H - Get a Conditional Entropy Matrix (upper triangle) from a count matrix.
 * @param C pairwise co-occurrence counts
 * @param N number of instances
 * @return M with M[j][k] = H(C,j,k,N) for k &gt; j
 */
public static double[][] H(int C[][], int N) {
	int L = C.length;
	double M[][] = new double[L][L];
	for (int a = 0; a < L; a++) {
		for (int b = a + 1; b < L; b++) {
			M[a][b] = H(C, a, b, N);
		}
	}
	return M;
}
/**
 * H - Get a Conditional Entropy Matrix directly from a dataset.
 * <br>
 * Fixed: previously passed D.classIndex() (the number of labels) where
 * {@link #H(int[][], int)} expects the number of instances, which mis-scaled
 * every entry (compare the parallel getP(C, N) usage).
 * @param D multi-label dataset
 * @return matrix of pairwise conditional entropies H(Y_j|Y_k)
 */
public static double[][] H(Instances D) {
	int C[][] = getC(D);
	return H(C, D.numInstances());
}
/**
 * Ratio of expected to observed joint relevance for labels j and k:
 * p(y_j=1)p(y_k=1) / p(y_j=1, y_k=1). Equals 1 under independence.
 * NOTE(review): if the observed joint probability O is exactly 0 this divides
 * by zero (yielding Infinity) -- presumably p()/P() never return exactly 0; confirm.
 */
private static double f (Instances Y,int j,int k) {
	double E = p(Y,j,1) * p(Y,k,1); // Expected value P(Y_j = 1)P(Y_k = 1)
	double O = P(Y,j,1,k,1); // Observed value P(Y_j = 1, Y_k = 1)
	return E/O;
}
/**
 * F - Relative frequency matrix (between p(j),p(k) and p(j,k)) in dataset D.
 * Entry [j][k] (k &gt; j) is |1 - f(D,j,k)|: 0 under independence, growing as
 * the expected/observed ratio departs from 1.
 */
public static double[][] F(Instances D) {
	int L = D.classIndex();
	double M[][] = new double[L][L];
	for (int a = 0; a < L; a++) {
		for (int b = a + 1; b < L; b++) {
			M[a][b] = Math.abs(1. - f(D, a, b));
		}
	}
	return M;
}
// A bit of a useless function -- get rid of it somehow?
/**
 * Per-label error estimates for 'result'.
 * Predictions are rounded for multi-target results, otherwise thresholded with
 * the per-label thresholds recorded under the result's "Threshold" info key.
 * NOTE(review): E[] is consumed as an error rate by condDepMatrix -- confirm
 * that Metrics.P_Hamming returns a loss (not an accuracy) here; the
 * commented-out line suggests it replaced a 1-accuracy computation.
 * @param result evaluation result holding predictions and true values
 * @param L number of labels
 * @return E[] with one value per label
 */
private static double[] fillError(Result result, int L) {
	double Yprob[][] = result.allPredictions();
	int Ytrue[][] = result.allTrueValues();
	int Ypred[][] = null;
	if (result.getInfo("Type").equals("MT")) {
		// Multi-target, so just round!
		Ypred = ThresholdUtils.round(Yprob);
	}
	else {
		// multi-label: apply the stored per-label thresholds
		double ts[] = ThresholdUtils.thresholdStringToArray(result.getInfo("Threshold"),L);
		Ypred = ThresholdUtils.threshold(Yprob,ts);
	}
	double E[] = new double[L];
	for(int j = 0; j < L; j++) {
		//E[j] = 1.0 - result.output.get("Accuracy["+j+"]");
		E[j] = Metrics.P_Hamming(Ytrue,Ypred,j);
	}
	return E;
}
/**
 * CondDepMatrix - Get a Conditional Dependency Matrix.
 * Based on Zhang's 'LEAD' approach, where<br>
 * the probability of labels j and k both getting errors on the same instance is error(j)*error(k)
 * if the actual co-occurence is otherwise.
 * @param D dataset
 * @param result evaluation result holding predictions for D (its output is overwritten
 *               with high-verbosity stats as a side effect)
 * @return a L*L matrix of Unconditional Depndence.
 */
public static double[][] condDepMatrix(Instances D, Result result) {
	int L = D.classIndex();
	int N = D.numInstances();
	double T[][] = MLUtils.getYfromD(D); // Output (TEACHER)
	double Y[][] = MatrixUtils.threshold(result.allPredictions(), 0.5); // Output (PREDICTED)
	result.output = Result.getStats(result,"6"); // <-- high verbosity, because we need individual accuracies
	double E[] = fillError(result, L); // Errors (EXPECTED)
	double F[][][] = new double[3][L][L]; // Errors (ACTUAL)
	// Find the actual co-occurence of the three error types ...
	for(int i = 0; i < N; i++) {
		int y[] = A.toIntArray(Y[i],0.5); // predicted
		int t[] = A.toIntArray(T[i],0.5); // actual (teacher)
		for(int j = 0; j < L; j++) {
			for(int k = j+1; k < L; k++) {
				if (y[j] != t[j] && y[k] != t[k]) {
					// if j incorrect and k also ...
					F[0][j][k]++; // error type 0: both wrong
				}
				else if (y[j] == t[j] && t[k] == y[k]) {
					// both are correct
					F[2][j][k]++; // error type 2: both right
				}
				else {
					// if only one is correct
					F[1][j][k]++; // error type 1: exactly one wrong
				}
			}
		}
	}
	// Expected counts of each error type under independence of the errors
	double E_norm[][][] = new double[3][L][L];
	for(int j = 0; j < L; j++) {
		for(int k = j+1; k < L; k++) {
			E_norm[0][j][k] = N * (E[j] * E[k]);
			E_norm[2][j][k] = N * ((1.0 - E[k]) * (1.0 - E[j]));
			E_norm[1][j][k] = N * ( (E[j] * (1.0 - E[k])) + (1.0 - E[j]) * E[k]);
		}
	}
	// chi^2 of actual vs expected error co-occurrence (DoF = 2, see CRITICAL)
	return StatUtils.chi2(F,E_norm);
}
/**
 * LEAD.
 * Do the chi-squared LEAD test on all labels in D.
 * We would expect the 3 kinds of error to be uncorrelacted.
 * However, if they are significantly correlated, this means that there is conditional dependence!
 * @param D test dataset (provides the true labels)
 * @param result predictions on D
 * @return per-pair chi^2 statistic offset by the 1-DoF critical value
 *         (positive entries indicate significant conditional dependence at P = 0.10)
 */
public static double[][] LEAD2 (Instances D, Result result) {
	int L = D.classIndex();
	int N = D.numInstances();
	double Y[][] = MLUtils.getYfromD(D); // Real
	double Y_[][] = MatrixUtils.threshold(result.allPredictions(), 0.5); // Predicted
	// Error matrix: 0 = correct, +1/-1 = the two directions of mistake
	double E[][] = MatrixUtils.subtract(Y, Y_);
	// Expected (for each j)
	double X[][] = new double[L][L];
	for(int j = 0; j < L; j++) {
		for(int k = j+1; k < L; k++) {
			// accumulate chi^2 over the three error outcomes {0, 1, -1}
			for(int v : new int[]{0,1,-1}) {
				double p_j = p(E,j,v); // prior
				double p_k = p(E,k,v); // prior
				double p_jk = P(E,j,v,k,v); // joint
				double Exp = p_j * p_k; // expected
				X[j][k] += ( ((p_jk - Exp) * (p_jk - Exp)) / Exp ); // calc.
			}
			// offset by the 1-DoF critical value: positive => dependent
			X[j][k] -= CRITICAL[1];
		}
	}
	return X;
}
/**
 * LEAD - Performs LEAD on dataset 'D', with corresponding gresult 'R', and dependency measurement type 'MDType'.
 * Replaces the labels of D with the prediction errors and measures marginal
 * dependence among those errors.
 */
public static double[][] LEAD (Instances D, Result R, String MDType) {
	int L = D.classIndex();
	int N = D.numInstances();
	// true labels from D; thresholded predictions from R
	double actual[][] = MLUtils.getYfromD(D);
	double predicted[][] = MatrixUtils.threshold(R.allPredictions(), 0.5);
	// binary error matrix: 1 where the prediction differs from the truth
	double errors[][] = MatrixUtils.abs(MatrixUtils.subtract(actual, predicted));
	// re-label a copy of D with the errors ...
	Instances D_E = MLUtils.replaceZasClasses(new Instances(D), errors, L);
	// ... and pass it through any measure of marginal dependence
	return StatUtils.margDepMatrix(D_E, MDType);
}
/** LEAD with the default dependency measure ("I" = mutual information). */
public static double[][] LEAD (Instances D, Result result) {
	return LEAD(D,result,"I");
}
/**
 * LEAD - Performs LEAD on dataset 'D', using BR with base classifier 'h', under random seed 'r'.
 * Internally: randomized 60/40 train/test split, BR trained on the train part,
 * then the conditional-dependence test is run on the test-set errors.
 * <br>
 * NOTE(review): this overload delegates to LEAD2, while the MDType overload
 * below delegates to LEAD(D,R,MDType) -- per the warning this asymmetry is
 * deliberately left alone; confirm before "unifying" them.
 * <br>
 * WARNING: changing this method will affect the perfomance of e.g., BCC -- on the other hand the original BCC paper did not use LEAD, so don't worry.
 */
public static double[][] LEAD(Instances D, Classifier h, Random r) throws Exception {
	Instances D_r = new Instances(D);
	D_r.randomize(r);
	// 60% train / 40% test
	Instances D_train = new Instances(D_r,0,D_r.numInstances()*60/100);
	Instances D_test = new Instances(D_r,D_train.numInstances(),D_r.numInstances()-D_train.numInstances());
	BR br = new BR();
	br.setClassifier(h);
	Result result = Evaluation.evaluateModel((MultiLabelClassifier)br,D_train,D_test,"PCut1","1");
	return LEAD2(D_test,result);
}
/**
 * LEAD with an explicit marginal-dependence measure 'MDType' (see margDepMatrix).
 * Same randomized 60/40 split and BR model as {@link #LEAD(Instances, Classifier, Random)},
 * but the error matrix is analysed via LEAD(D,R,MDType) rather than LEAD2.
 */
public static double[][] LEAD(Instances D, Classifier h, Random r, String MDType) throws Exception {
	Instances D_r = new Instances(D);
	D_r.randomize(r);
	// 60% train / 40% test
	Instances D_train = new Instances(D_r,0,D_r.numInstances()*60/100);
	Instances D_test = new Instances(D_r,D_train.numInstances(),D_r.numInstances()-D_train.numInstances());
	BR br = new BR();
	br.setClassifier(h);
	Result result = Evaluation.evaluateModel((MultiLabelClassifier)br,D_train,D_test,"PCut1","1");
	return LEAD(D_test, result, MDType);
}
/**
 * Main - do some tests.
 * args[2] selects the measurement: "L" for LEAD (with optional args[3] as the
 * dependency measure), otherwise any margDepMatrix op code.
 */
public static void main(String args[]) throws Exception {
	Instances D = Evaluation.loadDataset(args);
	MLUtils.prepareData(D);
	int L = D.classIndex();
	double CD[][] = null;
	if (args[2].equals("L")) {
		String I = "I";
		// Fixed: reading args[3] requires at least 4 arguments; the previous
		// guard (args.length >= 3) threw ArrayIndexOutOfBoundsException when
		// exactly 3 arguments were supplied.
		if (args.length >= 4)
			I = args[3];
		CD = StatUtils.LEAD(D, new SMO(), new Random(), I);
	}
	else {
		CD = StatUtils.margDepMatrix(D,args[2]);
	}
	System.out.println(MatrixUtils.toString(CD, "M" + args[2]));
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/SuperLabel.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import meka.filters.multilabel.SuperNodeFilter;
import meka.core.A;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Enumeration;
/**
 * SuperLabel - A meta label is a label composed of multiple labels, e.g., [3,7], which can take multiple values, e.g., [[0,0],[0,1],[1,1]].
 * @author Jesse Read
 * @version March 2014
 */
public class SuperLabel {

	/** for serialization (NOTE(review): the class does not visibly implement Serializable -- confirm this field is still needed). */
	private static final long serialVersionUID = -6783833712552497991L;

	/** Label indices this super label is composed of, e.g., [1,0]. */
	public int indices[];
	/** The value combinations the indices can jointly take, e.g., [[0,0],[0,1],[1,1]]. */
	public int values[][];

	/**
	 * SuperLabel
	 * @param indices the component label indices
	 * @param values the allowed value combinations (one row per combination)
	 */
	public SuperLabel(int indices[], int values[][]) {
		this.indices = indices;
		this.values = values;
	}

	/** Drain an Enumeration of encoded value strings into a list. */
	private static ArrayList<String> getList(Enumeration<String> ve) {
		ArrayList<String> list = new ArrayList<String>();
		while (ve.hasMoreElements()) {
			list.add(ve.nextElement());
		}
		return list;
	}

	public SuperLabel(int indices[], Enumeration<String> ve) {
		this(indices,getList(ve));
	}

	/**
	 * Build from encoded value strings, decoding each via SuperNodeFilter into an int[] combination.
	 */
	public SuperLabel(int indices[], ArrayList<String> vlist) {
		this.indices = indices;
		this.values = new int[vlist.size()][indices.length];
		for(int i = 0; i < this.values.length; i++) {
			this.values[i] = A.toIntArray(SuperNodeFilter.decodeValue(vlist.get(i)));
		}
	}

	@Override
	public String toString() {
		// Fixed: use StringBuilder instead of repeated String concatenation
		// (the old loop was O(n^2) in the number of value combinations).
		StringBuilder sb = new StringBuilder();
		sb.append("INDICES ").append(Arrays.toString(indices)).append(", taking values in {");
		for(int i = 0; i < this.values.length; i++) {
			sb.append(" [").append(i).append("]:").append(Arrays.toString(values[i]));
		}
		sb.append(" }");
		return sb.toString();
	}

	// THE FOLLOWING CAME FROME SCC.java -- @TODO see if it is of any use.
	/**
	 * Super Class.
	class SuperClass {
		SuperClass index[] = null;
		int idx = -1;
		double prob = 1.0;
		public SuperClass() { // root
		}
		public SuperClass(int n) { // branch
			idx = n;
		}
		**
		 * AddNode - add a node with index 'j' that can take 'k' values.
		 * @param	n	node index
		 * @param	k	number of values
		 *
		public void addNode(int n, int k) {
			double p = 0.0;
			if (index != null) {
				for(int v = 0; v < index.length; v++) {
					index[v].addNode(n,k);
				}
			}
			else {
				index = new SuperClass[k];
				for(int v = 0; v < index.length; v++) {
					index[v] = new SuperClass(n); // p(x==v)
				}
			}
		}
		public void fillNodes(Instances D) {
			// @todo could be much faster by prebuffering arry of size of the depth of the tree
			fillNodes(new int[]{},new int[]{},D);
		}
		public void fillNodes(int indices[], int values[], Instances D) {
			//System.out.println("fillNodes("+Arrays.toString(indices)+","+Arrays.toString(values)+")");
			if (index == null) {
				// END, calculate the joint
				prob = StatUtils.P(D,indices,values);
				//System.out.println("we arrived with P("+Arrays.toString(indices)+" == "+Arrays.toString(values)+") = "+prob);
			}
			else {
				// GO DOWN
				// @todo could be faster by moving the add(indices,idx) outside of the v loop (but not by much!)
				for(int v = 0; v < index.length; v++) {
					index[v].fillNodes(A.add(indices,index[0].idx), A.add(values,v), D);
				}
			}
		}
		// the probability of 'path' in this factor
		// (only a part of the path may be relevant)
		public double p_path(int path[]) {
			//System.out.println(""+Arrays.toString(path));
			//System.out.println(""+idx);
			if (index==null) {
				return prob;
			}
			else {
				int i = index[0].idx; // 3 \in {1,...,L}
				int v = path[i]; // 0 \in {0,1,..,K}
				//System.out.println("take "+v+"th path");
				return index[v].p_path(path);
			}
		}
		public String toString() {
			return (index!=null) ? idx + "/" + index.length + "\n" + index[0].toString() : " = "+prob;
		}
	}
	*/
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/SuperLabelUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import meka.filters.multilabel.SuperNodeFilter;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Range;
import java.util.*;
/**
* SuperLabelUtils.java - Handy Utils for working with Meta Labels.
* <br>
* TODO call this class SuperClassUtils? SuperLabelUtils? Partition? PartitionUtils?
* @author Jesse Read
* @version March 2014
*/
public abstract class SuperLabelUtils {
/**
 * Get k subset - return a set of k label indices (of L possible labels).
 * The result is a uniformly random, sorted selection of k distinct indices from {0,..,L-1}.
 */
public static int[] get_k_subset(int L, int k, Random r) {
	// shuffle 0..L-1 and take the first k
	int all[] = A.make_sequence(L);
	A.shuffle(all, r);
	int subset[] = Arrays.copyOf(all, k);
	Arrays.sort(subset);
	return subset;
}
/**
 * Randomly seed 'num' groups with one (shuffled) label each, then place the
 * remaining labels.
 * NOTE(review): the pairwise matrix M is accepted but never used, and all the
 * remaining labels go to group 0 (the goesBestWith(..) selection is commented
 * out) -- this looks like an unfinished heuristic; confirm before relying on it.
 */
private static int[][] generatePartition(int num, double M[][], Random r) {
	int L = M.length;
	int indices[] = A.make_sequence(L);
	// shuffle indices
	A.shuffle(indices,r);
	ArrayList Y_meta[] = new ArrayList[num];
	// we have a minimum of 'num' groups
	for(int i = 0; i < num; i++) {
		Y_meta[i] = new ArrayList<Integer>();
		Y_meta[i].add(indices[i]);
	}
	// remaining labels: currently always appended to the first group
	for(int i = num; i < L; i++) {
		int idx = 0; //goesBestWith(i,Y_meta[i],M);
		Y_meta[idx].add(indices[i]);
	}
	return convertListArrayTo2DArray(Y_meta);
}
/**
 * generatePartition - return [[0],...,[L-1]] (every label in its own singleton set).
 * @param L number of labels
 * @return [[0],...,[L-1]]
 */
public static int[][] generatePartition(int L) {
	int singletons[][] = new int[L][];
	for (int label = 0; label < L; label++) {
		singletons[label] = new int[]{ label };
	}
	return singletons;
}
/**
 * generatePartition - random partition into a uniformly chosen number of sets.
 * @param indices [1,2,..,L]
 * @param r Random
 * @return partition
 */
public static int[][] generatePartition(int indices[], Random r) {
	int L = indices.length;
	// choose the number of sets uniformly from 1..L, then delegate
	return generatePartition(indices,r.nextInt(L)+1,r);
}
/**
 * Generate a random Partition.
 * <br>
 * TODO can generate 'indices' inside, given L
 * <br>
 * Get a random layout of 'num' sets of 'indices'.
 * <br>
 * Fixed: the documented r == null ("don't randomize") case previously threw a
 * NullPointerException when distributing the remaining indices; it now assigns
 * them round-robin instead.
 * @param indices [0,1,2,...,L-1]
 * @param num number of super-nodes to generate (between 1 and L)
 * @param r Random, if == null, then don't randomize
 * @return partition (each set sorted)
 */
public static int[][] generatePartition(int indices[], int num, Random r) {
	int L = indices.length;
	if (r != null)
		// shuffle indices
		A.shuffle(indices, r);
	// we have a minimum of 'num' groups, seeded with one index each
	ArrayList<Integer> selection[] = new ArrayList[num];
	for(int i = 0; i < num; i++) {
		selection[i] = new ArrayList<Integer>();
		selection[i].add(indices[i]);
	}
	// distribute the remaining indices (randomly, or round-robin when r == null)
	for(int i = num; i < L; i++) {
		int idx = (r != null) ? r.nextInt(num) : i % num;
		selection[idx].add(indices[i]);
	}
	// convert <int[]>List into an int[][] array
	int partition[][] = convertListArrayTo2DArray(selection);
	for(int part[] : partition) {
		Arrays.sort(part);
	}
	return partition;
}
/**
 * Generate Random Partition
 * @param indices label indices
 * @param num the number of partitions
 * @param r Random, if == null, then don't randomize
 * @param balanced indicate if balanced (same number of labels in each set) or not
 * @return SORTED partition
 */
public static int[][] generatePartition(int indices[], int num, Random r, boolean balanced) {
	if (!balanced)
		return generatePartition(indices,num,r);
	int L = indices.length;
	if (r != null)
		// shuffle indices
		A.shuffle(indices, r);
	int partition[][] = new int[num][];
	int k = L / num;   // base size of each set
	int e = L % num;   // the first e sets take one extra element
	int m = 0;         // cursor into indices[]
	for(int c = 0; c < num; c++) {
		if (c < e) {
			partition[c] = Arrays.copyOfRange(indices,m,m+k+1);
			m = m + k + 1;
		}
		else {
			partition[c] = Arrays.copyOfRange(indices,m,Math.min(L,m+k));
			m = m + k;
		}
		Arrays.sort(partition[c]);
	}
	return partition;
}
/**
 * Get Partition From Dataset Hierarchy - assumes attributes are hierarchically arranged with '.'.
 * For example europe.spain indicates leafnode spain of branch europe.
 * Labels sharing the same top-level prefix end up in the same (sorted) set.
 * @param D Dataset
 * @return partition
 */
public static final int[][] getPartitionFromDatasetHierarchy(Instances D) {
	HashMap<String,LabelSet> map = new HashMap<String,LabelSet>();
	int L = D.classIndex();
	for(int j = 0; j < L; j++) {
		// group key = attribute-name prefix before the first '.'
		String s = D.attribute(j).name().split("\\.")[0];
		LabelSet Y = map.get(s);
		if (Y==null)
			Y = new LabelSet(new int[]{j});
		else {
			Y.indices = A.append(Y.indices,j);
			Arrays.sort(Y.indices);
		}
		map.put(s, Y);
	}
	// flatten the grouped index sets into a partition array
	int partition[][] = new int[map.size()][];
	int i = 0;
	for(LabelSet part : map.values()) {
		partition[i++] = part.indices;
	}
	return partition;
}
/*
* Rating - Return a score for the super-class 'partition' using the pairwise info in 'M'.
* +1 if two co-ocurring labels are in different partitions.
* -1 if two co-ocurring labels are in different partitions.
* @param partition super-class partition, e.g., [[0,3],[2],[1,4]]
* @param countMap each LabelSet and its count
public static double scorePartition(int partition[][], HashMap<LabelSet,Integer> countMap) {
return 0.0;
}
public static double scorePartition(int partition[][], double M[][]) {
return 0.0;
}
*/
/**
 * Convert an array of Integer lists into a primitive int[][] (row i from listArray[i]).
 */
public static final int[][] convertListArrayTo2DArray(ArrayList<Integer> listArray[]) {
	// TODO try and do without this in the future.
	int array[][] = new int[listArray.length][];
	for (int row = 0; row < listArray.length; row++) {
		array[row] = A.toPrimitive(listArray[row]);
	}
	return array;
}
/**
 * ToString - A string representation for the super-class partition 'partition',
 * e.g., "{ [0, 3] [2] }".
 */
public static String toString(int partition[][]) {
	StringBuilder out = new StringBuilder("{");
	for (int part[] : partition) {
		out.append(' ').append(Arrays.toString(part));
	}
	return out.append(" }").toString();
}
/**
 * Make Partition Dataset - out of dataset D, on indices part[].
 * Delegates with P = N = 0, i.e., a plain LC transformation (no pruning, no subset replacement).
 * @param D regular multi-label dataset (of L = classIndex() labels)
 * @param part list of indices we want to make into an LP dataset.
 * @return Dataset with 1 multi-valued class label, representing the combinations of part[].
 */
public static Instances makePartitionDataset(Instances D, int part[]) throws Exception {
	return makePartitionDataset(D,part,0,0);
}
/**
 * Make Partition Dataset - out of dataset D, on indices part[].
 * @param D regular multi-label dataset (of L = classIndex() labels)
 * @param part list of indices we want to make into a PS dataset.
 * @param P see {@link PSUtils}
 * @param N see {@link PSUtils}
 * @return Dataset with 1 multi-valued class label, representing the combinations of part[].
 */
public static Instances makePartitionDataset(Instances D, int part[], int P, int N) throws Exception {
	int L = D.classIndex();
	Instances D_ = new Instances(D);
	// strip out irrelevant attributes
	D_.setClassIndex(-1);
	// NOTE(review): keepLabels is applied to the original D, so the copy above
	// (and its setClassIndex(-1)) appears to have no effect -- confirm whether
	// keepLabels(D_, ...) was intended.
	D_ = F.keepLabels(D,L,part);
	D_.setClassIndex(part.length);
	// make LC transformation
	D_ = PSUtils.PSTransformation(D_,P,N);
	return D_;
}
/*
* Make Partition Dataset - out of dataset D, on indices part[].
* @param D regular multi-label dataset (of L = classIndex() labels)
* @param part list of indices we want to make into a PS dataset.
* @param P see {@link PSUtils}
* @param N see {@link PSUtils}
* @return Dataset with 1 multi-valued class label, representing the combinations of part[].
public static Instances makePartitionDataset(Instances D, int part[], int P, int N) throws Exception {
int L = D.classIndex();
Instances D_ = new Instances(D);
// strip out irrelevant attributes
D_.setClassIndex(-1);
D_ = F.keepLabels(D,L,part);
D_.setClassIndex(part.length);
// encode the relevant indices into the class attribute name
Range r = new Range(Range.indicesToRangeList(part));
r.setUpper(L);
// make LC transformation
D_ = SuperNodeFilter.mergeLabels(D_, L, "c" + r.getRanges(), P, N);
return D_;
}
*/
/**
 * Returns a map of values for this multi-class Instances. For example, values <code>{[2,3,1], [1,0,1], [2,0,1]}</code>.
 * @param D_ multi-class Instances
 * @return a map where map[d] returns an int[] array of all values referred to by the d-th classification.
 */
public static int[][] extractValues(Instances D_) {
	int numVals = D_.classAttribute().numValues();
	int vMap[][] = new int[numVals][];
	for (int d = 0; d < numVals; d++) {
		// decode the d-th nominal class value into its int[] combination
		vMap[d] = MLUtils.toIntArray(D_.classAttribute().value(d));
	}
	return vMap;
}
/** Decode a string into a sparse list of indices; drops the leading "c_" prefix added by encodeClass. */
public static int[] decodeClass(String s) {
	return MLUtils.toIntArray(s.substring(2));
}
/** Encode a sparse list of indices to a string: "c_" prefixed to LabelSet's string form (inverse of decodeClass). */
public static String encodeClass(int c_[]) {
	return "c_"+(new LabelSet(c_).toString());
}
/** Decode an encoded value string back into a vector of integer values. */
public static int[] decodeValue(String s) {
	return MLUtils.toIntArray(s);
}
/**
 * Encode a vector of integer values to a string: reads the (int-cast) values of
 * x at attributes 'indices' and renders them via LabelVector.toString().
 */
public static String encodeValue(Instance x, int indices[]) {
	int values[] = new int[indices.length];
	for (int j = 0; j < indices.length; j++) {
		values[j] = (int)x.value(indices[j]);
	}
	return new LabelVector(values).toString();
}
/**
 * GetTopNSubsets - return the top N subsets which differ from y by at most a single
 * class value, ranked (descending) by the frequency stored in masterCombinations.
 * <br>
 * Fixed: the comparator's second branch repeated the same '&gt;' comparison, so it
 * could never return +1 -- an inconsistent ordering that violates the Comparator
 * contract (and can make Collections.sort throw IllegalArgumentException).
 * @param y the encoded target combination, e.g. "0+1+0"
 * @param masterCombinations combination -&gt; frequency
 * @param N maximum number of subsets to return
 * @return up to N neighbouring combinations, most frequent first
 */
public static String[] getTopNSubsets(String y, final HashMap <String,Integer>masterCombinations, int N) {
	String y_bits[] = y.split("\\+");
	// candidates: all known combinations within bit-difference 1 of y
	ArrayList<String> Y = new ArrayList<String>();
	for(String y_ : masterCombinations.keySet()) {
		if(MLUtils.bitDifference(y_bits,y_.split("\\+")) <= 1) {
			Y.add(y_);
		}
	}
	// sort by frequency, descending
	Collections.sort(Y,new Comparator<String>(){
		public int compare(String s1, String s2) {
			// @note this is just done by the count, @todo: could add further conditions
			return masterCombinations.get(s2).compareTo(masterCombinations.get(s1));
		}
	});
	String Y_strings[] = Y.toArray(new String[Y.size()]);
	return Arrays.copyOf(Y_strings,Math.min(N,Y_strings.length));
}
/**
 * Return a set of all the combinations of attributes at 'indices' in 'D', pruned by 'p';
 * AND THEIR COUNTS, e.g., {(00:3),(01:8),(11:3))}.
 * Combinations occurring too rarely are removed via MLUtils.pruneCountHashMap.
 */
public static HashMap<String,Integer> getCounts(Instances D, int indices[], int p) {
	HashMap<String,Integer> count = new HashMap<String,Integer>();
	int n = D.numInstances();
	for (int i = 0; i < n; i++) {
		String v = encodeValue(D.instance(i), indices);
		Integer prev = count.get(v);
		count.put(v, (prev == null) ? 1 : prev + 1);
	}
	MLUtils.pruneCountHashMap(count, p);
	return count;
}
/**
 * Super Label Transformation - transform dataset D into a dataset with <code>k</code> multi-class target attributes.
 * Use the NSR/PS-style pruning and recomposition, according to partition 'indices', and pruning values 'p' and 'n'.
 * <br>
 * Fixed: an instance whose combination was pruned for more than one super-label
 * was added to the delete list once per failure; deleting the same index twice
 * removed an unrelated (shifted) row. Each index is now recorded only once.
 * @see PSUtils.PSTransformation
 * @param indices m by k: m super variables, each relating to k original variables
 * @param D either multi-label or multi-target dataset
 * @param p pruning value
 * @param n subset relpacement value
 * @return a multi-target dataset
 */
public static Instances SLTransformation(Instances D, int indices[][], int p, int n) {
	int L = D.classIndex();
	int K = indices.length;
	ArrayList<String> values[] = new ArrayList[K];
	HashMap<String,Integer> counts[] = new HashMap[K];
	// create D_
	Instances D_ = new Instances(D);
	// remove the original label attributes (replaced below by K super-label attributes)
	for(int j = 0; j < L; j++) {
		D_.deleteAttributeAt(0);
	}
	// create one nominal attribute per super-label; its values are the (pruned) observed combinations
	for(int j = 0; j < K; j++) {
		int att[] = indices[j];
		counts[j] = getCounts(D,att,p);
		Set<String> vals = counts[j].keySet();
		values[j] = new ArrayList(vals);
		D_.insertAttributeAt(new Attribute(encodeClass(att),new ArrayList(vals)),j);
	}
	// copy over values
	ArrayList<Integer> deleteList = new ArrayList<Integer>();
	for(int i = 0; i < D.numInstances(); i++) {
		Instance x = D.instance(i);
		for(int j = 0; j < K; j++) {
			String y = encodeValue(x,indices[j]);
			try {
				D_.instance(i).setValue(j,y);
			} catch(Exception e) {
				// combination was pruned away: mark the row for deletion
				// (once only -- see the fix note above) ...
				if (!deleteList.contains(i))
					deleteList.add(i);
				// ... and re-add weighted copies using the top-n closest surviving combinations
				String y_close[] = getTopNSubsets(y, counts[j], n);
				for(int m = 0; m < y_close.length; m++) {
					Instance x_copy = (Instance)D_.instance(i).copy();
					x_copy.setValue(j,y_close[m]);
					x_copy.setWeight(1.0/y_close.length);
					D_.add(x_copy);
				}
			}
		}
	}
	// delete marked rows from the highest index down, so earlier indices stay valid
	Collections.sort(deleteList, Collections.reverseOrder());
	for (int i : deleteList) {
		D_.delete(i);
	}
	// set class
	D_.setClassIndex(K);
	// done!
	return D_;
}
/**
 * Collapse per-label vote maps into a prediction vector by taking, for each label,
 * the key with the maximum vote weight.
 * NOTE(review): the winning key is cast Integer -&gt; double, i.e. votes are assumed
 * to be keyed by discrete label values -- confirm against callers.
 */
public static double[] convertVotesToDistributionForInstance(HashMap<Integer,Double> votes[]) {
	int L = votes.length;
	double y[] = new double[L];
	for(int j = 0; j < L; j++) {
		y[j] = (Integer)MLUtils.maxItem(votes[j]);
	}
	return y;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/SystemInfo.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* SystemInfo.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.core;
import java.util.Hashtable;
/**
* Gathers information about the system environment.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class SystemInfo
extends weka.core.SystemInfo {
/**
* returns a copy of the system info. the key is the name of the property and
* the associated object is the value of the property (a string).
*/
public Hashtable<String, String> getSystemInfo() {
Hashtable<String, String> result;
result = super.getSystemInfo();
result.put("Meka home dir", Project.getHome().getAbsolutePath());
return result;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/ThreadLimiter.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* ThreadLimiter.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.core;
/**
* Interface for classes that allow limiting the number of threads in use.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public interface ThreadLimiter {

  /**
   * Sets how many threads may be used.
   *
   * @param value the thread count; -1 means one thread per available CPU/core
   */
  void setNumThreads(int value);

  /**
   * Returns how many threads may be used.
   *
   * @return the thread count; -1 means one thread per available CPU/core
   */
  int getNumThreads();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/ThreadUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* ThreadUtils.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.core;
/**
* Thread and multi-process related methods.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class ThreadUtils {

  /** whether to use all available cores. */
  public final static int ALL = -1;

  /** the number of threads used to be considered sequential. */
  public final static int SEQUENTIAL = 1;

  /** utility class; not meant to be instantiated. */
  private ThreadUtils() {
  }

  /**
   * Returns the available number of processors on the machine.
   *
   * @return the number of processors
   */
  public static int getAvailableProcessors() {
    return Runtime.getRuntime().availableProcessors();
  }

  /**
   * Calculates the number of threads to use.
   *
   * NOTE(review): when {@link #ALL} is requested, {@code maxThreads} is not
   * applied before clamping to the core count — this matches the historical
   * behavior; confirm before changing.
   *
   * @param numThreads the requested number of threads (-1 for # of cores/cpus)
   * @param maxThreads the maximum to ask for
   * @return the actual number of threads to use, (1 = single thread)
   */
  public static int getActualNumThreads(int numThreads, int maxThreads) {
    int result;
    if (numThreads == ALL)
      result = getAvailableProcessors();
    else if (numThreads > SEQUENTIAL)
      result = Math.min(numThreads, maxThreads);
    else
      result = SEQUENTIAL;
    // never exceed the physically available cores
    if (result > getAvailableProcessors())
      result = getAvailableProcessors();
    return result;
  }

  /**
   * Returns whether the number of threads represent a multi-threaded setup.
   *
   * @param numThreads the number of threads
   * @return true if multi-threaded
   */
  public static boolean isMultiThreaded(int numThreads) {
    return getActualNumThreads(numThreads, getAvailableProcessors()) != SEQUENTIAL;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/ThresholdUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package meka.core;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
/**
* ThresholdUtils - Helpful functions for calibrating thresholds.
* @author Jesse Read (jesse@tsc.uc3m.es)
* @version March 2013
*/
public abstract class ThresholdUtils {

  /**
   * ThresholdStringToArray - parse a threshold option string to an array of L
   * thresholds (one for each label variable).
   *
   * @param top either a bracketed vector "[t1 t2 ... tL]" or a single number "t"
   * @param L the number of labels
   * @return an array of L thresholds
   */
  public static double[] thresholdStringToArray(String top, int L) {
    if (top.startsWith("[")) {
      // explicit threshold vector [t1 t2 ... tL]
      return MLUtils.toDoubleArray(top);
    }
    else {
      // a single value: replicate it into a threshold vector [t t t ... t]
      double t[] = new double[L];
      Arrays.fill(t, Double.parseDouble(top));
      return t;
    }
  }

  /**
   * CalibrateThreshold - Calibrate a threshold using PCut: the threshold which
   * results in the best approximation of the label cardinality of the training
   * set.
   *
   * @param Y labels (prediction confidences in [0,1], one array per instance)
   * @param LC_train label cardinality of the training set
   * @return the calibrated threshold
   */
  public static double calibrateThreshold(ArrayList<double[]> Y, double LC_train) {
    if (Y.size() <= 0)
      return 0.5;
    int N = Y.size();
    // flatten all confidence values into one sorted list
    ArrayList<Double> big = new ArrayList<Double>();
    for (double y[] : Y) {
      for (double y_ : y) {
        big.add(y_);
      }
    }
    Collections.sort(big);
    if (big.size() == 1) {
      // a single confidence value: cut at that value (guards the indexing below)
      return Math.max(big.get(0), 0.00001);
    }
    // index of the cut point: everything above it would be predicted relevant
    int i = big.size() - (int) Math.round(LC_train * (double) N);
    if (N == big.size()) { // special cases (exactly one value per instance)
      if (i + 1 == N) // only one value above the cut: midpoint of the two largest
        // FIX: was 'big.get(N-2)+big.get(N-1)/2.0' — the division bound tighter
        // than the addition, so the intended midpoint was never computed
        return (big.get(N - 2) + big.get(N - 1)) / 2.0;
      if (i + 1 >= N) // nothing above the cut
        return 1.0;
      else {
        int j = Math.max(i, 0); // guard against LC_train values > 1
        return Math.max((big.get(j) + big.get(j + 1)) / 2.0, 0.00001);
      }
    }
    // midpoint between big[lo] and its successor, with indices clamped into
    // range. FIX: the original used big.get(Math.max(i+1, N-1)), which indexes
    // past the end of the list whenever there is more than one label per
    // instance (big.size() > N).
    int lo = Math.max(0, Math.min(i, big.size() - 1));
    int hi = Math.min(lo + 1, big.size() - 1);
    return Math.max((big.get(lo) + big.get(hi)) / 2.0, 0.00001);
  }

  /**
   * CalibrateThreshold - Calibrate a vector of thresholds (one for each label)
   * using PCut: the threshold t[j] which results in the best approximation of
   * the frequency of the j-th label in the training data.
   *
   * @param Y labels
   * @param LC_train average frequency of each label
   * @return one calibrated threshold per label
   */
  public static double[] calibrateThresholds(ArrayList<double[]> Y, double LC_train[]) {
    int L = LC_train.length;
    double t[] = new double[L];
    // generic array creation is unavoidable here; elements are assigned below
    @SuppressWarnings("unchecked")
    ArrayList<double[]> Y_[] = new ArrayList[L];
    for (int j = 0; j < L; j++) {
      Y_[j] = new ArrayList<double[]>();
    }
    // split the predictions column-wise, one list per label
    for (double y[] : Y) {
      for (int j = 0; j < L; j++) {
        Y_[j].add(new double[]{y[j]});
      }
    }
    for (int j = 0; j < L; j++) {
      t[j] = calibrateThreshold(Y_[j], LC_train[j]);
    }
    return t;
  }

  /**
   * Threshold - returns the labels after the prediction-confidence vector is
   * passed through a vector of thresholds.
   *
   * @param Rpred label confidence predictions in [0,1]
   * @param t threshold for each label
   * @return 0/1 label matrix
   */
  public static final int[][] threshold(double Rpred[][], double t[]) {
    int Ypred[][] = new int[Rpred.length][Rpred[0].length];
    for (int i = 0; i < Rpred.length; i++) {
      for (int j = 0; j < Rpred[i].length; j++) {
        Ypred[i][j] = (Rpred[i][j] >= t[j]) ? 1 : 0;
      }
    }
    return Ypred;
  }

  /**
   * Threshold - returns the labels after the prediction-confidence vector is
   * passed through threshold.
   *
   * @param Rpred label confidence predictions in [0,1]
   * @param t threshold
   * @return 0/1 label matrix
   */
  public static final int[][] threshold(double Rpred[][], double t) {
    int Ypred[][] = new int[Rpred.length][Rpred[0].length];
    for (int i = 0; i < Rpred.length; i++) {
      for (int j = 0; j < Rpred[i].length; j++) {
        Ypred[i][j] = (Rpred[i][j] >= t) ? 1 : 0;
      }
    }
    return Ypred;
  }

  /**
   * Threshold - returns the labels after the prediction-confidence vector is
   * passed through threshold(s).
   *
   * @param rpred label confidence predictions in [0,1]
   * @param ts threshold String (single value or bracketed vector)
   * @return 0/1 label vector
   */
  public static final int[] threshold(double rpred[], String ts) {
    int L = rpred.length;
    double t[] = thresholdStringToArray(ts, L);
    int ypred[] = new int[L];
    for (int j = 0; j < L; j++) {
      ypred[j] = (rpred[j] >= t[j]) ? 1 : 0;
    }
    return ypred;
  }

  /**
   * Round - simply round numbers (e.g., 2.0 to 2) -- for multi-target data
   * (where we don't *yet* use a threshold).
   *
   * @param Rpred class predictions in [0,1,...,K]
   * @return integer representation of the predictions
   */
  public static final int[][] round(double Rpred[][]) {
    int Ypred[][] = new int[Rpred.length][Rpred[0].length];
    for (int i = 0; i < Rpred.length; i++) {
      for (int j = 0; j < Rpred[i].length; j++) {
        Ypred[i][j] = (int) Math.round(Rpred[i][j]);
      }
    }
    return Ypred;
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/Version.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* Version.java
* Copyright (C) 2015-2016 University of Waikato, Hamilton, NZ
*/
package meka.core;
import java.io.InputStream;
/**
* For handling the MEKA version.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class Version {

  /** the classpath resource holding the version string. */
  public final static String FILENAME = "meka/core/version.txt";

  /**
   * Returns the version of MEKA, as read from the version resource on the
   * classpath. Falls back to "?.?.?" if the resource is missing or unreadable.
   *
   * @return the version string
   */
  public static String getVersion() {
    StringBuilder result = new StringBuilder();
    // try-with-resources ensures the stream is closed (the original leaked it);
    // a missing resource yields null rather than an exception, so check it
    try (InputStream input = ClassLoader.getSystemResourceAsStream(FILENAME)) {
      if (input == null) {
        result.append("?.?.?");
      }
      else {
        int c;
        while ((c = input.read()) != -1) {
          result.append((char) c);
        }
      }
    }
    catch (Exception e) {
      // discard any partially-read content before emitting the fallback
      result.setLength(0);
      result.append("?.?.?");
    }
    return result.toString();
  }

  /**
   * Outputs the version on stdout.
   *
   * @param args ignored
   */
  public static void main(String[] args) {
    System.out.println(getVersion());
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/converters/MultiLabelTextDirectoryLoader.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* MultiLabelTextDirectoryLoader.java
* Copyright (C) 2006-2017 University of Waikato, Hamilton, New Zealand
*
*/
package meka.core.converters;
import weka.core.Attribute;
import weka.core.CommandlineRunnable;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.converters.AbstractLoader;
import weka.core.converters.BatchConverter;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
/**
<!-- globalinfo-start -->
* Loads text files in a directory.<br>
* First sub-directory level is used for the class attribute names, the second level for the labels for each class attribute.<br>
* Due to the multi-label nature, documents need to be copied into multiple sub-directories. The loader uses the file's name (no path) as a unique ID to identify the copies.<br>
* <br>
* Example:<br>
* /text-dataset<br>
* /class1<br>
* /0<br>
* 3.txt<br>
* 5.txt<br>
* /1<br>
* 1.txt<br>
* 2.txt<br>
* 4.txt<br>
* /class2<br>
* /0<br>
* 1.txt<br>
* 4.txt<br>
* /1<br>
* 2.txt<br>
* 3.txt<br>
* 5.txt<br>
* <br>
* Will generate something like this:<br>
* <br>
* @relation 'example: -C 2'<br>
* <br>
* @attribute @@class-class1@@ {0,1}<br>
* @attribute @@class-class2@@ {0,1}<br>
* @attribute file-ID string<br>
* @attribute text string<br>
* <br>
* @data<br>
* 1,0,1.txt,'file 1\n'<br>
* 1,1,2.txt,'file 2\n'<br>
* 0,1,3.txt,'file 3\n'<br>
* 1,0,4.txt,'file 4\n'<br>
* 0,1,5.txt,'file 5\n'<br>
* <br><br>
<!-- globalinfo-end -->
*
<!-- options-start -->
* Valid options are: <p>
*
* <pre> -D
* Enables debug output.
* (default: off)</pre>
*
* <pre> -F
* Stores the filename in an additional attribute.
* (default: off)</pre>
*
* <pre> -dir <directory>
* The directory to work on.
* (default: current directory)</pre>
*
* <pre> -charset <charset name>
* The character set to use, e.g UTF-8.
* (default: use the default character set)</pre>
*
<!-- options-end -->
*
* Based on code from Weka's TextDirectoryLoader
*
* @author fracpete (fracpete at waikato dot ac dot nz)
*/
public class MultiLabelTextDirectoryLoader
  extends AbstractLoader
  implements BatchConverter, OptionHandler, CommandlineRunnable {

  /** for serialization */
  private static final long serialVersionUID = 2592118773712247647L;

  /** name of the (optional) string attribute holding the file name/ID. */
  public static final String FILE_ID = "file-ID";

  /** name of the string attribute holding the document text. */
  public static final String TEXT = "text";

  /** Holds the determined structure (header) of the data set. */
  protected Instances m_structure = null;

  /** Holds the source of the data set. */
  protected File m_sourceFile = new File(System.getProperty("user.dir"));

  /** whether to print some debug information */
  protected boolean m_Debug = false;

  /** whether to include the filename as an extra attribute */
  protected boolean m_OutputFilename = false;

  /**
   * The charset to use when loading text files (default is to just use the
   * default charset).
   */
  protected String m_charSet = "";

  /**
   * default constructor
   */
  public MultiLabelTextDirectoryLoader() {
    // No instances retrieved yet
    setRetrieval(NONE);
  }

  /**
   * Returns a string describing this loader
   *
   * @return a description of the evaluator suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String globalInfo() {
    return
      "Loads text files in a directory.\n"
      + "First sub-directory level is used for the class attribute names, "
      + "the second level for the labels for each class attribute.\n"
      + "Due to the multi-label nature, documents need to be copied "
      + "into multiple sub-directories. The loader uses the file's name "
      + "(no path) as a unique ID to identify the copies.\n"
      + "\n"
      + "Example:\n"
      + "/text-dataset\n"
      + "  /class1\n"
      + "    /0\n"
      + "      3.txt\n"
      + "      5.txt\n"
      + "    /1\n"
      + "      1.txt\n"
      + "      2.txt\n"
      + "      4.txt\n"
      + "  /class2\n"
      + "    /0\n"
      + "      1.txt\n"
      + "      4.txt\n"
      + "    /1\n"
      + "      2.txt\n"
      + "      3.txt\n"
      + "      5.txt\n"
      + "\n"
      + "Will generate something like this:\n\n"
      + "@relation 'example: -C 2'\n"
      + "\n"
      + "@attribute @@class-class1@@ {0,1}\n"
      + "@attribute @@class-class2@@ {0,1}\n"
      + "@attribute file-ID string\n"
      + "@attribute text string\n"
      + "\n"
      + "@data\n"
      + "1,0,1.txt,'file 1\\n'\n"
      + "1,1,2.txt,'file 2\\n'\n"
      + "0,1,3.txt,'file 3\\n'\n"
      + "1,0,4.txt,'file 4\\n'\n"
      + "0,1,5.txt,'file 5\\n'\n";
  }

  /**
   * Lists the available options
   *
   * @return an enumeration of the available options
   */
  @Override
  public Enumeration<Option> listOptions() {
    Vector<Option> result = new Vector<Option>();
    result.add(new Option("\tEnables debug output.\n" + "\t(default: off)",
      "D", 0, "-D"));
    result.add(new Option("\tStores the filename in an additional attribute.\n"
      + "\t(default: off)", "F", 0, "-F"));
    result.add(new Option("\tThe directory to work on.\n"
      + "\t(default: current directory)", "dir", 0, "-dir <directory>"));
    result.add(new Option("\tThe character set to use, e.g UTF-8.\n\t"
      + "(default: use the default character set)", "charset", 1,
      "-charset <charset name>"));
    return result.elements();
  }

  /**
   * Parses a given list of options: -D (debug), -F (store filename),
   * -dir &lt;directory&gt;, -charset &lt;charset name&gt;.
   *
   * @param options the options
   * @throws Exception if options cannot be set
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    setDebug(Utils.getFlag("D", options));
    setOutputFilename(Utils.getFlag("F", options));
    setDirectory(new File(Utils.getOption("dir", options)));
    String charSet = Utils.getOption("charset", options);
    m_charSet = "";
    if (charSet.length() > 0) {
      m_charSet = charSet;
    }
  }

  /**
   * Gets the setting
   *
   * @return the current setting
   */
  @Override
  public String[] getOptions() {
    Vector<String> options = new Vector<String>();
    if (getDebug()) {
      options.add("-D");
    }
    if (getOutputFilename()) {
      options.add("-F");
    }
    options.add("-dir");
    options.add(getDirectory().getAbsolutePath());
    if (m_charSet != null && m_charSet.length() > 0) {
      options.add("-charset");
      options.add(m_charSet);
    }
    return options.toArray(new String[options.size()]);
  }

  /**
   * the tip text for this property
   *
   * @return the tip text
   */
  public String charSetTipText() {
    return "The character set to use when reading text files (eg UTF-8) - leave"
      + " blank to use the default character set.";
  }

  /**
   * Set the character set to use when reading text files (an empty string
   * indicates that the default character set will be used).
   *
   * @param charSet the character set to use.
   */
  public void setCharSet(String charSet) {
    m_charSet = charSet;
  }

  /**
   * Get the character set to use when reading text files. An empty string
   * indicates that the default character set will be used.
   *
   * @return the character set name to use (or empty string to indicate that the
   *         default character set will be used).
   */
  public String getCharSet() {
    return m_charSet;
  }

  /**
   * Sets whether to print some debug information.
   *
   * @param value if true additional debug information will be printed.
   */
  public void setDebug(boolean value) {
    m_Debug = value;
  }

  /**
   * Gets whether additional debug information is printed.
   *
   * @return true if additional debug information is printed
   */
  public boolean getDebug() {
    return m_Debug;
  }

  /**
   * the tip text for this property
   *
   * @return the tip text
   */
  public String debugTipText() {
    return "Whether to print additional debug information to the console.";
  }

  /**
   * Sets whether the filename will be stored as an extra attribute.
   *
   * @param value if true the filename will be stored in an extra attribute
   */
  public void setOutputFilename(boolean value) {
    m_OutputFilename = value;
    reset();
  }

  /**
   * Gets whether the filename will be stored as an extra attribute.
   *
   * @return true if the filename is stored in an extra attribute
   */
  public boolean getOutputFilename() {
    return m_OutputFilename;
  }

  /**
   * the tip text for this property
   *
   * @return the tip text
   */
  public String outputFilenameTipText() {
    return "Whether to store the filename in an additional attribute.";
  }

  /**
   * Returns a description of the file type, actually it's directories.
   *
   * @return a short file description
   */
  public String getFileDescription() {
    return "Directories";
  }

  /**
   * get the Dir specified as the source
   *
   * @return the source directory
   */
  public File getDirectory() {
    return new File(m_sourceFile.getAbsolutePath());
  }

  /**
   * sets the source directory
   *
   * @param dir the source directory
   * @throws IOException if an error occurs
   */
  public void setDirectory(File dir) throws IOException {
    setSource(dir);
  }

  /**
   * Resets the loader ready to read a new data set
   */
  @Override
  public void reset() {
    m_structure = null;
    setRetrieval(NONE);
  }

  /**
   * Resets the Loader object and sets the source of the data set to be the
   * supplied File object.
   *
   * @param dir the source directory.
   * @throws IOException if an error occurs
   */
  @Override
  public void setSource(File dir) throws IOException {
    reset();
    if (dir == null) {
      throw new IOException("Source directory object is null!");
    }
    m_sourceFile = dir;
    if (!dir.exists() || !dir.isDirectory()) {
      throw new IOException("Directory '" + dir + "' not found");
    }
  }

  /**
   * Generates a class attribute name which should avoid clashes with
   * attribute names generated by StringToWordVector filter.
   *
   * @param name the name
   * @return the generated name
   */
  protected String createClassAttributeName(String name) {
    return "@@class-" + name + "@@";
  }

  /**
   * Determines and returns (if possible) the structure (internally the header)
   * of the data set as an empty set of instances. The first sub-directory
   * level supplies class attribute names, the second the labels.
   *
   * @return the structure of the data set as an empty set of Instances
   * @throws IOException if an error occurs
   */
  @Override
  public Instances getStructure() throws IOException {
    if (getDirectory() == null) {
      throw new IOException("No directory/source has been specified");
    }
    // determine class labels, i.e., sub-dirs
    if (m_structure == null) {
      ArrayList<Attribute> atts = new ArrayList<Attribute>();
      ArrayList<String> classes = new ArrayList<String>();
      HashMap<String, ArrayList<String>> classLabels = new HashMap<String, ArrayList<String>>();
      // iterate class attributes
      // NOTE(review): File.list() can return null if the directory vanished
      // after setSource() validated it — TODO confirm whether a guard is wanted
      String[] subdirs = getDirectory().list();
      for (String subdir2 : subdirs) {
        File subdir = new File(getDirectory().getAbsolutePath() + File.separator + subdir2);
        if (subdir.isDirectory()) {
          classes.add(subdir2);
          classLabels.put(subdir2, new ArrayList<String>());
          // iterate class labels
          String[] subsubdirs = subdir.list();
          for (String subsubdir2 : subsubdirs) {
            File subsubdir = new File(subdir.getAbsolutePath() + File.separator + subsubdir2);
            if (subsubdir.isDirectory())
              classLabels.get(subdir2).add(subsubdir2);
          }
        }
      }
      // make sure that the names of the class attributes are unlikely to
      // clash with any attribute created via the StringToWordVector filter
      List<String> classesSorted = new ArrayList<String>(classLabels.keySet());
      Collections.sort(classesSorted);
      for (String key : classesSorted) {
        Collections.sort(classLabels.get(key));
        atts.add(new Attribute(createClassAttributeName(key), classLabels.get(key)));
      }
      if (m_OutputFilename)
        atts.add(new Attribute(FILE_ID, (ArrayList<String>) null));
      atts.add(new Attribute(TEXT, (ArrayList<String>) null));
      String relName = getDirectory().getAbsolutePath().replaceAll("/", "_");
      relName = relName.replaceAll("\\\\", "_").replaceAll(":", "_");
      m_structure = new Instances(relName + ": -C " + classesSorted.size(), atts, 0);
      m_structure.setClassIndex(m_structure.numAttributes() - 1);
    }
    return m_structure;
  }

  /**
   * Return the full data set. If the structure hasn't yet been determined by a
   * call to getStructure then method should do so before processing the rest of
   * the data set.
   *
   * @return the structure of the data set as an empty set of Instances
   * @throws IOException if there is no source or parsing fails
   */
  @Override
  public Instances getDataSet() throws IOException {
    if (getDirectory() == null) {
      throw new IOException("No directory/source has been specified");
    }
    Instances data = getStructure();
    // ID -> file
    Map<String, File> fileIDs = new HashMap<String, File>();
    // class attr -> label -> files
    Map<String, Map<String, Set<String>>> files = new HashMap<String, Map<String, Set<String>>>();
    // iterate class attributes
    String[] subdirs = getDirectory().list();
    for (String subdir2 : subdirs) {
      File subdir = new File(getDirectory().getAbsolutePath() + File.separator + subdir2);
      if (subdir.isDirectory()) {
        files.put(subdir2, new HashMap<String, Set<String>>());
        // iterate class labels
        String[] subsubdirs = subdir.list();
        for (String subsubdir2 : subsubdirs) {
          File subsubdir = new File(subdir.getAbsolutePath() + File.separator + subsubdir2);
          if (subsubdir.isDirectory()) {
            files.get(subdir2).put(subsubdir2, new HashSet<String>());
            // iterate files for label
            File[] labelFiles = subsubdir.listFiles();
            for (File labelFile : labelFiles) {
              if (!fileIDs.containsKey(labelFile.getName()))
                fileIDs.put(labelFile.getName(), labelFile);
              files.get(subdir2).get(subsubdir2).add(labelFile.getName());
            }
          }
        }
      }
    }
    List<String> fileIDsSorted = new ArrayList<>(fileIDs.keySet());
    Collections.sort(fileIDsSorted);
    for (String id : fileIDsSorted) {
      try {
        double[] values = new double[data.numAttributes()];
        Arrays.fill(values, Utils.missingValue());
        int index;
        // text: read the whole file; try-with-resources closes the reader
        // even on failure (FIX: the original never closed it, leaking a file
        // handle per document)
        File txt = fileIDs.get(id);
        StringBuilder txtStr = new StringBuilder();
        try (BufferedReader is = ((m_charSet == null) || (m_charSet.length() == 0))
            ? new BufferedReader(new InputStreamReader(new FileInputStream(txt)))
            : new BufferedReader(new InputStreamReader(new FileInputStream(txt), m_charSet))) {
          int c;
          while ((c = is.read()) != -1)
            txtStr.append((char) c);
        }
        index = data.attribute(TEXT).index();
        values[index] = data.attribute(index).addStringValue(txtStr.toString());
        // ID
        if (m_OutputFilename) {
          index = data.attribute(FILE_ID).index();
          values[index] = data.attribute(index).addStringValue(id);
        }
        // class attributes: the label whose directory contained this file
        for (String cls : files.keySet()) {
          Attribute att = data.attribute(createClassAttributeName(cls));
          index = att.index();
          for (String lbl : files.get(cls).keySet()) {
            if (files.get(cls).get(lbl).contains(id)) {
              values[index] = att.indexOfValue(lbl);
              break;
            }
          }
        }
        // add instance
        Instance inst = new DenseInstance(1.0, values);
        data.add(inst);
      }
      catch (Exception e) {
        System.err.println("Failed to process file: " + id + ", " + fileIDs.get(id));
        e.printStackTrace();
      }
    }
    return data;
  }

  /**
   * Process input directories/files incrementally.
   *
   * @param structure ignored
   * @return never returns without throwing an exception
   * @throws IOException if a problem occurs
   */
  @Override
  public Instance getNextInstance(Instances structure) throws IOException {
    throw new IOException("MultiLabelTextDirectoryLoader can't read data sets incrementally.");
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 12184 $");
  }

  /**
   * Perform any setup stuff that might need to happen before commandline
   * execution. Subclasses should override if they need to do something here
   *
   * @throws Exception if a problem occurs during setup
   */
  @Override
  public void preExecution() throws Exception {
  }

  /**
   * Perform any teardown stuff that might need to happen after execution.
   * Subclasses should override if they need to do something here
   *
   * @throws Exception if a problem occurs during teardown
   */
  @Override
  public void postExecution() throws Exception {
  }

  /**
   * Execute the supplied object.
   *
   * @param toRun the object to execute
   * @param args any options to pass to the object
   * @throws IllegalArgumentException if the object is not a MultiLabelTextDirectoryLoader
   */
  @Override
  public void run(Object toRun, String[] args) throws IllegalArgumentException {
    if (!(toRun instanceof MultiLabelTextDirectoryLoader))
      throw new IllegalArgumentException("Object to execute is not a MultiLabelTextDirectoryLoader!");
    MultiLabelTextDirectoryLoader loader = (MultiLabelTextDirectoryLoader) toRun;
    if (args.length > 0) {
      try {
        loader.setOptions(args);
        System.out.println(loader.getDataSet());
      }
      catch (Exception e) {
        System.err.println("Failed to set options: " + Utils.arrayToString(args));
        e.printStackTrace();
      }
    }
    else {
      System.err.println("\nUsage:\n" + "\tMultiLabelTextDirectoryLoader [options]\n" + "\n" + "Options:\n");
      Enumeration<Option> enm = new MultiLabelTextDirectoryLoader().listOptions();
      while (enm.hasMoreElements()) {
        Option option = enm.nextElement();
        System.err.println(option.synopsis());
        System.err.println(option.description());
      }
      System.err.println();
    }
  }

  /**
   * Main method.
   *
   * @param args should contain the name of an input file.
   */
  public static void main(String[] args) {
    MultiLabelTextDirectoryLoader loader = new MultiLabelTextDirectoryLoader();
    loader.run(loader, args);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/multisearch/MekaEvaluationFactory.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* MekaEvaluationFactory.java
* Copyright (C) 2017 University of Waikato, Hamilton, NZ
*/
package meka.core.multisearch;
import meka.core.Result;
import weka.classifiers.meta.multisearch.AbstractEvaluationFactory;
import weka.classifiers.meta.multisearch.MultiSearchCapable;
import weka.core.Instances;
import weka.core.SetupGenerator;
import weka.core.setupgenerator.Point;
/**
* Meka factory.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class MekaEvaluationFactory
  extends AbstractEvaluationFactory<MekaEvaluationMetrics, MekaEvaluationWrapper, MekaEvaluationTask, Result> {

  private static final long serialVersionUID = -7535032839072532838L;

  /**
   * Creates a fresh metrics instance.
   *
   * @return a new {@link MekaEvaluationMetrics}
   */
  @Override
  public MekaEvaluationMetrics newMetrics() {
    return new MekaEvaluationMetrics();
  }

  /**
   * Wraps the given evaluation result together with a fresh metrics instance.
   *
   * @param eval the evaluation to wrap
   * @return the wrapper around the evaluation
   */
  @Override
  public MekaEvaluationWrapper newWrapper(Result eval) {
    MekaEvaluationMetrics metrics = newMetrics();
    return new MekaEvaluationWrapper(eval, metrics);
  }

  /**
   * Creates a new evaluation task for the given search setup.
   *
   * @param owner the owning search
   * @param train the training data
   * @param test the test data
   * @param generator the setup generator
   * @param values the parameter point to evaluate
   * @param folds the number of folds
   * @param eval the evaluation type
   * @param classLabel the class label index (0-based; if applicable)
   * @return the task to execute
   */
  @Override
  public MekaEvaluationTask newTask(MultiSearchCapable owner, Instances train, Instances test, SetupGenerator generator, Point<Object> values, int folds, int eval, int classLabel) {
    return new MekaEvaluationTask(owner, train, test, generator, values, folds, eval, classLabel);
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/multisearch/MekaEvaluationMetrics.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* MekaEvaluationMetrics.java
* Copyright (C) 2017 University of Waikato, Hamilton, NZ
*/
package meka.core.multisearch;
import weka.classifiers.meta.multisearch.AbstractEvaluationMetrics;
import weka.core.Tag;
/**
* Default metrics.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class MekaEvaluationMetrics
    extends AbstractEvaluationMetrics {

    private static final long serialVersionUID = 8549253661958964524L;

    /** evaluation via: Accuracy. */
    public static final int EVALUATION_ACC = 0;
    /** evaluation via: Jaccard index. */
    public static final int EVALUATION_JACCARDINDEX = 1;
    /** evaluation via: Hamming score. */
    public static final int EVALUATION_HAMMINGSCORE = 2;
    /** evaluation via: Exact match. */
    public static final int EVALUATION_EXACTMATCH = 3;
    /** evaluation via: Jaccard distance. */
    public static final int EVALUATION_JACCARDDISTANCE = 4;
    /** evaluation via: Hamming loss. */
    public static final int EVALUATION_HAMMINGLOSS = 5;
    /** evaluation via: ZeroOne loss. */
    public static final int EVALUATION_ZEROONELOSS = 6;
    /** evaluation via: Harmonic score. */
    public static final int EVALUATION_HARMONICSCORE = 7;
    /** evaluation via: One error. */
    public static final int EVALUATION_ONEERROR = 8;
    /** evaluation via: Rank loss. */
    public static final int EVALUATION_RANKLOSS = 9;
    /** evaluation via: Avg precision. */
    public static final int EVALUATION_AVGPRECISION = 10;
    /** evaluation via: Log Loss (lim. L). */
    public static final int EVALUATION_LOGLOSSLIML = 11;
    /** evaluation via: Log Loss (lim. D). */
    public static final int EVALUATION_LOGLOSSLIMD = 12;
    /** evaluation via: F1 (micro averaged). */
    public static final int EVALUATION_F1MICRO = 13;
    /** evaluation via: F1 (macro averaged by example). */
    public static final int EVALUATION_F1MACROEXAMPLE = 14;
    /** evaluation via: F1 (macro averaged by label). */
    public static final int EVALUATION_F1MACROLABEL = 15;
    /** evaluation via: AUPRC (macro averaged). */
    public static final int EVALUATION_AUPRCMACRO = 16;
    /** evaluation via: AUROC (macro averaged). */
    public static final int EVALUATION_AUROCMACRO = 17;
    /** evaluation via: Label cardinality (predicted). */
    public static final int EVALUATION_LABELCARDINALITY = 18;
    /** evaluation via: Levenshtein distance. */
    public static final int EVALUATION_LEVENSHTEINDISTANCE = 19;

    /** the tags for all supported evaluation metrics; the Tag IDs must match the
     * EVALUATION_* constants above, since they are used for lookups elsewhere. */
    protected static final Tag[] TAGS_EVALUATION = {
        new Tag(EVALUATION_ACC, "ACC", "Accuracy"),
        new Tag(EVALUATION_JACCARDINDEX, "JIDX", "Jaccard index"),
        new Tag(EVALUATION_HAMMINGSCORE, "HSCORE", "Hamming score"),
        new Tag(EVALUATION_EXACTMATCH, "EM", "Exact match"),
        new Tag(EVALUATION_JACCARDDISTANCE, "JDIST", "Jaccard distance"),
        new Tag(EVALUATION_HAMMINGLOSS, "HLOSS", "Hamming loss"),
        new Tag(EVALUATION_ZEROONELOSS, "ZOLOSS", "ZeroOne loss"),
        new Tag(EVALUATION_HARMONICSCORE, "HARSCORE", "Harmonic score"),
        new Tag(EVALUATION_ONEERROR, "OE", "One error"),
        new Tag(EVALUATION_RANKLOSS, "RLOSS", "Rank loss"),
        new Tag(EVALUATION_AVGPRECISION, "AVGPREC", "Avg precision"),
        new Tag(EVALUATION_LOGLOSSLIML, "LOGLOSSL", "Log Loss (lim. L)"),
        new Tag(EVALUATION_LOGLOSSLIMD, "LOGLOSSD", "Log Loss (lim. D)"),
        new Tag(EVALUATION_F1MICRO, "F1MICRO", "F1 (micro averaged)"),
        new Tag(EVALUATION_F1MACROEXAMPLE, "F1MACROEX", "F1 (macro averaged by example)"),
        new Tag(EVALUATION_F1MACROLABEL, "F1MACROLBL", "F1 (macro averaged by label)"),
        new Tag(EVALUATION_AUPRCMACRO, "AUPRC", "AUPRC (macro averaged)"),
        new Tag(EVALUATION_AUROCMACRO, "AUROC", "AUROC (macro averaged)"),
        new Tag(EVALUATION_LABELCARDINALITY, "LCARD", "Label cardinality (predicted)"),
        new Tag(EVALUATION_LEVENSHTEINDISTANCE, "LDIST", "Levenshtein distance"),
    };

    /**
     * Returns the tags to be used in the GUI.
     *
     * @return the tags
     */
    @Override
    public Tag[] getTags() {
        return TAGS_EVALUATION;
    }

    /**
     * Returns the ID of the default metric to use.
     *
     * @return the default (accuracy)
     */
    @Override
    public int getDefaultMetric() {
        return EVALUATION_ACC;
    }

    /**
     * Returns whether to negate the metric for sorting purposes: metrics where
     * "higher is better" (accuracy, F1, AUC, ...) are inverted so that sorting
     * ascending puts the best performance first; loss/distance metrics are not.
     *
     * @param id the metric id
     * @return true if to invert
     */
    @Override
    public boolean invert(int id) {
        switch (id) {
            case EVALUATION_ACC:
            case EVALUATION_AVGPRECISION:
            case EVALUATION_F1MICRO:
            case EVALUATION_F1MACROEXAMPLE:
            case EVALUATION_F1MACROLABEL:
            case EVALUATION_AUPRCMACRO:
            case EVALUATION_AUROCMACRO:
            case EVALUATION_HAMMINGSCORE:
            case EVALUATION_HARMONICSCORE:
            case EVALUATION_JACCARDINDEX:
                return true;
            default:
                return false;
        }
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/multisearch/MekaEvaluationTask.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* MekaEvaluationTask.java
* Copyright (C) 2017 University of Waikato, Hamilton, NZ
*/
package meka.core.multisearch;
import java.io.Serializable;
import meka.classifiers.multilabel.Evaluation;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.core.Result;
import weka.classifiers.Classifier;
import weka.classifiers.meta.multisearch.AbstractEvaluationTask;
import weka.classifiers.meta.multisearch.MultiSearchCapable;
import weka.classifiers.meta.multisearch.Performance;
import weka.core.Instances;
import weka.core.SetupGenerator;
import weka.core.setupgenerator.Point;
/**
* Meka Evaluation task.
*/
public class MekaEvaluationTask extends AbstractEvaluationTask {

    /** the threshold option passed to the MEKA evaluation ("PCut1"). */
    protected String m_TOP;

    /** the verbosity option passed to the MEKA evaluation ("3"). */
    protected String m_VOP;

    /**
     * Initializes the task.
     *
     * @param owner
     *            the owning MultiSearch classifier
     * @param train
     *            the training data
     * @param test
     *            the test data, can be null
     * @param generator
     *            the generator to use
     * @param values
     *            the setup values
     * @param folds
     *            the number of cross-validation folds
     * @param eval
     *            the type of evaluation
     * @param classLabel
     *            the class label index (0-based; if applicable)
     */
    public MekaEvaluationTask(final MultiSearchCapable owner, final Instances train, final Instances test, final SetupGenerator generator, final Point<Object> values,
            final int folds, final int eval, final int classLabel) {
        super(owner, train, test, generator, values, folds, eval, classLabel);
        this.m_TOP = "PCut1";
        this.m_VOP = "3";
    }

    /**
     * Returns whether predictions can be discarded (depends on selected measure).
     * Currently always true: no supported MEKA metric requires retaining predictions.
     */
    protected boolean canDiscardPredictions() {
        switch (this.m_Owner.getEvaluation().getSelectedTag().getID()) {
            default:
                return true;
        }
    }

    /**
     * Performs the evaluation for this point of the parameter space: either
     * cross-validation on the training set, evaluation on the training set itself
     * (folds &lt; 2), or evaluation on the supplied test set. The resulting
     * performance is always registered with the owning search algorithm, even on
     * failure (with a null evaluation), so the setup is accounted for.
     *
     * @return false if evaluation fails
     */
    @Override
    protected Boolean doRun() throws Exception {
        Point<Object> evals;
        Result eval;
        MultiLabelClassifier classifier;
        Performance performance;
        boolean completed;

        // apply the parameter values of this point to a copy of the base classifier
        evals = this.m_Generator.evaluate(this.m_Values);
        classifier = (MultiLabelClassifier) this.m_Generator.setup((Serializable) this.m_Owner.getClassifier(), evals);

        // evaluate
        try {
            if (this.m_Test == null) {
                if (this.m_Folds >= 2) {
                    eval = Evaluation.cvModel(classifier, this.m_Train, this.m_Folds, this.m_TOP, this.m_VOP);
                } else {
                    classifier.buildClassifier(this.m_Train);
                    eval = Evaluation.evaluateModel(classifier, this.m_Train, this.m_TOP, this.m_VOP);
                }
            } else {
                classifier.buildClassifier(this.m_Train);
                eval = Evaluation.evaluateModel(classifier, this.m_Test, this.m_TOP, this.m_VOP);
            }
            completed = true;
        } catch (Exception e) {
            // record the cause instead of silently swallowing it; the setup is
            // still registered below with a null evaluation so it is not retried
            System.err.println("Evaluation failed for setup " + this.m_Values + ":");
            e.printStackTrace();
            eval = null;
            completed = false;
        }

        // store performance (a fresh classifier copy, since the evaluated one may have been trained)
        performance = new Performance(this.m_Values, this.m_Owner.getFactory().newWrapper(eval), this.m_Evaluation, this.m_ClassLabel,
                (Classifier) this.m_Generator.setup((Serializable) this.m_Owner.getClassifier(), evals));
        this.m_Owner.getAlgorithm().addPerformance(performance, this.m_Folds);

        // log
        this.m_Owner.log(performance + ": cached=false");

        return completed;
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/multisearch/MekaEvaluationWrapper.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* MekaEvaluationWrapper.java
* Copyright (C) 2015-2016 University of Waikato, Hamilton, NZ
*/
package meka.core.multisearch;
import meka.core.Result;
import weka.classifiers.meta.multisearch.AbstractEvaluationWrapper;
/**
* Wrapper for the Result class.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class MekaEvaluationWrapper
    extends AbstractEvaluationWrapper<Result, MekaEvaluationMetrics> {

    private static final long serialVersionUID = 931329614934902835L;

    /** the evaluation object (may be null if the evaluation failed). */
    protected Result m_Evaluation;

    /**
     * Initializes the wrapper.
     *
     * @param eval the evaluation to wrap
     * @param metrics the metrics to use
     */
    public MekaEvaluationWrapper(Result eval, MekaEvaluationMetrics metrics) {
        super(eval, metrics);
    }

    /**
     * Sets the evaluation object to use.
     *
     * @param eval the evaluation
     */
    @Override
    protected void setEvaluation(Result eval) {
        m_Evaluation = eval;
    }

    /**
     * Maps a metric ID onto the key used in the MEKA {@link Result#output} map.
     *
     * @param id the metric id
     * @return the output map key, or null for unknown IDs
     */
    private static String metricKey(int id) {
        switch (id) {
            case MekaEvaluationMetrics.EVALUATION_ACC:
                return "Accuracy";
            case MekaEvaluationMetrics.EVALUATION_JACCARDINDEX:
                return "Jaccard index";
            case MekaEvaluationMetrics.EVALUATION_HAMMINGSCORE:
                return "Hamming score";
            case MekaEvaluationMetrics.EVALUATION_EXACTMATCH:
                return "Exact match";
            case MekaEvaluationMetrics.EVALUATION_JACCARDDISTANCE:
                return "Jaccard distance";
            case MekaEvaluationMetrics.EVALUATION_HAMMINGLOSS:
                return "Hamming loss";
            case MekaEvaluationMetrics.EVALUATION_ZEROONELOSS:
                return "ZeroOne loss";
            case MekaEvaluationMetrics.EVALUATION_HARMONICSCORE:
                return "Harmonic score";
            case MekaEvaluationMetrics.EVALUATION_ONEERROR:
                return "One error";
            case MekaEvaluationMetrics.EVALUATION_RANKLOSS:
                return "Rank loss";
            case MekaEvaluationMetrics.EVALUATION_AVGPRECISION:
                return "Avg precision";
            case MekaEvaluationMetrics.EVALUATION_LOGLOSSLIML:
                return "Log Loss (lim. L)";
            case MekaEvaluationMetrics.EVALUATION_LOGLOSSLIMD:
                return "Log Loss (lim. D)";
            case MekaEvaluationMetrics.EVALUATION_F1MICRO:
                return "F1 (micro averaged)";
            case MekaEvaluationMetrics.EVALUATION_F1MACROEXAMPLE:
                return "F1 (macro averaged by example)";
            case MekaEvaluationMetrics.EVALUATION_F1MACROLABEL:
                return "F1 (macro averaged by label)";
            case MekaEvaluationMetrics.EVALUATION_AUPRCMACRO:
                return "AUPRC (macro averaged)";
            case MekaEvaluationMetrics.EVALUATION_AUROCMACRO:
                return "AUROC (macro averaged)";
            case MekaEvaluationMetrics.EVALUATION_LABELCARDINALITY:
                return "Label cardinality (predicted)";
            case MekaEvaluationMetrics.EVALUATION_LEVENSHTEINDISTANCE:
                return "Levenshtein distance";
            default:
                return null;
        }
    }

    /**
     * Returns the metric for the given ID, looked up in the wrapped evaluation's
     * output map.
     *
     * @param id the id to get the metric for
     * @param classLabel the class label index for which to return metric (if applicable; unused here)
     * @return the metric, or {@link Double#NaN} if the ID is unknown or the value is unavailable
     */
    @Override
    public double getMetric(int id, int classLabel) {
        String key = metricKey(id);
        if (key == null)
            return Double.NaN;
        try {
            return (Double) m_Evaluation.output.get(key);
        } catch (Exception e) {
            // null evaluation, missing key (NPE on unboxing) or unexpected value type
            return Double.NaN;
        }
    }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/core/multisearch/MekaSearch.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* MekaSearch.java
* Copyright (C) 2017 University of Waikato, Hamilton, NZ
*/
package meka.core.multisearch;
import java.io.File;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import java.util.concurrent.Future;
import weka.classifiers.Classifier;
import weka.classifiers.meta.multisearch.AbstractEvaluationTask;
import weka.classifiers.meta.multisearch.AbstractMultiThreadedSearch;
import weka.classifiers.meta.multisearch.Performance;
import weka.classifiers.meta.multisearch.PerformanceComparator;
import weka.core.Instances;
import weka.core.Option;
import weka.core.Utils;
import weka.core.converters.ConverterUtils.DataSource;
import weka.core.setupgenerator.Point;
import weka.core.setupgenerator.Space;
import weka.filters.Filter;
import weka.filters.unsupervised.instance.Resample;
/**
<!-- globalinfo-start -->
* Performs a search of an arbitrary number of parameters of a classifier and chooses the best setup found for the actual training.<br>
* The properties being explored are totally up to the user.<br>
* <br>
* E.g., if you have a FilteredClassifier selected as base classifier, sporting a PLSFilter and you want to explore the number of PLS components, then your property will be made up of the following components:<br>
* - filter: referring to the FilteredClassifier's property (= PLSFilter)<br>
* - numComponents: the actual property of the PLSFilter that we want to modify<br>
* And assembled, the property looks like this:<br>
* filter.numComponents<br>
* <br>
* The initial space is worked on with 2-fold CV to determine the values of the parameters for the selected type of evaluation (e.g., accuracy). The best point in the space is then taken as center and a 10-fold CV is performed with the adjacent parameters. If better parameters are found, then this will act as new center and another 10-fold CV will be performed (kind of hill-climbing). This process is repeated until no better pair is found or the best pair is on the border of the parameter space.<br>
* The number of CV-folds for the initial and subsequent spaces can be adjusted, of course.<br>
* <br>
* Instead of using cross-validation, it is possible to specify test sets, for the initial space evaluation and the subsequent ones.<br>
* <br>
* The outcome of a mathematical function (= double), MultiSearch will convert to integers (values are just cast to int), booleans (0 is false, otherwise true), float, char and long if necessary.<br>
* Via a user-supplied 'list' of parameters (blank-separated), one can also set strings and selected tags (drop-down comboboxes in Weka's GenericObjectEditor). Classnames with options (e.g., classifiers with their options) are possible as well.
* <br><br>
<!-- globalinfo-end -->
*
<!-- options-start -->
* Valid options are: <br>
*
* <pre> -sample-size <num>
 * The size (in percent) of the sample to search the initial space with.
* (default: 100)</pre>
*
* <pre> -initial-folds <num>
* The number of cross-validation folds for the initial space.
* Numbers smaller than 2 turn off cross-validation and just
* perform evaluation on the training set.
* (default: 2)</pre>
*
* <pre> -subsequent-folds <num>
* The number of cross-validation folds for the subsequent sub-spaces.
* Numbers smaller than 2 turn off cross-validation and just
* perform evaluation on the training set.
* (default: 10)</pre>
*
* <pre> -initial-test-set <filename>
* The (optional) test set to use for the initial space.
 * Gets ignored if pointing to a directory. Overrides cross-validation.
* (default: .)</pre>
*
* <pre> -subsequent-test-set <filename>
* The (optional) test set to use for the subsequent sub-spaces.
 * Gets ignored if pointing to a directory. Overrides cross-validation.
* (default: .)</pre>
*
* <pre> -num-slots <num>
* Number of execution slots.
* (default 1 - i.e. no parallelism)</pre>
*
* <pre> -D
* Whether to enable debugging output.
* (default off)</pre>
*
<!-- options-end -->
*
* General notes:
* <ul>
* <li>Turn the <i>debug</i> flag on in order to see some progress output in the
* console</li>
* </ul>
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class MekaSearch extends AbstractMultiThreadedSearch {
private static final long serialVersionUID = -3579744329581176799L;
/** the sample size to search the initial space with. */
protected double m_SampleSize = 100;
/** number of cross-validation folds in the initial space. */
protected int m_InitialSpaceNumFolds = 2;
/** number of cross-validation folds in the subsequent spaces. */
protected int m_SubsequentSpaceNumFolds = 10;
/** the optional test set to use for the initial evaluation (overrides cross-validation, ignored if dir). */
protected File m_InitialSpaceTestSet = new File(".");
/** the optional test set to use for the subsequent evaluation (overrides cross-validation, ignored if dir). */
protected File m_SubsequentSpaceTestSet = new File(".");
/** the optional test set to use for the initial evaluation. */
protected Instances m_InitialSpaceTestInst;
/** the optional test set to use for the subsequent evaluation. */
protected Instances m_SubsequentSpaceTestInst;
/**
 * Returns a string describing the object, shown in the Weka GUI.
 *
 * @return a description suitable for displaying in the
 * explorer/experimenter gui
 */
@Override
public String globalInfo() {
    // NOTE: this text is also mirrored in the class Javadoc (globalinfo-start block)
    return "Performs a search of an arbitrary number of parameters of a classifier " + "and chooses the best setup found for the actual training.\n" + "The properties being explored are totally up to the user.\n" + "\n"
        + "E.g., if you have a FilteredClassifier selected as base classifier, " + "sporting a PLSFilter and you want to explore the number of PLS components, " + "then your property will be made up of the following components:\n"
        + " - filter: referring to the FilteredClassifier's property (= PLSFilter)\n" + " - numComponents: the actual property of the PLSFilter that we want to modify\n" + "And assembled, the property looks like this:\n"
        + " filter.numComponents\n" + "\n" + "The initial space is worked on with 2-fold CV to determine the values " + "of the parameters for the selected type of evaluation (e.g., "
        + "accuracy). The best point in the space is then taken as center and a " + "10-fold CV is performed with the adjacent parameters. If better parameters "
        + "are found, then this will act as new center and another 10-fold CV will " + "be performed (kind of hill-climbing). This process is repeated until "
        + "no better pair is found or the best pair is on the border of the parameter " + "space.\n" + "The number of CV-folds for the initial and subsequent spaces can be " + "adjusted, of course.\n" + "\n"
        + "Instead of using cross-validation, it is possible to specify test sets, " + "for the initial space evaluation and the subsequent ones.\n" + "\n"
        + "The outcome of a mathematical function (= double), MultiSearch will convert " + "to integers (values are just cast to int), booleans (0 is false, otherwise " + "true), float, char and long if necessary.\n"
        + "Via a user-supplied 'list' of parameters (blank-separated), one can also " + "set strings and selected tags (drop-down comboboxes in Weka's " + "GenericObjectEditor). Classnames with options (e.g., classifiers with "
        + "their options) are possible as well.";
}
/**
 * Gets an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
@Override
public Enumeration listOptions() {
    Vector result = new Vector();

    // fixed typo "inital" -> "initial" in the help text
    result.addElement(new Option(
        "\tThe size (in percent) of the sample to search the initial space with.\n"
        + "\t(default: 100)", "sample-size", 1, "-sample-size <num>"));
    result.addElement(new Option(
        "\tThe number of cross-validation folds for the initial space.\n"
        + "\tNumbers smaller than 2 turn off cross-validation and just\n"
        + "\tperform evaluation on the training set.\n"
        + "\t(default: 2)",
        "initial-folds", 1, "-initial-folds <num>"));
    result.addElement(new Option(
        "\tThe number of cross-validation folds for the subsequent sub-spaces.\n"
        + "\tNumbers smaller than 2 turn off cross-validation and just\n"
        + "\tperform evaluation on the training set.\n"
        + "\t(default: 10)",
        "subsequent-folds", 1, "-subsequent-folds <num>"));
    // fixed: test sets are ignored when pointing to a *directory* (see loadTestData),
    // not a file, as previously claimed
    result.addElement(new Option(
        "\tThe (optional) test set to use for the initial space.\n"
        + "\tGets ignored if pointing to a directory. Overrides cross-validation.\n"
        + "\t(default: .)",
        "initial-test-set", 1, "-initial-test-set <filename>"));
    result.addElement(new Option(
        "\tThe (optional) test set to use for the subsequent sub-spaces.\n"
        + "\tGets ignored if pointing to a directory. Overrides cross-validation.\n"
        + "\t(default: .)",
        "subsequent-test-set", 1, "-subsequent-test-set <filename>"));

    // append the superclass options
    result.addAll(Collections.list(super.listOptions()));

    return result.elements();
}
/**
 * Returns the options of the current setup.
 *
 * @return the current options as a command-line style array
 */
@Override
public String[] getOptions() {
    Vector<String> result = new Vector<String>();

    result.add("-sample-size");
    result.add(String.valueOf(this.getSampleSizePercent()));

    result.add("-initial-folds");
    result.add(String.valueOf(this.getInitialSpaceNumFolds()));

    result.add("-subsequent-folds");
    result.add(String.valueOf(this.getSubsequentSpaceNumFolds()));

    result.add("-initial-test-set");
    result.add(String.valueOf(this.getInitialSpaceTestSet()));

    result.add("-subsequent-test-set");
    result.add(String.valueOf(this.getSubsequentSpaceTestSet()));

    // append the superclass options
    Collections.addAll(result, super.getOptions());

    return result.toArray(new String[result.size()]);
}
/**
 * Parses the options for this object; unspecified options fall back to their
 * documented defaults.
 *
 * @param options the options to use
 * @throws Exception if setting of options fails
 */
@Override
public void setOptions(final String[] options) throws Exception {
    String tmpStr;

    tmpStr = Utils.getOption("sample-size", options);
    this.setSampleSizePercent(tmpStr.isEmpty() ? 100 : Double.parseDouble(tmpStr));

    tmpStr = Utils.getOption("initial-folds", options);
    this.setInitialSpaceNumFolds(tmpStr.isEmpty() ? 2 : Integer.parseInt(tmpStr));

    tmpStr = Utils.getOption("subsequent-folds", options);
    this.setSubsequentSpaceNumFolds(tmpStr.isEmpty() ? 10 : Integer.parseInt(tmpStr));

    // test-set options default to the current working directory (= disabled)
    tmpStr = Utils.getOption("initial-test-set", options);
    this.setInitialSpaceTestSet(new File(tmpStr.isEmpty() ? System.getProperty("user.dir") : tmpStr));

    tmpStr = Utils.getOption("subsequent-test-set", options);
    this.setSubsequentSpaceTestSet(new File(tmpStr.isEmpty() ? System.getProperty("user.dir") : tmpStr));

    super.setOptions(options);
}
/**
 * Returns the tip text for the sample-size property.
 *
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String sampleSizePercentTipText() {
    return "The sample size (in percent) to use in the initial space search.";
}
/**
 * Gets the sample size (in percent) for the initial space search.
 *
 * @return the sample size.
 */
public double getSampleSizePercent() {
    return m_SampleSize;
}
/**
 * Sets the sample size (in percent) for the initial space search.
 *
 * @param value the sample size for the initial space search.
 */
public void setSampleSizePercent(final double value) {
    m_SampleSize = value;
}
/**
 * Returns the tip text for the initial-folds property.
 *
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String initialSpaceNumFoldsTipText() {
    return "The number of cross-validation folds when evaluating the initial " + "space; values smaller than 2 turn cross-validation off and simple " + "evaluation on the training set is performed.";
}
/**
 * Gets the number of CV folds for the initial space.
 *
 * @return the number of folds.
 */
public int getInitialSpaceNumFolds() {
    return m_InitialSpaceNumFolds;
}
/**
 * Sets the number of CV folds for the initial space.
 *
 * @param value the number of folds; values below 2 disable cross-validation.
 */
public void setInitialSpaceNumFolds(final int value) {
    m_InitialSpaceNumFolds = value;
}
/**
 * Returns the tip text for the subsequent-folds property.
 *
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String subsequentSpaceNumFoldsTipText() {
    return "The number of cross-validation folds when evaluating the subsequent " + "sub-spaces; values smaller than 2 turn cross-validation off and simple " + "evaluation on the training set is performed.";
}
/**
 * Gets the number of CV folds for the subsequent sub-spaces.
 *
 * @return the number of folds.
 */
public int getSubsequentSpaceNumFolds() {
    return m_SubsequentSpaceNumFolds;
}
/**
 * Sets the number of CV folds for the subsequent sub-spaces.
 *
 * @param value the number of folds; values below 2 disable cross-validation.
 */
public void setSubsequentSpaceNumFolds(final int value) {
    m_SubsequentSpaceNumFolds = value;
}
/**
 * Returns the tip text for the initial-test-set property.
 *
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String initialSpaceTestSetTipText() {
    return "The (optional) test set to use for evaluating the initial search space; " + "overrides cross-validation; gets ignored if pointing to a directory.";
}
/**
 * Gets the test set to use for the initial space.
 *
 * @return the test set file (ignored if it denotes a directory).
 */
public File getInitialSpaceTestSet() {
    return m_InitialSpaceTestSet;
}
/**
 * Sets the test set to use for the initial space.
 *
 * @param value the test set, ignored if it denotes a directory.
 */
public void setInitialSpaceTestSet(final File value) {
    m_InitialSpaceTestSet = value;
}
/**
 * Returns the tip text for the subsequent-test-set property.
 *
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String subsequentSpaceTestSetTipText() {
    return "The (optional) test set to use for evaluating the subsequent search sub-spaces; " + "overrides cross-validation; gets ignored if pointing to a directory.";
}
/**
 * Gets the test set to use for the subsequent sub-spaces.
 *
 * @return the test set file (ignored if it denotes a directory).
 */
public File getSubsequentSpaceTestSet() {
    return m_SubsequentSpaceTestSet;
}
/**
 * Sets the test set to use for the subsequent sub-spaces.
 *
 * @param value the test set, ignored if it denotes a directory.
 */
public void setSubsequentSpaceTestSet(final File value) {
    m_SubsequentSpaceTestSet = value;
}
/**
 * Determines the best point for the given space, evaluating every uncached
 * point in parallel via the owner's executor pool. Cached points are reused;
 * if <i>all</i> points were cached, an IllegalStateException is thrown (the
 * caching layer should have prevented re-entering this space).
 *
 * @param space the space to work on
 * @param train the training data to work with
 * @param test the test data to use, null if to use cross-validation
 * @param folds the number of folds for cross-validation, if &lt;2 then
 * evaluation based on the training set is used
 * @return the best point (not actual parameters!)
 * @throws Exception if setup or training fails
 * @throws IllegalStateException if a task fails, the wait is interrupted, or all points were cached
 */
protected Performance determineBestInSpace(final Space space, final Instances train, final Instances test, final int folds) throws Exception {
    try {
        Performance result;
        int i;
        Enumeration<Point<Object>> enm;
        Performance performance;
        Point<Object> values;
        boolean allCached;
        Performance p1;
        Performance p2;
        AbstractEvaluationTask newTask;
        int classLabel;

        this.m_Performances.clear();

        if (folds >= 2) {
            this.log("Determining best values with " + folds + "-fold CV in space:\n" + space + "\n");
        } else {
            this.log("Determining best values with evaluation on training set in space:\n" + space + "\n");
        }

        enm = space.values();
        allCached = true;
        this.m_NumSetups = space.size();

        // a class label index only applies to nominal class attributes
        if (train.classAttribute().isNominal()) {
            classLabel = this.m_Owner.getClassLabelIndex(train.classAttribute().numValues());
        } else {
            classLabel = -1;
        }

        // submit one evaluation task per uncached point
        ArrayList<Future<Boolean>> results = new ArrayList<Future<Boolean>>();
        while (enm.hasMoreElements()) {
            values = enm.nextElement();
            // already calculated?
            if (this.m_Cache.isCached(folds, values)) {
                performance = this.m_Cache.get(folds, values);
                this.m_Performances.add(performance);
                this.m_Trace.add(new AbstractMap.SimpleEntry<Integer, Performance>(folds, performance));
                this.log(performance + ": cached=true");
            } else {
                allCached = false;
                newTask = this.m_Owner.getFactory().newTask(this.m_Owner, train, test, this.m_Owner.getGenerator(), values, folds, this.m_Owner.getEvaluation().getSelectedTag().getID(), classLabel);
                results.add(this.m_ExecutorPool.submit(newTask));
            }
        }

        // wait for execution to finish; keep the original exception as cause
        // rather than flattening it to a message, and restore the interrupt
        // flag if we were interrupted while waiting
        for (Future<Boolean> future : results) {
            boolean succeeded;
            try {
                succeeded = future.get();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new IllegalStateException("Thread-based execution of evaluation tasks was interrupted: " + e.getMessage(), e);
            } catch (Exception e) {
                throw new IllegalStateException("Thread-based execution of evaluation tasks failed: " + e.getMessage(), e);
            }
            if (!succeeded) {
                throw new IllegalStateException("Execution of evaluation thread failed.");
            }
        }

        if (allCached) {
            this.log("All points were already cached - abnormal state!");
            throw new IllegalStateException("All points were already cached - abnormal state!");
        }

        // sort list so the best performance comes first
        Collections.sort(this.m_Performances, new PerformanceComparator(this.m_Owner.getEvaluation().getSelectedTag().getID(), this.m_Owner.getMetrics()));

        result = this.m_Performances.firstElement();

        // check whether all performances are the same; if so the caller stops searching
        this.m_UniformPerformance = true;
        p1 = this.m_Performances.get(0);
        for (i = 1; i < this.m_Performances.size(); i++) {
            p2 = this.m_Performances.get(i);
            if (p2.getPerformance(this.m_Owner.getEvaluation().getSelectedTag().getID()) != p1.getPerformance(this.m_Owner.getEvaluation().getSelectedTag().getID())) {
                this.m_UniformPerformance = false;
                break;
            }
        }
        if (this.m_UniformPerformance) {
            this.log("All performances are the same!");
        }

        this.logPerformances(space, this.m_Performances);
        this.log("\nBest performance:\n" + this.m_Performances.firstElement());

        this.m_Performances.clear();

        return result;
    } finally {
        // NOTE(review): shutting the pool down here runs on *every* exit, not
        // just on interrupt; a later call to this method within the same search
        // can only submit tasks if the owner re-creates the executor pool —
        // verify against AbstractMultiThreadedSearch.
        this.m_ExecutorPool.shutdownNow();
    }
}
/**
 * Returns the best point in the parameter space via hill-climbing: evaluates
 * the full initial space (optionally on a subsample of the training data),
 * then repeatedly evaluates the sub-space of immediate neighbors around the
 * current best point until no improvement is found, the best point lies on the
 * border of the space, or all performances in a space are uniform.
 *
 * @param inst the training data
 * @return the best point (not evaluated parameters!)
 * @throws Exception if something goes wrong
 */
protected Performance findBest(final Instances inst) throws Exception {
    Performance result;
    Point<Integer> center;
    Space neighborSpace;
    boolean finished;
    Point<Object> evals;
    Performance resultOld;
    int iteration;
    Instances sample;
    Resample resample;
    Classifier cls;

    this.log("Step 1:\n");

    // generate sample? (only when a subsample percentage < 100 was configured)
    if (this.getSampleSizePercent() == 100) {
        sample = inst;
    } else {
        this.log("Generating sample (" + this.getSampleSizePercent() + "%)");
        resample = new Resample();
        resample.setRandomSeed(this.retrieveOwner().getSeed());
        resample.setSampleSizePercent(this.getSampleSizePercent());
        resample.setInputFormat(inst);
        sample = Filter.useFilter(inst, resample);
    }

    iteration = 0;
    this.m_UniformPerformance = false;

    // find first center (m_UniformPerformance is set as a side effect of determineBestInSpace)
    this.log("\n=== Initial space - Start ===");
    result = this.determineBestInSpace(this.m_Space, sample, this.m_InitialSpaceTestInst, this.m_InitialSpaceNumFolds);
    this.log("\nResult of Step 1: " + result + "\n");
    this.log("=== Initial space - End ===\n");

    finished = this.m_UniformPerformance;

    if (!finished) {
        do {
            iteration++;
            // keep a clone so we can detect "no improvement" after re-evaluating
            resultOld = (Performance) result.clone();
            center = this.m_Space.getLocations(result.getValues());
            // on border? -> finished (cannot build a full neighbor sub-space)
            if (this.m_Space.isOnBorder(center)) {
                this.log("Center is on border of space.");
                finished = true;
            }

            // new space with current best one at center and immediate neighbors
            // around it
            if (!finished) {
                neighborSpace = this.m_Space.subspace(center);
                result = this.determineBestInSpace(neighborSpace, sample, this.m_SubsequentSpaceTestInst, this.m_SubsequentSpaceNumFolds);
                this.log("\nResult of Step 2/Iteration " + (iteration) + ":\n" + result);
                finished = this.m_UniformPerformance;

                // no improvement? -> same parameter values as before, stop climbing
                if (result.getValues().equals(resultOld.getValues())) {
                    finished = true;
                    this.log("\nNo better point found.");
                }
            }
        } while (!finished);
    }

    this.log("\nFinal result: " + result);
    // log the winning setup as a command line for reproducibility
    evals = this.m_Owner.getGenerator().evaluate(result.getValues());
    cls = (Classifier) this.m_Owner.getGenerator().setup(this.m_Owner.getClassifier(), evals);
    this.log("Classifier: " + this.getCommandline(cls));

    return result;
}
/**
 * Loads test data, if required.
 * <p>
 * For each of the two (optional) explicit test sets - one for the initial
 * space, one for subsequent sub-spaces - the file is only loaded when it
 * exists and is not a directory; otherwise the corresponding member stays
 * null and cross-validation is used instead.
 *
 * @param data the current training data (used for class index and header check)
 * @throws Exception if test sets are not compatible with training data
 */
protected void loadTestData(final Instances data) throws Exception {
	String msg;
	this.m_InitialSpaceTestInst = null;
	if (this.m_InitialSpaceTestSet.exists() && !this.m_InitialSpaceTestSet.isDirectory()) {
		this.m_InitialSpaceTestInst = DataSource.read(this.m_InitialSpaceTestSet.getAbsolutePath());
		this.m_InitialSpaceTestInst.setClassIndex(data.classIndex());
		msg = data.equalHeadersMsg(this.m_InitialSpaceTestInst);
		if (msg != null) {
			// fixed typo: "dta" -> "data"
			throw new IllegalArgumentException("Test set for initial space not compatible with training data:\n" + msg);
		}
		this.m_InitialSpaceTestInst.deleteWithMissingClass();
		this.log("Using test set for initial space: " + this.m_InitialSpaceTestSet);
	}
	this.m_SubsequentSpaceTestInst = null;
	if (this.m_SubsequentSpaceTestSet.exists() && !this.m_SubsequentSpaceTestSet.isDirectory()) {
		this.m_SubsequentSpaceTestInst = DataSource.read(this.m_SubsequentSpaceTestSet.getAbsolutePath());
		this.m_SubsequentSpaceTestInst.setClassIndex(data.classIndex());
		msg = data.equalHeadersMsg(this.m_SubsequentSpaceTestInst);
		if (msg != null) {
			// fixed typo: "dta" -> "data"
			throw new IllegalArgumentException("Test set for subsequent sub-spaces not compatible with training data:\n" + msg);
		}
		this.m_SubsequentSpaceTestInst.deleteWithMissingClass();
		// bug fix: previously logged m_InitialSpaceTestSet (copy-paste error)
		this.log("Using test set for subsequent sub-spaces: " + this.m_SubsequentSpaceTestSet);
	}
}
/**
 * Performs the actual search and returns the best setup.
 * <p>
 * Loads any explicit test sets, runs the search on a copy of the data,
 * and wraps the winning parameter point, its evaluated values and the
 * configured classifier in a {@link SearchResult}.
 *
 * @param data the dataset to use
 * @return the best classifier setup
 * @throws Exception if search fails
 */
@Override
public SearchResult doSearch(final Instances data) throws Exception {
	this.loadTestData(data);
	// search operates on a copy so the caller's dataset stays untouched
	final Performance best = this.findBest(new Instances(data));
	final Point<Object> evaluated = this.m_Owner.getGenerator().evaluate(best.getValues());
	final SearchResult outcome = new SearchResult();
	outcome.classifier = (Classifier) this.m_Owner.getGenerator().setup(this.m_Owner.getClassifier(), evaluated);
	outcome.performance = best;
	outcome.values = evaluated;
	return outcome;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/events/LogEvent.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* LogEvent.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.events;
import java.util.EventObject;
/**
* Event that contains a log message.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class LogEvent
  extends EventObject {

	private static final long serialVersionUID = 7732581989591408787L;

	/** the log message. */
	protected String m_Message;

	/**
	 * Initializes the event with the given source and log message.
	 * (Previous javadoc was a copy-paste error from an unrelated event class.)
	 *
	 * @param source the object that triggered the event
	 * @param message the log message
	 */
	public LogEvent(Object source, String message) {
		super(source);
		m_Message = message;
	}

	/**
	 * Returns the message.
	 *
	 * @return the message
	 */
	public String getMessage() {
		return m_Message;
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/events/LogListener.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
 * LogListener.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.events;
/**
* Interface for classes that listen to log events of an experiment.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public interface LogListener {

	/**
	 * Gets called if there is a new log message.
	 *
	 * @param e the event carrying the log message and its source object
	 */
	public void logMessage(LogEvent e);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/events/LogObject.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* LogObject.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.events;
import meka.core.ExceptionUtils;
import java.io.Serializable;
import java.util.HashSet;
/**
* Ancestor for objects that support logging.
* Debug mode can be enabled using boolean system property 'meka.exec.debug'.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class LogObject
  implements Serializable, LogSupporter {

	private static final long serialVersionUID = -3814825277914734502L;

	/** whether to run code in DEBUG mode (system property 'meka.exec.debug') */
	protected boolean m_Debug = "true".equals(System.getProperty("meka.exec.debug", "false"));

	/** the listeners; created lazily, not serialized. */
	protected transient HashSet<LogListener> m_LogListeners;

	/**
	 * Returns the log listeners. Instantiates them if neccessary.
	 *
	 * @return the listeners
	 */
	protected HashSet<LogListener> getLogListeners() {
		if (m_LogListeners == null) {
			m_LogListeners = new HashSet<>();
		}
		return m_LogListeners;
	}

	/**
	 * Adds the log listener to use.
	 *
	 * @param l the listener
	 */
	public void addLogListener(LogListener l) {
		getLogListeners().add(l);
	}

	/**
	 * Remove the log listener to use.
	 *
	 * @param l the listener
	 */
	public void removeLogListener(LogListener l) {
		getLogListeners().remove(l);
	}

	/**
	 * For logging messages. Uses stderr if no listeners defined.
	 *
	 * @param msg the message to output
	 */
	public synchronized void log(String msg) {
		debug(msg);
		if (getLogListeners().isEmpty()) {
			// no listeners registered -> fall back to stderr
			System.err.println(msg);
		}
		else {
			final LogEvent event = new LogEvent(this, msg);
			for (LogListener listener : getLogListeners()) {
				listener.logMessage(event);
			}
		}
	}

	/**
	 * For debugging messages. Uses stderr.
	 *
	 * @param msg the message to output
	 */
	public synchronized void debug(String msg) {
		if (m_Debug) {
			System.err.println("[DEBUG] " + getClass().getName() + " - " + msg);
		}
	}

	/**
	 * Logs the stacktrace along with the message on stderr and returns a
	 * combination of both of them as string.
	 *
	 * @param msg the message for the exception
	 * @param t the exception
	 * @return the full error message (message + stacktrace)
	 */
	public String handleException(String msg, Throwable t) {
		final String full = ExceptionUtils.handleException(this, msg, t, false);
		log(full);
		return full;
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/events/LogSupporter.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* LogSupporter.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.events;
/**
* Interface for classes that support logging.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public interface LogSupporter {
/**
* Adds the log listener to use.
*
* @param l the listener
*/
public void addLogListener(LogListener l);
/**
* Remove the log listener to use.
*
* @param l the listener
*/
public void removeLogListener(LogListener l);
/**
* For logging messages.
*
* @param msg the message to log
*/
public void log(String msg);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/DefaultExperiment.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* DefaultExperiment.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.core.OptionUtils;
import meka.events.LogListener;
import meka.events.LogObject;
import meka.experiment.datasetproviders.DatasetProvider;
import meka.experiment.datasetproviders.LocalDatasetProvider;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import meka.experiment.evaluationstatistics.EvaluationStatisticsHandler;
import meka.experiment.evaluationstatistics.IncrementalEvaluationStatisticsHandler;
import meka.experiment.evaluationstatistics.KeyValuePairs;
import meka.experiment.evaluators.CrossValidation;
import meka.experiment.evaluators.Evaluator;
import meka.experiment.events.*;
import weka.classifiers.AbstractClassifier;
import weka.core.Instances;
import weka.core.Option;
import weka.core.Utils;
import java.util.*;
/**
* Default experiment which executes experiments on the local machine.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class DefaultExperiment
  extends LogObject
  implements Experiment {

	private static final long serialVersionUID = 8654760249461885158L;

	/** the notes. */
	protected String m_Notes = "";

	/** the classifiers to evaluate. */
	protected MultiLabelClassifier[] m_Classifiers = new MultiLabelClassifier[0];

	/** the dataset provider. */
	protected DatasetProvider m_DatasetProvider = getDefaultDatasetProvider();

	/** the evaluator. */
	protected Evaluator m_Evaluator = getDefaultEvaluator();

	/** the statistics handler. */
	protected EvaluationStatisticsHandler m_StatisticsHandler = getDefaultStatisticsHandler();

	/** whether the experiment is initializing. */
	protected boolean m_Initializing;

	/** whether the experiment is running. */
	protected boolean m_Running;

	/** whether the experiment is stopping. */
	protected boolean m_Stopping;

	/** the listeners for execution stages. */
	protected transient HashSet<ExecutionStageListener> m_ExecutionStageListeners;

	/** the listeners for iterations. */
	protected transient HashSet<IterationNotificationListener> m_IterationNotficationListeners;

	/** the listeners for statistics. */
	protected transient HashSet<StatisticsNotificationListener> m_StatisticsNotificationListeners;

	/** the collected statistics. */
	protected List<EvaluationStatistics> m_Statistics = new ArrayList<>();

	/**
	 * Sets the notes.
	 *
	 * @param value the notes
	 */
	public void setNotes(String value) {
		m_Notes = value;
	}

	/**
	 * Returns the notes.
	 *
	 * @return the notes
	 */
	public String getNotes() {
		return m_Notes;
	}

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	public String notesTipText() {
		return "The (optional) notes for this experiment in Markdown.";
	}

	/**
	 * Sets the classifiers to be evaluated.
	 *
	 * @param value the classifiers
	 */
	public void setClassifiers(MultiLabelClassifier[] value) {
		m_Classifiers = value;
	}

	/**
	 * Returns the classifiers to be evaluated.
	 *
	 * @return the classifiers
	 */
	public MultiLabelClassifier[] getClassifiers() {
		return m_Classifiers;
	}

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	public String classifiersTipText() {
		return "The classifiers to evaluate.";
	}

	/**
	 * Returns the default dataset provider.
	 *
	 * @return the default
	 */
	protected DatasetProvider getDefaultDatasetProvider() {
		return new LocalDatasetProvider();
	}

	/**
	 * Sets the dataset provider to use.
	 *
	 * @param value the provider
	 */
	@Override
	public void setDatasetProvider(DatasetProvider value) {
		m_DatasetProvider = value;
	}

	/**
	 * Returns the dataset provider in use.
	 *
	 * @return the provider
	 */
	@Override
	public DatasetProvider getDatasetProvider() {
		return m_DatasetProvider;
	}

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	@Override
	public String datasetProviderTipText() {
		return "The dataset provider to use.";
	}

	/**
	 * Returns the default evaluator to use.
	 *
	 * @return the default
	 */
	protected Evaluator getDefaultEvaluator() {
		return new CrossValidation();
	}

	/**
	 * Sets the evaluator to use.
	 *
	 * @param value the evaluator
	 */
	@Override
	public void setEvaluator(Evaluator value) {
		m_Evaluator = value;
	}

	/**
	 * Returns the evaluator in use.
	 *
	 * @return the evaluator
	 */
	@Override
	public Evaluator getEvaluator() {
		return m_Evaluator;
	}

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	@Override
	public String evaluatorTipText() {
		return "The evaluator to use.";
	}

	/**
	 * Sets the statistics handler.
	 *
	 * @param value the handler
	 */
	@Override
	public void setStatisticsHandler(EvaluationStatisticsHandler value) {
		m_StatisticsHandler = value;
	}

	/**
	 * Returns the default statistics handler.
	 *
	 * @return the default
	 */
	protected EvaluationStatisticsHandler getDefaultStatisticsHandler() {
		return new KeyValuePairs();
	}

	/**
	 * Returns the statistics handler.
	 *
	 * @return the handler
	 */
	@Override
	public EvaluationStatisticsHandler getStatisticsHandler() {
		return m_StatisticsHandler;
	}

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	@Override
	public String statisticsHandlerTipText() {
		return "The handler for the statistics (load/save).";
	}

	/**
	 * Adds the execution stage listener.
	 *
	 * @param l the listener to add
	 */
	public synchronized void addExecutionStageListener(ExecutionStageListener l) {
		if (m_ExecutionStageListeners == null)
			m_ExecutionStageListeners = new HashSet<>();
		m_ExecutionStageListeners.add(l);
	}

	/**
	 * Removes the execution stage listener.
	 *
	 * @param l the listener to remove
	 */
	public synchronized void removeExecutionStageListener(ExecutionStageListener l) {
		if (m_ExecutionStageListeners == null)
			m_ExecutionStageListeners = new HashSet<>();
		m_ExecutionStageListeners.remove(l);
	}

	/**
	 * Notifies all listeners of a new execution stage.
	 *
	 * @param stage the new stage
	 */
	protected synchronized void notifyExecutionStageListeners(ExecutionStageEvent.Stage stage) {
		ExecutionStageEvent e;
		if (m_ExecutionStageListeners == null)
			return;
		e = new ExecutionStageEvent(this, stage);
		for (ExecutionStageListener l: m_ExecutionStageListeners)
			l.experimentStage(e);
	}

	/**
	 * Adds the iteration listener.
	 *
	 * @param l the listener to add
	 */
	public synchronized void addIterationNotificationListener(IterationNotificationListener l) {
		if (m_IterationNotficationListeners == null)
			m_IterationNotficationListeners = new HashSet<>();
		m_IterationNotficationListeners.add(l);
	}

	/**
	 * Removes the iteration listener.
	 *
	 * @param l the listener to remove
	 */
	public synchronized void removeIterationNotificationListener(IterationNotificationListener l) {
		if (m_IterationNotficationListeners == null)
			m_IterationNotficationListeners = new HashSet<>();
		m_IterationNotficationListeners.remove(l);
	}

	/**
	 * Notifies all listeners of a new classifier/dataset combination.
	 *
	 * @param classifier the classifier
	 * @param dataset the dataset
	 */
	protected synchronized void notifyIterationNotificationListeners(MultiLabelClassifier classifier, Instances dataset) {
		IterationNotificationEvent e;
		if (m_IterationNotficationListeners == null)
			return;
		e = new IterationNotificationEvent(this, classifier, dataset);
		for (IterationNotificationListener l: m_IterationNotficationListeners)
			l.nextIteration(e);
	}

	/**
	 * Adds the statistics listener.
	 *
	 * @param l the listener to add
	 */
	public synchronized void addStatisticsNotificationListener(StatisticsNotificationListener l) {
		if (m_StatisticsNotificationListeners == null)
			m_StatisticsNotificationListeners = new HashSet<>();
		m_StatisticsNotificationListeners.add(l);
	}

	/**
	 * Removes the statistics listener.
	 *
	 * @param l the listener to remove
	 */
	public synchronized void removeStatisticsNotificationListener(StatisticsNotificationListener l) {
		if (m_StatisticsNotificationListeners == null)
			m_StatisticsNotificationListeners = new HashSet<>();
		m_StatisticsNotificationListeners.remove(l);
	}

	/**
	 * Notifies all listeners of newly available statistics.
	 *
	 * @param stats the statistics
	 */
	protected synchronized void notifyStatisticsNotificationListeners(List<EvaluationStatistics> stats) {
		StatisticsNotificationEvent e;
		if (m_StatisticsNotificationListeners == null)
			return;
		e = new StatisticsNotificationEvent(this, stats);
		for (StatisticsNotificationListener l: m_StatisticsNotificationListeners)
			l.statisticsAvailable(e);
	}

	/**
	 * Returns an enumeration of all the available options..
	 *
	 * @return an enumeration of all available options.
	 */
	@Override
	public Enumeration<Option> listOptions() {
		// use parameterized Vector instead of raw type
		Vector<Option> result = new Vector<>();
		OptionUtils.addOption(result, notesTipText(), "", "notes");
		OptionUtils.addOption(result, classifiersTipText(), "none", 'C');
		OptionUtils.addOption(result, datasetProviderTipText(), getDefaultDatasetProvider().getClass().getName(), 'D');
		OptionUtils.addOption(result, evaluatorTipText(), getDefaultEvaluator().getClass().getName(), 'E');
		OptionUtils.addOption(result, statisticsHandlerTipText(), getDefaultStatisticsHandler().getClass().getName(), 'S');
		return OptionUtils.toEnumeration(result);
	}

	/**
	 * Sets the options.
	 *
	 * @param options the options
	 * @throws Exception if parsing fails
	 */
	@Override
	public void setOptions(String[] options) throws Exception {
		setNotes(Utils.unbackQuoteChars(OptionUtils.parse(options, "notes", "")));
		setClassifiers(OptionUtils.parse(options, 'C', MultiLabelClassifier.class));
		setDatasetProvider((DatasetProvider) OptionUtils.parse(options, 'D', getDefaultDatasetProvider()));
		setEvaluator((Evaluator) OptionUtils.parse(options, 'E', getDefaultEvaluator()));
		setStatisticsHandler((EvaluationStatisticsHandler) OptionUtils.parse(options, 'S', getDefaultStatisticsHandler()));
	}

	/**
	 * Returns the options.
	 *
	 * @return the options
	 */
	@Override
	public String[] getOptions() {
		List<String> result = new ArrayList<>();
		OptionUtils.add(result, "notes", Utils.backQuoteChars(getNotes()));
		OptionUtils.add(result, 'C', getClassifiers());
		OptionUtils.add(result, 'D', getDatasetProvider());
		OptionUtils.add(result, 'E', getEvaluator());
		OptionUtils.add(result, 'S', getStatisticsHandler());
		return OptionUtils.toArray(result);
	}

	/**
	 * Adds the source's class name to the message if not null.
	 *
	 * @param source the source
	 * @param msg the error message, can be null
	 * @return null if no error message, otherwise enriched message
	 */
	protected String handleError(Object source, String msg) {
		if (msg == null)
			return null;
		msg = source.getClass().getName() + ": " + msg;
		log(msg);
		return msg;
	}

	/**
	 * Initializes the experiment.
	 *
	 * @return null if successfully initialized, otherwise error message
	 */
	public String initialize() {
		String result;
		debug("pre: init");
		m_Initializing = true;
		m_Running = false;
		m_Stopping = false;
		notifyExecutionStageListeners(ExecutionStageEvent.Stage.INITIALIZING);
		ExperimentUtils.ensureThreadSafety(this);
		// bug fix: iterate via getLogListeners() - the inherited m_LogListeners
		// field is lazily created and is null if no listener was ever added,
		// which caused an NPE here
		for (LogListener l: getLogListeners()) {
			m_DatasetProvider.addLogListener(l);
			m_StatisticsHandler.addLogListener(l);
			m_Evaluator.addLogListener(l);
		}
		m_Statistics.clear();
		result = handleError(m_DatasetProvider, m_DatasetProvider.initialize());
		if (result == null)
			result = handleError(m_StatisticsHandler, m_StatisticsHandler.initialize());
		if (result != null)
			log(result);
		m_Initializing = false;
		debug("post: init");
		return result;
	}

	/**
	 * Returns whether the experiment is initializing.
	 *
	 * @return true if initializing
	 */
	public boolean isInitializing() {
		return m_Initializing;
	}

	/**
	 * Runs the experiment.
	 *
	 * @return null if successfully run, otherwise error message
	 */
	public String run() {
		String result;
		Instances dataset;
		List<EvaluationStatistics> stats;
		boolean incremental;
		debug("pre: run");
		result = null;
		m_Running = true;
		incremental = (m_StatisticsHandler instanceof IncrementalEvaluationStatisticsHandler) &&
		  (((IncrementalEvaluationStatisticsHandler) m_StatisticsHandler).supportsIncrementalUpdate());
		debug("Incremental statistics? " + incremental);
		notifyExecutionStageListeners(ExecutionStageEvent.Stage.RUNNING);
		while (m_DatasetProvider.hasNext()) {
			// next dataset
			debug("pre: next-dataset");
			dataset = m_DatasetProvider.next();
			debug("post: next-dataset");
			if (dataset == null) {
				result = "Failed to obtain next dataset!";
				log(result);
				m_Running = false;
				break;
			}
			log("Using dataset: " + dataset.relationName());
			// iterate classifiers
			for (MultiLabelClassifier classifier: m_Classifiers) {
				// evaluation required?
				if (incremental) {
					if (!((IncrementalEvaluationStatisticsHandler) m_StatisticsHandler).requires(classifier, dataset)) {
						log("Already present, skipping: " + Utils.toCommandLine(classifier) + " --> " + dataset.relationName());
						List<EvaluationStatistics> priorStats = ((IncrementalEvaluationStatisticsHandler) m_StatisticsHandler).retrieve(classifier, dataset);
						m_Statistics.addAll(priorStats);
						notifyStatisticsNotificationListeners(priorStats);
						continue;
					}
				}
				try {
					// work on a copy so repeated runs don't share classifier state
					classifier = (MultiLabelClassifier) AbstractClassifier.makeCopy(classifier);
				}
				catch (Exception e) {
					result = handleException("Failed to create copy of classifier: " + classifier.getClass().getName(), e);
					log(result);
					m_Running = false;
					break;
				}
				if (m_Running && !m_Stopping) {
					// notify listeners
					notifyIterationNotificationListeners(classifier, dataset);
					log("Using classifier: " + OptionUtils.toCommandLine(classifier));
					// perform evaluation
					debug("pre: evaluator init");
					result = m_Evaluator.initialize();
					debug("post: evaluator init");
					if (result != null) {
						m_Running = false;
						break;
					}
					try {
						debug("pre: evaluator evaluate");
						stats = m_Evaluator.evaluate(classifier, dataset);
						debug("post: evaluator evaluate");
					}
					catch (Exception e) {
						result = handleException("Failed to evaluate dataset '" + dataset.relationName() + "' with classifier: " + Utils.toCommandLine(classifier), e);
						log(result);
						m_Running = false;
						break;
					}
					if (stats != null) {
						m_Statistics.addAll(stats);
						if (incremental)
							((IncrementalEvaluationStatisticsHandler) m_StatisticsHandler).append(stats);
						notifyStatisticsNotificationListeners(stats);
					}
				}
				if (!m_Running || m_Stopping)
					break;
			}
			if (!m_Running || m_Stopping)
				break;
		}
		if (m_Running && !m_Stopping) {
			// non-incremental handlers get all collected statistics at once
			if (!incremental)
				m_StatisticsHandler.write(m_Statistics);
		}
		if (!m_Running) {
			if (result == null)
				result = "Experiment interrupted!";
			else
				result = "Experiment interrupted: " + result;
		}
		if (result != null)
			log(result);
		m_Running = false;
		m_Stopping = false;
		debug("post: run");
		return result;
	}

	/**
	 * Returns whether the experiment is running.
	 *
	 * @return true if running
	 */
	public boolean isRunning() {
		return m_Running;
	}

	/**
	 * Stops the experiment if running.
	 */
	public void stop() {
		debug("pre: stop");
		m_Stopping = true;
		m_Initializing = false;
		m_Running = false;
		notifyExecutionStageListeners(ExecutionStageEvent.Stage.STOPPING);
		m_Evaluator.stop();
		debug("post: stop");
	}

	/**
	 * Returns whether the experiment is stopping.
	 *
	 * @return true if stopping
	 */
	public boolean isStopping() {
		return m_Stopping;
	}

	/**
	 * Finishes the experiment.
	 *
	 * @return null if successfully finished, otherwise error message
	 */
	public String finish() {
		String result;
		debug("pre: finish");
		result = handleError(m_DatasetProvider, m_DatasetProvider.finish());
		// bug fix: condition was inverted ("result != null"), so the statistics
		// handler only got finished when the dataset provider had FAILED;
		// now mirrors the chaining used in initialize()
		if (result == null)
			result = handleError(m_StatisticsHandler, m_StatisticsHandler.finish());
		if (result != null)
			log(result);
		// bug fix: use getLogListeners() instead of the possibly-null field
		for (LogListener l: getLogListeners()) {
			m_DatasetProvider.removeLogListener(l);
			m_StatisticsHandler.removeLogListener(l);
			m_Evaluator.removeLogListener(l);
		}
		m_Stopping = false;
		m_Initializing = false;
		m_Running = false;
		notifyExecutionStageListeners(ExecutionStageEvent.Stage.FINISH);
		debug("post: finish");
		return result;
	}

	/**
	 * Returns the current statistics.
	 *
	 * @return the statistics, if any
	 */
	public List<EvaluationStatistics> getStatistics() {
		return m_Statistics;
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/Experiment.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* Experiment.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.events.LogSupporter;
import meka.experiment.datasetproviders.DatasetProvider;
import meka.experiment.evaluationstatistics.EvaluationStatistics;
import meka.experiment.evaluationstatistics.EvaluationStatisticsHandler;
import meka.experiment.evaluators.Evaluator;
import meka.experiment.events.ExecutionStageListener;
import meka.experiment.events.IterationNotificationListener;
import meka.experiment.events.StatisticsNotificationListener;
import weka.core.OptionHandler;
import java.io.Serializable;
import java.util.List;
/**
* Interface for experiments.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public interface Experiment
  extends OptionHandler, Serializable, LogSupporter {

	/**
	 * Sets the notes.
	 *
	 * @param value the notes (Markdown)
	 */
	public void setNotes(String value);

	/**
	 * Returns the notes.
	 *
	 * @return the notes (Markdown)
	 */
	public String getNotes();

	/**
	 * Sets the classifiers to be evaluated.
	 *
	 * @param value the classifiers
	 */
	public void setClassifiers(MultiLabelClassifier[] value);

	/**
	 * Returns the classifiers to be evaluated.
	 *
	 * @return the classifiers
	 */
	public MultiLabelClassifier[] getClassifiers();

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	public String classifiersTipText();

	/**
	 * Sets the dataset provider to use.
	 *
	 * @param value the provider
	 */
	public void setDatasetProvider(DatasetProvider value);

	/**
	 * Returns the dataset provider in use.
	 *
	 * @return the provider
	 */
	public DatasetProvider getDatasetProvider();

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	public String datasetProviderTipText();

	/**
	 * Sets the evaluator to use.
	 *
	 * @param value the evaluator
	 */
	public void setEvaluator(Evaluator value);

	/**
	 * Returns the evaluator in use.
	 *
	 * @return the evaluator
	 */
	public Evaluator getEvaluator();

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	public String evaluatorTipText();

	/**
	 * Sets the statistics handler.
	 *
	 * @param value the handler
	 */
	public void setStatisticsHandler(EvaluationStatisticsHandler value);

	/**
	 * Returns the statistics handler.
	 *
	 * @return the handler
	 */
	public EvaluationStatisticsHandler getStatisticsHandler();

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	public String statisticsHandlerTipText();

	/**
	 * Adds the execution stage listener.
	 *
	 * @param l the listener to add
	 */
	public void addExecutionStageListener(ExecutionStageListener l);

	/**
	 * Removes the execution stage listener.
	 *
	 * @param l the listener to remove
	 */
	public void removeExecutionStageListener(ExecutionStageListener l);

	/**
	 * Adds the iteration listener.
	 *
	 * @param l the listener to add
	 */
	public void addIterationNotificationListener(IterationNotificationListener l);

	/**
	 * Removes the iteration listener.
	 *
	 * @param l the listener to remove
	 */
	public void removeIterationNotificationListener(IterationNotificationListener l);

	/**
	 * Adds the statistics listener.
	 *
	 * @param l the listener to add
	 */
	public void addStatisticsNotificationListener(StatisticsNotificationListener l);

	/**
	 * Removes the statistics listener.
	 *
	 * @param l the listener to remove
	 */
	public void removeStatisticsNotificationListener(StatisticsNotificationListener l);

	/**
	 * Initializes the experiment.
	 *
	 * @return null if successfully initialized, otherwise error message
	 */
	public String initialize();

	/**
	 * Returns whether the experiment is initializing.
	 *
	 * @return true if initializing
	 */
	public boolean isInitializing();

	/**
	 * Runs the experiment.
	 *
	 * @return null if successfully run, otherwise error message
	 */
	public String run();

	/**
	 * Returns whether the experiment is running.
	 *
	 * @return true if running
	 */
	public boolean isRunning();

	/**
	 * Stops the experiment if still running.
	 */
	public void stop();

	/**
	 * Returns whether the experiment is stopping.
	 *
	 * @return true if stopping
	 */
	public boolean isStopping();

	/**
	 * Finishes the experiment.
	 *
	 * @return null if successfully finished, otherwise error message
	 */
	public String finish();

	/**
	 * Returns the current statistics.
	 *
	 * @return the statistics, if any
	 */
	public List<EvaluationStatistics> getStatistics();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/ExperimentUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* ExperimentUtils.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment;
import meka.core.ThreadLimiter;
import meka.core.ThreadUtils;
import meka.experiment.evaluationstatistics.OptionalIncrementalEvaluationStatisticsHandler;
import meka.experiment.evaluators.AbstractMetaEvaluator;
import meka.experiment.evaluators.Evaluator;
/**
* Experiment related methods.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class ExperimentUtils {

	/**
	 * Determines whether the given evaluator -- or any evaluator wrapped inside a
	 * meta-evaluator -- implements {@link ThreadLimiter} and is configured to use
	 * more than one thread.
	 *
	 * @param exp       the experiment the evaluator belongs to
	 * @param evaluator the evaluator to inspect
	 * @return          true if a multi-threaded {@link ThreadLimiter} evaluator was found
	 */
	protected static boolean isMultiThreaded(Experiment exp, Evaluator evaluator) {
		if (evaluator instanceof ThreadLimiter) {
			int threads = ((ThreadLimiter) evaluator).getNumThreads();
			if (ThreadUtils.isMultiThreaded(threads))
				return true;
		}
		// meta-evaluators can wrap a multi-threaded base evaluator, so recurse
		if (evaluator instanceof AbstractMetaEvaluator)
			return isMultiThreaded(exp, ((AbstractMetaEvaluator) evaluator).getEvaluator());
		return false;
	}

	/**
	 * Forces sequential execution on any multi-threaded {@link ThreadLimiter}
	 * evaluator (recursing into meta-evaluators), logging each change.
	 *
	 * @param exp       the experiment to update (used for logging)
	 * @param evaluator the evaluator to inspect/update
	 */
	protected static void ensureThreadSafety(Experiment exp, Evaluator evaluator) {
		if (evaluator instanceof ThreadLimiter) {
			ThreadLimiter limiter = (ThreadLimiter) evaluator;
			int previous = limiter.getNumThreads();
			if (ThreadUtils.isMultiThreaded(previous)) {
				limiter.setNumThreads(ThreadUtils.SEQUENTIAL);
				exp.log(evaluator.getClass().getName() + ": changed #threads from " + previous + " to " + ThreadUtils.SEQUENTIAL + " "
					+ "(" + exp.getStatisticsHandler().getClass().getName() + " is not threadsafe)!");
			}
		}
		if (evaluator instanceof AbstractMetaEvaluator)
			ensureThreadSafety(exp, ((AbstractMetaEvaluator) evaluator).getEvaluator());
	}

	/**
	 * Makes sure that the experiment uses a threadsafe setup: if the statistics
	 * handler is not threadsafe, either disables the handler's incremental mode
	 * (when that renders it threadsafe) or forces the evaluators into
	 * sequential mode.
	 *
	 * @param exp the experiment to check/update
	 */
	public static void ensureThreadSafety(Experiment exp) {
		// threadsafe statistics handler? don't worry then
		if (exp.getStatisticsHandler().isThreadSafe())
			return;

		if (isMultiThreaded(exp, exp.getEvaluator())) {
			// can we turn off incremental mode to take advantage of multi-threading?
			if (exp.getStatisticsHandler() instanceof OptionalIncrementalEvaluationStatisticsHandler) {
				OptionalIncrementalEvaluationStatisticsHandler optional = (OptionalIncrementalEvaluationStatisticsHandler) exp.getStatisticsHandler();
				boolean previous = optional.isIncrementalDisabled();
				optional.setIncrementalDisabled(true);
				if (optional.isThreadSafe()) {
					exp.log("Turned off incremental mode for " + optional.getClass().getName() + " to make use of multi-threading!");
					return;
				}
				// disabling incremental mode did not help, restore old state
				optional.setIncrementalDisabled(previous);
			}
			ensureThreadSafety(exp, exp.getEvaluator());
		}
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/datasetproviders/AbstractDatasetProvider.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
 * AbstractDatasetProvider.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.datasetproviders;
import meka.events.LogObject;
import weka.core.Option;
import java.util.Enumeration;
import java.util.Vector;
/**
 * Ancestor for dataset providers.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class AbstractDatasetProvider
	extends LogObject
	implements DatasetProvider {

	private static final long serialVersionUID = 2167509900278245507L;

	/**
	 * Description to be displayed in the GUI.
	 *
	 * @return the description
	 */
	public abstract String globalInfo();

	/**
	 * Returns an enumeration of all the available options.
	 *
	 * @return an enumeration of all available options (none by default)
	 */
	@Override
	public Enumeration<Option> listOptions() {
		// typed Vector instead of raw type, avoids unchecked warning
		return new Vector<Option>().elements();
	}

	/**
	 * Sets the options. Default implementation ignores all options.
	 *
	 * @param options the options
	 * @throws Exception if parsing of options fails
	 */
	@Override
	public void setOptions(String[] options) throws Exception {
	}

	/**
	 * Returns the options. Default implementation has none.
	 *
	 * @return the options
	 */
	@Override
	public String[] getOptions() {
		return new String[0];
	}

	/**
	 * Initializes the provider to start providing datasets from scratch.
	 *
	 * @return null if successfully initialized, otherwise error message
	 */
	@Override
	public String initialize() {
		return null;
	}

	/**
	 * Does nothing, removing datasets via the iterator is not supported.
	 */
	@Override
	public void remove() {
		// ignored
	}

	/**
	 * Gets called after the experiment finishes.
	 *
	 * @return null if successfully finished, otherwise error message
	 */
	@Override
	public String finish() {
		return null;
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/datasetproviders/DatasetProvider.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* DatasetProvider.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.datasetproviders;
import meka.events.LogSupporter;
import weka.core.Instances;
import weka.core.OptionHandler;
import java.io.Serializable;
import java.util.Iterator;
/**
* Interface for classes that provide datasets for the experiment.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public interface DatasetProvider
	extends OptionHandler, Iterator<Instances>, Serializable, LogSupporter {

	/**
	 * Initializes the provider to start providing datasets from scratch.
	 * Gets called before the first call to {@link #hasNext()}.
	 *
	 * @return null if successfully initialized, otherwise error message
	 */
	public String initialize();

	/**
	 * Returns whether another dataset is available.
	 *
	 * @return true if another dataset is available
	 */
	@Override
	public boolean hasNext();

	/**
	 * Returns the next dataset.
	 *
	 * @return the next dataset
	 */
	@Override
	public Instances next();

	/**
	 * Gets called after the experiment finishes, allowing the provider
	 * to clean up.
	 *
	 * @return null if successfully finished, otherwise error message
	 */
	public String finish();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/datasetproviders/LocalDatasetProvider.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* LocalDatasetProvider.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.datasetproviders;
import meka.core.MLUtils;
import meka.core.OptionUtils;
import weka.core.Instances;
import weka.core.Option;
import weka.core.converters.ConverterUtils;
import java.io.File;
import java.util.*;
/**
* Loads local files from disk.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class LocalDatasetProvider
	extends AbstractDatasetProvider {

	private static final long serialVersionUID = 2167509900278245507L;

	/** the files to load. */
	protected List<File> m_Datasets = new ArrayList<>();

	/** the iterator index. */
	protected int m_Current;

	/**
	 * Description to be displayed in the GUI.
	 *
	 * @return the description
	 */
	@Override
	public String globalInfo() {
		return "Loads local files from disk.";
	}

	/**
	 * Sets the datasets to use in the experiment.
	 *
	 * @param value the datasets, null resets to an empty list
	 */
	public void setDatasets(File[] value) {
		m_Datasets.clear();
		if (value != null)
			m_Datasets.addAll(Arrays.asList(value));
	}

	/**
	 * Returns the datasets to use in the experiment.
	 *
	 * @return the datasets
	 */
	public File[] getDatasets() {
		return m_Datasets.toArray(new File[m_Datasets.size()]);
	}

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	public String datasetsTipText() {
		return "The datasets to load.";
	}

	/**
	 * Returns an enumeration of all the available options.
	 *
	 * @return an enumeration of all available options.
	 */
	@Override
	public Enumeration<Option> listOptions() {
		// typed Vector instead of raw type, avoids unchecked warning
		Vector<Option> result = new Vector<>();
		OptionUtils.add(result, super.listOptions());
		OptionUtils.addOption(result, datasetsTipText(), "none", "dataset");
		return OptionUtils.toEnumeration(result);
	}

	/**
	 * Sets the options.
	 *
	 * @param options the options
	 * @throws Exception if parsing of options fails
	 */
	@Override
	public void setOptions(String[] options) throws Exception {
		setDatasets(OptionUtils.parse(options, "dataset", File.class));
		super.setOptions(options);
	}

	/**
	 * Returns the options.
	 *
	 * @return the options
	 */
	@Override
	public String[] getOptions() {
		List<String> result = new ArrayList<>();
		OptionUtils.add(result, super.getOptions());
		OptionUtils.add(result, "dataset", getDatasets());
		return OptionUtils.toArray(result);
	}

	/**
	 * Initializes the provider to start providing datasets from scratch,
	 * checking up-front that all configured datasets exist and are files.
	 *
	 * @return null if successfully initialized, otherwise error message
	 */
	@Override
	public String initialize() {
		m_Current = 0;
		for (File dataset: m_Datasets) {
			if (!dataset.exists())
				return "Dataset does not exist: " + dataset;
			if (dataset.isDirectory())
				return "Dataset points to a directory: " + dataset;
		}
		return null;
	}

	/**
	 * Returns whether another dataset is available.
	 *
	 * @return true if another dataset is available
	 */
	@Override
	public boolean hasNext() {
		return (m_Current < m_Datasets.size());
	}

	/**
	 * Returns the next dataset. The iterator advances even if loading fails,
	 * so a single broken file does not block the remaining datasets.
	 *
	 * @return the next dataset, null in case of an error
	 */
	@Override
	public Instances next() {
		Instances result;
		try {
			log("Loading: " + m_Datasets.get(m_Current));
			result = ConverterUtils.DataSource.read(m_Datasets.get(m_Current).getAbsolutePath());
			MLUtils.prepareData(result);
		}
		catch (Exception e) {
			result = null;
			handleException("Failed to load dataset: " + m_Datasets.get(m_Current), e);
		}
		m_Current++;
		return result;
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/datasetproviders/MultiDatasetProvider.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* MultiDatasetProvider.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.datasetproviders;
import meka.core.OptionUtils;
import meka.events.LogListener;
import weka.core.Instances;
import weka.core.Option;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
/**
* Combines multiple dataset providers.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class MultiDatasetProvider
	extends AbstractDatasetProvider {

	private static final long serialVersionUID = 5886187555928103838L;

	/** the dataset providers to use. */
	protected DatasetProvider[] m_Providers = getDefaultProviders();

	/** the iterator index. */
	protected int m_Current;

	/**
	 * Description to be displayed in the GUI.
	 *
	 * @return the description
	 */
	@Override
	public String globalInfo() {
		return "Combines multiple dataset providers.";
	}

	/**
	 * Returns the default dataset providers to use.
	 *
	 * @return the providers (none by default)
	 */
	protected DatasetProvider[] getDefaultProviders() {
		return new DatasetProvider[0];
	}

	/**
	 * Sets the dataset providers to use.
	 *
	 * @param value the providers
	 */
	public void setProviders(DatasetProvider[] value) {
		m_Providers = value;
	}

	/**
	 * Returns the dataset providers to use.
	 *
	 * @return the providers
	 */
	public DatasetProvider[] getProviders() {
		return m_Providers;
	}

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	public String providersTipText() {
		return "The dataset providers to use.";
	}

	/**
	 * Returns an enumeration of all the available options.
	 *
	 * @return an enumeration of all available options.
	 */
	@Override
	public Enumeration<Option> listOptions() {
		// typed Vector instead of raw type, avoids unchecked warning
		Vector<Option> result = new Vector<>();
		OptionUtils.add(result, super.listOptions());
		OptionUtils.addOption(result, providersTipText(), "none", "provider");
		return OptionUtils.toEnumeration(result);
	}

	/**
	 * Sets the options.
	 *
	 * @param options the options
	 * @throws Exception if parsing of options fails
	 */
	@Override
	public void setOptions(String[] options) throws Exception {
		setProviders(OptionUtils.parse(options, "provider", DatasetProvider.class));
		super.setOptions(options);
	}

	/**
	 * Returns the options.
	 *
	 * @return the options
	 */
	@Override
	public String[] getOptions() {
		List<String> result = new ArrayList<>();
		OptionUtils.add(result, super.getOptions());
		OptionUtils.add(result, "provider", getProviders());
		return OptionUtils.toArray(result);
	}

	/**
	 * Adds the log listener to use, forwarding it to all providers as well.
	 *
	 * @param l the listener
	 */
	@Override
	public void addLogListener(LogListener l) {
		super.addLogListener(l);
		for (DatasetProvider provider : m_Providers)
			provider.addLogListener(l);
	}

	/**
	 * Removes the log listener, removing it from all providers as well.
	 *
	 * @param l the listener
	 */
	@Override
	public void removeLogListener(LogListener l) {
		super.removeLogListener(l);
		for (DatasetProvider provider : m_Providers)
			provider.removeLogListener(l);
	}

	/**
	 * Initializes the provider to start providing datasets from scratch,
	 * initializing all sub-providers.
	 *
	 * @return null if successfully initialized, otherwise error message
	 */
	@Override
	public String initialize() {
		String result;
		int i;

		result = null;
		m_Current = 0;
		for (i = 0; i < m_Providers.length; i++) {
			result = m_Providers[i].initialize();
			if (result != null) {
				result = "Provider #" + (i+1) + ": " + result;
				break;
			}
		}

		return result;
	}

	/**
	 * Returns whether another dataset is available from any of the
	 * remaining providers.
	 *
	 * @return true if another dataset is available
	 */
	@Override
	public boolean hasNext() {
		// must look ahead past exhausted providers, mirroring the skipping in
		// next(); checking only the current provider would terminate the
		// iteration prematurely even though later providers still have datasets
		for (int i = m_Current; i < m_Providers.length; i++) {
			if (m_Providers[i].hasNext())
				return true;
		}
		return false;
	}

	/**
	 * Returns the next dataset, skipping exhausted providers.
	 *
	 * @return the next dataset, null if no provider has any datasets left
	 */
	@Override
	public Instances next() {
		while (m_Current < m_Providers.length) {
			if (m_Providers[m_Current].hasNext()) {
				log("Dataset provider #" + (m_Current + 1));
				return m_Providers[m_Current].next();
			}
			m_Current++;
		}
		return null;
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluationstatistics/AbstractEvaluationStatisticsHandler.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* AbstractEvaluationStatisticsHandler.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluationstatistics;
import meka.events.LogObject;
import weka.core.Option;
import java.util.Enumeration;
import java.util.Vector;
/**
* Ancestor for handlers.
*
 * By default, {@link IncrementalEvaluationStatisticsHandler} instances are not considered threadsafe. Such
 * classes must explicitly state that they're threadsafe.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class AbstractEvaluationStatisticsHandler
	extends LogObject
	implements EvaluationStatisticsHandler {

	private static final long serialVersionUID = -1090631157162943295L;

	/**
	 * Description to be displayed in the GUI.
	 *
	 * @return the description
	 */
	public abstract String globalInfo();

	/**
	 * Returns whether the handler is threadsafe. By default, incremental
	 * handlers are considered not threadsafe; subclasses must override this
	 * method to state otherwise.
	 *
	 * @return true if threadsafe
	 */
	@Override
	public boolean isThreadSafe() {
		return !(this instanceof IncrementalEvaluationStatisticsHandler);
	}

	/**
	 * Returns an enumeration of all the available options.
	 *
	 * @return an enumeration of all available options (none by default)
	 */
	@Override
	public Enumeration<Option> listOptions() {
		// typed Vector instead of raw type, avoids unchecked warning
		return new Vector<Option>().elements();
	}

	/**
	 * Sets the options. Default implementation ignores all options.
	 *
	 * @param options the options
	 * @throws Exception never
	 */
	@Override
	public void setOptions(String[] options) throws Exception {
	}

	/**
	 * Returns the options. Default implementation has none.
	 *
	 * @return the options
	 */
	@Override
	public String[] getOptions() {
		return new String[0];
	}

	/**
	 * Gets called after the experiment finished.
	 *
	 * @return null if successfully finished, otherwise error message
	 */
	@Override
	public String finish() {
		return null;
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluationstatistics/AbstractFileBasedEvaluationStatisticsHandler.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* AbstractFileBasedEvaluationStatisticsHandler.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluationstatistics;
import meka.core.OptionUtils;
import weka.core.Option;
import java.io.File;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
/**
* Ancestor for file-base handlers.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class AbstractFileBasedEvaluationStatisticsHandler
	extends AbstractEvaluationStatisticsHandler
	implements FileBasedEvaluationStatisticsHandler {

	private static final long serialVersionUID = -1090631157162943295L;

	/** the file to read from/write to. */
	protected File m_File = getDefaultFile();

	/**
	 * Returns the default file.
	 * NOTE(review): "." is a directory, so initialize() fails until an actual
	 * file has been set -- presumably intentional to force configuration.
	 *
	 * @return the default
	 */
	protected File getDefaultFile() {
		return new File(".");
	}

	/**
	 * Sets the file to read from/write to.
	 *
	 * @param value the file
	 */
	public void setFile(File value) {
		m_File = value;
	}

	/**
	 * Returns the file to read from/write to.
	 *
	 * @return the file
	 */
	public File getFile() {
		return m_File;
	}

	/**
	 * Describes this property.
	 *
	 * @return the description
	 */
	public String fileTipText() {
		return "The file to read from/write to.";
	}

	/**
	 * Returns an enumeration of all the available options.
	 *
	 * @return an enumeration of all available options.
	 */
	@Override
	public Enumeration<Option> listOptions() {
		// typed Vector instead of raw type, avoids unchecked warning
		Vector<Option> result = new Vector<>();
		OptionUtils.add(result, super.listOptions());
		OptionUtils.addOption(result, fileTipText(), "" + getDefaultFile(), 'F');
		return OptionUtils.toEnumeration(result);
	}

	/**
	 * Sets the options.
	 *
	 * @param options the options
	 * @throws Exception never
	 */
	@Override
	public void setOptions(String[] options) throws Exception {
		setFile(OptionUtils.parse(options, 'F', getDefaultFile()));
		super.setOptions(options);
	}

	/**
	 * Returns the options.
	 *
	 * @return the options
	 */
	@Override
	public String[] getOptions() {
		List<String> result = new ArrayList<>();
		OptionUtils.add(result, super.getOptions());
		OptionUtils.add(result, 'F', getFile());
		return OptionUtils.toArray(result);
	}

	/**
	 * Initializes the handler, ensuring the configured file does not point
	 * to a directory.
	 *
	 * @return null if successfully initialized, otherwise error message
	 */
	@Override
	public String initialize() {
		if (m_File.isDirectory())
			return "File points to a directory: " + m_File;
		return null;
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluationstatistics/EvaluationStatistics.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* EvaluationStatistics.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluationstatistics;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.core.OptionUtils;
import meka.core.Result;
import weka.core.Instances;
import weka.core.Utils;
import java.util.HashMap;
/**
* Stores evaluation statistics.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class EvaluationStatistics
	extends HashMap<String,Number> {

	private static final long serialVersionUID = -1873027591755259927L;

	/** the key for the classifier. */
	public final static String KEY_CLASSIFIER = "Classifier";

	/** the key for the relation. */
	public final static String KEY_RELATION = "Relation";

	/** the classifier. */
	protected MultiLabelClassifier m_Classifier;

	/** the classifier commandline. */
	protected String m_CommandLine;

	/** the relation name. */
	protected String m_Relation;

	/**
	 * Default constructor. Creates empty statistics without classifier,
	 * relation or evaluation results.
	 */
	public EvaluationStatistics() {
		this(null, (String) null, null);
	}

	/**
	 * Extracts the statistics from the Result object, using the dataset's
	 * relation name.
	 *
	 * @param classifier the classifier
	 * @param dataset the dataset, can be null
	 * @param result the evaluation, can be null
	 */
	public EvaluationStatistics(MultiLabelClassifier classifier, Instances dataset, Result result) {
		this(classifier, (dataset != null) ? dataset.relationName() : null, result);
	}

	/**
	 * Extracts the statistics from the Result object: all numeric values and
	 * numeric measurements get stored under their respective keys.
	 *
	 * @param classifier the classifier, can be null
	 * @param relation the relation, can be null
	 * @param result the evaluation, can be null
	 */
	public EvaluationStatistics(MultiLabelClassifier classifier, String relation, Result result) {
		super();

		m_Classifier  = classifier;
		m_CommandLine = (classifier != null) ? OptionUtils.toCommandLine(classifier) : null;
		m_Relation    = relation;

		if (result == null)
			return;

		for (String name : result.vals.keySet()) {
			Object value = result.vals.get(name);
			if (value instanceof Number)
				put(name, (Number) value);
		}
		for (String metric : result.availableMetrics()) {
			Object measurement = result.getMeasurement(metric);
			if (measurement instanceof Number)
				put(metric, (Number) measurement);
		}
	}

	/**
	 * Returns the classifier for these statistics.
	 *
	 * @return the classifier, null if not set
	 */
	public MultiLabelClassifier getClassifier() {
		return m_Classifier;
	}

	/**
	 * Returns the commandline of the classifier for these statistics.
	 *
	 * @return the classifier commandline, null if not set
	 */
	public String getCommandLine() {
		return m_CommandLine;
	}

	/**
	 * Returns the relation for these statistics.
	 *
	 * @return the relation, null if not set
	 */
	public String getRelation() {
		return m_Relation;
	}

	/**
	 * Returns the statistics as string: classifier commandline, relation
	 * and the stored key/value pairs.
	 *
	 * @return the statistics
	 */
	@Override
	public String toString() {
		StringBuilder buf = new StringBuilder();
		buf.append("Classifier=").append(Utils.toCommandLine(m_Classifier)).append(",");
		buf.append("Relation=").append(m_Relation).append(",");
		buf.append(super.toString());
		return buf.toString();
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluationstatistics/EvaluationStatisticsComparator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* EvaluationStatisticsComparator.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluationstatistics;
import weka.core.Utils;
import java.io.Serializable;
import java.util.Comparator;
/**
* Comparator for statistics.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class EvaluationStatisticsComparator
	implements Comparator<EvaluationStatistics>, Serializable {

	private static final long serialVersionUID = 6043447420299753468L;

	/** the default keys. */
	public final static String[] DEFAULT_KEYS = new String[]{
		EvaluationStatistics.KEY_CLASSIFIER,
		EvaluationStatistics.KEY_RELATION};

	/** the keys to use for comparison. */
	protected String[] m_Keys;

	/**
	 * Initializes the comparator with the default keys.
	 *
	 * @see #DEFAULT_KEYS
	 */
	public EvaluationStatisticsComparator() {
		this(DEFAULT_KEYS);
	}

	/**
	 * Initializes the comparator with the specified keys.
	 *
	 * @param keys the keys to use
	 */
	public EvaluationStatisticsComparator(String[] keys) {
		super();
		m_Keys = keys;
	}

	/**
	 * Compares the two statistics key by key; the first key with differing
	 * values decides the order. Statistics missing a numeric key sort before
	 * ones that have it.
	 *
	 * @param o1 the first stats
	 * @param o2 the second stats
	 * @return less than zero if first smaller than second, zero if the same, greater than zero if first
	 *         greater than second
	 */
	@Override
	public int compare(EvaluationStatistics o1, EvaluationStatistics o2) {
		int result;
		Number n1;
		Number n2;

		result = 0;

		for (String key: m_Keys) {
			if (key.equals(EvaluationStatistics.KEY_CLASSIFIER)) {
				result = o1.getCommandLine().compareTo(o2.getCommandLine());
			}
			else if (key.equals(EvaluationStatistics.KEY_RELATION)) {
				result = o1.getRelation().compareTo(o2.getRelation());
			}
			else {
				n1 = o1.get(key);
				n2 = o2.get(key);
				if ((n1 == null) && (n2 == null))
					result = 0;
				else if (n1 == null)
					result = -1;
				else if (n2 == null)
					result = +1;
				else
					// Double.compare avoids the deprecated Double(double) constructor
					// and the needless boxing of the original comparison
					result = Double.compare(n1.doubleValue(), n2.doubleValue());
			}
			if (result != 0)
				break;
		}

		return result;
	}

	/**
	 * Returns the keys as string.
	 *
	 * @return the keys
	 */
	@Override
	public String toString() {
		return Utils.arrayToString(m_Keys);
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluationstatistics/EvaluationStatisticsHandler.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* EvaluationStatisticsHandler.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluationstatistics;
import meka.events.LogSupporter;
import weka.core.OptionHandler;
import java.io.Serializable;
import java.util.List;
/**
* Interface for classes that load and save collected {@link EvaluationStatistics} in some form.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public interface EvaluationStatisticsHandler
	extends OptionHandler, Serializable, LogSupporter {

	/**
	 * Returns whether the handler is threadsafe, i.e., whether {@link #write(List)}
	 * may be called from multiple threads.
	 *
	 * @return true if threadsafe
	 */
	public boolean isThreadSafe();

	/**
	 * Initializes the handler. Gets called before the first read/write.
	 *
	 * @return null if successfully initialized, otherwise error message
	 */
	public String initialize();

	/**
	 * Reads the statistics.
	 *
	 * @return the statistics that were read
	 */
	public List<EvaluationStatistics> read();

	/**
	 * Stores the given statistics.
	 *
	 * @param stats the statistics to store
	 * @return null if successfully stored, otherwise error message
	 */
	public String write(List<EvaluationStatistics> stats);

	/**
	 * Gets called after the experiment finished, allowing the handler
	 * to clean up.
	 *
	 * @return null if successfully finished, otherwise error message
	 */
	public String finish();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluationstatistics/EvaluationStatisticsUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* EvaluationStatisticsUtils.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluationstatistics;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.core.A;
import meka.core.OptionUtils;
import meka.experiment.evaluators.CrossValidation;
import meka.experiment.evaluators.RepeatedRuns;
import weka.core.Instances;
import weka.core.Utils;
import java.util.*;
/**
* Helper class for stats related operations.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class EvaluationStatisticsUtils {

	/**
	 * Returns all the keys of all the statistics.
	 *
	 * @param stats the stats to inspect
	 * @param sort  whether to sort the keys alphabetically
	 * @return the keys
	 */
	public static List<String> keys(List<EvaluationStatistics> stats, boolean sort) {
		HashSet<String> keys = new HashSet<>();
		for (EvaluationStatistics stat : stats)
			keys.addAll(stat.keySet());
		List<String> result = new ArrayList<>(keys);
		if (sort)
			Collections.sort(result);
		return result;
	}

	/**
	 * Returns all the unique classifiers of all the statistics, instantiated
	 * from their command-lines. Command-lines that fail to instantiate are
	 * reported on stderr and skipped.
	 *
	 * @param stats the stats to inspect
	 * @param sort  whether to sort the classifiers alphabetically (by command-line)
	 * @return the classifiers
	 */
	public static List<MultiLabelClassifier> classifiers(List<EvaluationStatistics> stats, boolean sort) {
		List<MultiLabelClassifier> result = new ArrayList<>();
		for (String cmdline : commandLines(stats, sort)) {
			try {
				result.add(OptionUtils.fromCommandLine(MultiLabelClassifier.class, cmdline));
			}
			catch (Exception e) {
				System.err.println("Failed to instantiate command-line: " + cmdline);
				e.printStackTrace();
			}
		}
		return result;
	}

	/**
	 * Returns all the unique classifier command-lines of all the statistics.
	 *
	 * @param stats the stats to inspect
	 * @param sort  whether to sort the command-lines alphabetically
	 * @return the command-lines
	 */
	public static List<String> commandLines(List<EvaluationStatistics> stats, boolean sort) {
		List<String> result = new ArrayList<>();
		for (EvaluationStatistics stat : stats) {
			if (!result.contains(stat.getCommandLine()))
				result.add(stat.getCommandLine());
		}
		if (sort)
			Collections.sort(result);
		return result;
	}

	/**
	 * Returns all the values of a specific measurement for the specified
	 * classifier/dataset combination.
	 *
	 * @param stats       the stats to inspect
	 * @param classifier  the classifier to look for
	 * @param dataset     the dataset to look for
	 * @param measurement the measurement to retrieve
	 * @return the values
	 */
	public static List<Number> measurements(List<EvaluationStatistics> stats, MultiLabelClassifier classifier, Instances dataset, String measurement) {
		return measurements(stats, OptionUtils.toCommandLine(classifier), dataset.relationName(), measurement);
	}

	/**
	 * Returns all the values of a specific measurement for the specified
	 * classifier/dataset combination.
	 *
	 * @param stats       the stats to inspect
	 * @param classifier  the classifier to look for (commandline)
	 * @param dataset     the dataset to look for (relation name)
	 * @param measurement the measurement to retrieve
	 * @return the values (empty if no matching statistic carries the measurement)
	 */
	public static List<Number> measurements(List<EvaluationStatistics> stats, String classifier, String dataset, String measurement) {
		List<Number> result = new ArrayList<>();
		for (EvaluationStatistics stat : stats) {
			if (stat.getCommandLine().equals(classifier) && stat.getRelation().equals(dataset)) {
				if (stat.containsKey(measurement))
					result.add(stat.get(measurement));
			}
		}
		return result;
	}

	/*
	 * Nemenyi Test - NOT YET IMPLEMENTED
	 *
	 * @param stats the stats to inspect
	 * @param measurement the measurement to run the test on
	 * @return the Ranks // the Nemenyi test results
	 */

	/**
	 * Builds a value matrix for the given measurement: rows correspond to the
	 * (alphabetically sorted) relations, columns to the (alphabetically sorted)
	 * classifier command-lines. Each cell holds the FIRST recorded value for
	 * that classifier/dataset pair; pairs with no recorded value are filled
	 * with {@link Double#NaN} instead of raising an exception.
	 *
	 * @param stats       the stats to inspect
	 * @param measurement the measurement to extract
	 * @return the N x k matrix (N = #relations, k = #classifiers)
	 */
	public static double[][] valueMatrix(List<EvaluationStatistics> stats, String measurement) {
		List<String> classifiers = commandLines(stats, true);
		List<String> relations = relations(stats, true);
		int N = relations.size();
		int k = classifiers.size();
		double[][] V = new double[N][k];
		for (int i = 0; i < N; i++) {
			for (int j = 0; j < k; j++) {
				List<Number> measurements = measurements(stats, classifiers.get(j), relations.get(i), measurement);
				// doubleValue() avoids the ClassCastException the old
				// "(double) measurements.get(0)" cast raised whenever the stored
				// Number was not exactly a Double (e.g. an Integer fold count).
				V[i][j] = measurements.isEmpty() ? Double.NaN : measurements.get(0).doubleValue();
			}
		}
		return V;
	}

	/**
	 * Builds a rank matrix from {@link #valueMatrix(List, String)}: for each
	 * dataset (row) the classifiers are ranked 1..k by ascending value, i.e.
	 * the smallest value receives rank 1.
	 * NOTE(review): for measurements where larger is better this yields
	 * inverted ranks — confirm against callers before relying on orientation.
	 *
	 * @param stats       the stats to inspect
	 * @param measurement the measurement to rank on
	 * @return the N x k rank matrix
	 */
	public static int[][] rankMatrix(List<EvaluationStatistics> stats, String measurement) {
		double[][] V = valueMatrix(stats, measurement);
		int N = V.length;
		int k = N > 0 ? V[0].length : 0;
		int[][] R = new int[N][k];
		for (int i = 0; i < N; i++) {
			// Utils.sort returns the indices that would sort V[i] ascending;
			// position j in that order therefore gets rank j+1.
			int[] indices = Utils.sort(V[i]);
			for (int j = 0; j < k; j++) {
				R[i][indices[j]] = (j + 1);
			}
		}
		return R;
	}

	/**
	 * Returns all available measurements (i.e. all statistic keys), sorted
	 * alphabetically.
	 *
	 * @param stats the stats to inspect
	 * @return the values
	 */
	public static List<String> measurements(List<EvaluationStatistics> stats) {
		// identical semantics to keys(stats, true); delegate to keep the two in sync
		return keys(stats, true);
	}

	/**
	 * Returns all the unique relations of all the statistics.
	 *
	 * @param stats the stats to inspect
	 * @param sort  whether to sort the relations alphabetically
	 * @return the relations
	 */
	public static List<String> relations(List<EvaluationStatistics> stats, boolean sort) {
		List<String> result = new ArrayList<>();
		for (EvaluationStatistics stat : stats) {
			if (!result.contains(stat.getRelation()))
				result.add(stat.getRelation());
		}
		if (sort)
			Collections.sort(result);
		return result;
	}

	/**
	 * Creates a list of headers (= stats keys) from the provided statistics.
	 *
	 * @param stats                 the stats to use
	 * @param moveRunFold           whether to move "Fold" and "Run" to the start
	 * @param addClassifierRelation whether to add "Classifier" and "Relation"
	 * @return the generated header list
	 */
	public static List<String> headers(List<EvaluationStatistics> stats, boolean moveRunFold, boolean addClassifierRelation) {
		List<String> result = keys(stats, true);
		if (moveRunFold) {
			if (result.contains(CrossValidation.KEY_FOLD)) {
				result.remove(CrossValidation.KEY_FOLD);
				result.add(0, CrossValidation.KEY_FOLD);
			}
			if (result.contains(RepeatedRuns.KEY_RUN)) {
				result.remove(RepeatedRuns.KEY_RUN);
				result.add(0, RepeatedRuns.KEY_RUN);
			}
		}
		if (addClassifierRelation) {
			result.add(0, EvaluationStatistics.KEY_RELATION);
			result.add(0, EvaluationStatistics.KEY_CLASSIFIER);
		}
		return result;
	}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluationstatistics/FileBasedEvaluationStatisticsHandler.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* FileBasedEvaluationStatisticsHandler.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluationstatistics;
import meka.core.FileFormatSupporter;
import java.io.File;
/**
* Interface for file-based statistics handlers.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
/**
 * Contract for statistics handlers whose backing store is a file on disk.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision$
 */
public interface FileBasedEvaluationStatisticsHandler
  extends EvaluationStatisticsHandler, FileFormatSupporter {

	/**
	 * Returns a human-readable description of the file format.
	 *
	 * @return the file format
	 */
	String getFormatDescription();

	/**
	 * Returns the file extension(s) this format uses.
	 *
	 * @return the extension(s) (incl dot)
	 */
	String[] getFormatExtensions();

	/**
	 * Sets the file to read from/write to.
	 *
	 * @param value the file
	 */
	void setFile(File value);

	/**
	 * Returns the file to read from/write to.
	 *
	 * @return the file
	 */
	File getFile();

	/**
	 * Tool tip for the file property, shown in the GUI.
	 *
	 * @return the description
	 */
	String fileTipText();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment
|
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/experiment/evaluationstatistics/InMemory.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* InMemory.java
* Copyright (C) 2015 University of Waikato, Hamilton, NZ
*/
package meka.experiment.evaluationstatistics;
import java.util.ArrayList;
import java.util.List;
/**
* Simple in-memory handler.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class InMemory
  extends AbstractEvaluationStatisticsHandler {

	private static final long serialVersionUID = 121028869996940534L;

	/** the collected statistics. */
	protected List<EvaluationStatistics> m_Statistics = new ArrayList<>();

	/**
	 * Description to be displayed in the GUI.
	 *
	 * @return the description
	 */
	public String globalInfo() {
		return "Simply stores the statistics in memory.";
	}

	/**
	 * Returns whether the handler is threadsafe.
	 *
	 * @return always true
	 */
	@Override
	public boolean isThreadSafe() {
		return true;
	}

	/**
	 * Initializes the handler by discarding any previously collected statistics.
	 *
	 * @return always null (initialization cannot fail)
	 */
	@Override
	public String initialize() {
		m_Statistics.clear();
		return null;
	}

	/**
	 * Reads the statistics.
	 * NOTE(review): this hands out the live internal list, not a copy —
	 * callers mutating it affect this handler's state.
	 *
	 * @return the statistics collected so far
	 */
	@Override
	public List<EvaluationStatistics> read() {
		return m_Statistics;
	}

	/**
	 * Stores the given statistics by appending them to the in-memory list.
	 *
	 * @param stats the statistics to store
	 * @return always null (storing in memory cannot fail)
	 */
	@Override
	public String write(List<EvaluationStatistics> stats) {
		for (EvaluationStatistics stat : stats)
			m_Statistics.add(stat);
		return null;
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.