| index | repo_id | file_path | content |
|---|---|---|---|
| 0 | java-sources/ai/libs/jaicore-ml/0.2.7/ai/libs/jaicore/ml/scikitwrapper | java-sources/ai/libs/jaicore-ml/0.2.7/ai/libs/jaicore/ml/scikitwrapper/simple/SimpleScikitLearnClassifier.java |
package ai.libs.jaicore.ml.scikitwrapper.simple;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.api4.java.ai.ml.classification.singlelabel.evaluation.ISingleLabelClassification;
import org.api4.java.ai.ml.classification.singlelabel.evaluation.ISingleLabelClassificationPredictionBatch;
import org.api4.java.ai.ml.classification.singlelabel.learner.ISingleLabelClassifier;
import org.api4.java.ai.ml.core.dataset.schema.attribute.ICategoricalAttribute;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance;
import org.api4.java.ai.ml.core.exception.PredictionException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.TextNode;
import ai.libs.jaicore.basic.FileUtil;
import ai.libs.jaicore.ml.classification.singlelabel.SingleLabelClassification;
import ai.libs.jaicore.ml.classification.singlelabel.SingleLabelClassificationPredictionBatch;
import ai.libs.jaicore.ml.scikitwrapper.ScikitLearnWrapperExecutionFailedException;
public class SimpleScikitLearnClassifier extends ASimpleScikitLearnWrapper<ISingleLabelClassification, ISingleLabelClassificationPredictionBatch> implements ISingleLabelClassifier {
public SimpleScikitLearnClassifier(final String constructorCall, final String imports) throws IOException, InterruptedException {
super(constructorCall, imports, "classification");
}
@Override
public ISingleLabelClassificationPredictionBatch predict(final ILabeledDataset<? extends ILabeledInstance> dTest) throws PredictionException, InterruptedException {
ISingleLabelClassificationPredictionBatch batch = null;
try {
File predictOutputFile = this.executePipeline(dTest);
List<String> labels = ((ICategoricalAttribute) dTest.getLabelAttribute()).getLabels();
JsonNode n = new ObjectMapper().readTree(FileUtil.readFileAsString(predictOutputFile));
if (!(n instanceof ArrayNode)) {
throw new PredictionException("JSON file for predictions does not contain an array as its root element.");
}
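// Assumption (the generated python script is not part of this file): the class
// probability columns are written in the order of the alphabetically sorted label
// names, so the labels are sorted ascendingly here and each column is mapped back
// to its original label index below.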
List<String> ascendSortingLabels = new ArrayList<>(labels);
Collections.sort(ascendSortingLabels);
List<ISingleLabelClassification> predictions = new ArrayList<>();
ArrayNode preds = (ArrayNode) n;
for (JsonNode pred : preds) {
double[] labelProbabilities = new double[labels.size()];
if (pred instanceof ArrayNode) {
int i = 0;
for (JsonNode prob : pred) {
labelProbabilities[labels.indexOf(ascendSortingLabels.get(i++))] = prob.asDouble();
}
} else if (pred instanceof TextNode) {
int index = (int) dTest.getLabelAttribute().deserializeAttributeValue(pred.asText());
labelProbabilities[index] = 1.0;
}
predictions.add(new SingleLabelClassification(labelProbabilities));
}
batch = new SingleLabelClassificationPredictionBatch(predictions);
} catch (InterruptedException e) {
throw e;
} catch (IOException e) {
throw new PredictionException("Could not write executable python file.", e);
} catch (ScikitLearnWrapperExecutionFailedException e) {
throw new PredictionException("Could not execute scikit-learn wrapper.", e);
}
return batch;
}
}
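// A minimal usage sketch (hypothetical constructor call and import string; fit(...)
// is assumed to be inherited from ASimpleScikitLearnWrapper, which is not shown here):
//
// ISingleLabelClassifier classifier = new SimpleScikitLearnClassifier(
// "RandomForestClassifier(n_estimators=100)",
// "from sklearn.ensemble import RandomForestClassifier");
// classifier.fit(dTrain);
// ISingleLabelClassificationPredictionBatch predictions = classifier.predict(dTest);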
|
| 0 | java-sources/ai/libs/jaicore-ml/0.2.7/ai/libs/jaicore/ml/scikitwrapper | java-sources/ai/libs/jaicore-ml/0.2.7/ai/libs/jaicore/ml/scikitwrapper/simple/SimpleScikitLearnRegressor.java |
package ai.libs.jaicore.ml.scikitwrapper.simple;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance;
import org.api4.java.ai.ml.core.exception.PredictionException;
import org.api4.java.ai.ml.regression.evaluation.IRegressionPrediction;
import org.api4.java.ai.ml.regression.evaluation.IRegressionResultBatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import ai.libs.jaicore.basic.FileUtil;
import ai.libs.jaicore.ml.regression.singlelabel.SingleTargetRegressionPrediction;
import ai.libs.jaicore.ml.regression.singlelabel.SingleTargetRegressionPredictionBatch;
import ai.libs.jaicore.ml.scikitwrapper.ScikitLearnWrapperExecutionFailedException;
public class SimpleScikitLearnRegressor extends ASimpleScikitLearnWrapper<IRegressionPrediction, IRegressionResultBatch> {
private Logger logger = LoggerFactory.getLogger(SimpleScikitLearnRegressor.class);
public SimpleScikitLearnRegressor(final String constructorCall, final String imports) throws IOException, InterruptedException {
super(constructorCall, imports, "regression");
}
@Override
public IRegressionResultBatch predict(final ILabeledDataset<? extends ILabeledInstance> dTest) throws PredictionException, InterruptedException {
IRegressionResultBatch batch = null;
try {
File predictOutputFile = this.executePipeline(dTest);
JsonNode n = new ObjectMapper().readTree(FileUtil.readFileAsString(predictOutputFile));
if (!(n instanceof ArrayNode)) {
throw new PredictionException("JSON file for predictions does not contain an array as its root element.");
}
List<IRegressionPrediction> predictions = new ArrayList<>();
ArrayNode preds = (ArrayNode) n;
for (JsonNode pred : preds) {
predictions.add(new SingleTargetRegressionPrediction(pred.asDouble()));
}
batch = new SingleTargetRegressionPredictionBatch(predictions);
} catch (InterruptedException e) {
this.logger.info("SimpleScikitLearnRegressor for pipeline {} got interrupted.", this.constructorCall);
throw e;
} catch (IOException e) {
throw new PredictionException("Could not write executable python file.", e);
} catch (ScikitLearnWrapperExecutionFailedException e) {
throw new PredictionException("Could not execute scikit-learn wrapper.", e);
}
return batch;
}
}
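// The prediction file is expected to contain a flat JSON array with one numeric value
// per test instance, e.g. [0.42, 1.37, 2.05]; any other root element makes predict(...)
// fail with a PredictionException. Usage mirrors SimpleScikitLearnClassifier, e.g.
// (hypothetical constructor arguments):
//
// SimpleScikitLearnRegressor regressor = new SimpleScikitLearnRegressor(
// "LinearRegression()", "from sklearn.linear_model import LinearRegression");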
|
| 0 | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/package-info.java |
/**
 * @author mwever
 */
package ai.libs.jaicore.ml.classification.multilabel;
|
| 0 | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/dataset/IMekaInstance.java |
package ai.libs.jaicore.ml.classification.multilabel.dataset;
import org.apache.commons.math3.ml.clustering.Clusterable;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance;
import org.api4.java.common.attributedobjects.IElementDecorator;
import weka.core.Instance;
public interface IMekaInstance extends ILabeledInstance, IElementDecorator<Instance>, Clusterable {
@Override
public Instance getElement();
}
|
| 0 | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/dataset/IMekaInstances.java |
package ai.libs.jaicore.ml.classification.multilabel.dataset;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset;
import org.api4.java.ai.ml.core.exception.DatasetCreationException;
import weka.core.Instances;
public interface IMekaInstances extends ILabeledDataset<IMekaInstance> {
public Instances getList();
public default Instances getInstances() {
return this.getList();
}
@Override
public IMekaInstances createEmptyCopy() throws DatasetCreationException, InterruptedException;
@Override
public IMekaInstances createCopy() throws DatasetCreationException, InterruptedException;
}
|
| 0 | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/dataset/MekaInstance.java |
package ai.libs.jaicore.ml.classification.multilabel.dataset;
import static ai.libs.jaicore.ml.weka.dataset.WekaInstancesUtil.transformInstanceToWekaInstance;
import java.util.stream.IntStream;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.api4.java.ai.ml.core.dataset.schema.ILabeledInstanceSchema;
import org.api4.java.ai.ml.core.dataset.serialization.UnsupportedAttributeTypeException;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance;
import ai.libs.jaicore.basic.sets.ElementDecorator;
import ai.libs.jaicore.ml.weka.WekaUtil;
import weka.core.Attribute;
import weka.core.Instance;
public class MekaInstance extends ElementDecorator<Instance> implements IMekaInstance {
public MekaInstance(final Instance instance) {
super(instance);
}
public MekaInstance(final ILabeledInstanceSchema schema, final ILabeledInstance instance) throws UnsupportedAttributeTypeException {
super(transformInstanceToWekaInstance(schema, instance));
if (schema.getNumAttributes() != instance.getNumAttributes()) {
throw new IllegalStateException("The number of attributes in the instance deviates from the number in the schema.");
}
}
@Override
public int[] getLabel() {
int[] labels = new int[this.getElement().classIndex()];
IntStream.range(0, this.getNumLabels()).forEach(x -> labels[x] = Integer.parseInt(this.getElement().attribute(x).value((int) this.getElement().value(x))));
return labels;
}
public int getNumLabels() {
return this.getElement().classIndex();
}
@Override
public Double getAttributeValue(final int pos) {
return this.getElement().value(pos);
}
@Override
public Object[] getAttributes() {
return IntStream.range(0, this.getElement().numAttributes()).filter(x -> x != this.getElement().classIndex()).mapToObj(x -> this.getElement().attribute(x)).map(this::transformAttributeValueToData).toArray();
}
private Object transformAttributeValueToData(final Attribute att) {
if (att.isNominal() || att.isString() || att.isRelationValued() || att.isDate() || att.isRegular()) {
return att.value((int) this.getElement().value(att));
} else {
return this.getElement().value(att);
}
}
@Override
public double[] getPoint() {
return IntStream.range(this.getNumLabels(), this.getElement().numAttributes()).mapToDouble(x -> this.getElement().value(x)).toArray();
}
@Override
public double getPointValue(final int pos) {
return this.getPoint()[pos];
}
@Override
public void removeColumn(final int columnPos) {
throw new UnsupportedOperationException("Not yet implemented!");
}
@Override
public void setLabel(final Object obj) {
if (obj instanceof String) {
this.getElement().setClassValue((String) obj);
} else if (obj instanceof Double) {
this.getElement().setClassValue((Double) obj);
} else {
throw new IllegalArgumentException("The value for the label must not be of type " + obj.getClass().getName() + ". The only valid types are Double and String.");
}
}
@Override
public void setAttributeValue(final int pos, final Object value) {
if (value instanceof String) {
this.getElement().setValue(pos, (String) value);
} else if (value instanceof Double) {
this.getElement().setValue(pos, (Double) value);
} else {
throw new IllegalArgumentException("The value for the attribute must not be of type " + value.getClass().getName() + ". The only valid types are Double and String.");
}
}
@Override
public int hashCode() {
return new HashCodeBuilder().append(this.getElement().toDoubleArray()).toHashCode();
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof MekaInstance)) {
return false;
}
return WekaUtil.areInstancesEqual(this.getElement(), ((MekaInstance) obj).getElement());
}
}
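// Sketch of the decorator in use (assumes a MEKA-formatted weka.core.Instance, i.e.
// the first classIndex() attributes are the binary label attributes):
//
// MekaInstance mi = new MekaInstance(wekaInstance);
// int[] labelVector = mi.getLabel(); // one 0/1 entry per label attribute
// double[] features = mi.getPoint(); // the values of the non-label attributes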
|
| 0 | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/dataset/MekaInstances.java |
package ai.libs.jaicore.ml.classification.multilabel.dataset;
import static ai.libs.jaicore.ml.classification.multilabel.dataset.MekaInstancesUtil.extractSchema;
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.api4.java.ai.ml.core.dataset.schema.ILabeledInstanceSchema;
import org.api4.java.ai.ml.core.dataset.schema.attribute.IAttribute;
import org.api4.java.ai.ml.core.dataset.serialization.UnsupportedAttributeTypeException;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance;
import org.api4.java.ai.ml.core.exception.DatasetCreationException;
import org.api4.java.common.attributedobjects.IListDecorator;
import org.api4.java.common.reconstruction.IReconstructible;
import org.api4.java.common.reconstruction.IReconstructionInstruction;
import org.api4.java.common.reconstruction.IReconstructionPlan;
import ai.libs.jaicore.basic.reconstruction.ReconstructionInstruction;
import ai.libs.jaicore.basic.reconstruction.ReconstructionPlan;
import ai.libs.jaicore.ml.weka.WekaUtil;
import ai.libs.jaicore.ml.weka.dataset.IWekaInstances;
import weka.core.Instance;
import weka.core.Instances;
public class MekaInstances implements IMekaInstances, IListDecorator<Instances, Instance, IMekaInstance>, IReconstructible {
private static final long serialVersionUID = -1980814429448333405L;
private ILabeledInstanceSchema schema;
private final List<IReconstructionInstruction> reconstructionInstructions;
private Instances dataset;
public MekaInstances(final Instances dataset) {
this(dataset, extractSchema(dataset));
this.reconstructionInstructions.add(new ReconstructionInstruction("Instances", "__construct", new Class<?>[] {}, new Object[] {}));
}
public MekaInstances(final Instances dataset, final ILabeledInstanceSchema schema) {
this.schema = schema;
this.dataset = dataset;
this.reconstructionInstructions = new ArrayList<>();
}
public MekaInstances(final ILabeledDataset<? extends ILabeledInstance> dataset) {
this.schema = dataset.getInstanceSchema();
if (dataset instanceof IMekaInstances) {
this.dataset = new Instances(((IMekaInstances) dataset).getInstances());
} else if (dataset instanceof IWekaInstances) {
this.dataset = new Instances(((IWekaInstances) dataset).getInstances());
} else {
try {
this.dataset = MekaInstancesUtil.datasetToWekaInstances(dataset);
} catch (UnsupportedAttributeTypeException e) {
throw new IllegalArgumentException("Could not convert dataset to weka's Instances.", e);
}
}
if (this.dataset.numAttributes() != dataset.getNumAttributes() + this.dataset.classIndex()) {
throw new IllegalStateException("The number of attributes in the MekaInstances does not coincide with the given dataset. We have " + this.dataset.numAttributes() + " while the given dataset has " + dataset.getNumAttributes()
+ ". There should be a difference of " + this.dataset.classIndex() + ", because WEKA counts each label as an attribute.");
}
this.reconstructionInstructions = (dataset instanceof IReconstructible) ? ((ReconstructionPlan) ((IReconstructible) dataset).getConstructionPlan()).getInstructions() : null;
}
@Override
public Instances getInstances() {
return this.dataset;
}
@Override
public void removeColumn(final int columnPos) {
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public IMekaInstances createEmptyCopy() throws DatasetCreationException {
return new MekaInstances(new Instances(this.dataset, 0));
}
@Override
public int hashCode() {
HashCodeBuilder hb = new HashCodeBuilder();
for (IMekaInstance inst : this) {
hb.append(inst.hashCode());
}
return hb.toHashCode();
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (this.getClass() != obj.getClass()) {
return false;
}
MekaInstances other = (MekaInstances) obj;
int n = this.size();
for (int i = 0; i < n; i++) {
if (!this.get(i).equals(other.get(i))) {
return false;
}
}
return true;
}
public int getFrequency(final IMekaInstance instance) {
return (int) this.stream().filter(instance::equals).count();
}
@Override
public String toString() {
return "MekaInstances [schema=" + this.getInstanceSchema() + "]\n" + this.dataset;
}
@Override
public Class<IMekaInstance> getTypeOfDecoratingItems() {
return IMekaInstance.class;
}
@Override
public Class<Instance> getTypeOfDecoratedItems() {
return Instance.class;
}
@Override
public Constructor<? extends IMekaInstance> getConstructorForDecoratingItems() {
try {
return MekaInstance.class.getConstructor(this.getTypeOfDecoratedItems());
} catch (Exception e) {
throw new IllegalArgumentException("The constructor of the decorating item class could not be retrieved.", e);
}
}
@Override
public Instances getList() {
return this.dataset;
}
@Override
public IMekaInstances createCopy() throws DatasetCreationException, InterruptedException {
return new MekaInstances(this);
}
@Override
public Object[] getLabelVector() {
return WekaUtil.getClassesAsList(this.dataset).toArray();
}
@Override
public ILabeledInstanceSchema getInstanceSchema() {
return this.schema;
}
@Override
public Object[][] getFeatureMatrix() {
throw new UnsupportedOperationException();
}
@Override
public void removeColumn(final String columnName) {
throw new UnsupportedOperationException();
}
@Override
public void removeColumn(final IAttribute attribute) {
throw new UnsupportedOperationException();
}
@Override
public IReconstructionPlan getConstructionPlan() {
return new ReconstructionPlan(this.reconstructionInstructions.stream().map(i -> (ReconstructionInstruction) i).collect(Collectors.toList()));
}
@Override
public void addInstruction(final IReconstructionInstruction instruction) {
this.reconstructionInstructions.add(instruction);
}
}
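// A minimal construction sketch (assumes a MEKA ARFF file loaded into weka.core.Instances
// with the class index set to the number of labels, as MEKA expects):
//
// Instances data = ...; // e.g. loaded via weka.core.converters.ConverterUtils.DataSource
// data.setClassIndex(numberOfLabels);
// IMekaInstances decorated = new MekaInstances(data);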
|
| 0 | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/dataset/MekaInstancesUtil.java |
package ai.libs.jaicore.ml.classification.multilabel.dataset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.api4.java.ai.ml.core.dataset.schema.ILabeledInstanceSchema;
import org.api4.java.ai.ml.core.dataset.schema.attribute.IAttribute;
import org.api4.java.ai.ml.core.dataset.schema.attribute.ICategoricalAttribute;
import org.api4.java.ai.ml.core.dataset.schema.attribute.INumericAttribute;
import org.api4.java.ai.ml.core.dataset.serialization.UnsupportedAttributeTypeException;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance;
import ai.libs.jaicore.ml.core.dataset.schema.LabeledInstanceSchema;
import ai.libs.jaicore.ml.core.dataset.schema.attribute.IntBasedCategoricalAttribute;
import ai.libs.jaicore.ml.core.dataset.schema.attribute.MultiLabelAttribute;
import ai.libs.jaicore.ml.core.dataset.schema.attribute.NumericAttribute;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
public class MekaInstancesUtil {
private MekaInstancesUtil() {
/* Intentionally blank, hiding standard constructor for this util class. */
}
public static ILabeledInstanceSchema extractSchema(final Instances dataset) {
int targetIndex = dataset.classIndex();
if (targetIndex < 0) {
throw new IllegalArgumentException("Class index of Instances object is not set!");
}
List<IAttribute> attributes = IntStream.range(dataset.classIndex(), dataset.numAttributes()).mapToObj(dataset::attribute).map(MekaInstancesUtil::transformWEKAAttributeToAttributeType).collect(Collectors.toList());
List<String> values = IntStream.range(0, dataset.classIndex()).mapToObj(x -> dataset.attribute(x).name()).collect(Collectors.toList());
IAttribute labelAttribute = new MultiLabelAttribute("labels", values);
return new LabeledInstanceSchema(dataset.relationName(), attributes, labelAttribute);
}
public static Instances datasetToWekaInstances(final ILabeledDataset<? extends ILabeledInstance> dataset) throws UnsupportedAttributeTypeException {
Instances wekaInstances = createDatasetFromSchema(dataset.getInstanceSchema());
for (ILabeledInstance inst : dataset) {
double[] point = inst.getPoint();
double[] pointWithLabel = Arrays.copyOf(point, point.length + 1);
DenseInstance iNew = new DenseInstance(1, pointWithLabel);
iNew.setDataset(wekaInstances);
if (dataset.getLabelAttribute() instanceof ICategoricalAttribute) {
iNew.setClassValue(((ICategoricalAttribute) dataset.getLabelAttribute()).getLabelOfCategory((int) inst.getLabel()));
} else {
iNew.setClassValue((Double) inst.getLabel());
}
wekaInstances.add(iNew); // this MUST come here AFTER having set the class value; otherwise, the class is not registered correctly in the Instances object!!
}
return wekaInstances;
}
public static Instances createDatasetFromSchema(final ILabeledInstanceSchema schema) throws UnsupportedAttributeTypeException {
Objects.requireNonNull(schema);
List<Attribute> attributes = new LinkedList<>();
for (int i = 0; i < schema.getNumAttributes(); i++) {
IAttribute attType = schema.getAttributeList().get(i);
if (attType instanceof NumericAttribute) {
attributes.add(new Attribute("att" + i));
} else if (attType instanceof IntBasedCategoricalAttribute) {
attributes.add(new Attribute("att" + i, ((IntBasedCategoricalAttribute) attType).getLabels()));
} else {
throw new UnsupportedAttributeTypeException("Attribute " + attType.getName() + " has an unsupported attribute type.");
}
}
IAttribute classType = schema.getLabelAttribute();
Attribute classAttribute;
if (classType instanceof INumericAttribute) {
classAttribute = new Attribute("class");
} else if (classType instanceof ICategoricalAttribute) {
classAttribute = new Attribute("class", ((ICategoricalAttribute) classType).getLabels());
} else {
throw new UnsupportedAttributeTypeException("The class attribute has an unsupported attribute type.");
}
ArrayList<Attribute> attributeList = new ArrayList<>(attributes);
attributeList.add(classAttribute);
Instances wekaInstances = new Instances("weka-instances", attributeList, 0);
wekaInstances.setClassIndex(wekaInstances.numAttributes() - 1);
return wekaInstances;
}
public static IAttribute transformWEKAAttributeToAttributeType(final Attribute att) {
String attributeName = att.name();
if (att.isNumeric()) {
return new NumericAttribute(attributeName);
} else if (att.isNominal()) {
List<String> domain = new LinkedList<>();
for (int i = 0; i < att.numValues(); i++) {
domain.add(att.value(i));
}
return new IntBasedCategoricalAttribute(attributeName, domain);
}
throw new IllegalArgumentException("Can only transform numeric or categorical attributes");
}
public static Instance transformInstanceToWekaInstance(final ILabeledInstanceSchema schema, final ILabeledInstance instance) throws UnsupportedAttributeTypeException {
if (instance.getNumAttributes() != schema.getNumAttributes()) {
throw new IllegalArgumentException("Schema and instance do not coincide. The schema defines " + schema.getNumAttributes() + " attributes but the instance has " + instance.getNumAttributes() + " attributes.");
}
if (instance instanceof MekaInstance) {
return ((MekaInstance) instance).getElement();
}
Objects.requireNonNull(schema);
Instances dataset = createDatasetFromSchema(schema);
Instance iNew = new DenseInstance(dataset.numAttributes());
iNew.setDataset(dataset);
for (int i = 0; i < instance.getNumAttributes(); i++) {
if (schema.getAttribute(i) instanceof INumericAttribute) {
iNew.setValue(i, ((INumericAttribute) schema.getAttribute(i)).getAsAttributeValue(instance.getAttributeValue(i)).getValue());
} else if (schema.getAttribute(i) instanceof ICategoricalAttribute) {
iNew.setValue(i, ((ICategoricalAttribute) schema.getAttribute(i)).getAsAttributeValue(instance.getAttributeValue(i)).getValue());
} else {
throw new UnsupportedAttributeTypeException("Only categorical and numeric attributes are supported!");
}
}
if (schema.getLabelAttribute() instanceof INumericAttribute) {
iNew.setValue(iNew.numAttributes() - 1, ((INumericAttribute) schema.getLabelAttribute()).getAsAttributeValue(instance.getLabel()).getValue());
} else if (schema.getLabelAttribute() instanceof ICategoricalAttribute) {
iNew.setValue(iNew.numAttributes() - 1, ((ICategoricalAttribute) schema.getLabelAttribute()).getAsAttributeValue(instance.getLabel()).getValue());
} else {
throw new UnsupportedAttributeTypeException("Only categorical and numeric attributes are supported!");
}
return iNew;
}
}
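// Note on the convention assumed by extractSchema: in MEKA datasets the label attributes
// come first, i.e. indices [0, classIndex()) are labels and the remaining attributes are
// features; the label columns are collapsed into a single MultiLabelAttribute named
// "labels". A quick sketch:
//
// ILabeledInstanceSchema schema = MekaInstancesUtil.extractSchema(mekaFormattedInstances);
// IAttribute labels = schema.getLabelAttribute(); // MultiLabelAttribute over the label names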
|
| 0 | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/dataset/package-info.java |
/**
* This package contains classes for WEKA-specific logic regarding the dataset.
* For instance, it contains a util for transforming WEKA's Instances into IDataset objects.
*
* @author mwever
*/
package ai.libs.jaicore.ml.classification.multilabel.dataset;
|
| 0 | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/learner/AMultiLabelClassifier.java |
package ai.libs.jaicore.ml.classification.multilabel.learner;
import java.util.ArrayList;
import java.util.List;
import org.api4.java.ai.ml.classification.multilabel.evaluation.IMultiLabelClassification;
import org.api4.java.ai.ml.classification.multilabel.evaluation.IMultiLabelClassificationPredictionBatch;
import org.api4.java.ai.ml.classification.multilabel.learner.IMultiLabelClassifier;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance;
import org.api4.java.ai.ml.core.exception.PredictionException;
import ai.libs.jaicore.ml.classification.multilabel.MultiLabelClassificationPredictionBatch;
import ai.libs.jaicore.ml.core.learner.ASupervisedLearner;
public abstract class AMultiLabelClassifier extends ASupervisedLearner<ILabeledInstance, ILabeledDataset<? extends ILabeledInstance>, IMultiLabelClassification, IMultiLabelClassificationPredictionBatch> implements IMultiLabelClassifier {
@Override
public IMultiLabelClassificationPredictionBatch predict(final ILabeledInstance[] dTest) throws PredictionException, InterruptedException {
List<IMultiLabelClassification> batch = new ArrayList<>();
for (ILabeledInstance instance : dTest) {
batch.add(this.predict(instance));
}
return new MultiLabelClassificationPredictionBatch(batch);
}
}
|
| 0 | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/learner/IMekaClassifier.java |
package ai.libs.jaicore.ml.classification.multilabel.learner;
import org.api4.java.ai.ml.classification.IClassifier;
import meka.classifiers.multilabel.MultiLabelClassifier;
public interface IMekaClassifier extends IClassifier {
public MultiLabelClassifier getClassifier();
}
|
| 0 | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/learner/MekaClassifier.java |
package ai.libs.jaicore.ml.classification.multilabel.learner;
import java.util.concurrent.atomic.AtomicInteger;
import org.api4.java.ai.ml.classification.multilabel.evaluation.IMultiLabelClassification;
import org.api4.java.ai.ml.core.dataset.schema.ILabeledInstanceSchema;
import org.api4.java.ai.ml.core.dataset.serialization.UnsupportedAttributeTypeException;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance;
import org.api4.java.ai.ml.core.exception.PredictionException;
import org.api4.java.ai.ml.core.exception.TrainingException;
import org.api4.java.common.reconstruction.IReconstructible;
import org.api4.java.common.reconstruction.IReconstructionInstruction;
import org.api4.java.common.reconstruction.IReconstructionPlan;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.basic.reconstruction.ReconstructionPlan;
import ai.libs.jaicore.ml.classification.multilabel.MultiLabelClassification;
import ai.libs.jaicore.ml.classification.multilabel.dataset.IMekaInstances;
import ai.libs.jaicore.ml.classification.multilabel.dataset.MekaInstance;
import ai.libs.jaicore.ml.classification.multilabel.dataset.MekaInstances;
import ai.libs.jaicore.ml.weka.WekaUtil;
import meka.classifiers.multilabel.MultiLabelClassifier;
import weka.core.DenseInstance;
public class MekaClassifier extends AMultiLabelClassifier implements IMekaClassifier, IReconstructible {
private static final Logger LOGGER = LoggerFactory.getLogger(MekaClassifier.class);
private static final AtomicInteger ID_COUNTER = new AtomicInteger(0);
private MultiLabelClassifier classifier;
private IReconstructionPlan reconstructionPlan;
private ILabeledInstanceSchema schema;
private final int id;
public MekaClassifier(final MultiLabelClassifier classifier) {
this.reconstructionPlan = new ReconstructionPlan();
this.classifier = classifier;
this.id = ID_COUNTER.getAndIncrement();
}
@Override
public MultiLabelClassifier getClassifier() {
return this.classifier;
}
@Override
public void fit(final ILabeledDataset<? extends ILabeledInstance> dTrain) throws TrainingException, InterruptedException {
LOGGER.debug("Obtain instance schema from training instances.");
this.schema = dTrain.getInstanceSchema();
LOGGER.debug("Ensure instances to be of the correct format.");
IMekaInstances dataset;
if (dTrain instanceof IMekaInstances) {
LOGGER.debug("Instances are already of type IMekaInstances so just perform a type cast.");
dataset = (IMekaInstances) dTrain;
} else {
LOGGER.debug("Instances are not of type IMekaInstances so make them MekaInstances.");
dataset = new MekaInstances(dTrain);
}
LOGGER.debug("Build the classifier");
try {
this.classifier.buildClassifier(dataset.getInstances());
LOGGER.debug("Done building the classifier.");
} catch (InterruptedException e) {
throw e;
} catch (Exception e) {
throw new TrainingException("Could not build classifier.", e);
}
}
@Override
public IReconstructionPlan getConstructionPlan() {
return this.reconstructionPlan;
}
@Override
public void addInstruction(final IReconstructionInstruction instruction) {
this.reconstructionPlan.getInstructions().add(instruction);
}
@Override
public IMultiLabelClassification predict(final ILabeledInstance xTest) throws PredictionException, InterruptedException {
if (this.schema == null) {
throw new IllegalStateException("Cannot conduct predictions with the classifier, because the dataset schema has not been defined.");
}
MekaInstance instance;
if (xTest instanceof MekaInstance) {
instance = (MekaInstance) xTest;
} else {
try {
instance = new MekaInstance(this.schema, xTest);
} catch (UnsupportedAttributeTypeException e) {
throw new PredictionException("Could not create MekaInstance object from the given instance.", e);
}
}
try {
// XXX: Workaround: better make a fresh copy of the instance, because some MEKA classifiers might change the information contained in it.
DenseInstance copy = new DenseInstance(instance.getElement());
copy.setDataset(instance.getElement().dataset());
double[] dist = this.classifier.distributionForInstance(copy);
return new MultiLabelClassification(dist);
} catch (InterruptedException e) {
throw e;
} catch (Exception e) {
throw new PredictionException("Could not make a prediction since an exception occurred in the wrapped MEKA classifier.", e);
}
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("#");
sb.append(this.id);
sb.append(": ");
sb.append(WekaUtil.getClassifierDescriptor(this.classifier));
return sb.toString();
}
}
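// A minimal usage sketch (any MEKA MultiLabelClassifier can be wrapped; BR, i.e.
// binary relevance, is used here as an example):
//
// MekaClassifier classifier = new MekaClassifier(new meka.classifiers.multilabel.BR());
// classifier.fit(trainingData); // any ILabeledDataset; converted to MekaInstances if necessary
// IMultiLabelClassification prediction = classifier.predict(testInstance);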
|
| 0 | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/learner/package-info.java |
package ai.libs.jaicore.ml.classification.multilabel.learner;
|
| 0 | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/learner | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/learner/homer/HOMERLeaf.java |
package ai.libs.jaicore.ml.classification.multilabel.learner.homer;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
public class HOMERLeaf extends HOMERNode {
private Integer label;
public HOMERLeaf(final Integer label) {
this.label = label;
}
@Override
public List<HOMERNode> getChildren() {
return Arrays.asList(this);
}
@Override
public boolean isLeaf() {
return true;
}
@Override
public Collection<Integer> getLabels() {
return Arrays.asList(this.label);
}
@Override
public String toString() {
return this.label + "";
}
}
|
| 0 | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/learner | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/learner/homer/HOMERNode.java |
package ai.libs.jaicore.ml.classification.multilabel.learner.homer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.basic.ArrayUtil;
import meka.classifiers.multilabel.AbstractMultiLabelClassifier;
import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.core.F;
import weka.core.Instance;
import weka.core.Instances;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Add;
public class HOMERNode extends AbstractMultiLabelClassifier {
private static final long serialVersionUID = -2634579245812714183L;
private static final Logger LOGGER = LoggerFactory.getLogger(HOMERNode.class);
private static final boolean HIERARCHICAL_STRING = false;
private static final double THRESHOLD = 0.5;
private List<HOMERNode> children;
private MultiLabelClassifier baselearner;
private String baselearnerName;
private boolean doThreshold = false;
public HOMERNode(final HOMERNode... nodes) {
this(Arrays.asList(nodes));
}
public HOMERNode(final List<HOMERNode> nodes) {
this.children = nodes;
Collections.sort(this.children, (o1, o2) -> {
List<Integer> o1Labels = new LinkedList<>(o1.getLabels());
List<Integer> o2Labels = new LinkedList<>(o2.getLabels());
Collections.sort(o1Labels);
Collections.sort(o2Labels);
return o1Labels.get(0).compareTo(o2Labels.get(0));
});
this.baselearner = new BR();
}
public void setThreshold(final boolean doThreshold) {
this.doThreshold = doThreshold;
}
public void setBaselearner(final MultiLabelClassifier baselearner) {
this.baselearner = baselearner;
}
public String getBaselearnerName() {
return this.baselearnerName;
}
public void setBaselearnerName(final String baselearnerName) {
this.baselearnerName = baselearnerName;
}
public List<HOMERNode> getChildren() {
return this.children;
}
/**
* @return The set of labels this node is responsible for.
*/
public Collection<Integer> getLabels() {
Collection<Integer> labels = new HashSet<>();
this.children.stream().map(HOMERNode::getLabels).forEach(labels::addAll);
return labels;
}
@Override
public void buildClassifier(final Instances trainingSet) throws Exception {
LOGGER.debug("Build node with {} as a base learner", this.baselearnerName);
Instances currentDataset = this.prepareInstances(trainingSet);
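/* Assign the meta-labels: meta-label j of instance i is set to 1 iff instance i carries
* at least one of the original labels covered by child j; instances that carry none of
* this node's labels are collected and removed from the node's training set below. */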
List<Integer> removeInstances = new ArrayList<>();
for (int i = 0; i < trainingSet.size(); i++) {
boolean addedLabel = false;
for (int j = 0; j < this.children.size(); j++) {
int currentI = i;
if (this.children.get(j).getLabels().stream().mapToDouble(x -> trainingSet.get(currentI).value(x)).sum() > 0) {
addedLabel = true;
currentDataset.get(i).setValue(j, 1.0);
} else {
currentDataset.get(i).setValue(j, 0.0);
}
}
if (!addedLabel) {
removeInstances.add(i);
}
}
for (int i = removeInstances.size() - 1; i >= 0; i--) {
currentDataset.remove((int) removeInstances.get(i));
}
this.baselearner.buildClassifier(currentDataset);
for (HOMERNode child : this.children) {
if (child.getLabels().size() > 1) {
child.buildClassifier(trainingSet);
}
}
}
@Override
public double[] distributionForInstance(final Instance testInstance) throws Exception {
Instances copy = new Instances(testInstance.dataset(), 0);
copy.add(testInstance.copy(testInstance.toDoubleArray()));
Instances prepared = this.prepareInstances(copy);
int length;
int[] tDist = {};
double[] dist = {};
if (this.doThreshold) {
tDist = ArrayUtil.thresholdDoubleToBinaryArray(this.baselearner.distributionForInstance(prepared.get(0)), THRESHOLD);
length = tDist.length;
} else {
dist = this.baselearner.distributionForInstance(prepared.get(0));
length = dist.length;
}
double[] returnDist = new double[testInstance.classIndex()];
for (int i = 0; i < length; i++) {
if (this.doThreshold && tDist[i] == 1) {
if (this.children.get(i).getLabels().size() == 1) {
returnDist[this.children.get(i).getLabels().iterator().next()] = 1.0;
} else {
ArrayUtil.add(returnDist, this.children.get(i).distributionForInstance(testInstance));
}
} else if (!this.doThreshold) {
if (this.children.get(i).getLabels().size() == 1) {
returnDist[this.children.get(i).getLabels().iterator().next()] = dist[i];
} else {
double[] childDist = this.children.get(i).distributionForInstance(testInstance);
for (Integer childLabel : this.children.get(i).getLabels()) {
returnDist[childLabel] = childDist[childLabel] * dist[i];
}
}
}
}
return returnDist;
}
public Instances prepareInstances(final Instances dataset) throws Exception {
Instances currentDataset = F.keepLabels(dataset, dataset.classIndex(), new int[] {});
for (int i = this.children.size() - 1; i >= 0; i--) {
Collection<Integer> labels = this.children.get(i).getLabels();
Add add = new Add();
add.setAttributeName(labels.stream().map(x -> dataset.attribute(x).name()).collect(Collectors.joining("&")));
add.setAttributeIndex("first");
add.setNominalLabels("0,1");
add.setInputFormat(currentDataset);
currentDataset = Filter.useFilter(currentDataset, add);
}
currentDataset.setClassIndex(this.children.size());
return currentDataset;
}
public boolean isLeaf() {
return false;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
if (!HIERARCHICAL_STRING) {
String actualBaselearnerName = this.baselearner.getOptions()[1];
sb.append(actualBaselearnerName.substring(actualBaselearnerName.lastIndexOf('.') + 1, actualBaselearnerName.length()));
sb.append("(");
sb.append(this.children.stream().map(HOMERNode::toString).collect(Collectors.joining(",")));
sb.append(")");
}
return sb.toString();
}
}
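// Sketch of assembling a small HOMER hierarchy for four labels (leaves carry single
// label indices; each inner node trains its base learner on the meta-labels of its
// children):
//
// HOMERNode left = new HOMERNode(new HOMERLeaf(0), new HOMERLeaf(1));
// HOMERNode right = new HOMERNode(new HOMERLeaf(2), new HOMERLeaf(3));
// HOMERNode root = new HOMERNode(left, right);
// root.buildClassifier(mekaFormattedTrainingData); // class index = number of labels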
|
| 0 | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/learner | java-sources/ai/libs/jaicore-ml-meka/0.2.7/ai/libs/jaicore/ml/classification/multilabel/learner/homer/package-info.java |
package ai.libs.jaicore.ml.classification.multilabel.learner.homer;
|
| 0 | java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml | java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/RankingByPairwiseComparison.java |
package ai.libs.jaicore.ml.weka;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.api4.java.ai.ml.core.exception.PredictionException;
import org.api4.java.ai.ml.core.exception.TrainingException;
import ai.libs.jaicore.basic.Maps;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.core.Instance;
import weka.core.Instances;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Add;
import weka.filters.unsupervised.attribute.Remove;
public class RankingByPairwiseComparison {
private RankingByPairwiseComparisonConfig config;
private List<Integer> labelIndices;
private Set<String> labelSet = new HashSet<>();
class PairWiseClassifier {
private String a;
private String b;
private Classifier c;
}
private List<PairWiseClassifier> pwClassifiers = new LinkedList<>();
public RankingByPairwiseComparison(final RankingByPairwiseComparisonConfig config) {
this.config = config;
}
private Instances applyFiltersToDataset(final Instances dataset) throws Exception {
Remove removeFilter = new Remove();
removeFilter.setAttributeIndicesArray(this.labelIndices.stream().mapToInt(x -> x).toArray());
removeFilter.setInvertSelection(false);
removeFilter.setInputFormat(dataset);
Instances filteredDataset = Filter.useFilter(dataset, removeFilter);
Add addTarget = new Add();
addTarget.setAttributeIndex("last");
addTarget.setNominalLabels("true,false");
addTarget.setAttributeName("a>b");
addTarget.setInputFormat(filteredDataset);
filteredDataset = Filter.useFilter(filteredDataset, addTarget);
filteredDataset.setClassIndex(filteredDataset.numAttributes() - 1);
return filteredDataset;
}
private static List<Integer> getLabelIndices(final int labels, final Instances dataset) {
List<Integer> labelIndices = new LinkedList<>();
if (labels < 0) {
for (int i = dataset.numAttributes() - 1; i >= dataset.numAttributes() + labels; i--) {
labelIndices.add(i);
}
} else {
for (int i = 0; i < labels; i++) {
labelIndices.add(i);
}
}
return labelIndices;
}
public void fit(final Instances dataset, final int labels) throws Exception {
this.labelIndices = getLabelIndices(labels, dataset);
this.labelIndices.stream().map(x -> dataset.attribute(x).name()).forEach(this.labelSet::add);
Instances plainPWDataset = this.applyFiltersToDataset(dataset);
try {
for (int i = 0; i < this.labelIndices.size() - 1; i++) {
for (int j = i + 1; j < this.labelIndices.size(); j++) {
PairWiseClassifier pwc = new PairWiseClassifier();
pwc.a = dataset.attribute(this.labelIndices.get(i)).name();
pwc.b = dataset.attribute(this.labelIndices.get(j)).name();
pwc.c = AbstractClassifier.forName(this.config.getBaseLearner(), null);
Instances pwDataset = new Instances(plainPWDataset);
for (int k = 0; k < pwDataset.size(); k++) {
String value;
if (dataset.get(k).value(this.labelIndices.get(i)) > dataset.get(k).value(this.labelIndices.get(j))) {
value = "true";
} else {
value = "false";
}
pwDataset.get(k).setValue(pwDataset.numAttributes() - 1, value);
}
pwDataset.setClassIndex(pwDataset.numAttributes() - 1);
pwc.c.buildClassifier(pwDataset);
this.pwClassifiers.add(pwc);
}
}
} catch (Exception e) {
throw new TrainingException("Could not build ranker", e);
}
}
public List<String> predict(final Instance xTest) throws PredictionException {
try {
Instances datasetCopy = new Instances(xTest.dataset(), 0);
datasetCopy.add(xTest);
datasetCopy = this.applyFiltersToDataset(datasetCopy);
Map<String, Double> vote = new HashMap<>();
this.labelSet.stream().forEach(x -> vote.put(x, 0.0));
for (PairWiseClassifier pwc : this.pwClassifiers) {
double[] dist = pwc.c.distributionForInstance(datasetCopy.get(0));
switch (this.config.getVotingStrategy()) {
case RankingByPairwiseComparisonConfig.V_VOTING_STRATEGY_CLASSIFY:
if (dist[0] > dist[1]) {
Maps.increaseCounterInDoubleMap(vote, pwc.a);
} else {
Maps.increaseCounterInDoubleMap(vote, pwc.b);
}
break;
default:
case RankingByPairwiseComparisonConfig.V_VOTING_STRATEGY_PROBABILITY:
Maps.increaseCounterInDoubleMap(vote, pwc.a, dist[0]);
Maps.increaseCounterInDoubleMap(vote, pwc.b, dist[1]);
break;
}
}
List<String> ranking = new LinkedList<>(vote.keySet());
ranking.sort((arg0, arg1) -> vote.get(arg1).compareTo(vote.get(arg0)));
return ranking;
} catch (Exception e) {
throw new PredictionException("Could not create a prediction.", e);
}
}
}
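// A minimal usage sketch (org.aeonbits.owner.ConfigFactory instantiates the config
// interface with its declared defaults, i.e. a J48 base learner and probability voting):
//
// RankingByPairwiseComparisonConfig config = ConfigFactory.create(RankingByPairwiseComparisonConfig.class);
// RankingByPairwiseComparison rpc = new RankingByPairwiseComparison(config);
// rpc.fit(dataset, numLabels); // positive count: first numLabels attributes are labels; negative: the last ones
// List<String> ranking = rpc.predict(testInstance);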
|
| 0 | java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml | java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/RankingByPairwiseComparisonConfig.java |
package ai.libs.jaicore.ml.weka;
import org.aeonbits.owner.Config;
public interface RankingByPairwiseComparisonConfig extends Config {
public static final String K_BASE_LEARNER = "rpc.baselearner";
public static final String K_VOTING_STRATEGY = "rpc.votingstrategy";
public static final String V_VOTING_STRATEGY_CLASSIFY = "classify";
public static final String V_VOTING_STRATEGY_PROBABILITY = "probability";
@Key(K_BASE_LEARNER)
@DefaultValue("weka.classifiers.trees.J48")
public String getBaseLearner();
@Key(K_VOTING_STRATEGY)
@DefaultValue(V_VOTING_STRATEGY_PROBABILITY)
public String getVotingStrategy();
}
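// Example properties (hypothetical file) overriding the defaults declared above:
//
// rpc.baselearner = weka.classifiers.trees.RandomForest
// rpc.votingstrategy = classify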
|
| 0 | java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml | java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/WekaInstancesFeatureUnion.java |
package ai.libs.jaicore.ml.weka;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
public class WekaInstancesFeatureUnion {
public Instances merge(final Instances dataA, final Instances dataB) {
if (dataA == null || dataB == null) {
throw new IllegalArgumentException("Instances objects must not be null.");
}
List<Instances> datasetList = new LinkedList<>();
datasetList.add(dataA);
datasetList.add(dataB);
return this.merge(datasetList);
}
public Instances merge(final Collection<Instances> data) {
if (data.isEmpty()) {
throw new IllegalArgumentException("Merge cannot be invoked with empty collection of Instances");
} else if (data.size() == 1) {
return data.iterator().next();
}
boolean allEqualSize = true;
Iterator<Instances> dataIt = data.iterator();
Instances currentInstances = dataIt.next();
while (dataIt.hasNext()) {
Instances nextInstances = dataIt.next();
if (currentInstances.size() != nextInstances.size()) {
allEqualSize = false;
break;
}
currentInstances = nextInstances;
}
if (!allEqualSize) {
throw new IllegalArgumentException("The sizes of the provided Instances objects are not equal; the datasets may only differ in their features, not in the number of instances.");
}
// First of all merge the lists of attributes to construct the meta data of the feature merged
// dataset.
ArrayList<Attribute> mergedAttributeList = new ArrayList<>();
Map<Attribute, Attribute> attributeMap = new HashMap<>();
Attribute classAttribute = null;
String relationName = null;
Integer size = null;
int ns = 0;
for (Instances dataset : data) {
if (classAttribute == null) {
classAttribute = dataset.classAttribute().copy(ns + "-" + dataset.classAttribute().name());
attributeMap.put(dataset.classAttribute(), classAttribute);
}
if (relationName == null) {
relationName = dataset.relationName();
}
if (size == null) {
size = dataset.size();
}
for (int i = 0; i < dataset.numAttributes(); i++) {
if (i != dataset.classIndex()) {
Attribute copiedAttribute = dataset.attribute(i).copy(ns + "-" + dataset.attribute(i).name());
mergedAttributeList.add(copiedAttribute);
attributeMap.put(dataset.attribute(i), copiedAttribute);
}
}
ns++;
}
mergedAttributeList.add(classAttribute);
Instances mergedInstances = new Instances("FeatureUnionInstances-" + relationName, mergedAttributeList, size);
mergedInstances.setClassIndex(mergedInstances.numAttributes() - 1);
for (int i = 0; i < size; i++) {
Instance iNew = new DenseInstance(mergedAttributeList.size());
iNew.setDataset(mergedInstances);
// copy attribute values from original instance objects
for (Instances dataset : data) {
Instance iDataset = dataset.get(i);
for (int j = 0; j < dataset.numAttributes(); j++) {
Attribute originalKey = null;
for (Attribute key : attributeMap.keySet()) {
if (key == iDataset.attribute(j)) {
originalKey = key;
}
}
if (originalKey != null) {
iNew.setValue(attributeMap.get(dataset.attribute(j)), iDataset.value(dataset.attribute(j)));
}
}
}
mergedInstances.add(iNew);
}
return mergedInstances;
}
}
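// A minimal usage sketch (both Instances objects must have the same number of rows;
// attribute names are prefixed with the index of their source dataset to avoid clashes):
//
// Instances merged = new WekaInstancesFeatureUnion().merge(dataA, dataB);
// // merged holds dataA's and dataB's features side by side plus a single class attribute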
|
| 0 | java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml | java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/WekaUtil.java |
package ai.libs.jaicore.ml.weka;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.lang.reflect.Method;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.apache.commons.lang3.reflect.MethodUtils;
import org.api4.java.ai.ml.core.dataset.splitter.SplitFailedException;
import org.api4.java.ai.ml.core.exception.DatasetCreationException;
import org.api4.java.algorithm.exceptions.AlgorithmException;
import org.api4.java.algorithm.exceptions.AlgorithmExecutionCanceledException;
import org.api4.java.algorithm.exceptions.AlgorithmTimeoutedException;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.google.common.collect.ContiguousSet;
import com.google.common.collect.DiscreteDomain;
import com.google.common.collect.Range;
import ai.libs.jaicore.basic.sets.CartesianProductComputationProblem;
import ai.libs.jaicore.basic.sets.LDSRelationComputer;
import ai.libs.jaicore.ml.core.filter.sampling.inmemory.stratified.sampling.LabelBasedStratifiedSampling;
import ai.libs.jaicore.ml.core.filter.sampling.inmemory.stratified.sampling.StratifiedSampling;
import ai.libs.jaicore.ml.weka.dataset.IWekaInstances;
import ai.libs.jaicore.ml.weka.dataset.WekaInstances;
import weka.attributeSelection.ASEvaluation;
import weka.attributeSelection.ASSearch;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.MultipleClassifiersCombiner;
import weka.classifiers.SingleClassifierEnhancer;
import weka.classifiers.functions.SMO;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.InstanceComparator;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.json.JSONInstances;
import weka.core.json.JSONNode;
import weka.filters.Filter;
import weka.filters.supervised.attribute.NominalToBinary;
import weka.filters.unsupervised.attribute.Remove;
public class WekaUtil {
private WekaUtil() {
/* avoid instantiation */
}
private static final String MSG_SUM1 = "Portions must sum up to at most 1.";
private static final String MSG_DEVIATING_NUMBER_OF_INSTANCES = "The number of instances in the folds does not equal the number of instances in the original dataset";
private static boolean debug = false;
public static Collection<String> getBasicLearners() {
Collection<String> classifiers = new ArrayList<>();
classifiers.add(weka.classifiers.bayes.BayesNet.class.getName());
classifiers.add(weka.classifiers.bayes.NaiveBayes.class.getName());
classifiers.add(weka.classifiers.bayes.NaiveBayesMultinomial.class.getName());
classifiers.add(weka.classifiers.functions.Logistic.class.getName());
classifiers.add(weka.classifiers.functions.MultilayerPerceptron.class.getName());
classifiers.add(weka.classifiers.functions.SimpleLinearRegression.class.getName());
classifiers.add(weka.classifiers.functions.SimpleLogistic.class.getName());
classifiers.add(weka.classifiers.functions.SMO.class.getName());
classifiers.add(weka.classifiers.functions.VotedPerceptron.class.getName());
classifiers.add(weka.classifiers.lazy.IBk.class.getName());
classifiers.add(weka.classifiers.lazy.KStar.class.getName());
classifiers.add(weka.classifiers.rules.DecisionTable.class.getName());
classifiers.add(weka.classifiers.rules.JRip.class.getName());
classifiers.add(weka.classifiers.rules.M5Rules.class.getName());
classifiers.add(weka.classifiers.rules.OneR.class.getName());
classifiers.add(weka.classifiers.rules.PART.class.getName());
classifiers.add(weka.classifiers.rules.ZeroR.class.getName());
classifiers.add(weka.classifiers.trees.DecisionStump.class.getName());
classifiers.add(weka.classifiers.trees.J48.class.getName());
classifiers.add(weka.classifiers.trees.LMT.class.getName());
classifiers.add(weka.classifiers.trees.M5P.class.getName());
classifiers.add(weka.classifiers.trees.RandomForest.class.getName());
classifiers.add(weka.classifiers.trees.RandomTree.class.getName());
classifiers.add(weka.classifiers.trees.REPTree.class.getName());
return classifiers;
}
public static Collection<String> getBasicClassifiers() {
Collection<String> classifiers = new ArrayList<>();
classifiers.add(weka.classifiers.bayes.BayesNet.class.getName());
classifiers.add(weka.classifiers.bayes.NaiveBayes.class.getName());
classifiers.add(weka.classifiers.bayes.NaiveBayesMultinomial.class.getName());
classifiers.add(weka.classifiers.functions.Logistic.class.getName());
classifiers.add(weka.classifiers.functions.MultilayerPerceptron.class.getName());
classifiers.add(weka.classifiers.functions.SimpleLogistic.class.getName());
classifiers.add(weka.classifiers.functions.SMO.class.getName());
classifiers.add(weka.classifiers.lazy.IBk.class.getName());
classifiers.add(weka.classifiers.lazy.KStar.class.getName());
classifiers.add(weka.classifiers.rules.DecisionTable.class.getName());
classifiers.add(weka.classifiers.rules.JRip.class.getName());
classifiers.add(weka.classifiers.rules.OneR.class.getName());
classifiers.add(weka.classifiers.rules.PART.class.getName());
classifiers.add(weka.classifiers.rules.ZeroR.class.getName());
classifiers.add(weka.classifiers.trees.DecisionStump.class.getName());
classifiers.add(weka.classifiers.trees.J48.class.getName());
classifiers.add(weka.classifiers.trees.LMT.class.getName());
classifiers.add(weka.classifiers.trees.RandomForest.class.getName());
classifiers.add(weka.classifiers.trees.RandomTree.class.getName());
classifiers.add(weka.classifiers.trees.REPTree.class.getName());
return classifiers;
}
public static Collection<String> getNativeMultiClassClassifiers() {
Collection<String> classifiers = new ArrayList<>();
classifiers.add(weka.classifiers.bayes.BayesNet.class.getName());
classifiers.add(weka.classifiers.bayes.NaiveBayes.class.getName());
classifiers.add(weka.classifiers.bayes.NaiveBayesMultinomial.class.getName());
classifiers.add(weka.classifiers.functions.Logistic.class.getName());
classifiers.add(weka.classifiers.functions.MultilayerPerceptron.class.getName());
classifiers.add(weka.classifiers.functions.SimpleLogistic.class.getName());
classifiers.add(weka.classifiers.lazy.IBk.class.getName());
classifiers.add(weka.classifiers.lazy.KStar.class.getName());
classifiers.add(weka.classifiers.rules.JRip.class.getName());
classifiers.add(weka.classifiers.rules.M5Rules.class.getName());
classifiers.add(weka.classifiers.rules.OneR.class.getName());
classifiers.add(weka.classifiers.rules.PART.class.getName());
classifiers.add(weka.classifiers.rules.ZeroR.class.getName());
classifiers.add(weka.classifiers.trees.DecisionStump.class.getName());
classifiers.add(weka.classifiers.trees.J48.class.getName());
classifiers.add(weka.classifiers.trees.LMT.class.getName());
classifiers.add(weka.classifiers.trees.M5P.class.getName());
classifiers.add(weka.classifiers.trees.RandomForest.class.getName());
classifiers.add(weka.classifiers.trees.RandomTree.class.getName());
classifiers.add(weka.classifiers.trees.REPTree.class.getName());
return classifiers;
}
public static Collection<String> getBinaryClassifiers() {
Collection<String> classifiers = new ArrayList<>();
classifiers.add(weka.classifiers.functions.SMO.class.getName());
classifiers.add(weka.classifiers.functions.VotedPerceptron.class.getName());
return classifiers;
}
public static Collection<String> getFeatureEvaluators() {
Collection<String> preprocessors = new ArrayList<>();
preprocessors.add(weka.attributeSelection.CfsSubsetEval.class.getName());
preprocessors.add(weka.attributeSelection.CorrelationAttributeEval.class.getName());
preprocessors.add(weka.attributeSelection.GainRatioAttributeEval.class.getName());
preprocessors.add(weka.attributeSelection.InfoGainAttributeEval.class.getName());
preprocessors.add(weka.attributeSelection.OneRAttributeEval.class.getName());
preprocessors.add(weka.attributeSelection.PrincipalComponents.class.getName());
preprocessors.add(weka.attributeSelection.ReliefFAttributeEval.class.getName());
preprocessors.add(weka.attributeSelection.SymmetricalUncertAttributeEval.class.getName());
return preprocessors;
}
public static Collection<String> getSearchers() {
Collection<String> preprocessors = new ArrayList<>();
preprocessors.add("weka.attributeSelection.Ranker");
preprocessors.add("weka.attributeSelection.BestFirst");
preprocessors.add("weka.attributeSelection.GreedyStepwise");
return preprocessors;
}
public static Collection<String> getMetaLearners() {
Collection<String> classifiers = new ArrayList<>();
classifiers.add("weka.classifiers.meta.AdaBoostM1");
classifiers.add("weka.classifiers.meta.AdditiveRegression");
classifiers.add("weka.classifiers.meta.AttributeSelectedClassifier");
classifiers.add("weka.classifiers.meta.Bagging");
classifiers.add("weka.classifiers.meta.ClassificationViaRegression");
classifiers.add("weka.classifiers.meta.LogitBoost");
classifiers.add("weka.classifiers.meta.MultiClassClassifier");
classifiers.add("weka.classifiers.meta.RandomCommittee");
classifiers.add("weka.classifiers.meta.RandomSubSpace");
classifiers.add("weka.classifiers.meta.Stacking");
classifiers.add("weka.classifiers.meta.Vote");
return classifiers;
}
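/**
* Checks whether a searcher/evaluator pair forms a valid WEKA attribute selection setup: single-attribute (ranking) evaluators such as ReliefF or InfoGain require the Ranker searcher, while subset evaluators such as CfsSubsetEval require a non-Ranker searcher.
*
* @param searcher the class name of the search strategy
* @param evaluator the class name of the attribute evaluator
* @return true if the combination is admissible
*/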
public static boolean isValidPreprocessorCombination(final String searcher, final String evaluator) {
boolean isSetEvaluator = evaluator.toLowerCase().matches(".*(relief|gainratio|principalcomponents|onerattributeeval|infogainattributeeval|correlationattributeeval|symmetricaluncertattributeeval).*");
boolean isRanker = searcher.toLowerCase().contains("ranker");
boolean isNonRankerEvaluator = evaluator.toLowerCase().matches(".*(cfssubseteval).*");
return !(isSetEvaluator && !isRanker || isNonRankerEvaluator && isRanker);
}
/**
* Determines all admissible attribute selection variants (searcher/evaluator combinations with default parametrization).
*
* @return all valid combinations, each given as a two-element list of the form [searcher, evaluator]
*/
public static Collection<List<String>> getAdmissibleSearcherEvaluatorCombinationsForAttributeSelection() {
Collection<List<String>> preprocessors = new ArrayList<>();
List<Collection<String>> sets = new ArrayList<>();
sets.add(getSearchers());
sets.add(getFeatureEvaluators());
CartesianProductComputationProblem<String> problem = new CartesianProductComputationProblem<>(sets);
List<List<String>> combinations;
try {
combinations = new LDSRelationComputer<>(problem).call();
for (List<String> combo : combinations) {
if (isValidPreprocessorCombination(combo.get(0), combo.get(1))) {
preprocessors.add(combo);
}
}
return preprocessors;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new UnsupportedOperationException("Thread was interrupted while computing the searcher/evaluator combinations; interrupts are not handled here.", e);
} catch (AlgorithmTimeoutedException | AlgorithmExecutionCanceledException e) {
throw new UnsupportedOperationException("Computation of the searcher/evaluator combinations timed out or was canceled.", e);
}
}
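/*
* Usage sketch (illustrative only; local variable names are hypothetical):
*
* for (List<String> combo : getAdmissibleSearcherEvaluatorCombinationsForAttributeSelection()) {
* String searcher = combo.get(0); // e.g. weka.attributeSelection.Ranker
* String evaluator = combo.get(1); // e.g. weka.attributeSelection.InfoGainAttributeEval
* }
*/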
/**
* Returns true if there is at least one nominal attribute in the given dataset that has more than 2 values.
*
* @param wekaInstances
* dataset that is checked
* @param ignoreClassAttribute
* if true, the class attribute is ignored
* @return true if binarization is needed
*/
public static boolean needsBinarization(final Instances wekaInstances, final boolean ignoreClassAttribute) {
Attribute classAttribute = wekaInstances.classAttribute();
if (!ignoreClassAttribute && classAttribute.isNominal() && classAttribute.numValues() >= 3) {
return true;
}
// iterate over every attribute and check.
for (Enumeration<Attribute> attributeEnum = wekaInstances.enumerateAttributes(); attributeEnum.hasMoreElements();) {
Attribute currentAttr = attributeEnum.nextElement();
if (currentAttr.isNominal() && currentAttr != classAttribute && currentAttr.numValues() >= 3) {
return true;
}
}
return false;
}
public static Collection<String> getPossibleClassValues(final Instance instance) {
Collection<String> labels = new ArrayList<>();
Attribute classAttr = instance.classAttribute();
for (int i = 0; i < classAttr.numValues(); i++) {
labels.add(classAttr.value(i));
}
return labels;
}
public static String getClassifierDescriptor(final Classifier c) {
return getDescriptor(c);
}
public static String getPreprocessorDescriptor(final ASSearch c) {
return getDescriptor(c);
}
public static String getPreprocessorDescriptor(final ASEvaluation c) {
return getDescriptor(c);
}
public static String getDescriptor(final Object o) {
StringBuilder sb = new StringBuilder();
sb.append(o.getClass().getName());
if (o instanceof OptionHandler) {
sb.append("- [");
int i = 0;
for (String s : ((OptionHandler) o).getOptions()) {
if (i++ > 0) {
sb.append(", ");
}
sb.append(s);
}
sb.append("]");
}
return sb.toString();
}
public static Collection<Option> getOptionsOfWekaAlgorithm(final Object o) {
List<Option> options = new ArrayList<>();
if (!(o instanceof OptionHandler)) {
return options;
}
OptionHandler oh = (OptionHandler) o;
Enumeration<Option> optionEnum = oh.listOptions();
while (optionEnum.hasMoreElements()) {
options.add(optionEnum.nextElement());
}
return options;
}
public static List<String> getClassNames(final Instance instance) {
List<String> names = new ArrayList<>();
Enumeration<Object> namesEnumeration = instance.classAttribute().enumerateValues();
while (namesEnumeration.hasMoreElements()) {
names.add((String) namesEnumeration.nextElement());
}
return names;
}
public static Map<String, Integer> getClassNameToIDMap(final Instance instance) {
Map<String, Integer> map = new HashMap<>();
List<String> classNames = getClassNames(instance);
for (int i = 0; i < classNames.size(); i++) {
map.put(classNames.get(i), i);
}
return map;
}
public static int getIntValOfClassName(final Instance instance, final String className) {
Map<String, Integer> map = getClassNameToIDMap(instance);
return map.containsKey(className) ? map.get(className) : -1;
}
public static String getClassSplitAssignments(final List<Instances> split) {
StringBuilder sb = new StringBuilder();
Map<String, Instances> firstSet = getInstancesPerClass(split.get(0));
for (String cl : firstSet.keySet()) {
sb.append(cl);
sb.append(": ");
int i = 0;
for (Instances set : split) {
Map<String, Instances> map = getInstancesPerClass(set);
sb.append(map.containsKey(cl) ? map.get(cl).size() : 0);
if (i < split.size() - 1) {
sb.append("/");
i++;
}
}
sb.append("\n");
}
return sb.toString();
}
public static Instances getInstancesOfClass(final Instances data, final Collection<String> classNames) {
Instances newInstances = new Instances(data);
newInstances.removeIf(i -> !classNames.contains(WekaUtil.getClassName(i)));
return newInstances;
}
public static Instances getInstancesOfClass(final Instances data, final String className) {
Instances newInstances = new Instances(data);
newInstances.removeIf(i -> !WekaUtil.getClassName(i).equals(className));
return newInstances;
}
public static String getClassName(final Instance instance) {
return getClassNames(instance).get((int) instance.classValue());
}
public static Map<String, Instances> getInstancesPerClass(final Instances data) {
Instances emptyInstances = new Instances(data);
emptyInstances.clear();
Map<String, Instances> classWiseSeparation = new HashMap<>();
for (Instance i : data) {
String assignedClass = data.classAttribute().value((int) i.classValue());
if (!classWiseSeparation.containsKey(assignedClass)) {
Instances inst = new Instances(emptyInstances);
classWiseSeparation.put(assignedClass, inst);
}
classWiseSeparation.get(assignedClass).add(i);
}
return classWiseSeparation;
}
public static Map<String, Integer> getNumberOfInstancesPerClass(final Instances data) {
Map<String, Instances> instancesPerClass = getInstancesPerClass(data);
Map<String, Integer> counter = new HashMap<>();
for (Entry<String, Instances> classWithInstances : instancesPerClass.entrySet()) {
counter.put(classWithInstances.getKey(), classWithInstances.getValue().size());
}
return counter;
}
public static int getNumberOfInstancesFromClass(final Instances data, final String c) {
return getInstancesOfClass(data, c).size();
}
public static int getNumberOfInstancesFromClass(final Instances data, final Collection<String> cs) {
Map<String, Integer> map = getNumberOfInstancesPerClass(data);
int sum = 0;
for (String c : cs) {
if (map.containsKey(c)) {
sum += map.get(c);
}
}
return sum;
}
public static double getRelativeNumberOfInstancesFromClass(final Instances data, final String c) {
if (data.isEmpty()) {
return 0;
}
return getNumberOfInstancesFromClass(data, c) / (1f * data.size());
}
public static double getRelativeNumberOfInstancesFromClass(final Instances data, final Collection<String> cs) {
if (data.isEmpty()) {
return 0;
}
return getNumberOfInstancesFromClass(data, cs) / (1f * data.size());
}
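/**
* Creates a random split of the dataset indices. Each given portion defines the relative size of one fold; a final fold receives the remaining portion (1 minus the sum of the given portions), and any instances left over due to rounding are assigned to random folds.
*
* @param data the dataset to split
* @param rand the source of randomness used for shuffling and for distributing the remainder
* @param portions the relative fold sizes; must sum to at most 1
* @return an array of index sets, one per fold (portions.length + 1 many)
*/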
public static Collection<Integer>[] getArbitrarySplit(final IWekaInstances data, final Random rand, final double... portions) {
/* check that portions sum up to s.th. smaller than 1 */
double sum = 0;
for (double p : portions) {
sum += p;
}
if (sum > 1) {
throw new IllegalArgumentException(MSG_SUM1);
}
LinkedList<Integer> indices = new LinkedList<>(ContiguousSet.create(Range.closed(0, data.size() - 1), DiscreteDomain.integers()).asList());
Collections.shuffle(indices, rand);
@SuppressWarnings("unchecked")
Collection<Integer>[] folds = new ArrayList[portions.length + 1];
Instances emptyInstances = new Instances(data.getList());
emptyInstances.clear();
/* distribute instances over the folds */
for (int i = 0; i <= portions.length; i++) {
double portion = i < portions.length ? portions[i] : 1 - sum;
int numberOfItems = (int) Math.floor(data.size() * portion);
Collection<Integer> fold = new ArrayList<>(numberOfItems);
for (int j = 0; j < numberOfItems; j++) {
fold.add(indices.poll());
}
folds[i] = fold;
}
/* distribute remaining ones over the folds */
while (!indices.isEmpty()) {
folds[rand.nextInt(folds.length)].add(indices.poll());
}
if (debug && Arrays.stream(folds).mapToInt(Collection::size).sum() != data.size()) {
throw new IllegalStateException(MSG_DEVIATING_NUMBER_OF_INSTANCES);
}
return folds;
}
public static List<IWekaInstances> realizeSplit(final IWekaInstances data, final Collection<Integer>[] split) {
return realizeSplitAsCopiedInstances(data, split);
}
public static List<Instances> realizeSplit(final Instances data, final List<List<Integer>> split) {
return realizeSplitAsCopiedInstances(data, split);
}
public static List<Instances> realizeSplitAsCopiedInstances(final Instances data, final List<List<Integer>> split) {
List<Instances> folds = new ArrayList<>();
for (Collection<Integer> foldIndices : split) {
Instances fold = new Instances(data, 0);
foldIndices.stream().forEach(i -> fold.add(data.get(i)));
folds.add(fold);
}
return folds;
}
public static List<IWekaInstances> realizeSplitAsCopiedInstances(final IWekaInstances data, final Collection<Integer>[] split) {
List<Instances> folds = new ArrayList<>();
for (Collection<Integer> foldIndices : split) {
Instances fold = new Instances(data.getList(), 0);
foldIndices.stream().forEach(i -> fold.add(data.get(i).getElement()));
folds.add(fold);
}
return folds.stream().map(WekaInstances::new).collect(Collectors.toList());
}
public static ArrayNode splitToJsonArray(final Collection<Integer>[] splitDecision) {
ObjectMapper om = new ObjectMapper();
ArrayNode an = om.createArrayNode();
splitDecision[0].stream().sorted().forEach(an::add);
return an;
}
public static List<Instances> getStratifiedSplit(final Instances data, final long seed, final double portionOfFirstFold) throws SplitFailedException, InterruptedException {
return getStratifiedSplit(new WekaInstances(data), new Random(seed), portionOfFirstFold).stream().map(IWekaInstances::getInstances).collect(Collectors.toList());
}
public static List<IWekaInstances> getStratifiedSplit(final IWekaInstances data, final long seed, final double portionOfFirstFold) throws SplitFailedException, InterruptedException {
return getStratifiedSplit(data, new Random(seed), portionOfFirstFold);
}
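/**
* Creates a two-fold, label-stratified split of the dataset.
*
* @param data the dataset to split
* @param random the source of randomness for the stratified sampling
* @param portionOfFirstFold the relative size of the first fold
* @return a list with exactly two folds whose sizes sum to the size of the original dataset
*/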
public static List<IWekaInstances> getStratifiedSplit(final IWekaInstances data, final Random random, final double portionOfFirstFold) throws SplitFailedException, InterruptedException {
try {
List<Instances> split = new ArrayList<>();
StratifiedSampling<IWekaInstances> sampler = new LabelBasedStratifiedSampling<>(random, data);
sampler.setSampleSize((int) Math.ceil(portionOfFirstFold * data.size()));
split.add(sampler.call().getList());
split.add(sampler.getComplementOfLastSample().getList());
if (split.get(0).size() + split.get(1).size() != data.size()) {
throw new IllegalStateException("The sum of fold sizes does not correspond to the size of the original dataset!");
}
return split.stream().map(WekaInstances::new).collect(Collectors.toList());
} catch (ClassCastException | AlgorithmTimeoutedException | AlgorithmExecutionCanceledException | AlgorithmException | DatasetCreationException e) {
throw new SplitFailedException(e);
}
}
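/*
* Usage sketch (illustrative; the dataset variable is hypothetical):
*
* List<IWekaInstances> folds = WekaUtil.getStratifiedSplit(dataset, 42L, 0.7);
* IWekaInstances train = folds.get(0);
* IWekaInstances test = folds.get(1);
*/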
public static List<File> getDatasetsInFolder(final File folder) throws IOException {
List<File> files = new ArrayList<>();
try (Stream<Path> paths = Files.walk(folder.toPath())) {
paths.filter(f -> f.getParent().toFile().equals(folder) && f.toFile().getAbsolutePath().endsWith(".arff")).forEach(f -> files.add(f.toFile()));
}
return files.stream().sorted().collect(Collectors.toList());
}
public static Instances getRefactoredInstances(final Instances data, final Map<String, String> classMap) {
List<String> targetClasses = new ArrayList<>(new HashSet<>(classMap.values()));
Instances childData = WekaUtil.getEmptySetOfInstancesWithRefactoredClass(data, targetClasses);
for (Instance i : data) {
String className = i.classAttribute().value((int) Math.round(i.classValue()));
if (classMap.containsKey(className)) {
Instance iNew = WekaUtil.getRefactoredInstance(i, targetClasses);
iNew.setClassValue(classMap.get(className));
iNew.setDataset(childData);
childData.add(iNew);
}
}
return childData;
}
public static Instance getRefactoredInstance(final Instance instance) {
return getRefactoredInstance(instance, Arrays.asList("0.0", "1.0"));
}
public static Instance getRefactoredInstance(final Instance instance, final List<String> classes) {
/* modify instance */
Instances dataset = WekaUtil.getEmptySetOfInstancesWithRefactoredClass(instance.dataset(), classes);
int numAttributes = instance.numAttributes();
int classIndex = instance.classIndex();
Instance iNew = new DenseInstance(numAttributes);
for (int i = 0; i < numAttributes; i++) {
Attribute a = instance.attribute(i);
if (i != classIndex) {
iNew.setValue(a, instance.value(a));
} else {
iNew.setValue(a, 0.0); // the value does not matter since this should only be used for TESTING
}
}
dataset.add(iNew);
iNew.setDataset(dataset);
return iNew;
}
public static Instances getEmptySetOfInstancesWithRefactoredClass(final Instances instances) {
return getEmptySetOfInstancesWithRefactoredClass(instances, Arrays.asList("0.0", "1.0"));
}
public static Instances getEmptySetOfInstancesWithRefactoredClass(final Instances instances, final List<String> classes) {
List<Attribute> newAttributes = getAttributes(instances, false);
newAttributes.add(instances.classIndex(), getNewClassAttribute(instances.classAttribute(), classes));
Instances newData = new Instances("split", (ArrayList<Attribute>) newAttributes, 0);
newData.setClassIndex(instances.classIndex());
return newData;
}
public static List<Attribute> getAttributes(final Instances inst, final boolean includeClassAttribute) {
List<Attribute> attributes = new ArrayList<>();
Enumeration<Attribute> e = inst.enumerateAttributes();
while (e.hasMoreElements()) {
attributes.add(e.nextElement());
}
if (includeClassAttribute) {
attributes.add(inst.classAttribute());
}
return attributes;
}
public static List<Attribute> getAttributes(final Instance inst) {
List<Attribute> attributes = new ArrayList<>();
Enumeration<Attribute> e = inst.enumerateAttributes();
while (e.hasMoreElements()) {
attributes.add(e.nextElement());
}
return attributes;
}
public static boolean hasOnlyNumericAttributes(final Instances instances) {
for (Attribute a : getAttributes(instances, false)) {
if (!a.isNumeric()) {
return false;
}
}
return true;
}
public static Attribute getNewClassAttribute(final Attribute attribute) {
List<String> vals = Arrays.asList("0.0", "1.0");
return getNewClassAttribute(attribute, vals);
}
public static Attribute getNewClassAttribute(final Attribute attribute, final List<String> classes) {
return new Attribute(attribute.name(), classes);
}
public static List<Attribute> getReplacedAttributeList(final List<Attribute> attributes, final Attribute classAttribute) {
ArrayList<Attribute> newAttributes = new ArrayList<>();
for (Attribute a : attributes) {
if (classAttribute != a) {
newAttributes.add(a);
} else {
newAttributes.add(getNewClassAttribute(classAttribute));
}
}
return newAttributes;
}
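/**
* Creates a binary-class copy of the dataset: instances whose class is in cluster1 are relabeled "0.0", instances whose class is in cluster2 are relabeled "1.0", and instances belonging to neither cluster are dropped.
*
* @param data the original dataset
* @param cluster1 the class names mapped to the first merged class
* @param cluster2 the class names mapped to the second merged class
* @return the relabeled dataset
*/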
public static Instances mergeClassesOfInstances(final Instances data, final Collection<String> cluster1, final Collection<String> cluster2) {
Instances newData = WekaUtil.getEmptySetOfInstancesWithRefactoredClass(data);
for (Instance i : data) {
Instance iNew = (Instance) i.copy();
String className = i.classAttribute().value((int) Math.round(i.classValue()));
if (cluster1.contains(className)) {
iNew.setClassValue(0.0);
newData.add(iNew);
} else if (cluster2.contains(className)) {
iNew.setClassValue(1.0);
newData.add(iNew);
}
}
return newData;
}
public static Instances mergeClassesOfInstances(final Instances data, final List<Set<String>> instancesCluster) {
List<String> classes = new LinkedList<>();
IntStream.range(0, instancesCluster.size()).forEach(x -> classes.add("C" + ((double) x)));
Instances newData = WekaUtil.getEmptySetOfInstancesWithRefactoredClass(data, classes);
for (Instance i : data) {
Instance iNew = (Instance) i.copy();
String className = i.classAttribute().value((int) Math.round(i.classValue()));
for (Set<String> cluster : instancesCluster) {
if (cluster.contains(className)) {
iNew.setClassValue(instancesCluster.indexOf(cluster));
iNew.setDataset(newData);
newData.add(iNew);
}
}
}
return newData;
}
public static List<String> getClassesDeclaredInDataset(final Instances data) {
List<String> classes = new ArrayList<>();
Attribute classAttribute = data.classAttribute();
for (int i = 0; i < classAttribute.numValues(); i++) {
classes.add(classAttribute.value(i));
}
return classes;
}
public static Collection<String> getClassesActuallyContainedInDataset(final Instances data) {
Map<String, Integer> counter = getNumberOfInstancesPerClass(data);
return counter.keySet().stream().filter(k -> counter.get(k) != 0).collect(Collectors.toList());
}
public static double[] getClassesAsArray(final Instances inst) {
int n = inst.size();
double[] vec = new double[n];
for (int i = 0; i < n; i++) {
vec[i] = inst.get(i).classValue();
}
return vec;
}
public static List<Double> getClassesAsList(final Instances inst) {
return inst.stream().map(Instance::classValue).collect(Collectors.toList());
}
public static boolean areInstancesEqual(final Instance a, final Instance b) {
int n = a.numAttributes();
if (b == null || b.numAttributes() != n) {
return false;
}
for (int i = 0; i < n; i++) {
if (a.value(i) != b.value(i)) {
return false;
}
}
return true;
}
public static String instancesToJsonString(final Instances data) {
JSONNode json = JSONInstances.toJSON(data);
json.getChild("header").removeFromParent();
StringBuffer buffer = new StringBuffer();
json.toString(buffer);
buffer.append("\n");
return buffer.toString();
}
public static Instances jsonStringToInstances(final String json) throws Exception {
JSONNode node = JSONNode.read(new BufferedReader(new StringReader(json)));
return JSONInstances.toInstances(node);
}
/**
* Computes the indices of the instances of the original dataset that are contained in the given subset. This only works for datasets that contain each instance at most once!
*
* @param dataset the dataset in which the instances are located
* @param subset the instances whose positions in the dataset are sought
* @return the index (in the dataset) of each instance of the subset
*/
public static int[] getIndicesOfContainedInstances(final Instances dataset, final Instances subset) {
int[] indices = new int[subset.size()];
InstanceComparator comp = new InstanceComparator();
for (int i = 0; i < indices.length; i++) {
Instance inst = subset.get(i);
int index = -1;
for (int j = 0; j < dataset.size(); j++) {
if (comp.compare(inst, dataset.get(j)) == 0) {
index = j;
break;
}
}
if (index == -1) {
throw new IllegalArgumentException("The instance " + inst + " is not contained in the given dataset.");
}
indices[i] = index;
}
return indices;
}
public static Instance useFilterOnSingleInstance(final Instance instance, final Filter filter) throws Exception {
Instances data = new Instances(instance.dataset());
data.clear();
data.add(instance);
Instances filteredInstances = Filter.useFilter(data, filter);
return filteredInstances.firstInstance();
}
public static Instances removeAttribute(final Instances data, final int attribute) throws Exception {
Remove remove = new Remove();
remove.setAttributeIndices("" + (attribute + 1));
remove.setInputFormat(data);
return Filter.useFilter(data, remove);
}
public static Instances removeAttributes(final Instances data, final Collection<Integer> attributes) throws Exception {
Remove remove = new Remove();
StringBuilder sb = new StringBuilder();
for (int att : attributes) {
if (sb.length() != 0) {
sb.append(",");
}
sb.append(att + 1);
}
remove.setAttributeIndices(sb.toString());
remove.setInputFormat(data);
return Filter.useFilter(data, remove);
}
public static Instances removeClassAttribute(final Instances data) throws Exception {
if (data.classIndex() < 0) {
throw new IllegalArgumentException("Class index of data is not set!");
}
Remove remove = new Remove();
remove.setAttributeIndices("" + (data.classIndex() + 1));
remove.setInputFormat(data);
return Filter.useFilter(data, remove);
}
public static Instance removeClassAttribute(final Instance inst) throws Exception {
Remove remove = new Remove();
remove.setAttributeIndices("" + (inst.classIndex() + 1));
remove.setInputFormat(inst.dataset());
return useFilterOnSingleInstance(inst, remove);
}
public static Classifier cloneClassifier(final Classifier c) throws Exception {
Method cloneMethod = MethodUtils.getAccessibleMethod(c.getClass(), "clone");
if (cloneMethod != null) {
return (Classifier) cloneMethod.invoke(c);
}
return AbstractClassifier.makeCopy(c);
}
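/**
* Determines, for each instance of the subset, the row index of a matching instance in the given dataset. Matching is based on attribute values, so duplicate subset instances may be mapped to the same row.
*
* @param data the dataset in which the rows are located
* @param subset the instances to locate
* @return the row indices of the matched instances
*/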
public static int[] getIndicesOfSubset(final Instances data, final Instances subset) {
InstanceComparator comp = new InstanceComparator();
List<Instance> copy = new ArrayList<>(subset);
/* init rows object */
int[] result = new int[subset.size()];
int row = 0;
int i = 0;
for (Instance ref : data) {
for (int j = 0; j < copy.size(); j++) {
Instance inst = copy.get(j);
if (inst != null && comp.compare(inst, ref) == 0) {
result[i++] = row;
copy.remove(j);
j--; // step back to compensate for the left-shift caused by the removal
}
}
row++;
}
return result;
}
public static String printNestedWekaClassifier(final Classifier c) {
StringBuilder sb = new StringBuilder();
sb.append(c.getClass().getName());
sb.append("(");
if (c instanceof SingleClassifierEnhancer) {
sb.append(printNestedWekaClassifier(((SingleClassifierEnhancer) c).getClassifier()));
} else if (c instanceof SMO) {
sb.append(((SMO) c).getKernel().getClass().getName());
} else if (c instanceof MultipleClassifiersCombiner) {
sb.append(printNestedWekaClassifier(((MultipleClassifiersCombiner) c).getClassifier(0)));
}
sb.append(")");
return sb.toString();
}
public static boolean isDebug() {
return debug;
}
public static void setDebug(final boolean debug) {
WekaUtil.debug = debug;
}
/**
* Binarizes nominal features and returns an ND4J matrix with one row per instance; the class attribute is excluded.
*
* @param inst the instances to convert
* @return the resulting feature matrix of shape [numInstances, numBinarizedFeatures]
* @throws Exception if the NominalToBinary filter fails
*/
public static INDArray instances2matrix(final Instances inst) throws Exception {
Filter n2b = new NominalToBinary();
n2b.setInputFormat(inst);
Instances reduced = Filter.useFilter(inst, n2b);
/* create ndarray */
double[][] matrix = new double[reduced.numAttributes() - 1][reduced.size()];
int index = 0;
for (int i = 0; i < reduced.numAttributes(); i++) {
if (i != reduced.classIndex()) {
matrix[index] = reduced.attributeToDoubleArray(i);
index++;
}
}
return Nd4j.create(matrix).transpose();
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/package-info.java
|
/**
* @author mwever
*/
package ai.libs.jaicore.ml.weka;
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/IInstancesClassifier.java
|
package ai.libs.jaicore.ml.weka.classification;
import org.api4.java.ai.ml.core.exception.PredictionException;
import weka.core.Instances;
public interface IInstancesClassifier {
public double[] classifyInstances(Instances instances) throws PredictionException;
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/package-info.java
|
package ai.libs.jaicore.ml.weka.classification;
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/AWekaLearner.java
|
package ai.libs.jaicore.ml.weka.classification.learner;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.api4.java.ai.ml.core.dataset.schema.ILabeledInstanceSchema;
import org.api4.java.ai.ml.core.dataset.serialization.UnsupportedAttributeTypeException;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance;
import org.api4.java.ai.ml.core.evaluation.IPrediction;
import org.api4.java.ai.ml.core.evaluation.IPredictionBatch;
import org.api4.java.ai.ml.core.exception.LearnerConfigurationFailedException;
import org.api4.java.ai.ml.core.exception.PredictionException;
import org.api4.java.ai.ml.core.exception.TrainingException;
import org.api4.java.common.reconstruction.IReconstructible;
import org.api4.java.common.reconstruction.IReconstructionInstruction;
import org.api4.java.common.reconstruction.IReconstructionPlan;
import ai.libs.jaicore.basic.reconstruction.ReconstructionInstruction;
import ai.libs.jaicore.basic.reconstruction.ReconstructionPlan;
import ai.libs.jaicore.ml.core.learner.ASupervisedLearner;
import ai.libs.jaicore.ml.weka.WekaUtil;
import ai.libs.jaicore.ml.weka.classification.pipeline.MLPipeline;
import ai.libs.jaicore.ml.weka.dataset.IWekaInstance;
import ai.libs.jaicore.ml.weka.dataset.IWekaInstances;
import ai.libs.jaicore.ml.weka.dataset.WekaInstance;
import ai.libs.jaicore.ml.weka.dataset.WekaInstances;
import weka.attributeSelection.ASEvaluation;
import weka.attributeSelection.ASSearch;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.core.OptionHandler;
public abstract class AWekaLearner<P extends IPrediction, B extends IPredictionBatch> extends ASupervisedLearner<ILabeledInstance, ILabeledDataset<? extends ILabeledInstance>, P, B> implements IWekaClassifier, IReconstructible {
protected String name;
protected Classifier wrappedLearner;
protected ILabeledInstanceSchema schema;
public AWekaLearner(final Classifier learner) {
this.wrappedLearner = learner;
}
public AWekaLearner(final String name, final String[] options) {
this.name = name;
try {
this.wrappedLearner = AbstractClassifier.forName(name, options);
} catch (Exception e) {
throw new IllegalArgumentException("Could not find classifier for name " + name + " or could not set its options to " + Arrays.toString(options), e);
}
}
@Override
public void fit(final ILabeledDataset<? extends ILabeledInstance> dTrain) throws TrainingException, InterruptedException {
this.schema = dTrain.getInstanceSchema();
WekaInstances data = new WekaInstances(dTrain);
try {
this.wrappedLearner.buildClassifier(data.getInstances());
} catch (InterruptedException e) {
throw e;
} catch (Exception e) {
throw new TrainingException("Could not build " + this.getClass().getSimpleName() + " due to exception", e);
}
}
@Override
public B predict(final ILabeledDataset<? extends ILabeledInstance> dTest) throws PredictionException, InterruptedException {
IWekaInstances wInstances = new WekaInstances(dTest);
int n = dTest.size();
IWekaInstance[] instances = new IWekaInstance[n];
for (int i = 0; i < n; i++) {
if (Thread.interrupted()) {
throw new InterruptedException("Weka learner was interrupted while predicting.");
}
instances[i] = wInstances.get(i);
}
return this.predict(instances);
}
@Override
public Classifier getClassifier() {
return this.wrappedLearner;
}
public String getName() {
return this.name;
}
public String[] getOptions() {
return ((OptionHandler) this.wrappedLearner).getOptions();
}
public List<String> getOptionsAsList() {
return Arrays.asList(((OptionHandler) this.wrappedLearner).getOptions());
}
@Override
public void addInstruction(final IReconstructionInstruction instruction) {
throw new UnsupportedOperationException("The WEKAClassifier cannot be modified afterwards, so no new instruction makes sense.");
}
@Override
public IReconstructionPlan getConstructionPlan() {
try {
if (this.wrappedLearner instanceof MLPipeline) {
MLPipeline pipeline = (MLPipeline) this.wrappedLearner;
Classifier classifier = pipeline.getBaseClassifier();
ASSearch searcher = pipeline.getPreprocessors().isEmpty() ? null : pipeline.getPreprocessors().get(0).getSearcher();
ASEvaluation evaluator = pipeline.getPreprocessors().isEmpty() ? null : pipeline.getPreprocessors().get(0).getEvaluator();
return new ReconstructionPlan(Arrays.asList(new ReconstructionInstruction(WekaClassifier.class.getMethod("createPipeline", String.class, List.class, String.class, List.class, String.class, List.class),
searcher != null ? searcher.getClass().getName() : null, searcher != null ? ((OptionHandler) searcher).getOptions() : null, evaluator != null ? evaluator.getClass().getName() : null,
evaluator != null ? ((OptionHandler) evaluator).getOptions() : null, classifier.getClass().getName(), ((OptionHandler) classifier).getOptions())));
} else {
return new ReconstructionPlan(Arrays.asList(new ReconstructionInstruction(WekaClassifier.class.getMethod("createBaseClassifier", String.class, List.class), this.name, this.getOptionsAsList())));
}
} catch (NoSuchMethodException | SecurityException e) {
throw new UnsupportedOperationException(e);
}
}
@Override
public void setConfig(final Map<String, Object> config) throws LearnerConfigurationFailedException, InterruptedException {
throw new UnsupportedOperationException();
}
@Override
public String toString() {
String c = this.wrappedLearner instanceof MLPipeline ? this.wrappedLearner.toString() : WekaUtil.getClassifierDescriptor(this.wrappedLearner);
return "WekaClassifier [name=" + this.name + ", options=" + this.getOptionsAsList() + ", wrappedClassifier=" + c + "]";
}
@Override
public B predict(final ILabeledInstance[] dTest) throws PredictionException, InterruptedException {
List<P> predictions = new ArrayList<>();
for (ILabeledInstance inst : dTest) {
if (Thread.interrupted()) {
throw new InterruptedException("Weka learner was interrupted while predicting.");
}
predictions.add(this.predict(inst));
}
return this.getPredictionListAsBatch(predictions);
}
protected WekaInstance getWekaInstance(final ILabeledInstance xTest) throws PredictionException {
if (this.schema == null) {
throw new IllegalStateException("Cannot conduct predictions with the classifier, because the dataset scheme has not been defined.");
}
if (xTest instanceof WekaInstance) {
return (WekaInstance) xTest;
} else {
try {
return new WekaInstance(this.schema, xTest);
} catch (UnsupportedAttributeTypeException e) {
throw new PredictionException("Could not create WekaInstance object from given instance.");
}
}
}
protected abstract B getPredictionListAsBatch(List<P> predictionList);
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/Ensemble.java
|
package ai.libs.jaicore.ml.weka.classification.learner;
import java.util.LinkedList;
import weka.classifiers.Classifier;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Utils;
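/**
* A simple ensemble that trains all member classifiers on the same data, averages their class distributions (normalizing if necessary), and predicts the class with the highest aggregated probability.
*/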
public class Ensemble extends LinkedList<Classifier> implements Classifier {
/** Default generated serial version UID. */
private static final long serialVersionUID = -4094417200499766748L;
@Override
public void buildClassifier(final Instances data) throws Exception {
for (Classifier c : this) {
c.buildClassifier(data);
}
}
@Override
public double classifyInstance(final Instance instance) throws Exception {
int best = 0;
double[] dist = this.distributionForInstance(instance);
for (int i = 1; i < dist.length; i++) {
if (dist[i] > dist[best]) {
best = i;
}
}
return best;
}
@Override
public double[] distributionForInstance(final Instance instance) throws Exception {
double[] sums = new double[instance.classAttribute().numValues()];
double[] newProbs;
for (Classifier c : this) {
newProbs = c.distributionForInstance(instance);
for (int j = 0; j < newProbs.length; j++) {
sums[j] += newProbs[j];
}
}
if (!Utils.eq(Utils.sum(sums), 1)) {
Utils.normalize(sums);
}
return sums;
}
@Override
public Capabilities getCapabilities() {
return null;
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/IWekaClassifier.java
|
package ai.libs.jaicore.ml.weka.classification.learner;
import java.io.Serializable;
import org.api4.java.ai.ml.classification.IClassifier;
import weka.classifiers.Classifier;
public interface IWekaClassifier extends IClassifier, Serializable {
public Classifier getClassifier();
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/IWekaClassifierConfig.java
|
package ai.libs.jaicore.ml.weka.classification.learner;
public interface IWekaClassifierConfig {
public String[] getOptions();
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/IWekaLearningAlgorithm.java
|
package ai.libs.jaicore.ml.weka.classification.learner;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset;
import org.api4.java.algorithm.IAlgorithm;
public interface IWekaLearningAlgorithm extends IAlgorithm<ILabeledDataset<?>, IWekaClassifier> {
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/WekaClassifier.java
|
package ai.libs.jaicore.ml.weka.classification.learner;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.IntStream;
import org.api4.java.ai.ml.classification.singlelabel.evaluation.ISingleLabelClassification;
import org.api4.java.ai.ml.classification.singlelabel.evaluation.ISingleLabelClassificationPredictionBatch;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance;
import org.api4.java.ai.ml.core.exception.PredictionException;
import ai.libs.jaicore.ml.classification.singlelabel.SingleLabelClassification;
import ai.libs.jaicore.ml.classification.singlelabel.SingleLabelClassificationPredictionBatch;
import ai.libs.jaicore.ml.weka.classification.pipeline.MLPipeline;
import weka.attributeSelection.ASEvaluation;
import weka.attributeSelection.ASSearch;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
public class WekaClassifier extends AWekaLearner<ISingleLabelClassification, ISingleLabelClassificationPredictionBatch> implements IWekaClassifier {
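/**
* Creates a WekaClassifier wrapping an MLPipeline consisting of an (optional) attribute selection step and a base classifier, all given by class name. This factory is referenced by the reconstruction plan built in AWekaLearner.
*/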
public static WekaClassifier createPipeline(final String searcher, final List<String> searcherOptions, final String evaluator, final List<String> evaluatorOptions, final String classifier, final List<String> classifierOptions)
throws Exception {
ASSearch search = searcher != null ? ASSearch.forName(searcher, searcherOptions.toArray(new String[0])) : null;
ASEvaluation eval = evaluator != null ? ASEvaluation.forName(evaluator, evaluatorOptions.toArray(new String[0])) : null;
Classifier c = AbstractClassifier.forName(classifier, classifierOptions.toArray(new String[0]));
return new WekaClassifier(new MLPipeline(search, eval, c));
}
public static WekaClassifier createBaseClassifier(final String name, final List<String> options) {
return new WekaClassifier(name, options.toArray(new String[0]));
}
public WekaClassifier(final String name, final String[] options) {
super(name, options);
}
public WekaClassifier(final Classifier classifier) {
super(classifier);
this.name = classifier.getClass().getName();
}
@Override
public ISingleLabelClassification predict(final ILabeledInstance xTest) throws PredictionException, InterruptedException {
try {
Map<Integer, Double> distribution = new HashMap<>();
double[] dist = this.wrappedLearner.distributionForInstance(this.getWekaInstance(xTest).getElement());
IntStream.range(0, dist.length).forEach(x -> distribution.put(x, dist[x]));
return new SingleLabelClassification(distribution);
} catch (InterruptedException e) {
throw e;
} catch (Exception e) {
throw new PredictionException("Could not make a prediction since an exception occurred in the wrapped weka classifier.", e);
}
}
@Override
protected ISingleLabelClassificationPredictionBatch getPredictionListAsBatch(final List<ISingleLabelClassification> predictionList) {
return new SingleLabelClassificationPredictionBatch(predictionList);
}
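/*
* Usage sketch (illustrative; trainData and testInstance are hypothetical):
*
* WekaClassifier c = WekaClassifier.createBaseClassifier("weka.classifiers.trees.J48", Collections.emptyList());
* c.fit(trainData); // ILabeledDataset<? extends ILabeledInstance>
* ISingleLabelClassification prediction = c.predict(testInstance);
*/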
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/WekaLearningAlgorithm.java
|
package ai.libs.jaicore.ml.weka.classification.learner;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset;
import org.api4.java.algorithm.events.IAlgorithmEvent;
import org.api4.java.algorithm.exceptions.AlgorithmException;
import org.api4.java.algorithm.exceptions.AlgorithmExecutionCanceledException;
import org.api4.java.algorithm.exceptions.AlgorithmTimeoutedException;
import ai.libs.jaicore.basic.algorithm.AAlgorithm;
import ai.libs.jaicore.ml.weka.dataset.IWekaInstances;
import ai.libs.jaicore.ml.weka.dataset.WekaInstances;
public class WekaLearningAlgorithm extends AAlgorithm<ILabeledDataset<?>, IWekaClassifier> implements IWekaLearningAlgorithm {
private final Class<?> classifierClass;
private IWekaClassifier builtModel;
private FutureTask<?> futureTask;
public WekaLearningAlgorithm(final Class<?> classifierClass, final ILabeledDataset<?> input) {
super(input);
this.classifierClass = classifierClass;
}
@Override
public IAlgorithmEvent nextWithException() throws InterruptedException, AlgorithmExecutionCanceledException, AlgorithmTimeoutedException, AlgorithmException {
ExecutorService executor = null;
try {
switch (this.getState()) {
case CREATED:
this.builtModel = new WekaClassifier(this.classifierClass.getName(), new String[] {});
return this.activate();
case ACTIVE:
IWekaInstances dataset = new WekaInstances(this.getInput());
this.futureTask = new FutureTask<>(() -> {
this.builtModel.fit(dataset);
return null;
});
executor = Executors.newSingleThreadExecutor();
executor.submit(this.futureTask);
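/* wait for the training task, reserving a 100ms buffer before the deadline (presumably to leave headroom for a clean shutdown) */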
this.futureTask.get(this.getRemainingTimeToDeadline().milliseconds() - 100, TimeUnit.MILLISECONDS);
return this.terminate();
default:
throw new IllegalStateException("Don't know what to do in state " + this.getState() + ".");
}
} catch (TimeoutException e) {
throw new AlgorithmTimeoutedException(0);
} catch (InterruptedException e) {
throw e;
} catch (CancellationException e) {
throw new AlgorithmExecutionCanceledException(0);
} catch (Exception e) {
throw new AlgorithmException("Could not build classifier.", e);
} finally {
if (executor != null) {
executor.shutdownNow();
}
}
}
@Override
public IWekaClassifier call() throws InterruptedException, AlgorithmExecutionCanceledException, AlgorithmTimeoutedException, AlgorithmException {
while (this.hasNext()) {
this.nextWithException();
}
return this.builtModel;
}
@Override
public void cancel() {
this.futureTask.cancel(true);
super.cancel();
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/package-info.java
|
package ai.libs.jaicore.ml.weka.classification.learner;
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/AMCTreeNode.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction;
import java.io.Serializable;
import java.util.Collection;
import java.util.List;
import weka.classifiers.Classifier;
import weka.core.Instance;
public abstract class AMCTreeNode<C extends Serializable> implements Classifier {
private static final long serialVersionUID = 3014880172602719884L;
private final List<C> containedClasses;
public AMCTreeNode(final List<C> containedClasses) {
super();
this.containedClasses = containedClasses;
}
@Override
public double classifyInstance(final Instance instance) throws Exception {
double selection = -1;
double best = 0;
double[] dist = this.distributionForInstance(instance);
for (int i = 0; i < dist.length; i++) {
double score = dist[i];
if (score > best) {
best = score;
selection = i;
}
}
return selection;
}
public Collection<C> getContainedClasses() {
return this.containedClasses;
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/AllPairsTable.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
import ai.libs.jaicore.basic.sets.SetUtil;
import ai.libs.jaicore.ml.weka.WekaUtil;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.core.Instances;
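/**
* Builds a table of pairwise class "separabilities": for every pair of classes present in the training data, the given classifier is trained on the instances of those two classes, and its accuracy on the corresponding validation instances is stored.
*/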
public class AllPairsTable {
private final Map<String, Integer> classCount;
private final Map<String, Map<String, Double>> separabilities = new HashMap<>();
private final int sum;
public AllPairsTable(final Instances training, final Instances validation, final Classifier c) throws Exception {
Collection<String> classes = WekaUtil.getClassesActuallyContainedInDataset(training);
for (Collection<String> set : SetUtil.getAllPossibleSubsetsWithSize(classes, 2)) {
List<String> pair = set.stream().sorted().collect(Collectors.toList());
String a = pair.get(0);
String b = pair.get(1);
Instances trainingData = WekaUtil.getInstancesOfClass(training, a);
trainingData.addAll(WekaUtil.getInstancesOfClass(training, b));
c.buildClassifier(trainingData);
Instances validationData = WekaUtil.getInstancesOfClass(validation, a);
validationData.addAll(WekaUtil.getInstancesOfClass(validation, b));
Evaluation eval = new Evaluation(trainingData);
eval.evaluateModel(c, validationData);
if (!this.separabilities.containsKey(a)) {
this.separabilities.put(a, new HashMap<>());
}
this.separabilities.get(a).put(b, eval.pctCorrect() / 100);
}
this.classCount = WekaUtil.getNumberOfInstancesPerClass(training);
this.sum = training.size();
}
public double getSeparability(final String c1, final String c2) {
if (c1.equals(c2)) {
throw new IllegalArgumentException("Cannot separate a class from itself.");
}
if (c1.compareTo(c2) > 0) {
return this.getSeparability(c2, c1);
}
return this.separabilities.get(c1).get(c2);
}
public double getUpperBoundOnSeparability(final Collection<String> classes) {
double max = 0;
for (Collection<String> pair : SetUtil.getAllPossibleSubsetsWithSize(classes, 2)) {
Iterator<String> i = pair.iterator();
String a = i.next();
String b = i.next();
double expectedContributionToError = (1 - this.getSeparability(a, b));
double relativeExpectedContributionToError = expectedContributionToError * (this.classCount.get(a) + this.classCount.get(b)) / (1f * this.sum);
max = Math.max(max, relativeExpectedContributionToError);
}
return 1 - max;
}
public double getAverageSeparability(final Collection<String> classes) {
DescriptiveStatistics stats = new DescriptiveStatistics();
for (Collection<String> pair : SetUtil.getAllPossibleSubsetsWithSize(classes, 2)) {
Iterator<String> i = pair.iterator();
String a = i.next();
String b = i.next();
stats.addValue(this.getSeparability(a, b));
}
return stats.getMean();
}
public double getMultipliedSeparability(final Collection<String> classes) {
double separability = 1;
for (Collection<String> pair : SetUtil.getAllPossibleSubsetsWithSize(classes, 2)) {
Iterator<String> i = pair.iterator();
String a = i.next();
String b = i.next();
separability *= this.getSeparability(a, b);
}
return separability;
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/ClassifierCache.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction;
import java.util.HashMap;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import ai.libs.jaicore.basic.sets.Pair;
import weka.classifiers.Classifier;
import weka.core.Instances;
public class ClassifierCache extends HashMap<Integer, Pair<Classifier, Instances>> {
private static final long serialVersionUID = -8463580964568016772L;
public Classifier getCachedClassifier(final String classifierName, final EMCNodeType classificationStrategy, final Instances data) {
int hashCode = new HashCodeBuilder().append(classifierName).append(classificationStrategy).append(data.toString()).toHashCode();
Pair<Classifier, Instances> pair = this.get(hashCode);
if (pair == null) {
return null;
} else {
return pair.getX();
}
}
public Instances getCachedTrainingData(final String classifierName, final EMCNodeType classificationStrategy, final Instances data) {
int hashCode = new HashCodeBuilder().append(classifierName).append(classificationStrategy).append(data.toString()).toHashCode();
Pair<Classifier, Instances> pair = this.get(hashCode);
if (pair == null) {
return null;
} else {
return pair.getY();
}
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/ConstantClassifier.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction;
import weka.classifiers.Classifier;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
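/**
* A trivial classifier that ignores the training data and always predicts class index 0 with full confidence.
*/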
public class ConstantClassifier implements Classifier {
private static final long serialVersionUID = 8190066987365474681L;
@Override
public void buildClassifier(final Instances data) throws Exception {
/* does nothing */
}
@Override
public double classifyInstance(final Instance instance) throws Exception {
return 0.0;
}
@Override
public double[] distributionForInstance(final Instance instance) throws Exception {
return new double[] { 1.0 };
}
@Override
public Capabilities getCapabilities() {
return null;
}
@Override
public Classifier clone() {
return new ConstantClassifier();
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/EMCNodeType.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction;
public enum EMCNodeType {
MERGE, ALLPAIRS, ONEVSREST, DIRECT;
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/ITreeClassifier.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction;
import java.util.List;
import weka.classifiers.Classifier;
import weka.core.Instance;
public interface ITreeClassifier extends Classifier {
public int getHeight();
@Override
public double classifyInstance(final Instance instance) throws Exception;
public int getDepthOfFirstCommonParent(List<Integer> classes);
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/MCTreeMergeNode.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction;
import java.util.Collection;
import java.util.List;
import weka.classifiers.Classifier;
public class MCTreeMergeNode extends MCTreeNodeReD {
/**
* Default generated serial version UID.
*/
private static final long serialVersionUID = -6282530004580334598L;
public MCTreeMergeNode(final String innerNodeClassifier, final Collection<String> leftChildClasses, final Classifier leftChildClassifier, final Collection<String> rightChildClasses,
final Classifier rightChildClassifier) throws Exception {
super(innerNodeClassifier, leftChildClasses, leftChildClassifier, rightChildClasses, rightChildClassifier);
}
public MCTreeMergeNode(final Classifier innerNodeClassifier, final List<Collection<String>> childClasses, final List<Classifier> childClassifier) {
super(innerNodeClassifier, childClasses, childClassifier);
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/MCTreeNode.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.ml.weka.WekaUtil;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.meta.MultiClassClassifier;
import weka.classifiers.rules.ZeroR;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.WekaException;
public class MCTreeNode extends AMCTreeNode<Integer> implements ITreeClassifier, Iterable<MCTreeNode> {
/** Default generated serial version UID. */
private static final long serialVersionUID = 8873192747068561266L;
private EMCNodeType nodeType;
private List<MCTreeNode> children = new ArrayList<>();
private Classifier classifier;
private String classifierID;
private boolean trained = false;
private transient Logger logger = LoggerFactory.getLogger(MCTreeNode.class);
public static final AtomicInteger cacheRetrievals = new AtomicInteger();
private static Map<String, Classifier> classifierCacheMap = new HashMap<>();
private static Lock classifierCacheMapLock = new ReentrantLock();
public MCTreeNode(final List<Integer> containedClasses) {
super(containedClasses);
}
public MCTreeNode(final List<Integer> containedClasses, final EMCNodeType nodeType, final String classifierID) throws Exception {
this(containedClasses, nodeType, AbstractClassifier.forName(classifierID, null));
}
public MCTreeNode(final List<Integer> containedClasses, final EMCNodeType nodeType, final Classifier baseClassifier) {
this(containedClasses);
this.setNodeType(nodeType);
this.setBaseClassifier(baseClassifier);
}
public EMCNodeType getNodeType() {
return this.nodeType;
}
public void addChild(final MCTreeNode newNode) {
if (newNode.getNodeType() == EMCNodeType.MERGE) {
for (MCTreeNode child : newNode.getChildren()) {
this.children.add(child);
}
} else {
this.children.add(newNode);
}
}
public List<MCTreeNode> getChildren() {
return this.children;
}
public boolean isCompletelyConfigured() {
if (this.classifier == null) {
return false;
}
if (this.children.isEmpty()) {
return false;
}
for (MCTreeNode child : this.children) {
if (!child.isCompletelyConfigured()) {
return false;
}
}
return true;
}
@Override
public void buildClassifier(final Instances data) throws Exception {
assert (this.getNodeType() != EMCNodeType.MERGE) : "MERGE node detected while building classifier. This must not happen!";
if (data.isEmpty()) {
throw new IllegalArgumentException("Cannot train MCTree with empty set of instances.");
}
if (this.children.isEmpty()) {
throw new IllegalStateException("Cannot train MCTree without children");
}
// sort class split into clusters
List<Set<String>> instancesCluster = new ArrayList<>();
IntStream.range(0, this.children.size()).forEach(x -> instancesCluster.add(new HashSet<>()));
int index = 0;
for (MCTreeNode child : this.children) {
for (int classIndex : child.getContainedClasses()) {
instancesCluster.get(index).add(data.classAttribute().value(classIndex));
}
index++;
}
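// key the trained classifier by classifier class, class clusters, and a hash of the data; note that within this method the cache is only written, never read (it is exposed via getClassifierCache())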
String classifierKey = this.classifier.getClass().getName() + "#" + instancesCluster + "#" + data.size() + "#" + new HashCodeBuilder().append(data.toString()).toHashCode();
// refactor training data with respect to the split clusters and build the classifier
Instances trainingData = WekaUtil.mergeClassesOfInstances(data, instancesCluster);
try {
this.classifier.buildClassifier(trainingData);
} catch (WekaException e) {
this.classifier = new ZeroR();
this.classifier.buildClassifier(trainingData);
}
classifierCacheMapLock.lock();
try {
classifierCacheMap.put(classifierKey, this.classifier);
} finally {
classifierCacheMapLock.unlock();
}
// recursively build classifiers for children
this.children.stream().parallel().forEach(child -> {
try {
child.buildClassifier(data);
} catch (Exception e) {
this.logger.error("Encountered problem when training MCTreeNode.", e);
}
});
this.trained = true;
}
public void distributionForInstance(final Instance instance, final double[] distribution) throws Exception {
Instance iNew = WekaUtil.getRefactoredInstance(instance, IntStream.range(0, this.children.size()).mapToObj(x -> x + ".0").collect(Collectors.toList()));
double[] localDistribution = this.classifier.distributionForInstance(iNew);
for (MCTreeNode child : this.children) {
child.distributionForInstance(instance, distribution);
int indexOfChild = this.children.indexOf(child);
for (int classContainedInChild : child.getContainedClasses()) {
distribution[classContainedInChild] *= localDistribution[indexOfChild];
}
}
}
@Override
public double[] distributionForInstance(final Instance instance) throws Exception {
if (!this.trained) {
throw new IllegalStateException("Cannot get distribution from untrained classifier " + this.toStringWithOffset());
}
double[] classDistribution = new double[this.getContainedClasses().size()];
this.distributionForInstance(instance, classDistribution);
return classDistribution;
}
@Override
public Capabilities getCapabilities() {
return this.classifier.getCapabilities();
}
@Override
public int getHeight() {
return 1 + this.children.stream().mapToInt(MCTreeNode::getHeight).max().getAsInt();
}
@Override
public int getDepthOfFirstCommonParent(final List<Integer> classes) {
for (MCTreeNode child : this.children) {
if (child.getContainedClasses().containsAll(classes)) {
return 1 + child.getDepthOfFirstCommonParent(classes);
}
}
return 1;
}
public static void clearCache() {
classifierCacheMap.clear();
}
public static Map<String, Classifier> getClassifierCache() {
return classifierCacheMap;
}
public Classifier getClassifier() {
return this.classifier;
}
public void setBaseClassifier(final Classifier classifier) {
if (classifier == null) {
throw new IllegalArgumentException("Cannot set null classifier!");
}
this.classifierID = classifier.getClass().getName();
switch (this.nodeType) {
case ONEVSREST:
MultiClassClassifier oneVsRestMCC = new MultiClassClassifier();
oneVsRestMCC.setClassifier(classifier);
this.classifier = oneVsRestMCC;
break;
case ALLPAIRS:
MultiClassClassifier allPairsMCC = new MultiClassClassifier();
try {
allPairsMCC.setOptions(new String[] { "-M", "" + 3 });
} catch (Exception e) {
this.logger.error("Observed problem when setting options for classifier.", e);
}
allPairsMCC.setClassifier(classifier);
this.classifier = allPairsMCC;
break;
case DIRECT:
this.classifier = classifier;
break;
default:
break;
}
}
public void setNodeType(final EMCNodeType nodeType) {
this.nodeType = nodeType;
}
@Override
public String toString() {
return this.toStringWithOffset("", null);
}
public String toStringWithOffset() {
return this.toStringWithOffset("", " ");
}
public String toStringWithOffset(final String offset, final String indent) {
StringBuilder sb = new StringBuilder();
sb.append(offset).append("(").append(this.getContainedClasses()).append(":").append(this.classifierID).append(":").append(this.nodeType).append(") {");
boolean first = true;
for (MCTreeNode child : this.children) {
if (first) {
first = false;
} else {
sb.append(",");
}
if (indent != null) {
sb.append("\n");
}
sb.append(child.toStringWithOffset(offset + (indent != null ? indent : ""), indent));
}
if (indent != null) {
sb.append("\n").append(offset);
}
sb.append("}");
return sb.toString();
}
@Override
public Iterator<MCTreeNode> iterator() {
return new Iterator<MCTreeNode>() {
private int currentlyTraversedChild = -1;
private Iterator<MCTreeNode> childIterator = null;
@Override
public boolean hasNext() {
if (this.currentlyTraversedChild < 0) {
return true;
}
if (MCTreeNode.this.children.isEmpty()) {
return false;
}
if (this.childIterator == null) {
this.childIterator = MCTreeNode.this.children.get(this.currentlyTraversedChild).iterator();
}
if (this.childIterator.hasNext()) {
return true;
}
if (this.currentlyTraversedChild == MCTreeNode.this.children.size() - 1) {
return false;
}
/* now set the iterator to the next child and return whether it has an element */
this.currentlyTraversedChild++;
this.childIterator = MCTreeNode.this.children.get(this.currentlyTraversedChild).iterator();
return this.childIterator.hasNext();
}
@Override
public MCTreeNode next() {
if (this.currentlyTraversedChild == -1) {
this.currentlyTraversedChild++;
return MCTreeNode.this;
} else {
return this.childIterator.next();
}
}
};
}
}
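/*
 * Hypothetical usage sketch (not part of the original source): manually assembles a flat
 * one-vs-rest MCTreeNode over all classes of a dataset, mirroring the construction pattern
 * in ReductionOptimizer#getTreeFromSolution. The dataset file name and the J48 base
 * classifier are assumptions for illustration only.
 */
class MCTreeNodeUsageExample {
public static void main(final String[] args) throws Exception {
weka.core.Instances data = weka.core.converters.ConverterUtils.DataSource.read("iris.arff"); // assumed file
data.setClassIndex(data.numAttributes() - 1);
/* one inner node containing all class indices, with one leaf child per class */
MCTreeNode node = new MCTreeNode(java.util.stream.IntStream.range(0, data.numClasses()).boxed().collect(java.util.stream.Collectors.toList()));
node.setNodeType(EMCNodeType.ONEVSREST);
node.setBaseClassifier(new weka.classifiers.trees.J48());
for (int c = 0; c < data.numClasses(); c++) {
node.addChild(new MCTreeNodeLeaf(c));
}
node.buildClassifier(data);
System.out.println(node.toStringWithOffset());
}
}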
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/MCTreeNodeLeaf.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction;
import java.util.Arrays;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
public class MCTreeNodeLeaf extends MCTreeNode {
/**
*
*/
private static final long serialVersionUID = 6991944564830487953L;
private int classIndex;
public MCTreeNodeLeaf(final int classIndex) {
super(Arrays.asList(classIndex));
this.classIndex = classIndex;
}
@Override
public void addChild(final MCTreeNode newNode) {
throw new UnsupportedOperationException();
}
@Override
public void buildClassifier(final Instances data) throws Exception {
// intentionally do nothing
}
@Override
public double classifyInstance(final Instance instance) throws Exception {
return this.classIndex;
}
@Override
public void distributionForInstance(final Instance instance, final double[] distribution) throws Exception {
distribution[this.classIndex] = 1;
}
@Override
public double[] distributionForInstance(final Instance instance) throws Exception {
return new double[] { 1.0 };
}
@Override
public int getHeight() {
return 0;
}
@Override
public Capabilities getCapabilities() {
return null;
}
@Override
public String toString() {
return this.toStringWithOffset("", null);
}
@Override
public boolean isCompletelyConfigured() {
return true;
}
@Override
public String toStringWithOffset(final String offset, final String indent) {
StringBuilder sb = new StringBuilder();
sb.append(offset).append("(").append(this.getContainedClasses()).append(")");
return sb.toString();
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/MCTreeNodeReD.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.api4.java.ai.ml.core.exception.TrainingException;
import ai.libs.jaicore.basic.StringUtil;
import ai.libs.jaicore.ml.weka.WekaUtil;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.rules.ZeroR;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.WekaException;
public class MCTreeNodeReD extends AMCTreeNode<String> {
/**
* Automatically generated serial version UID.
*/
private static final long serialVersionUID = 8873192747068561266L;
private boolean debugMode = false;
@SuppressWarnings("serial")
private class ChildNode implements Serializable {
private List<String> containedClasses;
private Classifier childNodeClassifier;
private ChildNode(final List<String> containedClasses, final Classifier childNodeClassifier) {
this.containedClasses = containedClasses;
this.childNodeClassifier = childNodeClassifier;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
if (this.childNodeClassifier instanceof MCTreeNodeReD) {
sb.append(this.childNodeClassifier.toString());
} else {
sb.append(this.childNodeClassifier.getClass().getSimpleName() + "(");
sb.append(StringUtil.implode(this.containedClasses, ","));
sb.append(")");
}
return sb.toString();
}
public String toStringWithOffset(final String offset) {
StringBuilder sb = new StringBuilder();
if (this.childNodeClassifier instanceof MCTreeNodeReD) {
sb.append(((MCTreeNodeReD) this.childNodeClassifier).toStringWithOffset(offset + "\t"));
} else {
sb.append(offset);
sb.append("(");
sb.append(this.containedClasses);
sb.append(":");
sb.append(this.childNodeClassifier.getClass().getSimpleName());
sb.append(")");
}
return sb.toString();
}
}
/**
* Classifier assigned to this inner node.
*/
private Classifier innerNodeClassifier;
/**
* List of children of this tree node.
*/
private List<ChildNode> children = new ArrayList<>();
/**
* Flag indicating whether this node is already trained.
*/
private boolean trained = false;
public MCTreeNodeReD(final String innerNodeClassifier, final Collection<String> leftChildClasses, final String leftChildClassifier, final Collection<String> rightChildClasses, final String rightChildClassifier) throws Exception {
this(innerNodeClassifier, leftChildClasses, AbstractClassifier.forName(leftChildClassifier, null), rightChildClasses, AbstractClassifier.forName(rightChildClassifier, null));
}
public MCTreeNodeReD(final Classifier innerNodeClassifier, final Collection<String> leftChildClasses, final Classifier leftChildClassifier, final Collection<String> rightChildClasses, final Classifier rightChildClassifier) {
this(innerNodeClassifier, Arrays.asList(leftChildClasses, rightChildClasses), Arrays.asList(leftChildClassifier, rightChildClassifier));
}
public MCTreeNodeReD(final String innerNodeClassifier, final Collection<String> leftChildClasses, final Classifier leftChildClassifier, final Collection<String> rightChildClasses, final Classifier rightChildClassifier)
throws Exception {
this(AbstractClassifier.forName(innerNodeClassifier, new String[] {}), leftChildClasses, leftChildClassifier, rightChildClasses, rightChildClassifier);
}
public MCTreeNodeReD(final Classifier innerNodeClassifier, final List<Collection<String>> childClasses, final List<Classifier> childClassifier) {
this();
if (childClasses.size() != childClassifier.size()) {
throw new IllegalArgumentException("Number of child classes does not equal the number of child classifiers");
}
this.innerNodeClassifier = innerNodeClassifier;
for (int i = 0; i < childClasses.size(); i++) {
this.addChild(new ArrayList<>(childClasses.get(i)), childClasses.get(i).size() > 1 ? childClassifier.get(i) : new ConstantClassifier());
}
}
public MCTreeNodeReD(final MCTreeNodeReD copy) throws Exception {
this(copy.innerNodeClassifier.getClass().getName(), copy.children.get(0).containedClasses, WekaUtil.cloneClassifier(copy.children.get(0).childNodeClassifier), copy.children.get(1).containedClasses,
WekaUtil.cloneClassifier(copy.children.get(1).childNodeClassifier));
}
protected MCTreeNodeReD() {
super(new ArrayList<>());
}
public void addChild(final List<String> childClasses, final Classifier childClassifier) {
assert !this.trained : "Cannot insert children after the tree node has been trained!";
if (childClassifier instanceof MCTreeMergeNode) {
this.children.addAll(((MCTreeMergeNode) childClassifier).getChildren());
} else {
this.children.add(new ChildNode(childClasses, childClassifier));
}
this.getContainedClasses().addAll(childClasses);
}
/**
* @return Returns a list of the child nodes of this node.
*/
public List<ChildNode> getChildren() {
return this.children;
}
public boolean isCompletelyConfigured() {
if (this.innerNodeClassifier == null || this.children.isEmpty()) {
return false;
}
for (ChildNode child : this.children) {
if (child.childNodeClassifier instanceof MCTreeNodeReD && !((MCTreeNodeReD) child.childNodeClassifier).isCompletelyConfigured()) {
return false;
}
}
return true;
}
@Override
public List<String> getContainedClasses() {
return (List<String>) super.getContainedClasses();
}
@Override
public void buildClassifier(final Instances data) throws Exception {
if (data.isEmpty()) {
throw new IllegalArgumentException("Cannot train MCTree with empty set of instances.");
}
assert !this.children.isEmpty() : "Cannot train MCTree without children";
assert !this.trained : "Cannot retrain MCTreeNodeReD";
if (this.debugMode) {
if (!this.getContainedClasses().containsAll(WekaUtil.getClassesActuallyContainedInDataset(data))) {
throw new IllegalStateException("The classes for which this MCTreeNodeReD has been defined (" + this.getContainedClasses()
+ ") is not a superset of the given training data (" + WekaUtil.getClassesActuallyContainedInDataset(data) + ") ...");
}
if (!WekaUtil.getClassesActuallyContainedInDataset(data).containsAll(this.getContainedClasses())) {
throw new IllegalStateException("The classes for which this MCTreeNodeReD has been defined (" + this.getContainedClasses() + ") is not a subset of the given training data ("
+ WekaUtil.getClassesActuallyContainedInDataset(data) + ") ...");
}
}
/* resort the contained classes based on the input data. This is necessary, because the order of classes in the given dataset might differ from the order of classes initially declared for the tree */
this.getContainedClasses().clear();
for (int i = 0; i < data.numClasses(); i++) {
this.getContainedClasses().add(data.classAttribute().value(i));
}
/* create subsets of the training data filtering for the respective class values and build child classifier */
List<Set<String>> instancesClusters = new ArrayList<>();
int childNum = 0;
for (ChildNode child : this.getChildren()) {
childNum++;
assert (!child.containedClasses.isEmpty()) : "Contained classes of child must not be empty";
Instances childData = WekaUtil.getEmptySetOfInstancesWithRefactoredClass(data, child.containedClasses);
for (Instance i : data) {
String className = i.classAttribute().value((int) Math.round(i.classValue()));
if (child.containedClasses.contains(className)) {
Instance iNew = WekaUtil.getRefactoredInstance(i, child.containedClasses);
iNew.setClassValue(className);
iNew.setDataset(childData);
childData.add(iNew);
}
}
assert child.containedClasses.containsAll(WekaUtil.getClassesActuallyContainedInDataset(childData)) : "There are data for the child node that are not contained in its declaration";
assert WekaUtil.getClassesActuallyContainedInDataset(childData).containsAll(child.containedClasses) : "There are classes declared in the child, but no corresponding data have been passed";
try {
child.childNodeClassifier.buildClassifier(childData);
} catch (Exception e) {
throw new TrainingException("Cannot train classifier in child #" + childNum, e);
}
instancesClusters.add(new HashSet<>(child.containedClasses));
}
/* build inner classifier with refactored training data */
Instances trainingData = WekaUtil.mergeClassesOfInstances(data, instancesClusters);
try {
this.innerNodeClassifier.buildClassifier(trainingData);
} catch (WekaException e) {
this.innerNodeClassifier = new ZeroR();
this.innerNodeClassifier.buildClassifier(trainingData);
} catch (Exception e) {
throw new TrainingException("Cannot train inner classifier", e);
}
this.trained = true;
}
@Override
public double[] distributionForInstance(final Instance instance) throws Exception {
assert this.trained : "Cannot get distribution from untrained classifier " + this.toStringWithOffset();
// compute distribution of the children clusters of the inner node's classifier
Instance refactoredInstance = WekaUtil.getRefactoredInstance(instance);
double[] innerNodeClassifierDistribution = this.innerNodeClassifier.distributionForInstance(refactoredInstance);
// recursively compute distribution for instance for all the children and assign the probabilities
// to classDistribution array
double[] classDistribution = new double[this.getContainedClasses().size()];
for (int childIndex = 0; childIndex < this.children.size(); childIndex++) {
ChildNode child = this.children.get(childIndex);
double[] childDistribution = child.childNodeClassifier.distributionForInstance(WekaUtil.getRefactoredInstance(instance, child.containedClasses));
assert childDistribution.length == child.containedClasses.size() : "Mismatch of child classes (" + child.containedClasses.size() + ") and distribution in child (" + childDistribution.length + ")";
for (int i = 0; i < childDistribution.length; i++) {
String classValue = child.containedClasses.get(i);
classDistribution[this.getContainedClasses().indexOf(classValue)] = childDistribution[i] * innerNodeClassifierDistribution[childIndex];
}
}
double sum = Arrays.stream(classDistribution).sum();
assert (sum - 1E-8 <= 1.0 && sum + 1E-8 >= 1.0) : "Distribution does not sum up to 1; actual sum of distribution entries: " + sum;
return classDistribution;
}
@Override
public Capabilities getCapabilities() {
return this.innerNodeClassifier.getCapabilities();
}
public int getHeight() {
int maxHeightChildren = 0;
for (ChildNode child : this.children) {
if (child.childNodeClassifier instanceof MCTreeNodeReD) {
maxHeightChildren = Math.max(((MCTreeNodeReD) child.childNodeClassifier).getHeight(), maxHeightChildren);
}
}
return 1 + maxHeightChildren;
}
public int getDepthOfFirstCommonParent(final List<String> classes) {
for (ChildNode child : this.children) {
if (child.containedClasses.containsAll(classes)) {
int depth = 1;
if (child.childNodeClassifier instanceof MCTreeNodeReD) {
depth += ((MCTreeNodeReD) child.childNodeClassifier).getDepthOfFirstCommonParent(classes);
}
return depth;
}
}
return 1;
}
public Classifier getClassifier() {
return this.innerNodeClassifier;
}
public void setBaseClassifier(final Classifier classifier) {
if (classifier == null) {
throw new IllegalArgumentException("Cannot set null classifier!");
}
this.innerNodeClassifier = classifier;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("(");
sb.append(this.innerNodeClassifier.getClass().getSimpleName());
sb.append(")");
sb.append("{");
boolean first = true;
for (ChildNode child : this.children) {
if (first) {
first = false;
} else {
sb.append(",");
}
sb.append(child);
}
sb.append("}");
return sb.toString();
}
public String toStringWithOffset() {
return this.toStringWithOffset("");
}
public String toStringWithOffset(final String offset) {
StringBuilder sb = new StringBuilder();
sb.append(offset);
sb.append("(");
sb.append(this.getContainedClasses());
sb.append(":");
sb.append(this.innerNodeClassifier.getClass().getSimpleName());
sb.append(") {");
boolean first = true;
for (ChildNode child : this.children) {
if (first) {
first = false;
} else {
sb.append(",");
}
sb.append("\n");
sb.append(child.toStringWithOffset(offset + " "));
}
sb.append("\n");
sb.append(offset);
sb.append("}");
return sb.toString();
}
}
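/*
 * Hypothetical usage sketch (not part of the original source): builds a binary reduction
 * tree whose inner node separates one class from the remaining two, each subproblem being
 * handled by its own child classifier. The dataset file, the class value names, and the
 * chosen classifiers are assumptions for illustration only.
 */
class MCTreeNodeReDUsageExample {
public static void main(final String[] args) throws Exception {
Instances data = weka.core.converters.ConverterUtils.DataSource.read("iris.arff"); // assumed file
data.setClassIndex(data.numAttributes() - 1);
MCTreeNodeReD tree = new MCTreeNodeReD("weka.classifiers.functions.SMO", // inner node classifier
Arrays.asList("Iris-setosa"), "weka.classifiers.rules.ZeroR", // left child
Arrays.asList("Iris-versicolor", "Iris-virginica"), "weka.classifiers.trees.J48"); // right child
tree.buildClassifier(data);
System.out.println(tree.toStringWithOffset());
}
}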
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/MCTreeNodeReDLeaf.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction;
import java.util.List;
import weka.classifiers.Classifier;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
public class MCTreeNodeReDLeaf extends MCTreeNodeReD {
/**
*
*/
private static final long serialVersionUID = 6991944564830487953L;
private String classValue;
public MCTreeNodeReDLeaf(final String classValue) {
this.classValue = classValue;
}
@Override
public void addChild(final List<String> childClasses, final Classifier childClassifier) {
throw new UnsupportedOperationException();
}
@Override
public void buildClassifier(final Instances data) throws Exception {
// intentionally do nothing
}
@Override
public double classifyInstance(final Instance instance) throws Exception {
return 0.0;
}
@Override
public double[] distributionForInstance(final Instance instance) throws Exception {
return new double[] { 1.0 };
}
@Override
public int getHeight() {
return 0;
}
@Override
public Capabilities getCapabilities() {
return null;
}
@Override
public String toString() {
return this.classValue;
}
@Override
public boolean isCompletelyConfigured() {
return true;
}
@Override
public String toStringWithOffset(final String offset) {
StringBuilder sb = new StringBuilder();
sb.append(offset);
sb.append("(");
sb.append(this.getContainedClasses());
sb.append(")");
return sb.toString();
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/package-info.java
|
/**
*
*/
/**
* @author mwever
*
*/
package ai.libs.jaicore.ml.weka.classification.learner.reduction;
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/reducer/Decision.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction.reducer;
import java.util.Set;
import ai.libs.jaicore.ml.weka.classification.learner.reduction.EMCNodeType;
import weka.classifiers.Classifier;
class Decision {
private final Set<String> lft;
private final Set<String> rgt;
private final EMCNodeType classificationType;
private final Classifier baseClassifier;
public Decision(Set<String> lft, Set<String> rgt, EMCNodeType classificationType, Classifier baseClassifier) {
super();
this.lft = lft;
this.rgt = rgt;
this.classificationType = classificationType;
this.baseClassifier = baseClassifier;
}
public Set<String> getLft() {
return lft;
}
public Set<String> getRgt() {
return rgt;
}
public EMCNodeType getClassificationType() {
return classificationType;
}
public Classifier getBaseClassifier() {
return baseClassifier;
}
@Override
public String toString() {
return "Decision [lft=" + lft + ", rgt=" + rgt + ", classificationType=" + classificationType + ", baseClassifier=" + baseClassifier + "]";
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/reducer/HighProbClassifier.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction.reducer;
import weka.classifiers.Classifier;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
public class HighProbClassifier implements Classifier {
private static final long serialVersionUID = -139880264457589983L;
private final Classifier c;
public HighProbClassifier(final Classifier c) {
super();
this.c = c;
}
@Override
public void buildClassifier(final Instances data) throws Exception {
this.c.buildClassifier(data);
}
@Override
public double classifyInstance(final Instance instance) throws Exception {
double selection = -1;
double best = 0;
double[] dist = this.distributionForInstance(instance);
for (int i = 0; i < dist.length; i++) {
double score = dist[i];
if (score > best) {
best = score;
selection = i;
}
}
return selection;
}
@Override
public double[] distributionForInstance(final Instance instance) throws Exception {
return this.c.distributionForInstance(instance);
}
@Override
public Capabilities getCapabilities() {
return this.c.getCapabilities();
}
}
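/*
 * Hypothetical usage sketch (not part of the original source): HighProbClassifier turns any
 * distribution-producing classifier into an arg-max predictor. The dataset file and the
 * NaiveBayes base learner are assumptions for illustration only.
 */
class HighProbClassifierUsageExample {
public static void main(final String[] args) throws Exception {
Instances data = weka.core.converters.ConverterUtils.DataSource.read("iris.arff"); // assumed file
data.setClassIndex(data.numAttributes() - 1);
Classifier argMax = new HighProbClassifier(new weka.classifiers.bayes.NaiveBayes());
argMax.buildClassifier(data);
System.out.println(argMax.classifyInstance(data.firstInstance()));
}
}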
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/reducer/ReductionGraphGenerator.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction.reducer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import org.api4.java.datastructure.graph.implicit.IGraphGenerator;
import org.api4.java.datastructure.graph.implicit.INewNodeDescription;
import org.api4.java.datastructure.graph.implicit.ISingleRootGenerator;
import org.api4.java.datastructure.graph.implicit.ISuccessorGenerator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.ml.weka.WekaUtil;
import ai.libs.jaicore.ml.weka.classification.learner.reduction.EMCNodeType;
import ai.libs.jaicore.ml.weka.classification.learner.reduction.splitter.ISplitter;
import ai.libs.jaicore.ml.weka.classification.learner.reduction.splitter.RPNDSplitter;
import ai.libs.jaicore.search.model.NodeExpansionDescription;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.core.Instances;
public class ReductionGraphGenerator implements IGraphGenerator<RestProblem, Decision> {
private final Logger logger = LoggerFactory.getLogger(ReductionGraphGenerator.class);
private final Random rand;
private final Instances data;
public ReductionGraphGenerator(final Random rand, final Instances data) {
super();
this.rand = rand;
this.data = data;
}
@Override
public ISingleRootGenerator<RestProblem> getRootGenerator() {
return () -> {
RestProblem root = new RestProblem(null);
root.add(new HashSet<>(WekaUtil.getClassesActuallyContainedInDataset(this.data)));
return root;
};
}
@Override
public ISuccessorGenerator<RestProblem, Decision> getSuccessorGenerator() {
return n -> {
List<INewNodeDescription<RestProblem, Decision>> restProblems = new ArrayList<>();
try {
List<String> set = new ArrayList<>(n.get(0));
if (set.size() < 2) {
throw new UnsupportedOperationException("Cannot create successor where rest problem consists of only one class.");
}
/* add remaining open problems to node */
List<Set<String>> remainingProblems = new ArrayList<>();
for (int j = 1; j < n.size(); j++) {
remainingProblems.add(n.get(j));
}
/* iterate over all considered classifiers */
String[] portfolio = new String[] { "weka.classifiers.trees.RandomForest", "weka.classifiers.functions.SMO", "weka.classifiers.lazy.IBk" };
for (String classifier : portfolio) {
/* add the simplest option, namely to solve the nodes at once */
for (EMCNodeType nodeType : EMCNodeType.values()) {
if (nodeType == EMCNodeType.MERGE) {
continue;
}
if (this.data.classAttribute().numValues() > 12 && this.data.size() > 1000 && nodeType == EMCNodeType.ALLPAIRS) {
continue;
}
RestProblem rp = new RestProblem(new Decision(null, null, nodeType, AbstractClassifier.forName(classifier, null)));
rp.addAll(remainingProblems);
restProblems.add(new NodeExpansionDescription<>(rp, rp.getEdgeToParent()));
}
/* now go for splits (here we always apply direct) */
List<ISplitter> splitters = new ArrayList<>();
Map<ISplitter, Classifier> classifiers = new HashMap<>();
for (int i = 0; i < 1; i++) {
Classifier c = AbstractClassifier.forName(classifier, null);
ISplitter splitter = new RPNDSplitter(this.rand, c);
classifiers.put(splitter, c);
splitters.add(splitter);
}
for (ISplitter splitter : splitters) {
Collection<Collection<String>> split = splitter.split(this.data);
Iterator<Collection<String>> iterator = split.iterator();
Set<String> c1 = new HashSet<>(iterator.next());
Set<String> c2 = new HashSet<>(iterator.next());
RestProblem rp = new RestProblem(new Decision(c1, c2, EMCNodeType.DIRECT, classifiers.get(splitter)));
if (c1.size() > 1) {
rp.add(c1);
}
if (c2.size() > 1) {
rp.add(c2);
}
rp.addAll(remainingProblems);
/* add rest problem */
restProblems.add(new NodeExpansionDescription<>(rp, rp.getEdgeToParent()));
}
}
} catch (InterruptedException e) {
throw e;
} catch (Exception e) {
this.logger.error("Encountered error: {}", e);
}
return restProblems;
};
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/reducer/ReductionOptimizer.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction.reducer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Deque;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
import org.api4.java.ai.graphsearch.problem.implicit.graphgenerator.INodeGoalTester;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.logging.LoggerUtil;
import ai.libs.jaicore.ml.weka.WekaUtil;
import ai.libs.jaicore.ml.weka.classification.learner.reduction.EMCNodeType;
import ai.libs.jaicore.ml.weka.classification.learner.reduction.MCTreeNode;
import ai.libs.jaicore.ml.weka.classification.learner.reduction.MCTreeNodeLeaf;
import ai.libs.jaicore.ml.weka.dataset.IWekaInstances;
import ai.libs.jaicore.ml.weka.dataset.WekaInstances;
import ai.libs.jaicore.search.algorithms.standard.bestfirst.BestFirstEpsilon;
import ai.libs.jaicore.search.model.other.EvaluatedSearchGraphPath;
import ai.libs.jaicore.search.probleminputs.GraphSearchWithSubpathEvaluationsInput;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.rules.OneR;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
public class ReductionOptimizer implements Classifier {
/**
*
*/
private static final long serialVersionUID = -6241267445544412443L;
private final long seed;
private MCTreeNode root;
private transient Logger logger = LoggerFactory.getLogger(ReductionOptimizer.class);
public ReductionOptimizer(final long seed) {
super();
this.seed = seed;
}
@Override
public void buildClassifier(final Instances data) throws Exception {
List<IWekaInstances> dataSplit = WekaUtil.getStratifiedSplit(new WekaInstances(data), this.seed, .6f);
Instances train = dataSplit.get(0).getList();
INodeGoalTester<RestProblem, Decision> tester = new INodeGoalTester<RestProblem, Decision>() {
@Override
public boolean isGoal(final RestProblem n) {
for (Set<String> open : n) {
if (open.size() > 1) {
return false;
}
}
return true;
}
};
GraphSearchWithSubpathEvaluationsInput<RestProblem, Decision, Double> input = new GraphSearchWithSubpathEvaluationsInput<>(new ReductionGraphGenerator(new Random(this.seed), train), tester,
n -> this.getLossForClassifier(this.getTreeFromSolution(n.getNodes(), data, false), data) * 1.0);
BestFirstEpsilon<RestProblem, Decision, Double> search = new BestFirstEpsilon<>(input, n -> n.getNodes().size() * -1.0, 0.1, false);
/* collect up to roughly 100 solution candidates */
int i = 0;
Collection<EvaluatedSearchGraphPath<RestProblem, Decision, Double>> solutions = new ArrayList<>();
EvaluatedSearchGraphPath<RestProblem, Decision, Double> solution;
while ((solution = search.nextSolutionCandidate()) != null) {
solutions.add(solution);
if (i++ > 100) {
break;
}
}
/* select */
Optional<EvaluatedSearchGraphPath<RestProblem, Decision, Double>> bestSolution = solutions.stream().min((s1, s2) -> s1.getScore().compareTo(s2.getScore()));
if (!bestSolution.isPresent()) {
this.logger.error("No solution found");
return;
}
this.root = this.getTreeFromSolution(bestSolution.get().getNodes(), data, true);
this.root.buildClassifier(data);
}
@Override
public double classifyInstance(final Instance instance) throws Exception {
return this.root.classifyInstance(instance);
}
@Override
public double[] distributionForInstance(final Instance instance) throws Exception {
return this.root.distributionForInstance(instance);
}
@Override
public Capabilities getCapabilities() {
return null;
}
private void completeTree(final MCTreeNode tree) {
/* if the tree is not ready yet, complete it. The completion strategy is simply to set the node to "direct" with a OneR base classifier */
if (!tree.isCompletelyConfigured()) {
for (MCTreeNode node : tree) {
if (!node.getChildren().isEmpty()) {
continue;
}
if (node.getContainedClasses().size() == 1) {
continue;
}
node.setNodeType(EMCNodeType.DIRECT);
node.setBaseClassifier(new OneR());
for (int openClass : node.getContainedClasses()) {
try {
node.addChild(new MCTreeNodeLeaf(openClass));
} catch (Exception e) {
this.logger.error(LoggerUtil.getExceptionInfo(e));
}
}
}
}
}
private int getLossForClassifier(final MCTreeNode tree, final Instances data) {
this.completeTree(tree);
synchronized (this) {
/* now eval the tree */
try {
DescriptiveStatistics stats = new DescriptiveStatistics();
for (int i = 0; i < 2; i++) {
List<IWekaInstances> split = (WekaUtil.getStratifiedSplit(new WekaInstances(data), this.seed + i, .6f));
tree.buildClassifier(split.get(0).getList());
Evaluation eval = new Evaluation(data);
eval.evaluateModel(tree, split.get(1).getList());
stats.addValue(eval.pctIncorrect());
}
return (int) Math.round((stats.getMean() * 100));
} catch (Exception e) {
if (e instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
this.logger.error(LoggerUtil.getExceptionInfo(e));
return Integer.MAX_VALUE;
}
}
}
private MCTreeNode getTreeFromSolution(final List<RestProblem> solution, final Instances data, final boolean mustBeComplete) {
List<Decision> decisions = solution.stream().filter(n -> n.getEdgeToParent() != null).map(RestProblem::getEdgeToParent).collect(Collectors.toList());
Deque<MCTreeNode> open = new LinkedList<>();
Attribute classAttribute = data.classAttribute();
MCTreeNode localRoot = new MCTreeNode(IntStream.range(0, classAttribute.numValues()).boxed().collect(Collectors.toList()));
open.addFirst(localRoot);
for (Decision decision : decisions) {
MCTreeNode nodeToRefine = open.removeFirst(); // by construction of the search space, this node should belong to the decision
if (nodeToRefine == null) {
throw new IllegalStateException("No node to apply the decision to! Apparently, there are more decisions for nodes than there are inner nodes.");
}
/* insert decision to the node */
nodeToRefine.setNodeType(decision.getClassificationType());
nodeToRefine.setBaseClassifier(decision.getBaseClassifier());
boolean isCutOff = !(decision.getLft() != null && decision.getRgt() != null);
if (isCutOff) {
for (Integer c : nodeToRefine.getContainedClasses()) {
try {
nodeToRefine.addChild(new MCTreeNodeLeaf(c));
} catch (Exception e) {
this.logger.error(LoggerUtil.getExceptionInfo(e));
}
}
} else {
/* set left child */
boolean addedLeftChild = false;
List<String> classesLft = new ArrayList<>(decision.getLft());
if (classesLft.size() == 1) {
try {
nodeToRefine.addChild(new MCTreeNodeLeaf(classAttribute.indexOfValue(classesLft.get(0))));
} catch (Exception e) {
this.logger.error(LoggerUtil.getExceptionInfo(e));
}
} else {
MCTreeNode lft = new MCTreeNode(classesLft.stream().map(classAttribute::indexOfValue).collect(Collectors.toList()));
nodeToRefine.addChild(lft);
addedLeftChild = true;
open.push(lft);
}
/* set right child */
List<String> classesRgt = new ArrayList<>(decision.getRgt());
if (classesRgt.size() == 1) {
try {
nodeToRefine.addChild(new MCTreeNodeLeaf(data.classAttribute().indexOfValue(classesRgt.get(0))));
} catch (Exception e) {
this.logger.error(LoggerUtil.getExceptionInfo(e));
}
} else {
MCTreeNode rgt = new MCTreeNode(classesRgt.stream().map(classAttribute::indexOfValue).collect(Collectors.toList()));
nodeToRefine.addChild(rgt);
if (addedLeftChild) {
MCTreeNode lft = open.pop();
open.push(rgt);
open.push(lft);
} else {
open.push(rgt);
}
}
}
}
if (mustBeComplete && !open.isEmpty()) {
throw new IllegalStateException("Not all nodes have been equipped with decisions!");
}
return localRoot;
}
}
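/*
 * Hypothetical usage sketch (not part of the original source): the optimizer searches for a
 * good reduction tree on a stratified subsample and then trains the best tree found on the
 * full data. The dataset file name is an assumption for illustration only.
 */
class ReductionOptimizerUsageExample {
public static void main(final String[] args) throws Exception {
Instances data = weka.core.converters.ConverterUtils.DataSource.read("vehicle.arff"); // assumed file
data.setClassIndex(data.numAttributes() - 1);
ReductionOptimizer optimizer = new ReductionOptimizer(42);
optimizer.buildClassifier(data);
System.out.println(optimizer.classifyInstance(data.firstInstance()));
}
}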
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/reducer/RestProblem.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction.reducer;
import java.util.ArrayList;
import java.util.Set;
@SuppressWarnings("serial")
class RestProblem extends ArrayList<Set<String>> {
private static int counter = 0;
private final int id = counter++;
private final transient Decision edgeToParent;
public RestProblem(final Decision edgeToParent) {
super();
this.edgeToParent = edgeToParent;
}
public Decision getEdgeToParent() {
return this.edgeToParent;
}
@Override
public int hashCode() {
final int prime = 31;
int result = super.hashCode();
result = prime * result + ((this.edgeToParent == null) ? 0 : this.edgeToParent.hashCode());
result = prime * result + this.id;
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (!super.equals(obj)) {
return false;
}
if (this.getClass() != obj.getClass()) {
return false;
}
RestProblem other = (RestProblem) obj;
if (this.edgeToParent == null) {
if (other.edgeToParent != null) {
return false;
}
} else if (!this.edgeToParent.equals(other.edgeToParent)) {
return false;
}
return this.id == other.id;
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/reducer/package-info.java
|
/**
*
*/
/**
* @author mwever
*
*/
package ai.libs.jaicore.ml.weka.classification.learner.reduction.reducer;
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/splitter/ISplitter.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction.splitter;
import java.util.Collection;
import org.api4.java.ai.ml.core.dataset.splitter.SplitFailedException;
import weka.core.Instances;
public interface ISplitter {
public Collection<Collection<String>> split(Instances data) throws SplitFailedException, InterruptedException;
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/splitter/ISplitterFactory.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction.splitter;
public interface ISplitterFactory<T extends ISplitter> {
public T getSplitter(int randomSeed);
}
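/*
 * Hypothetical usage sketch (not part of the original source): the interface has a single
 * abstract method, so a factory can be supplied as a lambda. RandomSplitter is the
 * implementation from this package; java.util.Random is fully qualified because this file
 * declares no imports.
 */
class ISplitterFactoryUsageExample {
static final ISplitterFactory<RandomSplitter> RANDOM_FACTORY = seed -> new RandomSplitter(new java.util.Random(seed));
}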
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/splitter/RPNDSplitter.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction.splitter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Random;
import org.api4.java.ai.ml.core.dataset.splitter.SplitFailedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.basic.sets.SetUtil;
import ai.libs.jaicore.logging.LoggerUtil;
import ai.libs.jaicore.ml.weka.WekaUtil;
import weka.classifiers.Classifier;
import weka.core.Instance;
import weka.core.Instances;
public class RPNDSplitter implements ISplitter {
private static final Logger logger = LoggerFactory.getLogger(RPNDSplitter.class);
private final Random rand;
private final Classifier rpndClassifier;
public RPNDSplitter(final Random rand, final Classifier rpndClassifier) {
super();
this.rand = rand;
this.rpndClassifier = rpndClassifier;
}
@Override
public Collection<Collection<String>> split(final Instances data) throws SplitFailedException, InterruptedException {
Collection<String> classes = WekaUtil.getClassesActuallyContainedInDataset(data);
/* 2. if we have a leaf node, abort */
if (classes.size() == 1) {
Collection<Collection<String>> split = new ArrayList<>();
split.add(classes);
return split;
}
/* 3a. otherwise select randomly two classes */
List<String> copy = new ArrayList<>(classes);
Collections.shuffle(copy, this.rand);
String c1 = copy.get(0);
String c2 = copy.get(1);
Collection<String> s1 = new HashSet<>();
s1.add(c1);
Collection<String> s2 = new HashSet<>();
s2.add(c2);
return this.split(copy, s1, s2, data);
}
public Collection<Collection<String>> split(final Collection<String> classes, final Collection<String> s1, final Collection<String> s2, final Instances data) throws SplitFailedException, InterruptedException {
logger.info("Start creation of RPND split with basis {}/{} for classes {}", s1, s2, classes);
/* 3b. and 3c. train binary classifiers for c1 vs c2 */
Instances reducedData = WekaUtil.mergeClassesOfInstances(data, s1, s2);
logger.debug("Building classifier for separating the two class sets {} and {}", s1, s2);
try {
this.rpndClassifier.buildClassifier(reducedData);
} catch (Exception e1) {
throw new SplitFailedException(e1);
}
/* 3d. insort the remaining classes */
logger.info("Now classifying the items of the other classes");
List<String> remainingClasses = new ArrayList<>(SetUtil.difference(SetUtil.difference(classes, s1), s2));
for (int i = 0; i < remainingClasses.size(); i++) {
String className = remainingClasses.get(i);
Instances testData = WekaUtil.getInstancesOfClass(data, className);
logger.debug("Classify {} instances of class {}", testData.size(), className);
int o1 = 0;
int o2 = 0;
for (Instance inst : testData) {
if (Thread.interrupted()) {
throw new InterruptedException();
}
try {
double prediction = this.rpndClassifier.classifyInstance(WekaUtil.getRefactoredInstance(inst));
if (prediction == 0) {
o1++;
} else {
o2++;
}
} catch (Exception e) {
logger.error(LoggerUtil.getExceptionInfo(e));
}
}
if (o1 > o2) {
s1.add(className);
} else {
s2.add(className);
}
}
Collection<Collection<String>> split = new ArrayList<>();
split.add(s1);
split.add(s2);
return split;
}
}
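/*
 * Hypothetical usage sketch (not part of the original source): performs a single RPND split,
 * i.e., trains the given binary classifier on two random seed classes and insorts the
 * remaining classes by majority vote. The dataset file and the RandomTree base classifier
 * are assumptions for illustration only.
 */
class RPNDSplitterUsageExample {
public static void main(final String[] args) throws Exception {
Instances data = weka.core.converters.ConverterUtils.DataSource.read("iris.arff"); // assumed file
data.setClassIndex(data.numAttributes() - 1);
RPNDSplitter splitter = new RPNDSplitter(new Random(42), new weka.classifiers.trees.RandomTree());
Collection<Collection<String>> split = splitter.split(data);
System.out.println(split); // two class clusters
}
}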
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/splitter/RandomSplitter.java
|
package ai.libs.jaicore.ml.weka.classification.learner.reduction.splitter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import org.api4.java.ai.ml.core.dataset.splitter.SplitFailedException;
import ai.libs.jaicore.ml.weka.WekaUtil;
import weka.core.Instances;
public class RandomSplitter implements ISplitter {
private final Random rand;
public RandomSplitter(final Random rand) {
super();
this.rand = rand;
}
@Override
public Collection<Collection<String>> split(final Instances data) throws SplitFailedException {
Collection<Collection<String>> split = new ArrayList<>();
Collection<String> classes = WekaUtil.getClassesActuallyContainedInDataset(data);
if (classes.size() == 1) {
split.add(classes);
return split;
}
List<String> copy = new ArrayList<>(classes);
Collections.shuffle(copy, this.rand);
/* choose the split index such that both clusters are non-empty (classes.size() >= 2 here) */
int splitIndex = this.rand.nextInt(copy.size() - 1) + 1;
Collection<String> s1 = copy.subList(0, splitIndex);
Collection<String> s2 = copy.subList(splitIndex, copy.size());
split.add(s1);
split.add(s2);
return split;
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/learner/reduction/splitter/package-info.java
|
/**
*
*/
/**
* @author mwever
*
*/
package ai.libs.jaicore.ml.weka.classification.learner.reduction.splitter;
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline/FeaturePreprocessor.java
|
package ai.libs.jaicore.ml.weka.classification.pipeline;
import java.io.Serializable;
import weka.core.Instance;
import weka.core.Instances;
public interface FeaturePreprocessor extends Serializable {
public void prepare(Instances data) throws PreprocessingException;
public Instance apply(Instance data) throws PreprocessingException;
public Instances apply(Instances data) throws PreprocessingException;
public boolean isPrepared();
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline/MLPipeline.java
|
package ai.libs.jaicore.ml.weka.classification.pipeline;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.ml.weka.WekaUtil;
import weka.attributeSelection.ASEvaluation;
import weka.attributeSelection.ASSearch;
import weka.attributeSelection.AttributeSelection;
import weka.classifiers.Classifier;
import weka.classifiers.SingleClassifierEnhancer;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
/**
*
* @author Felix Mohr
*
*/
@SuppressWarnings("serial")
public class MLPipeline extends SingleClassifierEnhancer implements Classifier, Serializable {
private static final Logger logger = LoggerFactory.getLogger(MLPipeline.class);
private final List<SupervisedFilterSelector> preprocessors = new ArrayList<>();
private boolean trained = false;
private int timeForTrainingPreprocessors;
private int timeForTrainingClassifier;
private DescriptiveStatistics timeForExecutingPreprocessors;
private DescriptiveStatistics timeForExecutingClassifier;
public MLPipeline(final List<SupervisedFilterSelector> preprocessors, final Classifier baseClassifier) {
super();
if (baseClassifier == null) {
throw new IllegalArgumentException("Base classifier must not be null!");
}
this.preprocessors.addAll(preprocessors);
super.setClassifier(baseClassifier);
}
public MLPipeline(final ASSearch searcher, final ASEvaluation evaluator, final Classifier baseClassifier) {
super();
if (baseClassifier == null) {
throw new IllegalArgumentException("Base classifier must not be null!");
}
if (searcher != null && evaluator != null) {
AttributeSelection selector = new AttributeSelection();
selector.setSearch(searcher);
selector.setEvaluator(evaluator);
this.preprocessors.add(new SupervisedFilterSelector(searcher, evaluator, selector));
}
super.setClassifier(baseClassifier);
}
@Override
public void buildClassifier(Instances data) throws Exception {
/* reduce dimensionality */
long start;
int numAttributesBefore = data.numAttributes();
logger.info("Starting to build the preprocessors of the pipeline.");
for (SupervisedFilterSelector pp : this.preprocessors) {
/* if the filter has not been trained yet, do so now and store it */
if (!pp.isPrepared()) {
try {
start = System.currentTimeMillis();
pp.prepare(data);
this.timeForTrainingPreprocessors = (int) (System.currentTimeMillis() - start);
int newNumberOfClasses = pp.apply(data).numClasses();
if (data.numClasses() != newNumberOfClasses) {
logger.info("{} changed number of classes from {} to {}", pp.getSelector(), data.numClasses(), newNumberOfClasses);
}
} catch (NullPointerException e) {
logger.error("Could not apply preprocessor", e);
}
}
/* now apply the attribute selector */
data = pp.apply(data);
}
logger.info("Reduced number of attributes from {} to {}", numAttributesBefore, data.numAttributes());
/* build classifier based on reduced data */
start = System.currentTimeMillis();
super.getClassifier().buildClassifier(data);
this.timeForTrainingClassifier = (int) (System.currentTimeMillis() - start);
this.trained = true;
this.timeForExecutingPreprocessors = new DescriptiveStatistics();
this.timeForExecutingClassifier = new DescriptiveStatistics();
}
private Instance applyPreprocessors(Instance data) throws PreprocessingException {
long start = System.currentTimeMillis();
for (SupervisedFilterSelector pp : this.preprocessors) {
data = pp.apply(data);
}
this.timeForExecutingPreprocessors.addValue((int) (System.currentTimeMillis() - start));
return data;
}
@Override
public double classifyInstance(Instance arg0) throws Exception {
if (!this.trained) {
throw new IllegalStateException("Cannot make predictions on untrained pipeline!");
}
int numAttributesBefore = arg0.numAttributes();
arg0 = this.applyPreprocessors(arg0);
if (numAttributesBefore != arg0.numAttributes()) {
logger.info("Reduced number of attributes from {} to {}", numAttributesBefore, arg0.numAttributes());
}
long start = System.currentTimeMillis();
double result = super.getClassifier().classifyInstance(arg0);
this.timeForExecutingClassifier.addValue((System.currentTimeMillis() - start));
return result;
}
public double[] classifyInstances(final Instances arg0) throws Exception {
int n = arg0.size();
double[] answers = new double[n];
for (int i = 0; i < n; i++) {
answers[i] = this.classifyInstance(arg0.get(i));
}
return answers;
}
@Override
public double[] distributionForInstance(Instance arg0) throws Exception {
if (!this.trained) {
throw new IllegalStateException("Cannot make predictions on untrained pipeline!");
}
if (arg0 == null) {
throw new IllegalArgumentException("Cannot make predictions for null-instance");
}
arg0 = this.applyPreprocessors(arg0);
if (arg0 == null) {
throw new IllegalStateException("The filter has turned the instance into NULL");
}
long start = System.currentTimeMillis();
double[] result = super.getClassifier().distributionForInstance(arg0);
this.timeForExecutingClassifier.addValue((int) (System.currentTimeMillis() - start));
return result;
}
@Override
public Capabilities getCapabilities() {
return super.getClassifier().getCapabilities();
}
public Classifier getBaseClassifier() {
return super.getClassifier();
}
public List<SupervisedFilterSelector> getPreprocessors() {
return this.preprocessors;
}
@Override
public String toString() {
return this.getPreprocessors() + " (preprocessors), " + WekaUtil.getClassifierDescriptor(this.getBaseClassifier()) + " (classifier)";
}
public long getTimeForTrainingPreprocessor() {
return this.timeForTrainingPreprocessors;
}
public long getTimeForTrainingClassifier() {
return this.timeForTrainingClassifier;
}
public DescriptiveStatistics getTimeForExecutingPreprocessor() {
return this.timeForExecutingPreprocessors;
}
public DescriptiveStatistics getTimeForExecutingClassifier() {
return this.timeForExecutingClassifier;
}
}
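/*
 * Hypothetical usage sketch (not part of the original source): wires a CfsSubsetEval/BestFirst
 * attribute selection step in front of a J48 base classifier. The dataset file and the
 * concrete searcher, evaluator, and classifier are assumptions for illustration only.
 */
class MLPipelineUsageExample {
public static void main(final String[] args) throws Exception {
Instances data = weka.core.converters.ConverterUtils.DataSource.read("iris.arff"); // assumed file
data.setClassIndex(data.numAttributes() - 1);
MLPipeline pipeline = new MLPipeline(new weka.attributeSelection.BestFirst(), new weka.attributeSelection.CfsSubsetEval(), new weka.classifiers.trees.J48());
pipeline.buildClassifier(data);
System.out.println(pipeline + " -> " + pipeline.classifyInstance(data.firstInstance()));
}
}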
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline/MLSophisticatedPipeline.java
|
package ai.libs.jaicore.ml.weka.classification.pipeline;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import ai.libs.jaicore.ml.weka.WekaUtil;
import ai.libs.jaicore.ml.weka.classification.pipeline.featuregen.FeatureGenerator;
import weka.classifiers.Classifier;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
/**
*
* @author Felix Mohr
*
*/
@SuppressWarnings("serial")
public class MLSophisticatedPipeline implements Classifier, FeatureGenerator, Serializable {
private final List<FeatureGenerator> featureGenerators = new ArrayList<>();
private final List<FeaturePreprocessor> featurePreprocessors = new ArrayList<>();
private final List<FeaturePreprocessor> featureSelectors = new ArrayList<>();
private final Classifier classifier;
private boolean trained = false;
private long timeForTrainingPreprocessors;
private long timeForTrainingClassifier;
private long timeForExecutingPreprocessor;
private long timeForExecutingClassifier;
private Instances emptyReferenceDataset;
public MLSophisticatedPipeline(final List<FeatureGenerator> featureGenerators, final List<FeaturePreprocessor> preprocessors, final List<FeaturePreprocessor> featureSelectors, final Classifier baseClassifier) {
super();
if (baseClassifier == null) {
throw new IllegalArgumentException("Base classifier must not be null!");
}
this.featureGenerators.addAll(featureGenerators);
this.featurePreprocessors.addAll(preprocessors);
this.featureSelectors.addAll(featureSelectors);
this.classifier = baseClassifier;
}
@Override
public void buildClassifier(Instances data) throws Exception {
/* determine features to be created */
long start;
Instances mergedInstances = new Instances(data);
int f = data.numAttributes();
/* generate features */
for (FeatureGenerator pp : this.featureGenerators) {
/* if the filter has not been trained yet, do so now and store it */
if (!pp.isPrepared()) {
start = System.currentTimeMillis();
pp.prepare(data);
this.timeForTrainingPreprocessors = System.currentTimeMillis() - start;
}
Instances modifiedInstances = pp.apply(data);
if (modifiedInstances == null) {
throw new IllegalStateException("Feature Generator " + pp + " has generated a null-dataset!");
}
/* now apply the attribute selector */
for (int i = 0; i < modifiedInstances.numAttributes(); i++) {
modifiedInstances.renameAttribute(modifiedInstances.attribute(i), "f" + (f++));
}
mergedInstances = Instances.mergeInstances(mergedInstances, modifiedInstances);
mergedInstances.setClassIndex(data.classIndex());
}
data = mergedInstances;
/* preprocess features */
for (FeaturePreprocessor pp : this.featurePreprocessors) {
pp.prepare(data);
data = pp.apply(data);
if (data.classIndex() < 0) {
throw new IllegalStateException("Preprocessor " + pp + " has removed class index!");
}
}
/* feature selection */
for (FeaturePreprocessor pp : this.featureSelectors) {
pp.prepare(data);
data = pp.apply(data);
if (data.classIndex() < 0) {
throw new IllegalStateException("Preprocessor " + pp + " has removed class index!");
}
}
/* build classifier based on reduced data */
this.emptyReferenceDataset = new Instances(data);
this.emptyReferenceDataset.clear();
start = System.currentTimeMillis();
this.classifier.buildClassifier(data);
this.timeForTrainingClassifier = System.currentTimeMillis() - start;
this.trained = true;
}
private Instance applyPreprocessors(Instance data) throws PreprocessingException {
long start = System.currentTimeMillis();
/* create features */
Instance mergedInstance = new DenseInstance(data);
mergedInstance.setDataset(data.dataset());
for (FeatureGenerator pp : this.featureGenerators) {
Instances mergedDatasetA = new Instances(mergedInstance.dataset());
mergedDatasetA.clear();
mergedDatasetA.add(mergedInstance);
Instance modifiedInstance = pp.apply(data);
if (modifiedInstance.dataset() == null) {
throw new IllegalStateException("Instance was detached from dataset by " + pp);
}
Instances mergedDatasetB = modifiedInstance.dataset();
Instances mergedDataset = Instances.mergeInstances(mergedDatasetA, mergedDatasetB);
mergedDataset.setClassIndex(mergedDatasetA.classIndex());
mergedInstance = mergedInstance.mergeInstance(modifiedInstance);
mergedInstance.setDataset(mergedDataset);
this.timeForExecutingPreprocessor = System.currentTimeMillis() - start;
}
data = mergedInstance;
/* preprocess features */
for (FeaturePreprocessor pp : this.featurePreprocessors) {
data = pp.apply(data);
}
/* feature selection */
for (FeaturePreprocessor pp : this.featureSelectors) {
data = pp.apply(data);
}
return data;
}
@Override
public double classifyInstance(Instance arg0) throws Exception {
if (!this.trained) {
throw new IllegalStateException("Cannot make predictions on untrained pipeline!");
}
arg0 = this.applyPreprocessors(arg0);
long start = System.currentTimeMillis();
double result = this.classifier.classifyInstance(arg0);
this.timeForExecutingClassifier = System.currentTimeMillis() - start;
return result;
}
@Override
public double[] distributionForInstance(Instance arg0) throws Exception {
if (!this.trained) {
throw new IllegalStateException("Cannot make predictions on untrained pipeline!");
}
if (arg0 == null) {
throw new IllegalArgumentException("Cannot make predictions for null-instance");
}
arg0 = this.applyPreprocessors(arg0);
if (arg0 == null) {
throw new IllegalStateException("The filter has turned the instance into NULL");
}
long start = System.currentTimeMillis();
double[] result = this.classifier.distributionForInstance(arg0);
this.timeForExecutingClassifier = System.currentTimeMillis() - start;
return result;
}
@Override
public Capabilities getCapabilities() {
return this.classifier.getCapabilities();
}
public Classifier getBaseClassifier() {
return this.classifier;
}
public long getTimeForTrainingPreprocessor() {
return this.timeForTrainingPreprocessors;
}
public long getTimeForTrainingClassifier() {
return this.timeForTrainingClassifier;
}
public long getTimeForExecutingPreprocessor() {
return this.timeForExecutingPreprocessor;
}
public long getTimeForExecutingClassifier() {
return this.timeForExecutingClassifier;
}
@Override
public void prepare(final Instances data) throws PreprocessingException {
try {
this.buildClassifier(data);
} catch (Exception e) {
throw new PreprocessingException(e);
}
}
private Instances getEmptyProbingResultDataset() {
if (!this.isPrepared()) {
throw new IllegalStateException("Cannot determine empty dataset, because the pipeline has not been trained yet.");
}
ArrayList<Attribute> atts = new ArrayList<>();
List<String> attributeValues = WekaUtil.getClassesDeclaredInDataset(this.emptyReferenceDataset);
for (String att : attributeValues) {
atts.add(new Attribute("probe_classprob_" + att + "_" + this));
}
return new Instances("probing", atts, 0);
}
@Override
public Instance apply(final Instance data) throws PreprocessingException {
double[] classProbs;
try {
classProbs = this.distributionForInstance(data);
Instance newInst = new DenseInstance(classProbs.length);
Instances dataset = this.getEmptyProbingResultDataset();
dataset.add(newInst);
newInst.setDataset(dataset);
for (int i = 0; i < classProbs.length; i++) {
newInst.setValue(i, classProbs[i]);
}
return newInst;
} catch (Exception e) {
throw new PreprocessingException(e);
}
}
@Override
public Instances apply(final Instances data) throws PreprocessingException {
Instances probingResults = new Instances(this.getEmptyProbingResultDataset());
for (Instance inst : data) {
Instance probedInst = this.apply(inst);
probedInst.setDataset(probingResults);
probingResults.add(probedInst);
}
return probingResults;
}
@Override
public boolean isPrepared() {
return this.trained;
}
}
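/*
 * Hypothetical usage sketch (not part of the original source): with empty generator and
 * preprocessor lists the pipeline degenerates to its base classifier, which keeps the
 * example self-contained. The dataset file and the J48 classifier are assumptions.
 */
class MLSophisticatedPipelineUsageExample {
public static void main(final String[] args) throws Exception {
Instances data = weka.core.converters.ConverterUtils.DataSource.read("iris.arff"); // assumed file
data.setClassIndex(data.numAttributes() - 1);
MLSophisticatedPipeline pipeline = new MLSophisticatedPipeline(new ArrayList<>(), new ArrayList<>(), new ArrayList<>(), new weka.classifiers.trees.J48());
pipeline.buildClassifier(data);
System.out.println(java.util.Arrays.toString(pipeline.distributionForInstance(data.firstInstance())));
}
}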
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline/PreprocessingException.java
|
package ai.libs.jaicore.ml.weka.classification.pipeline;
public class PreprocessingException extends Exception {
private static final long serialVersionUID = -5454710107294165881L;
public PreprocessingException(final Throwable t) {
super(t);
}
public PreprocessingException(final String message) {
super(message);
}
public PreprocessingException(final String message, final Throwable t) {
super(message, t);
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline/SupervisedFilterSelector.java
|
package ai.libs.jaicore.ml.weka.classification.pipeline;
import java.io.Serializable;
import ai.libs.jaicore.ml.weka.WekaUtil;
import weka.attributeSelection.ASEvaluation;
import weka.attributeSelection.ASSearch;
import weka.attributeSelection.AttributeSelection;
import weka.core.Instance;
import weka.core.Instances;
@SuppressWarnings("serial")
public class SupervisedFilterSelector implements Serializable, FeaturePreprocessor {
private final ASSearch searcher;
private final ASEvaluation evaluator;
private final AttributeSelection selector;
private boolean prepared;
public SupervisedFilterSelector(final ASSearch searcher, final ASEvaluation evaluator) {
super();
this.searcher = searcher;
this.evaluator = evaluator;
this.selector = new AttributeSelection();
this.selector.setSearch(searcher);
this.selector.setEvaluator(evaluator);
}
public SupervisedFilterSelector(final ASSearch searcher, final ASEvaluation evaluator, final AttributeSelection selector) {
super();
this.searcher = searcher;
this.evaluator = evaluator;
this.selector = selector;
}
public ASSearch getSearcher() {
return this.searcher;
}
public ASEvaluation getEvaluator() {
return this.evaluator;
}
public AttributeSelection getSelector() {
return this.selector;
}
@Override
public void prepare(final Instances data) throws PreprocessingException {
try {
this.selector.SelectAttributes(data);
} catch (Exception e) {
throw new PreprocessingException(e);
}
this.prepared = true;
}
@Override
public Instance apply(final Instance data) throws PreprocessingException {
if (!this.prepared) {
throw new IllegalStateException("Cannot apply preprocessor before it has been prepared!");
}
try {
return this.selector.reduceDimensionality(data);
} catch (Exception e) {
throw new PreprocessingException(e);
}
}
@Override
public Instances apply(final Instances data) throws PreprocessingException {
if (!this.prepared) {
throw new IllegalStateException("Cannot apply preprocessor before it has been prepared!");
}
try {
return this.selector.reduceDimensionality(data);
} catch (Exception e) {
throw new PreprocessingException(e);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((this.evaluator == null) ? 0 : this.evaluator.hashCode());
result = prime * result + ((this.searcher == null) ? 0 : this.searcher.hashCode());
result = prime * result + ((this.selector == null) ? 0 : this.selector.hashCode());
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (this.getClass() != obj.getClass()) {
return false;
}
SupervisedFilterSelector other = (SupervisedFilterSelector) obj;
if (this.evaluator == null) {
if (other.evaluator != null) {
return false;
}
} else if (!this.evaluator.equals(other.evaluator)) {
return false;
}
if (this.searcher == null) {
if (other.searcher != null) {
return false;
}
} else if (!this.searcher.equals(other.searcher)) {
return false;
}
if (this.selector == null) {
if (other.selector != null) {
return false;
}
} else if (!this.selector.equals(other.selector)) {
return false;
}
return true;
}
@Override
public boolean isPrepared() {
return this.prepared;
}
public void setPrepared(final boolean prepared) {
this.prepared = prepared;
}
@Override
public String toString() {
return "SupervisedFilterSelector [searcher=" + WekaUtil.getPreprocessorDescriptor(this.searcher) + ", evaluator=" + WekaUtil.getPreprocessorDescriptor(this.evaluator) + "]";
}
}
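// Usage sketch (illustrative addition, not part of the original source): attribute
// selection with a Ranker search and a PrincipalComponents evaluator, the same pairing
// used by the PCA generator below; the ARFF file name is a hypothetical placeholder.
class SupervisedFilterSelectorExample {
public static void main(final String[] args) throws Exception {
weka.core.Instances data = new weka.core.converters.ConverterUtils.DataSource("data.arff").getDataSet(); // hypothetical file
data.setClassIndex(data.numAttributes() - 1);
SupervisedFilterSelector selector = new SupervisedFilterSelector(new weka.attributeSelection.Ranker(), new weka.attributeSelection.PrincipalComponents());
selector.prepare(data); // runs the attribute selection on the training data
weka.core.Instances reduced = selector.apply(data); // projects the data onto the selected attributes
System.out.println("Attributes after selection: " + reduced.numAttributes());
}
}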
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline/SuvervisedFilterPreprocessor.java
|
package ai.libs.jaicore.ml.weka.classification.pipeline;
import java.io.Serializable;
import ai.libs.jaicore.ml.weka.WekaUtil;
import weka.attributeSelection.ASEvaluation;
import weka.attributeSelection.ASSearch;
import weka.attributeSelection.AttributeSelection;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
@SuppressWarnings("serial")
public class SuvervisedFilterPreprocessor implements Serializable, FeaturePreprocessor {
private final ASSearch searcher;
private final ASEvaluation evaluator;
private final AttributeSelection selector;
private boolean prepared;
public SuvervisedFilterPreprocessor(final ASSearch searcher, final ASEvaluation evaluator) {
super();
this.searcher = searcher;
this.evaluator = evaluator;
this.selector = new AttributeSelection();
this.selector.setSearch(searcher);
this.selector.setEvaluator(evaluator);
}
public SuvervisedFilterPreprocessor(final ASSearch searcher, final ASEvaluation evaluator, final AttributeSelection selector) {
super();
this.searcher = searcher;
this.evaluator = evaluator;
this.selector = selector;
}
public ASSearch getSearcher() {
return this.searcher;
}
public ASEvaluation getEvaluator() {
return this.evaluator;
}
public AttributeSelection getSelector() {
return this.selector;
}
@Override
public void prepare(final Instances data) throws PreprocessingException {
try {
this.selector.SelectAttributes(data);
} catch (Exception e) {
throw new PreprocessingException(e);
}
this.prepared = true;
}
@Override
public Instance apply(final Instance data) throws PreprocessingException {
if (!this.prepared) {
throw new IllegalStateException("Cannot apply preprocessor before it has been prepared!");
}
try {
Instance inst = this.selector.reduceDimensionality(data);
if (inst.dataset().classIndex() >= 0) {
inst = WekaUtil.removeClassAttribute(inst);
}
for (int i = 0; i < inst.dataset().numAttributes(); i++) {
Attribute a = inst.dataset().attribute(i);
inst.dataset().renameAttribute(a, this.getClass().getSimpleName() + "_" + a.name());
}
return inst;
}
catch (Exception e) {
throw new PreprocessingException(e);
}
}
@Override
public Instances apply(final Instances data) throws PreprocessingException {
if (!this.prepared) {
throw new IllegalStateException("Cannot apply preprocessor before it has been prepared!");
}
try {
Instances inst = this.selector.reduceDimensionality(data);
if (inst.classIndex() >= 0) {
inst = WekaUtil.removeClassAttribute(inst);
}
for (int i = 0; i < inst.numAttributes(); i++) {
Attribute a = inst.attribute(i);
inst.renameAttribute(a, this.getClass().getSimpleName() + "_" + a.name());
}
return inst;
}
catch (Exception e) {
throw new PreprocessingException(e);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((this.evaluator == null) ? 0 : this.evaluator.hashCode());
result = prime * result + ((this.searcher == null) ? 0 : this.searcher.hashCode());
result = prime * result + ((this.selector == null) ? 0 : this.selector.hashCode());
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (this.getClass() != obj.getClass()) {
return false;
}
SuvervisedFilterPreprocessor other = (SuvervisedFilterPreprocessor) obj;
if (this.evaluator == null) {
if (other.evaluator != null) {
return false;
}
} else if (!this.evaluator.equals(other.evaluator)) {
return false;
}
if (this.searcher == null) {
if (other.searcher != null) {
return false;
}
} else if (!this.searcher.equals(other.searcher)) {
return false;
}
if (this.selector == null) {
if (other.selector != null) {
return false;
}
} else if (!this.selector.equals(other.selector)) {
return false;
}
return true;
}
@Override
public boolean isPrepared() {
return this.prepared;
}
public void setPrepared(final boolean prepared) {
this.prepared = prepared;
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline/featuregen/FeatureGenerator.java
|
package ai.libs.jaicore.ml.weka.classification.pipeline.featuregen;
import java.io.Serializable;
import ai.libs.jaicore.ml.weka.classification.pipeline.FeaturePreprocessor;
public interface FeatureGenerator extends FeaturePreprocessor, Serializable {
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline/featuregen/FeatureGeneratorTree.java
|
package ai.libs.jaicore.ml.weka.classification.pipeline.featuregen;
import java.util.ArrayList;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.ml.weka.classification.pipeline.PreprocessingException;
import weka.core.Instance;
import weka.core.Instances;
public class FeatureGeneratorTree implements FeatureGenerator {
private static final long serialVersionUID = 3071755243287146060L;
private final transient Logger logger = LoggerFactory.getLogger(FeatureGeneratorTree.class);
private final FeatureGenerator root;
private final List<FeatureGeneratorTree> children = new ArrayList<>();
private boolean prepared;
public FeatureGeneratorTree(final FeatureGenerator root) {
this.root = root;
}
public void addChild(final FeatureGenerator child) {
this.children.add(new FeatureGeneratorTree(child));
}
public void removeChild(final FeatureGeneratorTree child) {
this.children.removeIf(c -> c.root.equals(child));
}
public FeatureGenerator getRoot() {
return this.root;
}
@Override
public void prepare(final Instances data) throws PreprocessingException {
this.logger.info("Starting preparation of FeatureGeneratorTree ({}) for {}x{}-matrix.", this.root.getClass().getName(), data.size(), data.numAttributes());
try {
/* prepare children and apply them in order to get the data necessary to prepare the local feature generator */
for (FeatureGeneratorTree child : this.children) {
child.prepare(data);
}
Instances mergedInstances = new Instances(data);
for (FeatureGeneratorTree child : this.children) {
Instances instancesGeneratedByChild = child.apply(data);
mergedInstances = Instances.mergeInstances(mergedInstances, instancesGeneratedByChild);
}
/* prepare local feature generator */
this.root.prepare(mergedInstances);
Instances result = this.apply(data);
this.logger.info("Preparation of FeatureGeneratorTree ({}) ready. Result will be a {}x{}-matrix", this.root.getClass().getName(), result.size(), result.numAttributes());
this.prepared = true;
} catch (Exception e) {
throw new PreprocessingException(e);
}
}
@Override
public Instance apply(final Instance data) throws PreprocessingException {
try {
Instances instances = new Instances(data.dataset());
instances.clear();
instances.add(data);
return this.apply(instances).firstInstance();
} catch (Exception e) {
throw new PreprocessingException(e);
}
}
@Override
public Instances apply(final Instances data) throws PreprocessingException {
try {
Instances mergedInstances = new Instances(data);
for (FeatureGeneratorTree child : this.children) {
mergedInstances = Instances.mergeInstances(mergedInstances, child.apply(data));
}
return this.root.apply(mergedInstances);
} catch (Exception e) {
throw new PreprocessingException(e);
}
}
@Override
public boolean isPrepared() {
return this.prepared;
}
}
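// Usage sketch (illustrative addition, not part of the original source): a feature
// generator tree with a PCA root and a PolynomialFeatures child; the child's output is
// merged with the original attributes before the root is prepared and applied. The ARFF
// file name is a hypothetical placeholder.
class FeatureGeneratorTreeExample {
public static void main(final String[] args) throws Exception {
weka.core.Instances data = new weka.core.converters.ConverterUtils.DataSource("data.arff").getDataSet(); // hypothetical file
data.setClassIndex(data.numAttributes() - 1);
FeatureGeneratorTree tree = new FeatureGeneratorTree(new PCA());
tree.addChild(new PolynomialFeatures());
tree.prepare(data); // prepares the children first, then the root on the merged data
weka.core.Instances transformed = tree.apply(data);
System.out.println("Generated " + transformed.numAttributes() + " features.");
}
}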
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline/featuregen/InteractingFeatures.java
|
package ai.libs.jaicore.ml.weka.classification.pipeline.featuregen;
import java.util.ArrayList;
import java.util.List;
import ai.libs.jaicore.basic.sets.Pair;
import ai.libs.jaicore.basic.sets.SetUtil;
import ai.libs.jaicore.ml.weka.classification.pipeline.PreprocessingException;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
public class InteractingFeatures implements FeatureGenerator {
private boolean isPrepared;
private List<Integer> indicesToInteract = new ArrayList<>();
@Override
public void prepare(final Instances data) throws PreprocessingException {
ArrayList<Attribute> attributes = new ArrayList<>();
this.indicesToInteract.clear();
for (int i = 0; i < data.numAttributes(); i++) {
if (data.attribute(i).isNumeric()) {
attributes.add(new weka.core.Attribute("q" + i, false));
this.indicesToInteract.add(i);
}
}
this.isPrepared = true;
}
private Instances getEmptyDataset() {
if (!this.isPrepared) {
throw new IllegalStateException("Cannot get empty dataset before preparation");
}
ArrayList<Attribute> attributes = new ArrayList<>();
for (Pair<Integer, Integer> pair : SetUtil.cartesianProduct(this.indicesToInteract, this.indicesToInteract)) {
if (pair.getX() < pair.getY()) {
attributes.add(new Attribute("interaction_" + pair.getX() + "_" + pair.getY(), false));
}
}
return new Instances("interaction", attributes, 0);
}
@Override
public Instance apply(final Instance data) throws PreprocessingException {
Instance newInstance = new DenseInstance(((int) Math.pow(this.indicesToInteract.size(), 2) - this.indicesToInteract.size()) / 2);
int index = 0;
for (Pair<Integer, Integer> pair : SetUtil.cartesianProduct(this.indicesToInteract, this.indicesToInteract)) {
if (pair.getX() < pair.getY()) {
newInstance.setValue(index++, data.value(pair.getX()) * data.value(pair.getY()));
}
}
Instances dataset = this.getEmptyDataset();
dataset.add(newInstance);
newInstance.setDataset(dataset);
return newInstance;
}
@Override
public Instances apply(final Instances data) throws PreprocessingException {
Instances newDataset = this.getEmptyDataset();
for (Instance inst : data) {
Instance modInst = this.apply(inst);
newDataset.add(modInst);
modInst.setDataset(newDataset);
}
return newDataset;
}
@Override
public boolean isPrepared() {
return this.isPrepared;
}
}
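// Worked example for InteractingFeatures (illustrative note): with 4 numeric attributes,
// one product feature is generated per unordered pair of distinct attributes,
// i.e. (4 * 4 - 4) / 2 = 6 interaction features.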
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline/featuregen/PCA.java
|
package ai.libs.jaicore.ml.weka.classification.pipeline.featuregen;
import ai.libs.jaicore.ml.weka.classification.pipeline.SuvervisedFilterPreprocessor;
import weka.attributeSelection.PrincipalComponents;
import weka.attributeSelection.Ranker;
@SuppressWarnings("serial")
public class PCA extends SuvervisedFilterPreprocessor implements FeatureGenerator {
public PCA() {
super(new Ranker(), new PrincipalComponents());
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline/featuregen/PolynomialFeatures.java
|
package ai.libs.jaicore.ml.weka.classification.pipeline.featuregen;
import java.util.ArrayList;
import java.util.List;
import ai.libs.jaicore.ml.weka.classification.pipeline.PreprocessingException;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
public class PolynomialFeatures implements FeatureGenerator {
private static final long serialVersionUID = 5075237071717821149L;
private boolean isPrepared;
private int potence = 2;
private List<Integer> indicesToSquare = new ArrayList<>();
@Override
public void prepare(final Instances data) throws PreprocessingException {
ArrayList<Attribute> attributes = new ArrayList<>();
this.indicesToSquare.clear();
for (int i = 0; i < data.numAttributes(); i++) {
if (data.attribute(i).isNumeric()) {
attributes.add(new weka.core.Attribute("q" + i, false));
this.indicesToSquare.add(i);
}
}
this.isPrepared = true;
}
private Instances getEmptyDataset() {
if (!this.isPrepared) {
throw new IllegalStateException("Cannot get empty dataset before preparation");
}
ArrayList<Attribute> attributes = new ArrayList<>();
for (int indexToSquare : this.indicesToSquare) {
attributes.add(new Attribute("pow_" + this.potence + "_" + indexToSquare, false));
}
return new Instances("potences", attributes, 0);
}
@Override
public Instance apply(final Instance data) throws PreprocessingException {
Instance copy = new DenseInstance(this.indicesToSquare.size());
int i = 0;
for (int index : this.indicesToSquare) {
copy.setValue(i++, Math.pow(data.value(index), this.potence));
}
Instances dataset = this.getEmptyDataset();
dataset.add(copy);
copy.setDataset(dataset);
return copy;
}
@Override
public Instances apply(final Instances data) throws PreprocessingException {
Instances copy = this.getEmptyDataset();
for (Instance inst : data) {
Instance modInst = this.apply(inst);
copy.add(modInst);
modInst.setDataset(copy);
}
return copy;
}
@Override
public boolean isPrepared() {
return this.isPrepared;
}
public int getPotence() {
return this.potence;
}
public void setPotence(final int potence) {
this.potence = potence;
}
}
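// Usage sketch (illustrative addition, not part of the original source): generating cubes
// of all numeric attributes; the ARFF file name is a hypothetical placeholder.
class PolynomialFeaturesExample {
public static void main(final String[] args) throws Exception {
weka.core.Instances data = new weka.core.converters.ConverterUtils.DataSource("data.arff").getDataSet(); // hypothetical file
PolynomialFeatures poly = new PolynomialFeatures();
poly.setPotence(3); // cube instead of the default square
poly.prepare(data); // records which attributes are numeric
weka.core.Instances powers = poly.apply(data); // one pow_3_<index> attribute per numeric column
System.out.println(powers.numAttributes() + " polynomial features generated.");
}
}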
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline/featurepreprocess/Normalization.java
|
package ai.libs.jaicore.ml.weka.classification.pipeline.featurepreprocess;
import ai.libs.jaicore.ml.weka.classification.pipeline.FeaturePreprocessor;
import ai.libs.jaicore.ml.weka.classification.pipeline.PreprocessingException;
import weka.core.Instance;
import weka.core.Instances;
import weka.filters.Filter;
public class Normalization implements FeaturePreprocessor {
private static final long serialVersionUID = 3410424109277796158L;
private weka.filters.unsupervised.attribute.Normalize norm = new weka.filters.unsupervised.attribute.Normalize();
private boolean prepared;
@Override
public void prepare(final Instances data) throws PreprocessingException {
try {
this.norm.setInputFormat(data);
Filter.useFilter(data, this.norm);
this.prepared = true;
} catch (Exception e) {
throw new PreprocessingException(e);
}
}
@Override
public Instance apply(final Instance data) throws PreprocessingException {
try {
this.norm.input(data);
return this.norm.output();
} catch (Exception e) {
throw new PreprocessingException(e);
}
}
@Override
public Instances apply(final Instances data) throws PreprocessingException {
Instances newInstances = new Instances(data);
newInstances.clear();
for (Instance i : data) {
newInstances.add(this.apply(i));
}
return newInstances;
}
@Override
public boolean isPrepared() {
return this.prepared;
}
}
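// Usage sketch (illustrative addition, not part of the original source): fitting the
// filter on training data and applying it to unseen instances; the ARFF file names are
// hypothetical placeholders.
class NormalizationExample {
public static void main(final String[] args) throws Exception {
weka.core.Instances train = new weka.core.converters.ConverterUtils.DataSource("train.arff").getDataSet(); // hypothetical file
weka.core.Instances test = new weka.core.converters.ConverterUtils.DataSource("test.arff").getDataSet(); // hypothetical file
Normalization norm = new Normalization();
norm.prepare(train); // fits the value ranges on the training data only
weka.core.Instances normalizedTest = norm.apply(test); // scales the test values into [0, 1]
System.out.println(normalizedTest.numInstances() + " instances normalized.");
}
}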
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/pipeline/featurepreprocess/Standardization.java
|
package ai.libs.jaicore.ml.weka.classification.pipeline.featurepreprocess;
import ai.libs.jaicore.ml.weka.classification.pipeline.FeaturePreprocessor;
import ai.libs.jaicore.ml.weka.classification.pipeline.PreprocessingException;
import weka.core.Instance;
import weka.core.Instances;
import weka.filters.Filter;
public class Standardization implements FeaturePreprocessor {
private static final long serialVersionUID = -6540039548736716606L;
private weka.filters.unsupervised.attribute.Standardize stand = new weka.filters.unsupervised.attribute.Standardize();
private boolean prepared;
@Override
public void prepare(final Instances data) throws PreprocessingException {
try {
this.stand.setInputFormat(data);
this.stand.setIgnoreClass(true);
Filter.useFilter(data, this.stand);
this.prepared = true;
} catch (Exception e) {
throw new PreprocessingException(e);
}
}
@Override
public Instance apply(final Instance data) throws PreprocessingException {
try {
this.stand.input(data);
return this.stand.output();
} catch (Exception e) {
throw new PreprocessingException(e);
}
}
@Override
public Instances apply(final Instances data) throws PreprocessingException {
Instances newInstances = new Instances(data);
newInstances.clear();
for (Instance i : data) {
newInstances.add(this.apply(i));
}
return newInstances;
}
@Override
public boolean isPrepared() {
return this.prepared;
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/ensemble/MajorityConfidenceVote.java
|
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.ensemble;
import java.util.Arrays;
import java.util.Random;
import weka.classifiers.evaluation.Evaluation;
import weka.classifiers.meta.Vote;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Utils;
/**
* Vote implementation for majority confidence. The class distributions of the
* individual ensemble members are aggregated as a weighted sum, where the
* classifier weights are determined during training using a cross-validation.
*
* @author Julian Lienen
*
*/
public class MajorityConfidenceVote extends Vote {
/**
* Generated serial version UID.
*/
private static final long serialVersionUID = -7128109840679632228L;
/**
* Number of folds used for determining the classifier weights by test accuracy
* within a CV on the training data.
*/
private int numFolds;
/**
* The classifier weights which are used within the distribution-for-instance
* calculation.
*/
private double[] classifierWeights;
/**
* Seed used within CV for splitting the data.
*/
private int seed;
/**
* Constructor for a majority confidence vote ensemble classifier.
*
* @param numFolds
* See {@link MajorityConfidenceVote#numFolds}
* @param seed
* See {@link MajorityConfidenceVote#seed}
*/
public MajorityConfidenceVote(final int numFolds, final long seed) {
super();
this.numFolds = numFolds;
this.seed = (int) seed;
}
/**
* Builds the ensemble by assessing the classifier weights using a cross
* validation of each classifier of the ensemble and then training the
* classifiers using the complete <code>data</code>.
*
* @param data
* Training instances
*/
@Override
public void buildClassifier(final Instances data) throws Exception {
this.classifierWeights = new double[this.m_Classifiers.length];
// remove instances with missing class
Instances newData = new Instances(data);
newData.deleteWithMissingClass();
this.m_structure = new Instances(newData, 0);
// can classifier handle the data?
this.getCapabilities().testWithFail(data);
for (int i = 0; i < this.m_Classifiers.length; i++) {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException();
}
// Perform cross validation to determine the classifier weights
for (int n = 0; n < this.numFolds; n++) {
Instances train = data.trainCV(this.numFolds, n, new Random(this.seed));
Instances test = data.testCV(this.numFolds, n);
this.getClassifier(i).buildClassifier(train);
Evaluation eval = new Evaluation(train);
eval.evaluateModel(this.getClassifier(i), test);
this.classifierWeights[i] += eval.pctCorrect() / 100d;
}
this.classifierWeights[i] = Math.pow(this.classifierWeights[i], 2);
this.classifierWeights[i] /= this.numFolds;
this.getClassifier(i).buildClassifier(newData);
}
// If no classifier predicted something correctly, assume uniform distribution
if (Arrays.stream(this.classifierWeights).allMatch(d -> d < 0.000001d)) {
for (int i = 0; i < this.classifierWeights.length; i++) {
this.classifierWeights[i] = 1d / this.classifierWeights.length;
}
}
}
/**
* Calculates the distribution for an instance by predicting the distribution
* of each classifier, weighting it by the classifier's weight, and summing up
* the weighted probabilities per class. The final result is normalized.
*
* @param instance
* Instance to be predicted
* @return Returns the final probability distribution over the classes for the
* given <code>instance</code>
*
*/
@Override
public double[] distributionForInstance(final Instance instance) throws Exception {
double[] probs = new double[instance.numClasses()];
for (int i = 0; i < probs.length; i++) {
probs[i] = 1.0;
}
int numPredictions = 0;
for (int i = 0; i < this.m_Classifiers.length; i++) {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException();
}
double[] dist = this.getClassifier(i).distributionForInstance(instance);
if (Utils.sum(dist) > 0) {
for (int j = 0; j < dist.length; j++) {
probs[j] += this.classifierWeights[i] * dist[j];
}
numPredictions++;
}
}
for (int i = 0; i < this.m_preBuiltClassifiers.size(); i++) {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException();
}
double[] dist = this.m_preBuiltClassifiers.get(i).distributionForInstance(instance);
if (Utils.sum(dist) > 0) {
for (int j = 0; j < dist.length; j++) {
probs[j] *= dist[j];
}
numPredictions++;
}
}
// No predictions?
if (numPredictions == 0) {
return new double[instance.numClasses()];
}
// Should normalize to get "probabilities"
if (Utils.sum(probs) > 0) {
Utils.normalize(probs);
}
return probs;
}
/**
* {@inheritDoc}
*/
@Override
public double classifyInstance(final Instance instance) throws Exception {
double result;
int index;
double[] dist = this.distributionForInstance(instance);
if (instance.classAttribute().isNominal()) {
index = Utils.maxIndex(dist);
if (dist[index] == 0) {
result = Utils.missingValue();
} else {
result = index;
}
} else if (instance.classAttribute().isNumeric()) {
result = dist[0];
} else {
result = Utils.missingValue();
}
return result;
}
}
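// Usage sketch (illustrative addition, not part of the original source): a three-member
// ensemble weighted by a 5-fold CV on the training data; the ARFF file name is a
// hypothetical placeholder.
class MajorityConfidenceVoteExample {
public static void main(final String[] args) throws Exception {
weka.core.Instances data = new weka.core.converters.ConverterUtils.DataSource("data.arff").getDataSet(); // hypothetical file
data.setClassIndex(data.numAttributes() - 1);
MajorityConfidenceVote vote = new MajorityConfidenceVote(5, 42);
vote.setClassifiers(new weka.classifiers.Classifier[] { new weka.classifiers.trees.J48(), new weka.classifiers.lazy.IBk(), new weka.classifiers.bayes.NaiveBayes() });
vote.buildClassifier(data); // determines the CV-based weights, then trains each member on all data
double[] dist = vote.distributionForInstance(data.firstInstance());
System.out.println(java.util.Arrays.toString(dist));
}
}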
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/ensemble/package-info.java
|
/**
* A package consisting of ensemble classifiers used in implemented time series
* classifiers.
*
* @author Julian Lienen
*
*/
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.ensemble;
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/shapelets/ILearnShapeletsLearningAlgorithmConfig.java
|
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.shapelets;
import ai.libs.jaicore.basic.IOwnerBasedRandomizedAlgorithmConfig;
public interface ILearnShapeletsLearningAlgorithmConfig extends IOwnerBasedRandomizedAlgorithmConfig {
public static final String K_NUM_SHAPELETS = "numshapelets";
public static final String K_LEARNINGRATE = "learningrate";
public static final String K_REGULARIZATION = "regularization";
public static final String K_SHAPELETLENGTH_MIN = "minshapeletlength";
public static final String K_SHAPELETLENGTH_RELMIN = "relativeminshapeletlength";
public static final String K_SCALER = "scaler";
public static final String K_MAXITER = "maxiter";
public static final String K_GAMMA = "gamma";
public static final String K_ESTIMATEK = "estimatek";
/**
* Parameter which determines how many of the most-informative shapelets should be used.
* Corresponds to K in the paper
*/
@Key(K_NUM_SHAPELETS)
@DefaultValue("1")
public int numShapelets();
/**
* The learning rate used within the SGD.
*/
@Key(K_LEARNINGRATE)
@DefaultValue("0.01")
public double learningRate();
/**
* The regularization used within the SGD.
*/
@Key(K_REGULARIZATION)
@DefaultValue("0.01")
public double regularization();
/**
* The minimum length of the shapelets to be learned. Internally derived from
* the time series lengths and the <code>minShapeLengthPercentage</code>.
*/
@Key(K_SHAPELETLENGTH_MIN)
public int minShapeletLength();
/**
* The minimum shape length percentage used to calculate the minimum shape length.
*/
@Key(K_SHAPELETLENGTH_RELMIN)
@DefaultValue("0.1")
public double minShapeLengthPercentage();
/**
* The number of scales used for the shapelet lengths.
*/
@Key(K_SCALER)
@DefaultValue("2")
public int scaleR();
/**
* The maximum iterations used for the SGD.
*/
@Key(K_MAXITER)
@DefaultValue("300")
public int maxIterations();
/**
* Gamma value used for momentum during gradient descent. Defaults to 0.5.
*/
@Key(K_GAMMA)
@DefaultValue("0.5")
public double gamma();
/**
* Indicator whether K (the number of learned shapelets) should be estimated
* from the total number of segments. False by default.
*/
@Key(K_ESTIMATEK)
@DefaultValue("false")
public boolean estimateK();
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/shapelets/LearnShapeletsClassifier.java
|
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.shapelets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.aeonbits.owner.ConfigCache;
import org.api4.java.ai.ml.core.exception.PredictionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.basic.IOwnerBasedRandomizedAlgorithmConfig;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.learner.ASimplifiedTSClassifier;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.util.MathUtil;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.util.TimeSeriesUtil;
import ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.shapelets.LearnShapeletsLearningAlgorithm.ILearnShapeletsLearningAlgorithmConfig;
/**
* Implementation of the <code>LearnShapeletsClassifier</code> published in "J. Grabocka, N.
* Schilling, M. Wistuba, L. Schmidt-Thieme: Learning Time-Series Shapelets"
* (https://www.ismll.uni-hildesheim.de/pub/pdfs/grabocka2014e-kdd.pdf).
*
* This classifier only supports univariate time series prediction.
*
* @author Julian Lienen
*
*/
public class LearnShapeletsClassifier extends ASimplifiedTSClassifier<Integer> {
/**
* The log4j logger.
*/
private static final Logger LOGGER = LoggerFactory.getLogger(LearnShapeletsClassifier.class);
/**
* The tensor storing the derived shapelets.
*/
private double[][][] s;
/**
* The model's weights used for the class prediction learned by the training
* algorithm.
*/
private double[][][] w;
/**
* The model's bias weights.
*/
private double[] w0;
/**
* The number of classes.
*/
private int c;
private final ILearnShapeletsLearningAlgorithmConfig config;
/**
* Constructor of the {@link LearnShapeletsClassifier}.
*
* @param K
* See {@link LearnShapeletsLearningAlgorithm#K}
* @param learningRate
* See {@link LearnShapeletsLearningAlgorithm#learningRate}
* @param regularization
* See {@link LearnShapeletsLearningAlgorithm#regularization}
* @param scaleR
* See {@link LearnShapeletsLearningAlgorithm#scaleR}
* @param minShapeLengthPercentage
* See {@link LearnShapeletsLearningAlgorithm#minShapeLengthPercentage}
* @param maxIter
* See {@link LearnShapeletsLearningAlgorithm#maxIter}
* @param seed
* See {@link LearnShapeletsLearningAlgorithm#seed}
*/
public LearnShapeletsClassifier(final int K, final double learningRate, final double regularization, final int scaleR, final double minShapeLengthPercentage, final int maxIter, final int seed) {
this(K, learningRate, regularization, scaleR, minShapeLengthPercentage, maxIter, 0.5, seed);
}
/**
* Constructor of the {@link LearnShapeletsClassifier}.
*
* @param K
* See {@link LearnShapeletsLearningAlgorithm#K}
* @param learningRate
* See {@link LearnShapeletsLearningAlgorithm#learningRate}
* @param regularization
* See {@link LearnShapeletsLearningAlgorithm#regularization}
* @param scaleR
* See {@link LearnShapeletsLearningAlgorithm#scaleR}
* @param minShapeLengthPercentage
* See {@link LearnShapeletsLearningAlgorithm#minShapeLengthPercentage}
* @param maxIter
* See {@link LearnShapeletsLearningAlgorithm#maxIter}
* @param seed
* See {@link LearnShapeletsLearningAlgorithm#seed}
* @param gamma
* See {@link LearnShapeletsLearningAlgorithm#gamma}
*/
public LearnShapeletsClassifier(final int K, final double learningRate, final double regularization, final int scaleR, final double minShapeLengthPercentage, final int maxIter, final double gamma, final int seed) {
this.config = ConfigCache.getOrCreate(ILearnShapeletsLearningAlgorithmConfig.class);
this.config.setProperty(ILearnShapeletsLearningAlgorithmConfig.K_NUMSHAPELETS, "" + K);
this.config.setProperty(ILearnShapeletsLearningAlgorithmConfig.K_REGULARIZATION, "" + regularization);
this.config.setProperty(ILearnShapeletsLearningAlgorithmConfig.K_SCALER, "" + scaleR);
this.config.setProperty(ILearnShapeletsLearningAlgorithmConfig.K_SHAPELETLENGTH_RELMIN, "" + minShapeLengthPercentage);
this.config.setProperty(IOwnerBasedRandomizedAlgorithmConfig.K_SEED, "" + seed);
this.config.setProperty(ILearnShapeletsLearningAlgorithmConfig.K_MAXITER, "" + maxIter);
this.config.setProperty(ILearnShapeletsLearningAlgorithmConfig.K_LEARNINGRATE, "" + learningRate);
this.config.setProperty(ILearnShapeletsLearningAlgorithmConfig.K_GAMMA, "" + gamma);
}
/**
* Enables / disables the parameter estimation of K within the training
* algorithm.
*
* @param estimateK
* Value to be set
*/
public void setEstimateK(final boolean estimateK) {
this.config.setProperty(ILearnShapeletsLearningAlgorithmConfig.K_ESTIMATEK, "" + estimateK);
}
/**
* @return {@link LearnShapeletsClassifier#s}.
*/
public double[][][] getS() {
return this.s;
}
/**
* Setter for {@link LearnShapeletsClassifier#s}
*
* @param s
* New value to be set
*/
public void setS(final double[][][] s) {
this.s = s;
}
/**
* @return {@link LearnShapeletsClassifier#w}.
*/
public double[][][] getW() {
return this.w;
}
/**
* Setter for {@link LearnShapeletsClassifier#w}
*
* @param w
* New value to be set
*/
public void setW(final double[][][] w) {
this.w = w;
}
/**
* @return {@link LearnShapeletsClassifier#w0}.
*/
public double[] getW0() {
return this.w0;
}
/**
* Setter for {@link LearnShapeletsClassifier#w0}
*
* @param w0
* New value to be set
*/
public void setW0(final double[] w0) {
this.w0 = w0;
}
/**
* Setter for {@link LearnShapeletsClassifier#c}
*
* @param c
* New value to be set
*/
public void setC(final int c) {
this.c = c;
}
/**
* Setter for the minimum shapelet length used by the training algorithm.
*
* @param minShapeLength
* New value to be set
*/
public void setMinShapeLength(final int minShapeLength) {
this.config.setProperty(ILearnShapeletsLearningAlgorithmConfig.K_SHAPELETLENGTH_MIN, "" + minShapeLength);
}
/**
* {@inheritDoc}
*/
@Override
public Integer predict(double[] univInstance) throws PredictionException {
if (!this.isTrained()) {
throw new PredictionException("Model has not been built before!");
}
final HashMap<Integer, Double> scoring = new HashMap<>();
univInstance = TimeSeriesUtil.zNormalize(univInstance, LearnShapeletsLearningAlgorithm.USE_BIAS_CORRECTION);
// Calculate target class according to the paper's section 5.3
for (int i = 0; i < this.c; i++) {
double tmpScore = this.w0[i];
for (int r = 0; r < this.config.scaleR(); r++) {
for (int k = 0; k < this.s[r].length; k++) {
tmpScore += LearnShapeletsLearningAlgorithm.calculateMHat(this.s, this.config.minShapeletLength(), r, univInstance, k, univInstance.length, LearnShapeletsLearningAlgorithm.ALPHA) * this.w[i][r][k];
}
}
scoring.put(i, MathUtil.sigmoid(tmpScore));
}
return Collections.max(scoring.entrySet(), Map.Entry.comparingByValue()).getKey();
}
/**
* {@inheritDoc}
*/
@Override
public Integer predict(final List<double[]> multivInstance) throws PredictionException {
LOGGER.warn("Dataset to be predicted is multivariate but only first time series (univariate) will be considered.");
return this.predict(multivInstance.get(0));
}
/**
* {@inheritDoc}
*/
@Override
public List<Integer> predict(final ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2 dataset) throws PredictionException {
if (!this.isTrained()) {
throw new PredictionException("Model has not been built before!");
}
if (dataset.isMultivariate()) {
LOGGER.warn("Dataset to be predicted is multivariate but only first time series (univariate) will be considered.");
}
double[][] timeSeries = dataset.getValuesOrNull(0);
if (timeSeries == null) {
throw new IllegalArgumentException("Dataset matrix of the instances to be predicted must not be null!");
}
List<Integer> predictions = new ArrayList<>();
LOGGER.debug("Starting prediction...");
for (int inst = 0; inst < timeSeries.length; inst++) {
double[] instanceValues = timeSeries[inst];
predictions.add(this.predict(instanceValues));
}
LOGGER.debug("Finished prediction.");
return predictions;
}
@Override
public LearnShapeletsLearningAlgorithm getLearningAlgorithm(final TimeSeriesDataset2 dataset) {
return new LearnShapeletsLearningAlgorithm(this.config, this, dataset);
}
}
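// Usage sketch (illustrative addition, not part of the original source): training on a
// univariate TimeSeriesDataset2 and predicting class indices. The dataset construction is
// left as a hypothetical placeholder, and the training is triggered directly via the
// learning algorithm returned by getLearningAlgorithm; frameworks building on this class
// may instead use their own training entry point.
class LearnShapeletsExample {
public static void main(final String[] args) throws Exception {
TimeSeriesDataset2 trainingData = null; // hypothetical placeholder: load a univariate time series dataset here
LearnShapeletsClassifier classifier = new LearnShapeletsClassifier(10, 0.01, 0.01, 2, 0.1, 300, 42);
LearnShapeletsClassifier trained = classifier.getLearningAlgorithm(trainingData).call(); // runs the SGD training and populates s, w and w0
java.util.List<Integer> predictions = trained.predict(trainingData);
System.out.println(predictions);
}
}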
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/shapelets/LearnShapeletsLearningAlgorithm.java
|
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.shapelets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.api4.java.ai.ml.core.exception.TrainingException;
import org.api4.java.algorithm.IAlgorithm;
import org.api4.java.algorithm.Timeout;
import org.api4.java.algorithm.events.IAlgorithmEvent;
import org.api4.java.algorithm.exceptions.AlgorithmException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import ai.libs.jaicore.basic.IOwnerBasedRandomizedAlgorithmConfig;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.learner.ASimplifiedTSCLearningAlgorithm;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.util.MathUtil;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.util.TimeSeriesUtil;
import ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.util.WekaTimeseriesUtil;
import weka.clusterers.SimpleKMeans;
import weka.core.Instances;
/**
* Generalized Shapelets Learning implementation for
* <code>LearnShapeletsClassifier</code> published in "J. Grabocka, N.
* Schilling, M. Wistuba, L. Schmidt-Thieme: Learning Time-Series Shapelets"
* (https://www.ismll.uni-hildesheim.de/pub/pdfs/grabocka2014e-kdd.pdf)
*
* @author Julian Lienen
*
*/
public class LearnShapeletsLearningAlgorithm extends ASimplifiedTSCLearningAlgorithm<Integer, LearnShapeletsClassifier> {
public interface ILearnShapeletsLearningAlgorithmConfig extends IOwnerBasedRandomizedAlgorithmConfig {
public static final String K_NUMSHAPELETS = "numshapelets";
public static final String K_LEARNINGRATE = "learningrate";
public static final String K_REGULARIZATION = "regularization";
public static final String K_SHAPELETLENGTH_MIN = "minshapeletlength";
public static final String K_SHAPELETLENGTH_RELMIN = "relativeminshapeletlength";
public static final String K_SCALER = "scaler";
public static final String K_MAXITER = "maxiter";
public static final String K_GAMMA = "gamma";
public static final String K_ESTIMATEK = "estimatek";
/**
* Parameter which determines how many of the most-informative shapelets should be used.
* Corresponds to K in the paper
*/
@Key(K_NUMSHAPELETS)
public int numShapelets();
/**
* The learning rate used within the SGD.
*/
@Key(K_LEARNINGRATE)
public double learningRate();
/**
* The regularization used within the SGD.
*/
@Key(K_REGULARIZATION)
public double regularization();
/**
* The minimum length of the shapelets to be learned. Internally derived from
* the time series lengths and the <code>minShapeLengthPercentage</code>.
*/
@Key(K_SHAPELETLENGTH_MIN)
public int minShapeletLength();
/**
* The minimum shape length percentage used to calculate the minimum shape length.
*/
@Key(K_SHAPELETLENGTH_RELMIN)
public double minShapeLengthPercentage();
/**
* The number of scales used for the shapelet lengths.
*/
@Key(K_SCALER)
public int scaleR();
/**
* The maximum iterations used for the SGD.
*/
@Key(K_MAXITER)
public int maxIterations();
/**
* Gamma value used for momentum during gradient descent. Defaults to 0.5.
*/
@Key(K_GAMMA)
@DefaultValue("0.5")
public double gamma();
/**
* Indicator whether K (the number of learned shapelets) should be estimated
* from the total number of segments. False by default.
*/
@Key(K_ESTIMATEK)
@DefaultValue("false")
public boolean estimateK();
}
/**
* The log4j logger.
*/
private static final Logger LOGGER = LoggerFactory.getLogger(LearnShapeletsLearningAlgorithm.class);
/**
* The number of instances. This is parameter I of the paper.
*/
private int numInstances;
/**
* The number of attributes (i.e., the time series length without the class
* attribute). This is parameter Q of the paper.
*/
private int q;
/**
* The number of classes. This is parameter C of the paper.
*/
private int numClasses;
/**
* Indicator whether Bessel's correction should be used when normalizing arrays.
*/
public static final boolean USE_BIAS_CORRECTION = false;
/**
* Predefined alpha parameter used within the calculations.
*/
public static final double ALPHA = -30d; // Used in implementation. Paper says -100d
/**
* Epsilon value used to prevent dividing by zero occurrences.
*/
private static final double EPS = 0.000000000000000000001d;
/**
* See {@link IAlgorithm#getTimeout()}.
*/
private Timeout timeout = new Timeout(Integer.MAX_VALUE, TimeUnit.SECONDS);
/**
* Indicator whether instances used for training should be reordered s.t. the
* classes are used in an alternating manner.
*/
private boolean useInstanceReordering = true;
/**
* Constructor of the algorithm to train a {@link LearnShapeletsClassifier}.
*
*/
public LearnShapeletsLearningAlgorithm(final ILearnShapeletsLearningAlgorithmConfig config, final LearnShapeletsClassifier classifier, final TimeSeriesDataset2 dataset) {
super(config, classifier, dataset);
}
/**
* Initializes the tensor <code>S</code> storing the shapelets for each scale.
* The initialization is done by deriving initial shapelets from all normalized
* segments.
*
* @param trainingMatrix
* The training matrix used for the initialization of <code>S</code>.
* @return Returns the initialized tensor storing an initial guess for the
* shapelets based on the clustering
* @throws TrainingException
*/
public double[][][] initializeS(final double[][] trainingMatrix) throws TrainingException {
LOGGER.debug("Initializing S...");
/* read config locally */
final int scaleR = this.getConfig().scaleR();
final long seed = this.getConfig().seed();
final int minShapeLength = this.getConfig().minShapeletLength();
final double[][][] result = new double[scaleR][][];
for (int r = 0; r < scaleR; r++) {
final int numberOfSegments = getNumberOfSegments(this.q, minShapeLength, r);
if (numberOfSegments < 1) {
throw new TrainingException("The number of segments is lower than 1. Can not train the LearnShapelets model.");
}
final int L = (r + 1) * minShapeLength;
final double[][] tmpSegments = new double[trainingMatrix.length * numberOfSegments][L];
// Prepare training data for finding the centroids
for (int i = 0; i < trainingMatrix.length; i++) {
for (int j = 0; j < numberOfSegments; j++) {
for (int l = 0; l < L; l++) {
tmpSegments[i * numberOfSegments + j][l] = trainingMatrix[i][j + l];
}
tmpSegments[i * numberOfSegments + j] = TimeSeriesUtil.zNormalize(tmpSegments[i * numberOfSegments + j], USE_BIAS_CORRECTION);
}
}
// Transform instances
Instances wekaInstances = WekaTimeseriesUtil.matrixToWekaInstances(tmpSegments);
// Cluster using k-Means
SimpleKMeans kMeans = new SimpleKMeans();
try {
kMeans.setNumClusters(this.getConfig().numShapelets());
kMeans.setSeed((int) seed);
kMeans.setMaxIterations(100);
kMeans.buildClusterer(wekaInstances);
} catch (Exception e) {
LOGGER.warn("Could not initialize matrix S using kMeans clustering for r={} due to the following problem: {}. " + "Using zero matrix instead (possibly leading to a poor training performance).", r, e.getMessage());
result[r] = new double[this.getConfig().numShapelets()][(r + 1) * minShapeLength]; // the shapelet length at scale r is (r + 1) * minShapeLength
continue;
}
Instances clusterCentroids = kMeans.getClusterCentroids();
double[][] tmpResult = new double[clusterCentroids.numInstances()][clusterCentroids.numAttributes()];
for (int j = 0; j < tmpResult.length; j++) {
double[] instValues = clusterCentroids.get(j).toDoubleArray();
tmpResult[j] = Arrays.copyOf(instValues, tmpResult[j].length);
}
result[r] = tmpResult;
}
LOGGER.debug("Initialized S.");
return result;
}
/**
* Main function to train a <code>LearnShapeletsClassifier</code>.
*
* @throws AlgorithmException
*/
@Override
public LearnShapeletsClassifier call() throws AlgorithmException {
// Training
long beginTime = System.currentTimeMillis();
TimeSeriesDataset2 data = this.getInput();
if (data.isMultivariate()) {
throw new UnsupportedOperationException("Multivariate datasets are not supported.");
}
if (data.isEmpty()) {
throw new IllegalArgumentException("The training dataset must not be null!");
}
final double[][] dataMatrix = data.getValuesOrNull(0);
if (dataMatrix == null) {
throw new IllegalArgumentException("Timestamp matrix must be a valid 2D matrix containing the time series values for all instances!");
}
// Get occurring classes which can be used for index extraction
final int[] targetMatrix = data.getTargets();
final List<Integer> occuringClasses = TimeSeriesUtil.getClassesInDataset(data);
this.numInstances = data.getNumberOfInstances(); // I
this.q = dataMatrix[0].length; // Q
this.numClasses = occuringClasses.size(); // C
/* update knowledge about the absolute min length of the shapelets */
this.getConfig().setProperty(ILearnShapeletsLearningAlgorithmConfig.K_SHAPELETLENGTH_MIN, "" + (this.getConfig().minShapeLengthPercentage() * this.q));
final int minShapeLength = this.getConfig().minShapeletLength();
final int scaleR = this.getConfig().scaleR();
// Prepare binary classes
int[][] y = new int[this.numInstances][this.numClasses];
for (int i = 0; i < this.numInstances; i++) {
Integer instanceClass = targetMatrix[i];
y[i][occuringClasses.indexOf(instanceClass)] = 1;
}
// Estimate parameter K by the maximum number of segments
if (this.getConfig().estimateK()) {
int totalSegments = 0;
for (int r = 0; r < scaleR; r++) {
final int numberOfSegments = getNumberOfSegments(this.q, minShapeLength, r);
totalSegments += numberOfSegments * this.numInstances;
}
int k = (int) (Math.log(totalSegments) * (this.numClasses - 1));
this.getConfig().setProperty(ILearnShapeletsLearningAlgorithmConfig.K_NUMSHAPELETS, "" + (k >= 0 ? k : 1));
}
final int k = this.getConfig().numShapelets();
LOGGER.info("Parameters: k={}, learningRate={}, reg={}, r={}, minShapeLength={}, maxIter={}, Q={}, C={}", k, this.getConfig().learningRate(), this.getConfig().regularization(), scaleR, this.getConfig().minShapeletLength(),
this.getConfig().maxIterations(), this.q, this.numClasses);
// Initialization
double[][][] s;
try {
s = this.initializeS(dataMatrix);
} catch (TrainingException e) {
throw new AlgorithmException("Can not train LearnShapelets model due to error during initialization of S.", e);
}
double[][][] sHist = new double[scaleR][][];
for (int r = 0; r < scaleR; r++) {
sHist[r] = new double[s[r].length][s[r][0].length];
}
// Initialize the weights to values close to zero (deviating from the paper to
// avoid vanishing effects)
double[][][] w = new double[this.numClasses][scaleR][k];
double[][][] wHist = new double[this.numClasses][scaleR][k];
double[] w0 = new double[this.numClasses];
double[] w0Hist = new double[this.numClasses];
this.initializeWeights(w, w0);
// Perform stochastic gradient descent
LOGGER.debug("Starting training for {} iterations...", this.getConfig().maxIterations());
this.performSGD(w, wHist, w0, w0Hist, s, sHist, dataMatrix, y, beginTime, targetMatrix);
LOGGER.debug("Finished training.");
// Update model
LearnShapeletsClassifier model = this.getClassifier();
model.setS(s);
model.setW(w);
model.setW0(w0);
model.setC(this.numClasses);
return model;
}
/**
* Randomly initializes the weights around zero. As opposed to the paper, the
* approach has been changed to a different standard deviation as used in the
* reference implementation for performance reasons.
*
* @param w
* The weight matrix
* @param w0
* The bias vector
*/
public void initializeWeights(final double[][][] w, final double[] w0) {
Random rand = new Random(this.getConfig().seed());
final int scaleR = this.getConfig().scaleR();
final int numShapelets = this.getConfig().numShapelets();
for (int i = 0; i < this.numClasses; i++) {
w0[i] = EPS * rand.nextDouble() * Math.pow(-1, rand.nextInt(2));
for (int j = 0; j < scaleR; j++) {
for (int k = 0; k < numShapelets; k++) {
w[i][j][k] = EPS * rand.nextDouble() * Math.pow(-1, rand.nextInt(2));
}
}
}
}
/**
* Method performing the stochastic gradient descent to learn the weights and
* shapelets.
*
* @param w
* The weight matrix
* @param wHist
* The weight's history matrix used for smoothing learning
* @param w0
* The bias vector
* @param w0Hist
* The bias' history vector used for smoothing learning
* @param s
* The shapelet matrix
* @param sHist
* The shapelet's history matrix used for smoothing learning
* @param dataMatrix
* The data values matrix
* @param y
* The binarized target matrix
* @param beginTime
* The begin time used to check for the timeout
*/
public void performSGD(final double[][][] w, final double[][][] wHist, final double[] w0, final double[] w0Hist, final double[][][] s, final double[][][] sHist, final double[][] dataMatrix, final int[][] y, final long beginTime,
final int[] targets) {
// Define the "helper" matrices used for the gradient calculations
final int scaleR = this.getConfig().scaleR();
final int minShapeLength = this.getConfig().minShapeletLength();
final int maxIter = this.getConfig().maxIterations();
final long seed = this.getConfig().seed();
final int numShapelets = this.getConfig().numShapelets();
final double learningRate = this.getConfig().learningRate();
final double regularization = this.getConfig().regularization();
final double gamma = this.getConfig().gamma();
double[][][][] d = new double[scaleR][][][];
double[][][][] xi = new double[scaleR][][][];
double[][][][] phi = new double[scaleR][][][];
int[] numberOfSegments = new int[scaleR];
for (int r = 0; r < scaleR; r++) {
numberOfSegments[r] = getNumberOfSegments(this.q, minShapeLength, r);
d[r] = new double[this.numInstances][numShapelets][numberOfSegments[r]];
xi[r] = new double[this.numInstances][numShapelets][numberOfSegments[r]];
phi[r] = new double[this.numInstances][numShapelets][numberOfSegments[r]];
}
double[][][] psi = new double[scaleR][this.numInstances][numShapelets];
double[][][] mHat = new double[scaleR][this.numInstances][numShapelets];
double[][] theta = new double[this.numInstances][this.numClasses];
List<Integer> indices = IntStream.range(0, this.numInstances).boxed().collect(Collectors.toList());
// Stochastic gradient descent
LOGGER.debug("Starting training for {} iterations...", maxIter);
// Initialize velocities used within training with zeros
double[][][] velocitiesW = new double[w.length][w[0].length][w[0][0].length];
double[] velocitiesW0 = new double[w0.length];
double[][][] velocitiesS = new double[s.length][][];
for (int i = 0; i < s.length; i++) {
velocitiesS[i] = new double[s[i].length][];
for (int j = 0; j < s[i].length; j++) {
velocitiesS[i][j] = new double[s[i][j].length];
}
}
for (int it = 0; it < maxIter; it++) {
// Shuffle instances
if (this.useInstanceReordering) {
indices = this.shuffleAccordingToAlternatingClassScheme(indices, targets, new Random(seed + it));
} else {
Collections.shuffle(indices, new Random(seed + it));
}
for (int idx = 0; idx < this.numInstances; idx++) {
int i = indices.get(idx);
// Pre-compute terms
for (int r = 0; r < scaleR; r++) {
long kBound = s[r].length;
for (int k = 0; k < kBound; k++) { // this.K
int jr = numberOfSegments[r];
for (int j = 0; j < jr; j++) {
double newDValue = calculateD(s, minShapeLength, r, dataMatrix[i], k, j);
d[r][i][k][j] = newDValue;
newDValue = Math.exp(ALPHA * newDValue);
xi[r][i][k][j] = newDValue;
}
double newPsiValue = 0;
double newMHatValue = 0;
for (int j = 0; j < jr; j++) {
newPsiValue += xi[r][i][k][j];
newMHatValue += d[r][i][k][j] * xi[r][i][k][j];
}
psi[r][i][k] = newPsiValue;
newMHatValue /= psi[r][i][k];
mHat[r][i][k] = newMHatValue;
}
}
for (int c = 0; c < this.numClasses; c++) {
double newThetaValue = 0;
for (int r = 0; r < scaleR; r++) {
for (int k = 0; k < numShapelets; k++) {
newThetaValue += mHat[r][i][k] * w[c][r][k];
}
}
theta[i][c] = y[i][c] - MathUtil.sigmoid(newThetaValue);
}
// Learn shapelets and classification weights
for (int c = 0; c < this.numClasses; c++) {
double gradw0 = theta[i][c];
for (int r = 0; r < scaleR; r++) {
for (int k = 0; k < s[r].length; k++) { // this differs from paper: this.K instead of
// shapelet length
double wStep = (-1d) * theta[i][c] * mHat[r][i][k] + 2d * regularization / (this.numInstances) * w[c][r][k];
velocitiesW[c][r][k] = gamma * velocitiesW[c][r][k] + learningRate * wStep;
wHist[c][r][k] += wStep * wStep;
w[c][r][k] -= (velocitiesW[c][r][k] / Math.sqrt(wHist[c][r][k] + EPS));
int jr = numberOfSegments[r];
double phiDenominator = 1d / ((r + 1d) * minShapeLength * psi[r][i][k]);
double[] distDiff = new double[jr];
for (int j = 0; j < jr; j++) {
distDiff[j] = xi[r][i][k][j] * (1d + ALPHA * (d[r][i][k][j] - mHat[r][i][k]));
}
for (int l = 0; l < (r + 1) * minShapeLength; l++) {
double shapeletDiff = 0;
for (int j = 0; j < jr; j++) {
shapeletDiff += distDiff[j] * (s[r][k][l] - dataMatrix[i][j + l]);
}
double sStep = (-1d) * gradw0 * shapeletDiff * w[c][r][k] * phiDenominator;
velocitiesS[r][k][l] = gamma * velocitiesS[r][k][l] + learningRate * sStep;
sHist[r][k][l] += sStep * sStep;
s[r][k][l] -= velocitiesS[r][k][l] / Math.sqrt(sHist[r][k][l] + EPS);
}
}
}
velocitiesW0[c] = gamma * velocitiesW0[c] + learningRate * gradw0;
w0Hist[c] += gradw0 * gradw0;
w0[c] += velocitiesW0[c] / Math.sqrt(w0Hist[c] + EPS);
}
}
if (it % 10 == 0) {
LOGGER.debug("Iteration {}/{}", it, maxIter);
long currTime = System.currentTimeMillis();
if (currTime - beginTime > this.timeout.milliseconds()) {
LOGGER.debug("Stopping training due to timeout.");
break;
}
}
}
}
/**
* Shuffles the data in a class alternating scheme. That means that at first,
* all indices per class are shuffled. Then, the randomized indices are selected
* in a round robin fashion among the classes.
*
* @param instanceIndices
* The instance indices the original dataset
* @param targets
* The targets of each instance
* @param random
* Random object used for randomized shuffling
* @return Returns the list of the shuffled indices of the alternating class
* scheme. Each index of <code>instanceIndices</code> is only used once
* (without replacement)
*/
public List<Integer> shuffleAccordingToAlternatingClassScheme(final List<Integer> instanceIndices, final int[] targets, final Random random) {
if (instanceIndices.size() != targets.length) {
throw new IllegalArgumentException("The number of instances must be equal to the number of available target values!");
}
// Extract indices per class
Map<Integer, List<Integer>> indicesPerClass = new HashMap<>();
for (int i = 0; i < instanceIndices.size(); i++) {
int classIdx = targets[i];
if (!indicesPerClass.containsKey(classIdx)) {
indicesPerClass.put(classIdx, new ArrayList<>());
}
indicesPerClass.get(classIdx).add(i);
}
// Shuffle all class indices
List<Iterator<Integer>> iteratorList = new ArrayList<>();
for (List<Integer> list : indicesPerClass.values()) {
Collections.shuffle(list, random);
iteratorList.add(list.iterator());
}
// Add indices to result list based on the alternating scheme
List<Integer> resultList = new ArrayList<>();
Iterator<Iterator<Integer>> roundRobinIt = Iterables.cycle(iteratorList).iterator();
for (int i = 0; i < instanceIndices.size(); i++) {
int tmpCounter = 0;
while (roundRobinIt.hasNext() && tmpCounter < this.numClasses) {
Iterator<Integer> tmpIt = roundRobinIt.next();
if (!tmpIt.hasNext()) {
tmpCounter++;
} else {
resultList.add(tmpIt.next());
break;
}
}
}
return resultList;
}
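// Worked example for shuffleAccordingToAlternatingClassScheme (illustrative note): for
// targets {0, 0, 1, 1, 2}, the per-class index lists are first shuffled, e.g.
// class 0 -> [1, 0], class 1 -> [3, 2], class 2 -> [4]; interleaving them round-robin
// then yields [1, 3, 4, 0, 2].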
/**
* Function to calculate the soft-minimum function which is a differentiable
* approximation of the minimum distance matrix given in the paper in section
* 3.1.4.
*
* @param s
* The tensor storing the shapelets for different scales
* @param minShapeLength
* The minimum shape length
* @param r
* The number of scale to look at
* @param instance
* The instance time series vector
* @param k
* The index of the shapelet to look at
* @param Q
* The number of attributes (time series length)
* @param alpha
* Parameter to control the desired precision of the M_hat
* approximation
* @return Returns the approximation of the minimum distance of the instance and
* the shapelet given by the parameters <code>r</code> and
* <code>k</code>.
*/
public static double calculateMHat(final double[][][] s, final int minShapeLength, final int r, final double[] instance, final int k, final int Q, final double alpha) {
double numerator = 0;
double denominator = 0;
for (int j = 0; j < getNumberOfSegments(Q, minShapeLength, r); j++) {
double d = calculateD(s, minShapeLength, r, instance, k, j);
double expD = Math.exp(alpha * d);
numerator += d * expD;
denominator += expD;
}
denominator = denominator == 0d ? EPS : denominator;
return numerator / denominator;
}
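// In formula form, calculateMHat computes the soft minimum of Section 3.1.4 of the paper:
// M_hat = sum_j D_j * exp(alpha * D_j) / sum_j exp(alpha * D_j), where D_j is the
// segment distance computed by calculateD below.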
/**
* Function to calculate the distance between the <code>j</code>-th segment of
* the given time series <code>instance</code> and the <code>k</code>-th
* shapelet stored in the shapelet tensor <code>s</code>.
*
* @param s
* The tensor storing the shapelets for different scales
* @param minShapeLength
* The minimum shape length
* @param r
The index of the scale to look at
* @param instance
* The instance time series vector
* @param k
* The index of the shapelet to look at
* @param j
* The segment of the instance time series to look at
* @return Returns the mean squared distance between the <code>j</code>-th segment
* of the instance and the shapelet given by the parameters <code>r</code>,
* <code>k</code> and <code>j</code>.
*/
public static double calculateD(final double[][][] s, final int minShapeLength, final int r, final double[] instance, final int k, final int j) {
double result = 0;
for (int l = 0; l < (r + 1) * minShapeLength; l++) {
result += Math.pow(instance[j + l] - s[r][k][l], 2);
}
return result / ((r + 1) * minShapeLength);
}
/**
* {@inheritDoc}
*/
@Override
public IAlgorithmEvent nextWithException() {
throw new UnsupportedOperationException("The operation to be performed is not supported.");
}
/**
* Returns the number of segments which are available for an instance with
* <code>Q</code> attributes for a given scale <code>r</code> and a minimum
* shape length <code>minShapeLength</code>.
*
* @param Q
* Number of attributes of an instance
* @param minShapeLength
* Minimum shapelet length
* @param r
* Scale to be looked at
* @return Returns the number of segments which can be looked at for an instance
* with <code>Q</code> time series attributes
*/
public static int getNumberOfSegments(final int Q, final int minShapeLength, final int r) {
return Q - (r + 1) * minShapeLength;
}
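/*
 * Worked example (illustration only): for a time series of length Q = 100, a minimum
 * shapelet length of 10 and scale r = 1, a shapelet spans (r + 1) * 10 = 20 values, so
 * 100 - 20 = 80 sliding-window segments can be compared against each shapelet.
 */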
/**
* {@inheritDoc}
*/
@Override
public ILearnShapeletsLearningAlgorithmConfig getConfig() {
return (ILearnShapeletsLearningAlgorithmConfig) super.getConfig();
}
/**
* @return the useInstanceReordering
*/
public boolean isUseInstanceReordering() {
return this.useInstanceReordering;
}
/**
* @param useInstanceReordering
* the useInstanceReordering to set
*/
public void setUseInstanceReordering(final boolean useInstanceReordering) {
this.useInstanceReordering = useInstanceReordering;
}
/**
* @return the c
*/
public int getC() {
return this.numClasses;
}
/**
* @param c
* the c to set
*/
public void setC(final int c) {
this.numClasses = c;
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/shapelets/package-info.java
|
/**
* This package contains implementations of shapelet-based classifiers and
* their training algorithms.
*
* @author Julian Lienen
*/
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.shapelets;
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/trees/AccessibleRandomTree.java
|
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees;
import java.util.Random;
import org.api4.java.ai.ml.core.exception.PredictionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import weka.classifiers.trees.RandomTree;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Utils;
/**
* Random Tree extension providing leaf node information of the constructed
* tree.
*
*/
public class AccessibleRandomTree extends RandomTree {
/**
* Serial version UID.
*/
private static final long serialVersionUID = 1L;
/**
* Number of constructed leaf nodes.
*/
private int nosLeafNodes;
/**
* Last leaf node in the prediction.
*/
private int lastNode = 0;
private static final Logger logger = LoggerFactory.getLogger(AccessibleRandomTree.class);
/**
* Internal tree object providing access to leaf node information.
*/
protected AccessibleTree tree = null;
@Override
public double[] distributionForInstance(final Instance instance) throws Exception {
if (this.m_zeroR != null) {
return this.m_zeroR.distributionForInstance(instance);
} else {
return this.tree.distributionForInstance(instance);
}
}
@Override
public void buildClassifier(Instances data) throws Exception {
this.nosLeafNodes = 0;
if (this.m_computeImpurityDecreases) {
this.m_impurityDecreasees = new double[data.numAttributes()][2];
}
// Make sure K value is in range
if (this.m_KValue > data.numAttributes() - 1) {
this.m_KValue = data.numAttributes() - 1;
}
if (this.m_KValue < 1) {
this.m_KValue = (int) Utils.log2(data.numAttributes() - 1.0) + 1;
}
// can classifier handle the data?
this.getCapabilities().testWithFail(data);
// remove instances with missing class
data = new Instances(data);
data.deleteWithMissingClass();
// only class? -> build ZeroR model
if (data.numAttributes() == 1) {
logger.error("Cannot build model (only class attribute present in data!), using ZeroR model instead!");
this.m_zeroR = new weka.classifiers.rules.ZeroR();
this.m_zeroR.buildClassifier(data);
return;
} else {
this.m_zeroR = null;
}
// Figure out appropriate datasets
Instances train = null;
Instances backfit = null;
Random rand = data.getRandomNumberGenerator(this.m_randomSeed);
if (this.m_NumFolds <= 0) {
train = data;
} else {
data.randomize(rand);
data.stratify(this.m_NumFolds);
train = data.trainCV(this.m_NumFolds, 1, rand);
backfit = data.testCV(this.m_NumFolds, 1);
}
// Create the attribute indices window
int[] attIndicesWindow = new int[data.numAttributes() - 1];
int j = 0;
for (int i = 0; i < attIndicesWindow.length; i++) {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException("Thread got interrupted; stopping WEKA training.");
}
if (j == data.classIndex()) {
j++; // do not include the class
}
attIndicesWindow[i] = j++;
}
double totalWeight = 0;
double totalSumSquared = 0;
// Compute initial class counts
double[] classProbs = new double[train.numClasses()];
for (int i = 0; i < train.numInstances(); i++) {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException("Thread got interrupted; stopping WEKA training.");
}
Instance inst = train.instance(i);
if (data.classAttribute().isNominal()) {
classProbs[(int) inst.classValue()] += inst.weight();
totalWeight += inst.weight();
} else {
classProbs[0] += inst.classValue() * inst.weight();
totalSumSquared += inst.classValue() * inst.classValue() * inst.weight();
totalWeight += inst.weight();
}
}
double trainVariance = 0;
if (totalWeight == 0) {
throw new IllegalStateException("Total weight must not be 0 at this point.");
}
if (data.classAttribute().isNumeric()) {
trainVariance = RandomTree.singleVariance(classProbs[0], totalSumSquared, totalWeight) / totalWeight;
classProbs[0] /= totalWeight;
}
// Build tree
this.tree = new AccessibleTree();
this.m_Info = new Instances(data, 0);
this.tree.buildTree(train, classProbs, attIndicesWindow, totalWeight, rand, 0, this.m_MinVarianceProp * trainVariance);
// Backfit if required
if (backfit != null) {
this.tree.backfitData(backfit);
}
}
/**
* @return the m_Tree
*/
public AccessibleTree getMTree() {
return this.tree;
}
public class AccessibleTree extends Tree {
/**
* Default generated serial version UID.
*/
private static final long serialVersionUID = 1L;
/** The subtrees appended to this tree. */
protected AccessibleTree[] successors;
/**
* ID of the last leaf node in the prediction.
*/
private int leafNodeID;
@Override
protected void buildTree(final Instances data, final double[] classProbs, final int[] attIndicesWindow, double totalWeight, final Random random, final int depth, final double minVariance) throws Exception {
// Make leaf if there are no training instances
if (data.numInstances() == 0) {
this.m_Attribute = -1;
this.m_ClassDistribution = null;
this.m_Prop = null;
if (data.classAttribute().isNumeric()) {
this.m_Distribution = new double[2];
}
this.leafNodeID = AccessibleRandomTree.this.nosLeafNodes++;
return;
}
double priorVar = 0;
if (data.classAttribute().isNumeric()) {
// Compute prior variance
double totalSum = 0;
double totalSumSquared = 0;
double totalSumOfWeights = 0;
for (int i = 0; i < data.numInstances(); i++) {
Instance inst = data.instance(i);
totalSum += inst.classValue() * inst.weight();
totalSumSquared += inst.classValue() * inst.classValue() * inst.weight();
totalSumOfWeights += inst.weight();
}
priorVar = AccessibleRandomTree.singleVariance(totalSum, totalSumSquared, totalSumOfWeights);
}
// Check if node doesn't contain enough instances or is pure or maximum depth reached
if (data.classAttribute().isNominal()) {
totalWeight = Utils.sum(classProbs);
}
if (totalWeight < 2 * AccessibleRandomTree.this.m_MinNum ||
// Nominal case
(data.classAttribute().isNominal() && Utils.eq(classProbs[Utils.maxIndex(classProbs)], Utils.sum(classProbs)))
||
// Numeric case
(data.classAttribute().isNumeric() && priorVar / totalWeight < minVariance)
||
// check tree depth
((AccessibleRandomTree.this.getMaxDepth() > 0) && (depth >= AccessibleRandomTree.this.getMaxDepth()))) {
// Make leaf
this.m_Attribute = -1;
this.m_ClassDistribution = classProbs.clone();
if (data.classAttribute().isNumeric()) {
this.m_Distribution = new double[2];
this.m_Distribution[0] = priorVar;
this.m_Distribution[1] = totalWeight;
}
this.leafNodeID = AccessibleRandomTree.this.nosLeafNodes++;
this.m_Prop = null;
return;
}
// Compute class distributions and value of splitting
// criterion for each attribute
double val = -Double.MAX_VALUE;
double split = -Double.MAX_VALUE;
double[][] bestDists = null;
double[] bestProps = null;
int bestIndex = 0;
// Handles to get arrays out of distribution method
double[][] props = new double[1][0];
double[][][] dists = new double[1][0][0];
double[][] totalSubsetWeights = new double[data.numAttributes()][0];
// Investigate K random attributes
int attIndex = 0;
int windowSize = attIndicesWindow.length;
int k = AccessibleRandomTree.this.m_KValue;
boolean gainFound = false;
double[] tempNumericVals = new double[data.numAttributes()];
while ((windowSize > 0) && (k-- > 0 || !gainFound)) {
int chosenIndex = random.nextInt(windowSize);
attIndex = attIndicesWindow[chosenIndex];
// shift chosen attIndex out of window
attIndicesWindow[chosenIndex] = attIndicesWindow[windowSize - 1];
attIndicesWindow[windowSize - 1] = attIndex;
windowSize--;
double currSplit = data.classAttribute().isNominal() ? this.distribution(props, dists, attIndex, data) : this.numericDistribution(props, dists, attIndex, totalSubsetWeights, data, tempNumericVals);
double currVal = data.classAttribute().isNominal() ? this.gain(dists[0], this.priorVal(dists[0])) : tempNumericVals[attIndex];
if (Utils.gr(currVal, 0)) {
gainFound = true;
}
if ((currVal > val) || ((!AccessibleRandomTree.this.getBreakTiesRandomly()) && (currVal == val) && (attIndex < bestIndex))) {
val = currVal;
bestIndex = attIndex;
split = currSplit;
bestProps = props[0];
bestDists = dists[0];
}
}
// Find best attribute
this.m_Attribute = bestIndex;
// Any useful split found?
if (Utils.gr(val, 0)) {
// Build subtrees
this.m_SplitPoint = split;
this.m_Prop = bestProps;
Instances[] subsets = this.splitData(data);
this.successors = new AccessibleTree[bestDists.length];
double[] attTotalSubsetWeights = totalSubsetWeights[bestIndex];
for (int i = 0; i < bestDists.length; i++) {
this.successors[i] = new AccessibleTree();
this.successors[i].buildTree(subsets[i], bestDists[i], attIndicesWindow, data.classAttribute().isNominal() ? 0 : attTotalSubsetWeights[i], random, depth + 1, minVariance);
}
// If all successors are non-empty, we don't need to store the class
// distribution
boolean emptySuccessor = false;
for (int i = 0; i < subsets.length; i++) {
if (this.successors[i].m_ClassDistribution == null) {
emptySuccessor = true;
break;
}
}
if (emptySuccessor) {
this.m_ClassDistribution = classProbs.clone();
}
} else {
// Make leaf
this.m_Attribute = -1;
this.m_ClassDistribution = classProbs.clone();
if (data.classAttribute().isNumeric()) {
this.m_Distribution = new double[2];
this.m_Distribution[0] = priorVar;
this.m_Distribution[1] = totalWeight;
}
}
}
@Override
public double[] distributionForInstance(final Instance instance) throws Exception {
double[] returnedDist = null;
if (this.m_Attribute > -1) {
// Node is not a leaf
if (instance.isMissing(this.m_Attribute)) {
// Value is missing
returnedDist = new double[AccessibleRandomTree.this.m_Info.numClasses()];
// Split instance up
for (int i = 0; i < this.successors.length; i++) {
double[] help = this.successors[i].distributionForInstance(instance);
if (help != null) {
for (int j = 0; j < help.length; j++) {
returnedDist[j] += this.m_Prop[i] * help[j];
}
}
}
} else if (AccessibleRandomTree.this.m_Info.attribute(this.m_Attribute).isNominal()) {
// For nominal attributes
returnedDist = this.successors[(int) instance.value(this.m_Attribute)].distributionForInstance(instance);
} else {
// For numeric attributes
if (instance.value(this.m_Attribute) < this.m_SplitPoint) {
returnedDist = this.successors[0].distributionForInstance(instance);
} else {
returnedDist = this.successors[1].distributionForInstance(instance);
}
}
}
// Node is a leaf or successor is empty?
if ((this.m_Attribute == -1) || (returnedDist == null)) {
AccessibleRandomTree.this.lastNode = this.leafNodeID;
// Is node empty?
if (this.m_ClassDistribution == null) {
if (AccessibleRandomTree.this.getAllowUnclassifiedInstances()) {
double[] result = new double[AccessibleRandomTree.this.m_Info.numClasses()];
if (AccessibleRandomTree.this.m_Info.classAttribute().isNumeric()) {
result[0] = Utils.missingValue();
}
return result;
} else {
throw new PredictionException("Could not obtain a prediction.");
}
}
// Else return normalized distribution
double[] normalizedDistribution = this.m_ClassDistribution.clone();
if (AccessibleRandomTree.this.m_Info.classAttribute().isNominal()) {
Utils.normalize(normalizedDistribution);
}
return normalizedDistribution;
} else {
return returnedDist;
}
}
public AccessibleTree[] getSuccessors() {
return this.successors;
}
public int getAttribute() {
return super.getM_Attribute();
}
public double getSplitPoint() {
return super.getM_SplitPoint();
}
}
/**
* @return the nosLeafNodes
*/
public int getNosLeafNodes() {
return this.nosLeafNodes;
}
/**
* @return the lastNode
*/
public int getLastNode() {
return this.lastNode;
}
/**
* Computes the variance for a single set
*
* @param s
* @param sS
* @param weight
* the weight
* @return the variance
*/
protected static double singleVariance(final double s, final double sS, final double weight) {
return sS - ((s * s) / weight);
}
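/*
 * Worked example (illustration only): for class values {1, 2, 3} with unit weights,
 * s = 6, sS = 14 and weight = 3, hence singleVariance = 14 - 36 / 3 = 2, which is the
 * sum of squared deviations from the mean (note that the result is not divided by the
 * weight here; callers normalize where needed).
 */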
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/trees/LearnPatternSimilarityClassifier.java
|
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees;
import java.util.ArrayList;
import java.util.List;
import org.aeonbits.owner.ConfigCache;
import org.api4.java.ai.ml.core.exception.PredictionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.basic.IOwnerBasedRandomizedAlgorithmConfig;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.learner.ASimplifiedTSClassifier;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.util.MathUtil;
import ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees.LearnPatternSimilarityLearningAlgorithm.IPatternSimilarityConfig;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
/**
* Class representing the Learn Pattern Similarity classifier as described in
* Baydogan, Mustafa & Runger, George. (2015). Time series representation and
* similarity based on local autopatterns. Data Mining and Knowledge Discovery.
* 30. 1-34. 10.1007/s10618-015-0425-y.
*
* This classifier currently only supports univariate time series prediction.
*
* @author Julian Lienen
*
*/
public class LearnPatternSimilarityClassifier extends ASimplifiedTSClassifier<Integer> {
/**
* Log4j logger
*/
private static final Logger LOGGER = LoggerFactory.getLogger(LearnPatternSimilarityClassifier.class);
/**
* Segments (storing the start indexes) used for feature generation. The
* segments are randomly generated in the training phase.
*/
private int[][] segments;
/**
* Segment differences (storing the start indexes) used for feature generation.
* The segments are randomly generated in the training phase.
*/
private int[][] segmentsDifference;
/**
* The segments interval lengths used for each tree.
*/
private int[] lengthPerTree;
/**
* The class attribute index per tree (as described in chapter 3.1 of the
* original paper)
*/
private int[] classAttIndexPerTree;
/**
* The random regression model trees used for prediction.
*/
private AccessibleRandomTree[] trees;
/**
* The predicted leaf nodes for each instance per segment for each tree used
* within the 1NN search to predict the class values.
*/
private int[][][] trainLeafNodes;
/**
* The targets of the training instances which are used within the 1NN search to
* predict the class values.
*/
private int[] trainTargets;
/**
* Attributes used for the generation of Weka instances to use the internal Weka
* models.
*/
private List<Attribute> attributes;
private final IPatternSimilarityConfig config;
/**
* Standard constructor.
*
* @param seed
* Seed used for randomized operations
* @param numTrees
* Number of trees being trained
* @param maxTreeDepth
* Maximum depth of the trained trees
* @param numSegments
* Number of segments used per tree for feature generation
*/
public LearnPatternSimilarityClassifier(final int seed, final int numTrees, final int maxTreeDepth, final int numSegments) {
this.config = ConfigCache.getOrCreate(IPatternSimilarityConfig.class);
this.config.setProperty(IOwnerBasedRandomizedAlgorithmConfig.K_SEED, "" + seed);
this.config.setProperty(IPatternSimilarityConfig.K_NUMTREES, "" + numTrees);
this.config.setProperty(IPatternSimilarityConfig.K_MAXDEPTH, "" + maxTreeDepth);
this.config.setProperty(IPatternSimilarityConfig.K_NUMSEGMENTS, "" + numSegments);
}
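/*
 * Minimal usage sketch (hedged; the exact training entry point in the surrounding
 * framework may differ, and the variable names trainDataset and testInstance are
 * hypothetical):
 *
 * LearnPatternSimilarityClassifier lps = new LearnPatternSimilarityClassifier(42, 200, 6, 2);
 * lps.getLearningAlgorithm(trainDataset).call(); // trains the forest and the leaf-count index
 * Integer prediction = lps.predict(testInstance); // 1NN search over the training leaf counts
 */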
/**
* Predicts the class based on segment and segment difference features generated
* from <code>segments</code> and <code>segmentsDifference</code>. The induced
* instances are propagated to the forest of {@link AccessibleRandomTree}s
* <code>trees</code>. The predicted leaf nodes are used within a 1NN search on
* the training leaf nodes to find the nearest instance, whose class is taken as
* the prediction value.
*
* @param univInstance
* Univariate instance to be predicted
*
*/
@Override
public Integer predict(final double[] univInstance) throws PredictionException {
if (!this.isTrained()) {
throw new PredictionException("Model has not been built before!");
}
if (univInstance == null) {
throw new IllegalArgumentException("Instance to be predicted must not be null or empty!");
}
int[][] leafNodeCounts = new int[this.trees.length][];
for (int i = 0; i < this.trees.length; i++) {
// Generate subseries features
Instances seqInstances = new Instances("SeqFeatures", new ArrayList<>(this.attributes), this.lengthPerTree[i]);
for (int len = 0; len < this.lengthPerTree[i]; len++) {
Instance instance = LearnPatternSimilarityLearningAlgorithm.generateSubseriesFeatureInstance(univInstance, this.segments[i], this.segmentsDifference[i], len);
seqInstances.add(instance);
}
seqInstances.setClassIndex(this.classAttIndexPerTree[i]);
leafNodeCounts[i] = new int[this.trees[i].getNosLeafNodes()];
for (int inst = 0; inst < seqInstances.numInstances(); inst++) {
LearnPatternSimilarityLearningAlgorithm.collectLeafCounts(leafNodeCounts[i], seqInstances.get(inst), this.trees[i]);
}
}
return this.trainTargets[this.findNearestInstanceIndex(leafNodeCounts)];
}
/**
* Performs a simple nearest neighbor search on the stored
* <code>trainLeafNodes</code> for the given <code>leafNodeCounts</code> using
* Manhattan distance.
*
* @param leafNodeCounts
* Leaf node counts induced during the prediction phase
* @return Returns the index of the nearest neighbor instance
*/
public int findNearestInstanceIndex(final int[][] leafNodeCounts) {
double minDistance = Double.MAX_VALUE;
int nearestInstIdx = 0;
for (int inst = 0; inst < this.trainLeafNodes.length; inst++) {
double tmpDist = 0;
for (int i = 0; i < this.trainLeafNodes[inst].length; i++) {
tmpDist += MathUtil.intManhattanDistance(this.trainLeafNodes[inst][i], leafNodeCounts[i]);
}
if (tmpDist < minDistance) {
minDistance = tmpDist;
nearestInstIdx = inst;
}
}
return nearestInstIdx;
}
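/*
 * Illustration (added for clarity): with training leaf counts [[2, 0], [1]] and
 * [[0, 2], [3]] for two instances, a query [[2, 1], [1]] has Manhattan distances
 * (0 + 1) + 0 = 1 and (2 + 1) + 2 = 5, respectively, so index 0 is returned as the
 * nearest neighbor.
 */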
/**
* {@inheritDoc}
*/
@Override
public Integer predict(final List<double[]> multivInstance) throws PredictionException {
LOGGER.warn("Dataset to be predicted is multivariate but only first time series (univariate) will be considered.");
return this.predict(multivInstance.get(0));
}
/**
* {@inheritDoc}
*/
@Override
public List<Integer> predict(final TimeSeriesDataset2 dataset) throws PredictionException {
double[][] data = this.checkWhetherPredictionIsPossible(dataset);
if (dataset.isMultivariate()) {
throw new UnsupportedOperationException("Multivariate instances are not supported yet.");
}
List<Integer> predictions = new ArrayList<>();
LOGGER.debug("Starting prediction...");
for (int i = 0; i < data.length; i++) {
predictions.add(this.predict(data[i]));
}
LOGGER.debug("Finished prediction.");
return predictions;
}
/**
* @return the segments
*/
public int[][] getSegments() {
return this.segments;
}
/**
* @param segments
* the segments to set
*/
public void setSegments(final int[][] segments) {
this.segments = segments;
}
/**
* @return the segmentsDifference
*/
public int[][] getSegmentsDifference() {
return this.segmentsDifference;
}
/**
* @param segmentsDifference
* the segmentsDifference to set
*/
public void setSegmentsDifference(final int[][] segmentsDifference) {
this.segmentsDifference = segmentsDifference;
}
/**
* @return the lengthPerTree
*/
public int[] getLengthPerTree() {
return this.lengthPerTree;
}
/**
* @param lengthPerTree
* the lengthPerTree to set
*/
public void setLengthPerTree(final int[] lengthPerTree) {
this.lengthPerTree = lengthPerTree;
}
/**
* @return the classAttIndexPerTree
*/
public int[] getClassAttIndexPerTree() {
return this.classAttIndexPerTree;
}
/**
* @param classAttIndexPerTree
* the classAttIndexPerTree to set
*/
public void setClassAttIndexPerTree(final int[] classAttIndexPerTree) {
this.classAttIndexPerTree = classAttIndexPerTree;
}
/**
* @return the trees
*/
public AccessibleRandomTree[] getTrees() {
return this.trees;
}
/**
* @param trees
* the trees to set
*/
public void setTrees(final AccessibleRandomTree[] trees) {
this.trees = trees;
}
/**
* @return the trainLeafNodes
*/
public int[][][] getTrainLeafNodes() {
return this.trainLeafNodes;
}
/**
* @param trainLeafNodes
* the trainLeafNodes to set
*/
public void setTrainLeafNodes(final int[][][] trainLeafNodes) {
this.trainLeafNodes = trainLeafNodes;
}
/**
* @return the trainTargets
*/
public int[] getTrainTargets() {
return this.trainTargets;
}
/**
* @param trainTargets
* the trainTargets to set
*/
public void setTrainTargets(final int[] trainTargets) {
this.trainTargets = trainTargets;
}
/**
* @return the attributes
*/
public List<Attribute> getAttributes() {
return this.attributes;
}
/**
* @param attributes
* the attributes to set
*/
public void setAttributes(final List<Attribute> attributes) {
this.attributes = attributes;
}
@Override
public LearnPatternSimilarityLearningAlgorithm getLearningAlgorithm(final TimeSeriesDataset2 dataset) {
return new LearnPatternSimilarityLearningAlgorithm(this.config, this, dataset);
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/trees/LearnPatternSimilarityLearningAlgorithm.java
|
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.api4.java.ai.ml.core.exception.PredictionException;
import org.api4.java.algorithm.exceptions.AlgorithmException;
import org.api4.java.algorithm.exceptions.AlgorithmTimeoutedException;
import ai.libs.jaicore.basic.IOwnerBasedRandomizedAlgorithmConfig;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.learner.ASimplifiedTSCLearningAlgorithm;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
/**
* Algorithm training a {@link LearnPatternSimilarityClassifier} as described in
* Baydogan, Mustafa & Runger, George. (2015). Time series representation and
* similarity based on local autopatterns. Data Mining and Knowledge Discovery.
* 30. 1-34. 10.1007/s10618-015-0425-y.
*
* @author Julian Lienen
*
*/
public class LearnPatternSimilarityLearningAlgorithm extends ASimplifiedTSCLearningAlgorithm<Integer, LearnPatternSimilarityClassifier> {
public interface IPatternSimilarityConfig extends IOwnerBasedRandomizedAlgorithmConfig {
public static final String K_NUMTREES = "numtrees";
public static final String K_MAXDEPTH = "maxdepth";
public static final String K_NUMSEGMENTS = "numsegments";
/**
* Number of trees to be trained.
*/
@Key(K_NUMTREES)
@DefaultValue("-1")
public int numTrees();
/**
* Maximum depth of the trained trees.
*/
@Key(K_MAXDEPTH)
@DefaultValue("-1")
public int maxDepth();
/**
* Number of segments used for feature generation for each tree.
*/
@Key(K_NUMSEGMENTS)
@DefaultValue("1")
public int numSegments();
}
/**
* Standard constructor.
*
* @param config
* The configuration specifying seed, number of trees, maximum tree depth and number of segments
* @param model
* The classifier instance to be trained
* @param dataset
* The dataset used for training
*/
public LearnPatternSimilarityLearningAlgorithm(final IPatternSimilarityConfig config, final LearnPatternSimilarityClassifier model, final TimeSeriesDataset2 dataset) {
super(config, model, dataset);
}
/**
* Training procedure for a {@link LearnPatternSimilarityClassifier}. At first,
* it randomly generates subsequences (segments) and subsequence difference
* locations used for feature generation. The generated features are used to
* train a forest of {@link AccessibleRandomTree} models. The predicted leaf
* nodes are stored in the model and used within a 1NN search against the leaf
* node matrix generated analogously at prediction time.
* @throws AlgorithmException
* @throws AlgorithmTimeoutedException
*/
@Override
public LearnPatternSimilarityClassifier call() throws AlgorithmException, AlgorithmTimeoutedException {
// Training procedure
long beginTimeMs = System.currentTimeMillis();
TimeSeriesDataset2 data = this.getInput();
if (data == null || data.isEmpty()) {
throw new IllegalStateException("The time series input data must not be null or empty!");
}
final double[][] dataMatrix = data.getValuesOrNull(0);
if (dataMatrix == null) {
throw new IllegalArgumentException("Value matrix must be a valid 2D matrix containing the time series values for all instances!");
}
final int[] targetMatrix = data.getTargets();
final int timeSeriesLength = dataMatrix[0].length;
int minLength = (int) (0.1d * timeSeriesLength);
int maxLength = (int) (0.9d * timeSeriesLength);
Random random = new Random(this.getConfig().seed());
int numTrees = this.getConfig().numTrees();
int numSegments = this.getConfig().numSegments();
final int[][] segments = new int[numTrees][numSegments]; // Refers to matrix A in tsc algorithm
// description
final int[][] segmentsDifference = new int[numTrees][numSegments]; // Refers to matrix B in tsc
// algorithm description
final int[] lengthPerTree = new int[numTrees];
final int[] classAttIndex = new int[numTrees];
final AccessibleRandomTree[] trees = new AccessibleRandomTree[numTrees];
final int[] numLeavesPerTree = new int[numTrees];
final int[][][] leafNodeCounts = new int[data.getNumberOfInstances()][numTrees][];
ArrayList<Attribute> attributes = new ArrayList<>();
for (int j = 0; j < 2 * numSegments; j++) {
attributes.add(new Attribute("val" + j));
}
for (int i = 0; i < numTrees; i++) {
if ((System.currentTimeMillis() - beginTimeMs) > this.getTimeout().milliseconds()) {
throw new AlgorithmTimeoutedException((System.currentTimeMillis() - beginTimeMs) - this.getTimeout().milliseconds());
}
// Generate subseries length
lengthPerTree[i] = random.nextInt(maxLength - minLength) + minLength;
// Generate random subseries locations as described in chapter 3.1 and random
// subseries difference locations as described in chapter 3.4
this.generateSegmentsAndDifferencesForTree(segments[i], segmentsDifference[i], lengthPerTree[i], timeSeriesLength, random);
// Generate subseries features
Instances seqInstances = generateSubseriesFeaturesInstances(attributes, lengthPerTree[i], segments[i], segmentsDifference[i], dataMatrix);
classAttIndex[i] = random.nextInt(attributes.size());
seqInstances.setClassIndex(classAttIndex[i]);
trees[i] = this.initializeRegressionTree(seqInstances.numInstances());
try {
trees[i].buildClassifier(seqInstances);
} catch (Exception e) {
throw new AlgorithmException("Could not build tree in iteration " + i + " due to the following exception: " + e.getMessage());
}
numLeavesPerTree[i] = trees[i].getNosLeafNodes();
for (int inst = 0; inst < data.getNumberOfInstances(); inst++) {
leafNodeCounts[inst][i] = new int[numLeavesPerTree[i]];
for (int len = 0; len < lengthPerTree[i]; len++) {
int instanceIdx = inst * lengthPerTree[i] + len;
try {
collectLeafCounts(leafNodeCounts[inst][i], seqInstances.get(instanceIdx), trees[i]);
} catch (PredictionException e1) {
throw new AlgorithmException("Could not prediction using the tree in iteration " + i + " due to the following exception: " + e1.getMessage());
}
}
}
}
// Update model
LearnPatternSimilarityClassifier model = this.getClassifier();
model.setSegments(segments);
model.setSegmentsDifference(segmentsDifference);
model.setLengthPerTree(lengthPerTree);
model.setClassAttIndexPerTree(classAttIndex);
model.setTrees(trees);
model.setTrainLeafNodes(leafNodeCounts);
model.setTrainTargets(targetMatrix);
model.setAttributes(attributes);
return model;
}
/**
* Method generating the segment start indices and the segment difference
* locations randomly using <code>random</code>. This method uses the
* <code>length</code> to specify the interval being generated. In total,
* 2 * {@link IPatternSimilarityConfig#numSegments()} indices are generated.
* The start indices lie in <code>[0, timeSeriesLength - length)</code> and the
* segment difference indices in <code>[0, timeSeriesLength - length - 1)</code>
* (the -1 is due to the fact that the subsequent index is used for the
* difference calculation).
*
* @param segments
* Segment start indices used for feature generation
* @param segmentsDifference
* Segment difference start indices used for feature generation
* @param length
* The length of the segments
* @param timeSeriesLength
* The total length of the complete time series
* @param random
* Generator for the random numbers
*/
public void generateSegmentsAndDifferencesForTree(final int[] segments, final int[] segmentsDifference, final int length, final int timeSeriesLength, final Random random) {
for (int i = 0; i < this.getConfig().numSegments(); i++) {
segments[i] = random.nextInt(timeSeriesLength - length); // Length is always l
segmentsDifference[i] = random.nextInt(timeSeriesLength - length - 1);
}
}
/**
* Initializes a new instance of {@link AccessibleRandomTree}.
*
* @param numInstances
* The number of instances used for later training (used for setting
* the minimum number of instances per leaf)
* @return Returns the initialized tree
*/
public AccessibleRandomTree initializeRegressionTree(final int numInstances) {
AccessibleRandomTree regTree = new AccessibleRandomTree();
regTree.setSeed((int) this.getConfig().seed());
regTree.setMaxDepth(this.getConfig().maxDepth());
regTree.setKValue(1);
regTree.setMinNum((int) (numInstances * 0.01));
return regTree;
}
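/*
 * Worked example (illustration only): for numInstances = 1000 the minimum number of
 * instances per leaf is set to 1000 * 0.01 = 10, i.e. each leaf of the regression
 * tree has to cover at least one percent of the training instances.
 */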
/**
* Function collecting the leaf counts for the given <code>instance</code> as
* predicted by <code>regTree</code>. The result is stored at the induced leaf
* node index in <code>leafNodeCountsForInstance</code>.
*
* @param leafNodeCountsForInstance
* The vector storing the frequencies for each leaf node of the tree
* being the last node within the prediction
* @param instance
* The given Weka instance which is fed to the <code>regTree</code>
* @param regTree
* The regression tree used for prediction
* @throws PredictionException
* Thrown if the random regression tree could not predict anything
* for the given <code>instance</code>
*/
public static void collectLeafCounts(final int[] leafNodeCountsForInstance, final Instance instance, final AccessibleRandomTree regTree) throws PredictionException {
try {
regTree.distributionForInstance(instance);
} catch (Exception e) {
throw new PredictionException("Could not predict the distribution for instance for the given instance '" + instance.toString() + "' due to an internal Weka exception.", e);
}
int leafNodeIdx = regTree.getLastNode();
leafNodeCountsForInstance[leafNodeIdx]++;
}
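/*
 * Note (added for illustration): distributionForInstance records the ID of the leaf
 * reached last in getLastNode(); calling this method for all subseries instances of a
 * time series therefore turns leafNodeCountsForInstance into a histogram of leaf visits.
 */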
/**
* Function generating a dataset storing the features being generated as
* described in the original paper. The <code>segments</code> and
* <code>segmentsDifference</code> indices are used to extract subsequences of
* the given <code>dataMatrix</code> and generating value differences,
* respectively.
*
* @param attributes
* The attributes used by Weka to create the dataset
* @param length
* The length considered for the feature generation
* @param segments
* Segment start indices used for feature generation
* @param segmentsDifference
* Segment difference start indices used for feature generation
* @param dataMatrix
* Matrix storing the instance values used for feature generation
* @return Returns Weka instances storing the generated features
*/
public static Instances generateSubseriesFeaturesInstances(final List<Attribute> attributes, final int length, final int[] segments, final int[] segmentsDifference, final double[][] dataMatrix) {
Instances seqInstances = new Instances("SeqFeatures", new ArrayList<>(attributes), dataMatrix.length * length);
for (int inst = 0; inst < dataMatrix.length; inst++) {
double[] instValues = dataMatrix[inst];
for (int len = 0; len < length; len++) {
seqInstances.add(generateSubseriesFeatureInstance(instValues, segments, segmentsDifference, len));
}
}
return seqInstances;
}
/**
* Function generating subseries feature instances based on the given
* <code>segments</code> and <code>segmentsDifference</code> matrices. The
* <code>len</code> parameter indicates which subsequence instance is generated
* within this call. The values are extracted and used for calculation (for
* difference) from <code>instValues</code>.
*
* @param instValues
* Instance values used for feature generation
* @param segments
* Segment start indices used for feature generation
* @param segmentsDifference
* Segment difference start indices used for feature generation
* @param len
* Current length (is added to the segment and segment difference
* locations)
* @return Returns a Weka instance storing the generated features
*/
public static Instance generateSubseriesFeatureInstance(final double[] instValues, final int[] segments, final int[] segmentsDifference, final int len) {
if (segments.length != segmentsDifference.length) {
throw new IllegalArgumentException("The number of segments and the number of segments differences must be the same!");
}
if (instValues.length < len) {
throw new IllegalArgumentException("If the segments' length is set to '" + len + "', the number of time series variables must be greater or equals!");
}
DenseInstance instance = new DenseInstance(2 * segments.length);
for (int seq = 0; seq < segments.length; seq++) {
instance.setValue(seq * 2, instValues[segments[seq] + len]);
double difference = instValues[segmentsDifference[seq] + len + 1] - instValues[segmentsDifference[seq] + len];
instance.setValue(seq * 2 + 1, difference);
}
return instance;
}
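/*
 * Feature layout example (illustration only): for segments = [s0, s1] and difference
 * locations [d0, d1], the generated instance holds the four values
 * [x[s0 + len], x[d0 + len + 1] - x[d0 + len], x[s1 + len], x[d1 + len + 1] - x[d1 + len]],
 * i.e. a raw value and a first-order difference alternate for each segment.
 */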
@Override
public IPatternSimilarityConfig getConfig() {
return (IPatternSimilarityConfig) super.getConfig();
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/trees/TimeSeriesBagOfFeaturesClassifier.java
|
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.aeonbits.owner.ConfigCache;
import org.api4.java.ai.ml.core.exception.PredictionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.basic.IOwnerBasedRandomizedAlgorithmConfig;
import ai.libs.jaicore.basic.sets.Pair;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesFeature;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.learner.ASimplifiedTSClassifier;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.util.TimeSeriesUtil;
import ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees.TimeSeriesBagOfFeaturesLearningAlgorithm.ITimeSeriesBagOfFeaturesConfig;
import ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.util.WekaTimeseriesUtil;
import weka.classifiers.trees.RandomForest;
import weka.core.Instances;
/**
* Implementation of the Time Series Bag-of-Features (TSBF) classifier as
* described in Baydogan, Mustafa & Runger, George & Tuv, Eugene. (2013). A
* Bag-of-Features Framework to Classify Time Series. IEEE Transactions on
* Pattern Analysis and Machine Intelligence. 35. 2796-802.
* 10.1109/TPAMI.2013.72.
*
* This classifier only supports univariate time series prediction.
*
* @author Julian Lienen
*
*/
public class TimeSeriesBagOfFeaturesClassifier extends ASimplifiedTSClassifier<Integer> {
/**
* Log4j logger.
*/
private static final Logger LOGGER = LoggerFactory.getLogger(TimeSeriesBagOfFeaturesClassifier.class);
/**
* Random Forest classifier used for the internal OOB probability estimation.
*/
private RandomForest subseriesClf;
/**
* Random Forest classifier used for the final class prediction.
*/
private RandomForest finalClf;
/**
* Number of total classes used within training.
*/
private int numClasses;
/**
* Intervals of each subsequence storing the start and exclusive end index. It
* is used for feature generation.
*/
private int[][][] intervals;
/**
* Subsequences storing the start and exclusive end index. Used for feature
* generation.
*/
private int[][] subsequences;
private final ITimeSeriesBagOfFeaturesConfig config;
/**
* Standard constructor using the default parameters (numBins = 10, numFolds =
* 10, zProp = 0.1, minIntervalLength = 5) for the TSBF classifier.
*
* @param seed
* Seed used for randomized operations
*/
public TimeSeriesBagOfFeaturesClassifier(final int seed) {
this(seed, 10, 10, 0.1d, 5, false);
}
/**
* Constructor specifying parameters (cf.
* {@link TimeSeriesBagOfFeaturesClassifier#TimeSeriesBagOfFeaturesClassifier(int)}).
*
* @param seed
* Seed used for randomized operations
* @param numBins
* See {@link TimeSeriesBagOfFeaturesClassifier#numBins}
* @param numFolds
* Number of folds for the internal OOB probability CV estimation
* @param zProp
* Proportion of the total time series length to be used for the
* subseries generation
* @param minIntervalLength
* The minimal interval length used for the interval generation
*/
public TimeSeriesBagOfFeaturesClassifier(final int seed, final int numBins, final int numFolds, final double zProp, final int minIntervalLength) {
this(seed, numBins, numFolds, zProp, minIntervalLength, false);
}
/**
* Constructor specifying parameters (cf.
* {@link TimeSeriesBagOfFeaturesClassifier#TimeSeriesBagOfFeaturesClassifier(int)}).
*
* @param seed
* Seed used for randomized operations
* @param numBins
* See {@link TimeSeriesBagOfFeaturesClassifier#numBins}
* @param numFolds
* Number of folds for the internal OOB probability CV estimation
* @param zProp
* Proportion of the total time series length to be used for the
* subseries generation
* @param minIntervalLength
* The minimal interval length used for the interval generation
* @param useZNormalization
* Indicator whether the Z normalization should be used
*/
public TimeSeriesBagOfFeaturesClassifier(final int seed, final int numBins, final int numFolds, final double zProp, final int minIntervalLength, final boolean useZNormalization) {
this.config = ConfigCache.getOrCreate(ITimeSeriesBagOfFeaturesConfig.class);
this.config.setProperty(IOwnerBasedRandomizedAlgorithmConfig.K_SEED, "" + seed);
this.setNumBins(numBins);
this.config.setProperty(ITimeSeriesBagOfFeaturesConfig.K_NUMFOLDS, "" + numFolds);
this.config.setProperty(ITimeSeriesBagOfFeaturesConfig.K_ZPROP, "" + zProp);
this.config.setProperty(ITimeSeriesBagOfFeaturesConfig.K_MIN_INTERVAL_LENGTH, "" + minIntervalLength);
this.config.setProperty(ITimeSeriesBagOfFeaturesConfig.K_USE_ZNORMALIZATION, "" + useZNormalization);
}
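/*
 * Minimal usage sketch (hedged; the exact training entry point in the surrounding
 * framework may differ, and the variable names trainDataset and testInstance are
 * hypothetical):
 *
 * TimeSeriesBagOfFeaturesClassifier tsbf = new TimeSeriesBagOfFeaturesClassifier(42, 10, 10, 0.1, 5, true);
 * tsbf.getLearningAlgorithm(trainDataset).call(); // fits the subseries and final Random Forests
 * Integer label = tsbf.predict(testInstance);
 */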
/**
* Method predicting the class of the given <code>univInstance</code>. At first,
* an internal feature representation using a bag of features is generated by
* the previously trained {@link TimeSeriesBagOfFeaturesClassifier#subsequences}
* and {@link TimeSeriesBagOfFeaturesClassifier#intervals}. These internal
* instances are used to get an internal class probability estimation for each
* subsequence and interval for each instance using a Random Forest classifier.
* These probabilities are aggregated to a histogram which is then fed to a
* final Random Forest classifier predicting the instance's target class.
*/
@Override
public Integer predict(double[] univInstance) throws PredictionException {
if (!this.isTrained()) {
throw new PredictionException("Model has not been built before!");
}
// Z-Normalize if enabled
if (this.config.zNormalization()) {
univInstance = TimeSeriesUtil.zNormalize(univInstance, true);
}
// Generate features and interval instances
double[][] intervalFeatures = new double[this.intervals.length][(this.intervals[0].length + 1) * 3 + 2];
for (int i = 0; i < this.intervals.length; i++) {
// Feature generation for each interval
for (int j = 0; j < this.intervals[i].length; j++) {
double[] tmpFeatures = TimeSeriesFeature.getFeatures(univInstance, this.intervals[i][j][0], this.intervals[i][j][1] - 1, TimeSeriesBagOfFeaturesLearningAlgorithm.USE_BIAS_CORRECTION);
intervalFeatures[i][j * 3] = tmpFeatures[0];
intervalFeatures[i][j * 3 + 1] = tmpFeatures[1] * tmpFeatures[1];
intervalFeatures[i][j * 3 + 2] = tmpFeatures[2];
}
// Feature generation for each subseries itself
double[] subseriesFeatures = TimeSeriesFeature.getFeatures(univInstance, this.subsequences[i][0], this.subsequences[i][1] - 1, TimeSeriesBagOfFeaturesLearningAlgorithm.USE_BIAS_CORRECTION);
intervalFeatures[i][this.intervals[i].length * 3] = subseriesFeatures[0];
intervalFeatures[i][this.intervals[i].length * 3 + 1] = subseriesFeatures[1] * subseriesFeatures[1];
intervalFeatures[i][this.intervals[i].length * 3 + 2] = subseriesFeatures[2];
// Add start and end indices of subseries to features
intervalFeatures[i][intervalFeatures[i].length - 2] = this.subsequences[i][0];
intervalFeatures[i][intervalFeatures[i].length - 1] = this.subsequences[i][1];
}
// Prepare Weka instances for generated features
Instances subseriesInstances = WekaTimeseriesUtil.simplifiedTimeSeriesDatasetToWekaInstances(TimeSeriesUtil.createDatasetForMatrix(intervalFeatures),
IntStream.rangeClosed(0, this.numClasses - 1).boxed().map(String::valueOf).collect(Collectors.toList()));
// Predict probabilities using the subseries Random Forest classifier
double[][] probs = null;
int[] predictedTargets = new int[subseriesInstances.numInstances()];
try {
probs = this.subseriesClf.distributionsForInstances(subseriesInstances);
for (int i = 0; i < subseriesInstances.numInstances(); i++) {
predictedTargets[i] = (int) this.subseriesClf.classifyInstance(subseriesInstances.get(i));
}
} catch (Exception e) {
throw new PredictionException("Cannot derive the probabilities using the subseries classifier due to an internal Weka exception.", e);
}
// Discretize probabilities and create histograms for final Weka instance
int[][] discretizedProbs = TimeSeriesBagOfFeaturesLearningAlgorithm.discretizeProbs(this.getNumBins(), probs);
Pair<int[][][], int[][]> histFreqPair = TimeSeriesBagOfFeaturesLearningAlgorithm.formHistogramsAndRelativeFreqs(discretizedProbs, 1, this.numClasses, this.getNumBins());
int[][][] histograms = histFreqPair.getX();
int[][] relativeFrequencies = histFreqPair.getY();
// Prepare final Weka instance
double[][] finalHistogramInstances = TimeSeriesBagOfFeaturesLearningAlgorithm.generateHistogramInstances(histograms, relativeFrequencies);
Instances finalInstances = WekaTimeseriesUtil.simplifiedTimeSeriesDatasetToWekaInstances(TimeSeriesUtil.createDatasetForMatrix(finalHistogramInstances),
IntStream.rangeClosed(0, this.numClasses - 1).boxed().map(String::valueOf).collect(Collectors.toList()));
// Ensure that only one instance has been generated out of the given
// probabilities
if (finalInstances.size() != 1) {
final String errorMessage = "There should be only one instance given to the final Random Forest classifier.";
throw new PredictionException(errorMessage, new IllegalStateException(errorMessage));
}
// Predict using the generated Weka instance
try {
return (int) this.finalClf.classifyInstance(finalInstances.firstInstance());
} catch (Exception e) {
throw new PredictionException("Could not predict instance due to an internal Weka exception.", e);
}
}
/**
* {@inheritDoc}
*/
@Override
public Integer predict(final List<double[]> multivInstance) throws PredictionException {
LOGGER.warn("Dataset to be predicted is multivariate but only first time series (univariate) will be considered.");
return this.predict(multivInstance.get(0));
}
/**
* {@inheritDoc}
*/
@Override
public List<Integer> predict(final TimeSeriesDataset2 dataset) throws PredictionException {
if (!this.isTrained()) {
throw new PredictionException("Model has not been built before!");
}
// Uses the prediction of single instances
final List<Integer> result = new ArrayList<>();
for (int i = 0; i < dataset.getValues(0).length; i++) {
result.add(this.predict(dataset.getValues(0)[i]));
}
return result;
}
/**
* @return the subseriesClf
*/
public RandomForest getSubseriesClf() {
return this.subseriesClf;
}
/**
* @param subseriesClf
* the subseriesClf to set
*/
public void setSubseriesClf(final RandomForest subseriesClf) {
this.subseriesClf = subseriesClf;
}
/**
* @return the finalClf
*/
public RandomForest getFinalClf() {
return this.finalClf;
}
/**
* @param finalClf
* the finalClf to set
*/
public void setFinalClf(final RandomForest finalClf) {
this.finalClf = finalClf;
}
/**
* @return the numBins
*/
public int getNumBins() {
return this.config.numBins();
}
/**
* @param numBins
* the numBins to set
*/
public void setNumBins(final int numBins) {
this.config.setProperty(ITimeSeriesBagOfFeaturesConfig.K_NUMBINS, "" + numBins);
}
/**
* @return the numClasses
*/
public int getNumClasses() {
return this.numClasses;
}
/**
* @param numClasses
* the numClasses to set
*/
public void setNumClasses(final int numClasses) {
this.numClasses = numClasses;
}
/**
* @return the intervals
*/
public int[][][] getIntervals() {
return this.intervals;
}
/**
* @param intervals
* the intervals to set
*/
public void setIntervals(final int[][][] intervals) {
this.intervals = intervals;
}
/**
* @return the subsequences
*/
public int[][] getSubsequences() {
return this.subsequences;
}
/**
* @param subsequences
* the subsequences to set
*/
public void setSubsequences(final int[][] subsequences) {
this.subsequences = subsequences;
}
@Override
public TimeSeriesBagOfFeaturesLearningAlgorithm getLearningAlgorithm(final TimeSeriesDataset2 dataset) {
return new TimeSeriesBagOfFeaturesLearningAlgorithm(this.config, this, dataset);
}
public ITimeSeriesBagOfFeaturesConfig getConfig() {
return this.config;
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/trees/TimeSeriesBagOfFeaturesLearningAlgorithm.java
|
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees;
import java.util.Random;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.api4.java.ai.ml.core.exception.TrainingException;
import org.api4.java.algorithm.exceptions.AlgorithmException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.basic.IOwnerBasedRandomizedAlgorithmConfig;
import ai.libs.jaicore.basic.sets.Pair;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesFeature;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.learner.ASimplifiedTSCLearningAlgorithm;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.util.MathUtil;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.util.TimeSeriesUtil;
import ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.util.WekaTimeseriesUtil;
import weka.classifiers.trees.RandomForest;
import weka.core.Instances;
/**
* Algorithm to train a Time Series Bag-of-Features (TSBF) classifier as
* described in Baydogan, Mustafa & Runger, George & Tuv, Eugene. (2013). A
* Bag-of-Features Framework to Classify Time Series. IEEE Transactions on
* Pattern Analysis and Machine Intelligence. 35. 2796-802.
* 10.1109/TPAMI.2013.72.
*
* @author Julian Lienen
*
*/
public class TimeSeriesBagOfFeaturesLearningAlgorithm extends ASimplifiedTSCLearningAlgorithm<Integer, TimeSeriesBagOfFeaturesClassifier> {
public interface ITimeSeriesBagOfFeaturesConfig extends IOwnerBasedRandomizedAlgorithmConfig {
public static final String K_NUMBINS = "numbins";
public static final String K_NUMFOLDS = "numfolds";
public static final String K_ZPROP = "zprop";
public static final String K_MIN_INTERVAL_LENGTH = "minintervallength";
public static final String K_USE_ZNORMALIZATION = "useznormalization";
/**
* Number of bins used for the CPEs.
*/
@Key(K_NUMBINS)
@DefaultValue("-1")
public int numBins();
/**
* Number of folds used for the OOB probability estimation in the training phase.
*/
@Key(K_NUMFOLDS)
@DefaultValue("-1")
public int numFolds();
/**
* Proportion of the total time series length to be used for the subseries generation.
*/
@Key(K_ZPROP)
@DefaultValue("1.0")
public double zProportion();
/**
* Indicator whether the z transformation should be used for the instances at
* training and prediction time.
*/
@Key(K_USE_ZNORMALIZATION)
@DefaultValue("false")
public boolean zNormalization();
/**
* The minimal interval length used for the interval generation.
*/
@Key(K_MIN_INTERVAL_LENGTH)
@DefaultValue("1")
public int minIntervalLength();
}
/**
* Log4j logger.
*/
private static final Logger LOGGER = LoggerFactory.getLogger(TimeSeriesBagOfFeaturesLearningAlgorithm.class);
/**
* Indicator whether Bessel's correction should be used in feature generation.
*/
public static final boolean USE_BIAS_CORRECTION = false;
/**
* Number of trees used in the internal Random Forest classifier.
*/
private static final int NUM_TREES_IN_FOREST = 500;
/**
* Constructor for a TSBF training algorithm.
*/
public TimeSeriesBagOfFeaturesLearningAlgorithm(final ITimeSeriesBagOfFeaturesConfig config, final TimeSeriesBagOfFeaturesClassifier classifier, final TimeSeriesDataset2 data) {
super(config, classifier, data);
if (config.zProportion() < 0d || config.zProportion() > 1) {
throw new IllegalArgumentException("Parameter zProportion is set to " + config.zProportion() + " but must be between 0 and 1!");
}
}
/**
* Training procedure constructing a Time Series Bag-of-Features (TSBF)
* classifier using the given input data.
* @throws AlgorithmException
*/
@Override
public TimeSeriesBagOfFeaturesClassifier call() throws AlgorithmException {
// Training procedure
TimeSeriesDataset2 dataset = this.getInput();
if (dataset == null || dataset.isEmpty()) {
throw new IllegalArgumentException("Dataset used for training must not be null or empty!");
}
if (dataset.isMultivariate()) {
LOGGER.info("Only univariate data is used for training (matrix index 0), although multivariate data is available.");
}
// Shuffle instances
TimeSeriesUtil.shuffleTimeSeriesDataset(dataset, (int) this.getConfig().seed());
double[][] data = dataset.getValuesOrNull(0);
int[] targets = dataset.getTargets();
if (data == null || data.length == 0 || targets == null || targets.length == 0) {
throw new IllegalArgumentException("The given dataset for training must not contain a null or empty data or target matrix.");
}
// Get number classes
int numClasses = TimeSeriesUtil.getNumberOfClasses(dataset);
// Standardize each time series to zero mean and unit standard deviation (z-transformation)
if (this.getConfig().zNormalization()) {
for (int i = 0; i < dataset.getNumberOfInstances(); i++) {
data[i] = TimeSeriesUtil.zNormalize(data[i], true);
}
}
// Specify parameters used for subsequence and interval generation
int length = data[0].length; // Time series length
int lMin = (int) (this.getConfig().zProportion() * length); // Minimum subsequence length
// Check lower bound for minimum subsequence length
int minIntervalLength = this.getConfig().minIntervalLength();
if (lMin < minIntervalLength) {
lMin = minIntervalLength;
}
// Check upper bound for minimum subsequence length
if (lMin >= length - minIntervalLength) {
lMin -= minIntervalLength;
}
// Number of intervals for each subsequence
int d = this.getD(lMin);
// Number of possible intervals in a time series
int r = this.getR(length);
// Generate r-d subsequences, each with d intervals, and calculate features
Pair<int[][], int[][][]> subSeqIntervals = this.generateSubsequencesAndIntervals(r, d, lMin, length);
int[][] subsequences = subSeqIntervals.getX();
int[][][] intervals = subSeqIntervals.getY();
// Generate features
double[][][][] generatedFeatures = generateFeatures(data, subsequences, intervals);
// Generate class probability estimate (CPE) for each instance using a
// classifier
int numFeatures = (d + 1) * 3 + 2;
double[][] subSeqValueMatrix = new double[(r - d) * data.length][numFeatures];
int[] targetMatrix = new int[(r - d) * data.length];
for (int i = 0; i < r - d; i++) {
for (int j = 0; j < data.length; j++) {
double[] intervalFeatures = new double[numFeatures];
for (int k = 0; k < d + 1; k++) {
intervalFeatures[k * 3] = generatedFeatures[j][i][k][0];
intervalFeatures[k * 3 + 1] = generatedFeatures[j][i][k][1];
intervalFeatures[k * 3 + 2] = generatedFeatures[j][i][k][2];
}
intervalFeatures[intervalFeatures.length - 2] = subsequences[i][0];
intervalFeatures[intervalFeatures.length - 1] = subsequences[i][1];
subSeqValueMatrix[j * (r - d) + i] = intervalFeatures;
targetMatrix[j * (r - d) + i] = targets[j];
}
}
// Measure OOB probabilities
RandomForest subseriesClf = new RandomForest();
subseriesClf.setNumIterations(NUM_TREES_IN_FOREST);
double[][] probs = null;
try {
probs = measureOOBProbabilitiesUsingCV(subSeqValueMatrix, targetMatrix, (r - d) * data.length, this.getConfig().numFolds(), numClasses, subseriesClf);
} catch (TrainingException e1) {
throw new AlgorithmException("Could not measure OOB probabilities using CV.", e1);
}
// Train final subseries classifier
try {
WekaTimeseriesUtil.buildWekaClassifierFromSimplifiedTS(subseriesClf, TimeSeriesUtil.createDatasetForMatrix(targetMatrix, subSeqValueMatrix));
} catch (TrainingException e) {
throw new AlgorithmException("Could not train the sub series Random Forest classifier due to an internal Weka exception.", e);
}
// Discretize probability and form histogram
int[][] discretizedProbs = discretizeProbs(this.getConfig().numBins(), probs);
Pair<int[][][], int[][]> histFreqPair = formHistogramsAndRelativeFreqs(discretizedProbs, data.length, numClasses, this.getConfig().numBins());
int[][][] histograms = histFreqPair.getX();
int[][] relativeFrequencies = histFreqPair.getY();
// Build final classifier
double[][] finalInstances = generateHistogramInstances(histograms, relativeFrequencies);
RandomForest finalClf = new RandomForest();
finalClf.setNumIterations(NUM_TREES_IN_FOREST);
try {
WekaTimeseriesUtil.buildWekaClassifierFromSimplifiedTS(finalClf, TimeSeriesUtil.createDatasetForMatrix(targets, finalInstances));
} catch (TrainingException e) {
throw new AlgorithmException("Could not train the final Random Forest classifier due to an internal Weka exception.", e);
}
// Update model
TimeSeriesBagOfFeaturesClassifier model = this.getClassifier();
model.setSubseriesClf(subseriesClf);
model.setFinalClf(finalClf);
model.setNumClasses(numClasses);
model.setIntervals(intervals);
model.setSubsequences(subsequences);
return model;
}
/**
* Method randomly determining the subsequences and their intervals to be used
* for feature generation of the instances. As a result, a pair of each
* subsequence's start and end index and the intervals' start and end indices is
* returned.
*
* @param r
* The number of possible intervals in a time series
* @param d
* The number of intervals for each subsequence
* @param lMin
* The minimum subsequence length
* @param T
* The length of the time series
* @return a pair of each subsequence's start and end index and the intervals'
* start and end indices
*/
public Pair<int[][], int[][][]> generateSubsequencesAndIntervals(final int r, final int d, final int lMin, final int T) {
int[][] subsequences = new int[r - d][2];
int[][][] intervals = new int[r - d][d][2];
int minIntervalLength = this.getConfig().minIntervalLength();
Random random = new Random(this.getConfig().seed());
for (int i = 0; i < r - d; i++) {
int startIndex = random.nextInt(T - lMin);
int subSeqLength = random.nextInt(T - lMin - startIndex) + lMin;
// Store subseries borders (also used for feature generation)
subsequences[i][0] = startIndex;
subsequences[i][1] = startIndex + subSeqLength + 1; // exclusive
int intervalLength = (int) ((double) (subsequences[i][1] - subsequences[i][0]) / ((double) d));
if (intervalLength < minIntervalLength) {
throw new IllegalStateException("The induced interval length must not be lower than the minimum interval length!");
}
if (intervalLength > minIntervalLength) {
// Select random length for interval
intervalLength = random.nextInt(intervalLength - minIntervalLength + 1) + minIntervalLength;
}
for (int j = 0; j < d; j++) {
intervals[i][j][0] = subsequences[i][0] + j * intervalLength;
intervals[i][j][1] = subsequences[i][0] + (j + 1) * intervalLength; // exclusive
}
}
return new Pair<>(subsequences, intervals);
}
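/**
 * Editorial sketch (hypothetical helper, not part of the original API): checks the
 * contract documented above, namely that every sampled interval lies within its
 * enclosing subsequence, with inclusive start and exclusive end indices.
 */
public static void checkIntervalsWithinSubsequences(final int[][] subsequences, final int[][][] intervals) {
for (int i = 0; i < subsequences.length; i++) {
for (int j = 0; j < intervals[i].length; j++) {
// Each interval [start, end) must be contained in its subsequence [start, end)
if (intervals[i][j][0] < subsequences[i][0] || intervals[i][j][1] > subsequences[i][1]) {
throw new IllegalStateException("Interval " + j + " of subsequence " + i + " exceeds its subsequence bounds.");
}
}
}
}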
/**
* Function generating the features for the internal probability measurement
 * model based on the given <code>subsequences</code> and their corresponding
 * <code>intervals</code>. The features are built using the
* {@link TimeSeriesFeature} implementation. As a result, a tensor consisting of
* the generated features for each interval in each subsequence for each
* instance is returned (4 dimensions).
*
* @param data
* The data used for feature generation
* @param subsequences
* The subsequences used for feature generation (the start and end
* [exclusive] index is stored for each subsequence)
* @param intervals
* The intervals of each subsequence used for the feature generation
* (the start and end [exclusive] index is stored for each interval)
* @return Returns a tensor consisting of the generated features for each
* interval in each subsequence for each instance
*/
public static double[][][][] generateFeatures(final double[][] data, final int[][] subsequences, final int[][][] intervals) {
double[][][][] generatedFeatures = new double[data.length][subsequences.length][intervals[0].length + 1][TimeSeriesFeature.NUM_FEATURE_TYPES];
for (int i = 0; i < data.length; i++) {
for (int j = 0; j < subsequences.length; j++) {
for (int k = 0; k < intervals[j].length; k++) {
generatedFeatures[i][j][k] = TimeSeriesFeature.getFeatures(data[i], intervals[j][k][0], intervals[j][k][1] - 1, USE_BIAS_CORRECTION);
generatedFeatures[i][j][k][1] *= generatedFeatures[i][j][k][1];
}
generatedFeatures[i][j][intervals[j].length] = TimeSeriesFeature.getFeatures(data[i], subsequences[j][0], subsequences[j][1] - 1, USE_BIAS_CORRECTION);
generatedFeatures[i][j][intervals[j].length][1] *= generatedFeatures[i][j][intervals[j].length][1];
}
}
return generatedFeatures;
}
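/**
 * Editorial sketch (hypothetical helper, illustration only): makes the indexing
 * convention of the tensor produced above explicit, i.e.
 * [instance][subsequence][interval][feature type], where the last interval slot
 * covers the whole subsequence and feature index 1 has been squared above.
 */
public static double[] getIntervalFeatures(final double[][][][] generatedFeatures, final int instance, final int subsequence, final int interval) {
return generatedFeatures[instance][subsequence][interval];
}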
/**
* Method calculating the number of intervals for each subsequence.
*
* @param lMin
* The minimum subsequence length
* @return Returns the number of intervals for each subsequence
*/
private int getD(final int lMin) {
return lMin > this.getConfig().minIntervalLength() ? (int) Math.floor(lMin / (double) this.getConfig().minIntervalLength()) : 1;
}
/**
* Method returning the number of possible intervals in the time series used for
* subsequences and intervals generation.
*
* @param T
* The length of the time series
* @return Returns the number of possible intervals in the time series
*/
private int getR(final int T) {
return (int) Math.floor(T / (double) this.getConfig().minIntervalLength());
}
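// Editorial example: with minIntervalLength = 3 and a time series of length T = 30,
// getR yields r = floor(30 / 3) = 10 possible intervals, and a minimum subsequence
// length of lMin = 10 yields d = floor(10 / 3) = 3 intervals per subsequence.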
/**
* Generates a matrix consisting of the histogram values for each instance out
* of the given <code>histograms</code> and the relative frequencies of classes
* for each instance. The histogram values for each instance, class and bin are
* concatenated. Furthermore, the relative frequencies are also added to the
* instance's features.
*
* @param histograms
* The histograms for each instance (number of instances x number of
* classes - 1 x number of bins)
* @param relativeFreqsOfClasses
* The relative frequencies of the classes for each instance
* (previously extracted from each subseries instance per origin
* instance; dimensionality is number of instances x number of
* classes)
* @return Returns a matrix storing the features for each instance (number of
* instances x number of features)
*/
public static double[][] generateHistogramInstances(final int[][][] histograms, final int[][] relativeFreqsOfClasses) {
int featureLength = histograms[0].length * histograms[0][0].length + relativeFreqsOfClasses[0].length;
final double[][] results = new double[histograms.length][featureLength];
for (int i = 0; i < results.length; i++) {
double[] instFeatures = new double[featureLength];
int featureIdx = 0;
for (int j = 0; j < histograms[i].length; j++) {
for (int k = 0; k < histograms[i][j].length; k++) {
instFeatures[featureIdx++] = histograms[i][j][k];
}
}
for (int j = 0; j < relativeFreqsOfClasses[i].length; j++) {
instFeatures[featureIdx++] = relativeFreqsOfClasses[i][j];
}
results[i] = instFeatures;
}
return results;
}
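/**
 * Editorial sketch (hypothetical helper, illustration only): the feature length of
 * the matrix produced above, derived from the documented dimensionalities, i.e.
 * (numClasses - 1) histogram rows with numBins bins each plus numClasses relative
 * frequencies.
 */
public static int expectedHistogramFeatureLength(final int numClasses, final int numBins) {
return (numClasses - 1) * numBins + numClasses;
}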
/**
* Function measuring the out-of-bag (OOB) probabilities using a cross
* validation with <code>numFolds</code> many folds. For each fold, the data
* given by <code>subSeqValueMatrix</code> is split into a training and test
* set. The test set's probabilities are then derived by a trained Random Forest
* classifier.
*
* @param subSeqValueMatrix
* Input data used to derive the OOB probabilities
* @param targetMatrix
* The target values of the input data
* @param numProbInstances
* Number of instances for which the probabilities should be derived
* @param numFolds
* Number of folds used for the measurement
* @param numClasses
* Number of total classes
* @param rf
* Random Forest classifier which is retrained in each fold
* @return Returns a matrix storing the probability for each input instance
* given by <code>subSeqValueMatrix</code>
* @throws TrainingException
* Thrown when the classifier <code>rf</code> could not be trained
* in any fold
*/
public static double[][] measureOOBProbabilitiesUsingCV(final double[][] subSeqValueMatrix, final int[] targetMatrix, final int numProbInstances, final int numFolds, final int numClasses, final RandomForest rf)
throws TrainingException {
double[][] probs = new double[numProbInstances][numClasses];
int numTestInstsPerFold = (int) ((double) probs.length / (double) numFolds);
for (int i = 0; i < numFolds; i++) {
// Generate training instances for fold
Pair<TimeSeriesDataset2, TimeSeriesDataset2> trainingTestDatasets = TimeSeriesUtil.getTrainingAndTestDataForFold(i, numFolds, subSeqValueMatrix, targetMatrix);
TimeSeriesDataset2 trainingDS = trainingTestDatasets.getX();
WekaTimeseriesUtil.buildWekaClassifierFromSimplifiedTS(rf, trainingDS);
// Prepare test instances
TimeSeriesDataset2 testDataset = trainingTestDatasets.getY();
Instances testInstances = WekaTimeseriesUtil.simplifiedTimeSeriesDatasetToWekaInstances(testDataset, IntStream.rangeClosed(0, numClasses - 1).boxed().map(String::valueOf).collect(Collectors.toList()));
double[][] testProbs = null;
try {
testProbs = rf.distributionsForInstances(testInstances);
} catch (Exception e) {
throw new TrainingException("Could not induce test probabilities in OOB probability estimation due to an internal Weka error.", e);
}
// Store induced probabilities
for (int j = 0; j < testProbs.length; j++) {
probs[i * numTestInstsPerFold + j] = testProbs[j];
}
}
return probs;
}
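/**
 * Editorial sketch (hypothetical helper, not part of the original API): the row
 * offset of a given fold in the probability matrix filled above, assuming equally
 * sized folds as in the loop of the preceding function.
 */
public static int foldRowOffset(final int numProbInstances, final int numFolds, final int fold) {
return fold * (numProbInstances / numFolds);
}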
/**
* Function calculating the histograms as described in the paper's section 2.2
* ("Codebook and Learning"). All probabilities rows belonging to one instance
* are aggregated by evaluating the discretized probabilities
* <code>discretizedProbs</code>. Furthermore, the relative frequencies of the
* classes are collected. As the result, a pair of the generated histograms for
* all instances and the corresponding normalized relative class frequencies is
* returned.
*
* @param discretizedProbs
* The discretized (binned) probabilities of all instance's subseries
* rows (the number of rows must be divisible by the number of total
* instances)
 * @param numInstances
 *            The total number of instances (must be <= the number of rows in
 *            <code>discretizedProbs</code>)
* @param numClasses
* The total number of classes
* @param numBins
 *            The number of bins used within the discretization
* @return Returns a pair of the histograms per instance
* (<code>numInstances</code> in total) and the corresponding relative
* frequencies (normalized)
*/
public static Pair<int[][][], int[][]> formHistogramsAndRelativeFreqs(final int[][] discretizedProbs, final int numInstances, final int numClasses, final int numBins) {
if (discretizedProbs.length < numInstances) {
throw new IllegalArgumentException("The number of discretized probabilities must not be lower than the number of instances!");
}
if (discretizedProbs.length % numInstances != 0) {
throw new IllegalArgumentException("The number of discretized probabilities must be divisible by the number of instances!");
}
final int[][][] histograms = new int[numInstances][numClasses - 1][numBins];
final int[][] relativeFrequencies = new int[numInstances][numClasses];
int numEntries = (discretizedProbs.length / numInstances);
for (int i = 0; i < discretizedProbs.length; i++) {
// Index of the instance
int instanceIdx = i / numEntries;
for (int c = 0; c < numClasses - 1; c++) {
int bin = discretizedProbs[i][c];
histograms[instanceIdx][c][bin]++;
}
// Select predicted class
int predClass = MathUtil.argmax(discretizedProbs[i]);
relativeFrequencies[instanceIdx][predClass]++;
}
// Normalize the relative frequencies (note: integer division, so the values are truncated towards zero)
for (int i = 0; i < relativeFrequencies.length; i++) {
for (int j = 0; j < relativeFrequencies[i].length; j++) {
relativeFrequencies[i][j] /= numEntries;
}
}
return new Pair<>(histograms, relativeFrequencies);
}
/**
* Function discretizing probabilities into bins. The bins are determined by
* steps of 1 / <code>numBins</code>. The result is a matrix with the same
* dimensionality as <code>probs</code> storing the identifier of the
* corresponding bins.
*
* @param numBins
* Number of bins, determines the probability steps for each bin
* @param probs
* Matrix storing the probabilities of each row for each class
* (columns)
* @return Returns a matrix sharing the dimensionality of <code>probs</code>
* with the discrete bin identifier
*/
public static int[][] discretizeProbs(final int numBins, final double[][] probs) {
int[][] results = new int[probs.length][probs[0].length];
final double steps = 1d / numBins;
for (int i = 0; i < results.length; i++) {
int[] discretizedProbs = new int[probs[i].length];
for (int j = 0; j < discretizedProbs.length; j++) {
if (probs[i][j] == 1) {
discretizedProbs[j] = numBins - 1;
} else {
discretizedProbs[j] = (int) ((probs[i][j]) / steps);
}
}
results[i] = discretizedProbs;
}
return results;
}
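/**
 * Editorial sketch (hypothetical helper, illustration only): the binning above on a
 * single value. With numBins = 10 the step width is 0.1, so a probability of 0.37
 * falls into bin 3, while a probability of exactly 1.0 is clamped to the last bin (9).
 */
public static int discretizeSingleProb(final int numBins, final double prob) {
return prob == 1 ? numBins - 1 : (int) (prob / (1d / numBins));
}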
/**
* {@inheritDoc}
*/
@Override
public ITimeSeriesBagOfFeaturesConfig getConfig() {
return (ITimeSeriesBagOfFeaturesConfig) super.getConfig();
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/trees/TimeSeriesForestClassifier.java
|
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import org.aeonbits.owner.ConfigCache;
import org.api4.java.ai.ml.core.exception.PredictionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.basic.IOwnerBasedRandomizedAlgorithmConfig;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.learner.ASimplifiedTSClassifier;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.util.TimeSeriesUtil;
import ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees.TimeSeriesForestLearningAlgorithm.ITimeSeriesForestConfig;
/**
* Time series forest classifier as described in Deng, Houtao et al. "A Time
* Series Forest for Classification and Feature Extraction." Inf. Sci. 239
 * (2013): 142-153. Consists of multiple {@link TimeSeriesTreeClassifier} classifiers.
*
* This classifier only supports univariate time series prediction.
*
* @author Julian Lienen
*
*/
public class TimeSeriesForestClassifier extends ASimplifiedTSClassifier<Integer> {
/**
* Log4j logger
*/
private static final Logger LOGGER = LoggerFactory.getLogger(TimeSeriesForestClassifier.class);
private final ITimeSeriesForestConfig config;
/**
 * The time series trees forming the ensemble
*/
private TimeSeriesTreeClassifier[] trees;
/**
* Constructing an untrained ensemble of time series trees.
*/
public TimeSeriesForestClassifier() {
this(ConfigCache.getOrCreate(ITimeSeriesForestConfig.class));
}
/**
* Constructing an untrained ensemble of time series trees.
*/
public TimeSeriesForestClassifier(final ITimeSeriesForestConfig config) {
this.config = config;
}
public void setNumberOfTrees(final int numTrees) {
this.config.setProperty(ITimeSeriesForestConfig.K_NUMTREES, "" + numTrees);
}
public void setMaxDepth(final int maxDepth) {
this.config.setProperty(ITimeSeriesForestConfig.K_MAXDEPTH, "" + maxDepth);
}
public void setFeatureCaching(final boolean enableFeatureCaching) {
this.config.setProperty(ITimeSeriesForestConfig.K_FEATURECACHING, "" + enableFeatureCaching);
}
public void setSeed(final int seed) {
this.config.setProperty(IOwnerBasedRandomizedAlgorithmConfig.K_SEED, "" + seed);
}
/**
* Predicts the class of the given instance by taking the majority vote of all
* trees.
*
* @param univInstance
* Univariate instance to be predicted
*/
@Override
public Integer predict(final double[] univInstance) throws PredictionException {
if (!this.isTrained()) {
throw new PredictionException("Model has not been built before!");
}
if (univInstance == null) {
throw new IllegalArgumentException("Instance to be predicted must not be null or empty!");
}
HashMap<Integer, Integer> votes = new HashMap<>();
for (int i = 0; i < this.trees.length; i++) {
int prediction = this.trees[i].predict(univInstance);
votes.merge(prediction, 1, Integer::sum);
}
return TimeSeriesUtil.getMaximumKeyByValue(votes);
}
/**
* {@inheritDoc}
*/
@Override
public Integer predict(final List<double[]> multivInstance) throws PredictionException {
LOGGER.warn("Dataset to be predicted is multivariate but only first time series (univariate) will be considered.");
return this.predict(multivInstance.get(0));
}
/**
* {@inheritDoc}
*/
@Override
public List<Integer> predict(final TimeSeriesDataset2 dataset) throws PredictionException {
double[][] data = this.checkWhetherPredictionIsPossible(dataset);
if (dataset.isMultivariate()) {
throw new UnsupportedOperationException("Multivariate instances are not supported yet.");
}
List<Integer> predictions = new ArrayList<>();
LOGGER.debug("Starting prediction...");
for (int i = 0; i < data.length; i++) {
predictions.add(this.predict(data[i]));
}
LOGGER.debug("Finished prediction.");
return predictions;
}
/**
* Getter for the time series trees.
*
* @return Returns an array consisting of all forest trees.
*/
public TimeSeriesTreeClassifier[] getTrees() {
return this.trees;
}
/**
* Setter for the time series trees.
*
* @param trees
* Trees to be set
*/
public void setTrees(final TimeSeriesTreeClassifier[] trees) {
this.trees = trees;
}
@Override
public TimeSeriesForestLearningAlgorithm getLearningAlgorithm(final TimeSeriesDataset2 dataset) {
return new TimeSeriesForestLearningAlgorithm(this.config, this, dataset);
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/trees/TimeSeriesForestLearningAlgorithm.java
|
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.aeonbits.owner.ConfigCache;
import org.api4.java.algorithm.exceptions.AlgorithmException;
import ai.libs.jaicore.basic.IOwnerBasedRandomizedAlgorithmConfig;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.learner.ASimplifiedTSCLearningAlgorithm;
import ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees.TimeSeriesTreeLearningAlgorithm.ITimeSeriesTreeConfig;
/**
* Algorithm to train a time series forest classifier as described in Deng,
* Houtao et al. "A Time Series Forest for Classification and Feature
 * Extraction." Inf. Sci. 239 (2013): 142-153. Consists of multiple
 * {@link TimeSeriesTreeClassifier} classifiers.
*
* @author Julian Lienen
*
*/
public class TimeSeriesForestLearningAlgorithm extends ASimplifiedTSCLearningAlgorithm<Integer, TimeSeriesForestClassifier> {
public interface ITimeSeriesForestConfig extends IOwnerBasedRandomizedAlgorithmConfig {
public static final String K_NUMTREES = "numtrees";
public static final String K_MAXDEPTH = "maxdepth";
public static final String K_FEATURECACHING = "featurecaching";
/**
* Number of trees to be trained.
*/
@Key(K_NUMTREES)
@DefaultValue("-1")
public int numTrees();
/**
* Maximum depth of the trained trees.
*/
@Key(K_MAXDEPTH)
@DefaultValue("-1")
public int maxDepth();
/**
* Indicator whether feature caching should be used. Usage for datasets with
* many attributes is not recommended due to a high number of possible
* intervals.
*/
@Key(K_FEATURECACHING)
@DefaultValue("false")
public boolean useFeatureCaching();
}
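/**
 * Editorial sketch (hypothetical helper, not part of the original API): shows how a
 * config instance honoring the keys above may be assembled; the concrete values are
 * illustrative only.
 */
public static ITimeSeriesForestConfig sketchConfig(final int numTrees, final int maxDepth, final long seed) {
ITimeSeriesForestConfig config = ConfigCache.getOrCreate(ITimeSeriesForestConfig.class);
config.setProperty(ITimeSeriesForestConfig.K_NUMTREES, String.valueOf(numTrees));
config.setProperty(ITimeSeriesForestConfig.K_MAXDEPTH, String.valueOf(maxDepth));
config.setProperty(IOwnerBasedRandomizedAlgorithmConfig.K_SEED, String.valueOf(seed));
return config;
}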
/**
* Constructor for a time series forest training algorithm.
*/
public TimeSeriesForestLearningAlgorithm(final ITimeSeriesForestConfig config, final TimeSeriesForestClassifier classifier, final TimeSeriesDataset2 data) {
super(config, classifier, data);
}
/**
* {@inheritDoc}
*/
@Override
public ITimeSeriesForestConfig getConfig() {
return (ITimeSeriesForestConfig)super.getConfig();
}
/**
 * Training procedure constructing a time series forest using the given input
* data.
* @throws InterruptedException
* @throws AlgorithmException
*/
@Override
public TimeSeriesForestClassifier call() throws InterruptedException, AlgorithmException {
ITimeSeriesForestConfig config = this.getConfig();
TimeSeriesDataset2 dataset = this.getInput();
// Perform Training
final TimeSeriesTreeClassifier[] trees = new TimeSeriesTreeClassifier[config.numTrees()];
ExecutorService execService = Executors.newFixedThreadPool(config.cpus());
@SuppressWarnings("unchecked")
Future<TimeSeriesTreeClassifier>[] futures = new Future[config.numTrees()];
for (int i = 0; i < config.numTrees(); i++) {
ITimeSeriesTreeConfig configOfTree = ConfigCache.getOrCreate(ITimeSeriesTreeConfig.class);
configOfTree.setProperty(ITimeSeriesTreeConfig.K_MAXDEPTH, "" + config.maxDepth());
configOfTree.setProperty(IOwnerBasedRandomizedAlgorithmConfig.K_SEED, "" + (config.seed() + i)); // derive a distinct seed per tree
configOfTree.setProperty(ITimeSeriesTreeConfig.K_FEATURECACHING, "" + config.useFeatureCaching());
TimeSeriesTreeClassifier tst = new TimeSeriesTreeClassifier(configOfTree);
futures[i] = execService.submit(new Callable<TimeSeriesTreeClassifier>() {
@Override
public TimeSeriesTreeClassifier call() throws Exception {
tst.train(dataset);
return tst;
}
});
}
// Wait for completion
execService.shutdown();
execService.awaitTermination(this.getTimeout().seconds(), TimeUnit.SECONDS);
for (int i = 0; i < config.numTrees(); i++) {
try {
TimeSeriesTreeClassifier tst = futures[i].get();
trees[i] = tst;
} catch (ExecutionException e) {
throw new AlgorithmException("Could not train time series tree due to training exception: " + e.getMessage());
}
}
this.getClassifier().setTrees(trees);
return this.getClassifier();
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/trees/TimeSeriesTreeClassifier.java
|
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees;
import java.util.ArrayList;
import java.util.List;
import org.api4.java.ai.ml.core.exception.PredictionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.graph.TreeNode;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesFeature;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.learner.ASimplifiedTSClassifier;
import ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees.TimeSeriesTreeLearningAlgorithm.ITimeSeriesTreeConfig;
/**
* Time series tree as described in Deng, Houtao et al. "A Time Series Forest
* for Classification and Feature Extraction." Inf. Sci. 239 (2013): 142-153.
*
* This classifier only supports univariate time series prediction.
*
* @author Julian Lienen
*
*/
public class TimeSeriesTreeClassifier extends ASimplifiedTSClassifier<Integer> {
/**
* Log4j logger
*/
private static final Logger LOGGER = LoggerFactory.getLogger(TimeSeriesTreeClassifier.class);
private final ITimeSeriesTreeConfig config;
/**
* Decision information for a tree node within a <code>TimeSeriesTree</code>.
*/
static class TimeSeriesTreeNodeDecisionFunction {
protected TimeSeriesFeature.FeatureType f;
protected int t1;
protected int t2;
protected double threshold;
protected int classPrediction = -1;
@Override
public String toString() {
return "TimeSeriesTreeNodeDecisionFunction [f=" + this.f + ", t1=" + this.t1 + ", t2=" + this.t2 + ", threshold=" + this.threshold + ", classPrediction=" + this.classPrediction + "]";
}
}
/**
* The root node of the time series tree
*/
private final TreeNode<TimeSeriesTreeNodeDecisionFunction> rootNode;
/**
* Constructs an empty time series tree.
*
*/
public TimeSeriesTreeClassifier(final ITimeSeriesTreeConfig config) {
this.config = config;
this.rootNode = new TreeNode<>(new TimeSeriesTreeNodeDecisionFunction(), null);
}
/**
* Getter for the root node.
*
* @return Returns the root node of the time series tree
*/
public TreeNode<TimeSeriesTreeNodeDecisionFunction> getRootNode() {
return this.rootNode;
}
/**
* Predicts the class of the given univariate instance by iterating through the
* tree starting from the root node to a leaf node to induce a class prediction.
*
* @param univInstance
* Univariate instance to be predicted
*/
@Override
public Integer predict(final double[] univInstance) throws PredictionException {
if (!this.isTrained()) {
throw new PredictionException("Model has not been built before!");
}
TreeNode<TimeSeriesTreeNodeDecisionFunction> currNode = this.rootNode;
TreeNode<TimeSeriesTreeNodeDecisionFunction> tmpNode;
while ((tmpNode = decide(currNode, univInstance)) != null) {
currNode = tmpNode;
}
return currNode.getValue().classPrediction;
}
/**
* {@inheritDoc}
*/
@Override
public Integer predict(final List<double[]> multivInstance) throws PredictionException {
LOGGER.warn("Dataset to be predicted is multivariate but only first time series (univariate) will be considered.");
return this.predict(multivInstance.get(0));
}
/**
* {@inheritDoc}
*/
@Override
public List<Integer> predict(final TimeSeriesDataset2 dataset) throws PredictionException {
if (!this.isTrained()) {
throw new PredictionException("Model has not been built before!");
}
if (dataset.isMultivariate()) {
throw new UnsupportedOperationException("Multivariate instances are not supported yet.");
}
if (dataset.isEmpty()) {
throw new IllegalArgumentException("The dataset to be predicted must not be null!");
}
double[][] data = dataset.getValuesOrNull(0);
List<Integer> predictions = new ArrayList<>();
for (int i = 0; i < data.length; i++) {
predictions.add(this.predict(data[i]));
}
return predictions;
}
/**
* Function performing the decision on a <code>treeNode</code> given the
* <code>instance</code> based on the locally stored splitting criterion.
*
* @param treeNode
* Tree node where the decision is taken place
* @param instance
* Instance values
* @return Returns the child node where the next decision can be done, null if
 *         <code>treeNode</code> is a leaf node
*/
public static TreeNode<TimeSeriesTreeNodeDecisionFunction> decide(final TreeNode<TimeSeriesTreeNodeDecisionFunction> treeNode, final double[] instance) {
if (treeNode.getValue().classPrediction != -1) {
return null;
}
if (treeNode.getChildren().size() != 2) {
throw new IllegalStateException("A binary tree node assumed to be complete has not two children nodes.");
}
// Check decision function
if (TimeSeriesFeature.calculateFeature(treeNode.getValue().f, instance, treeNode.getValue().t1, treeNode.getValue().t2, TimeSeriesTreeLearningAlgorithm.USE_BIAS_CORRECTION) <= treeNode.getValue().threshold) {
return treeNode.getChildren().get(0);
} else {
return treeNode.getChildren().get(1);
}
}
@Override
public TimeSeriesTreeLearningAlgorithm getLearningAlgorithm(final TimeSeriesDataset2 dataset) {
return new TimeSeriesTreeLearningAlgorithm(this.config, this, dataset);
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/trees/TimeSeriesTreeLearningAlgorithm.java
|
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees;
import static ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesFeature.NUM_FEATURE_TYPES;
import static ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesFeature.getFeatures;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Random;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.lang3.ArrayUtils;
import org.api4.java.algorithm.Timeout;
import org.api4.java.algorithm.events.IAlgorithmEvent;
import ai.libs.jaicore.basic.IOwnerBasedRandomizedAlgorithmConfig;
import ai.libs.jaicore.basic.sets.Pair;
import ai.libs.jaicore.graph.TreeNode;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesFeature.FeatureType;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.learner.ASimplifiedTSCLearningAlgorithm;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.util.TimeSeriesUtil;
import ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees.TimeSeriesTreeClassifier.TimeSeriesTreeNodeDecisionFunction;
/**
* Algorithm to build a time series tree as described in Deng, Houtao et al. "A
* Time Series Forest for Classification and Feature Extraction." Inf. Sci. 239
* (2013): 142-153.
*
* @author Julian Lienen
*
*/
public class TimeSeriesTreeLearningAlgorithm extends ASimplifiedTSCLearningAlgorithm<Integer, TimeSeriesTreeClassifier> {
public interface ITimeSeriesTreeConfig extends IOwnerBasedRandomizedAlgorithmConfig {
public static final String K_MAXDEPTH = "maxdepth";
public static final String K_FEATURECACHING = "featurecaching";
@Key(K_MAXDEPTH)
@DefaultValue("-1")
public int maxDepth();
/**
* Indicator whether feature caching should be used. Usage for datasets with
* many attributes is not recommended due to a high number of possible
* intervals.
*/
@Key(K_FEATURECACHING)
@DefaultValue("false")
public boolean useFeatureCaching();
}
/**
* Number of threshold candidates created in each tree recursion step.
*/
public static final int NUM_THRESH_CANDIDATES = 20;
/**
* Alpha parameter used to weight the importance of the feature's margins to the
* threshold candidates.
*/
public static final double ENTROPY_ALPHA = 1e-22;
/**
 * Precision delta used to overcome imprecision, e.g. for values very close to
* but not exactly zero.
*/
private static final double PRECISION_DELTA = 1e-9;
/**
* Sparse cache used for already generated feature values.
*/
private HashMap<Long, double[]> transformedFeaturesCache = null;
/**
* Indicator that the bias (Bessel's) correction should be used for the
* calculation of the standard deviation.
*/
public static final boolean USE_BIAS_CORRECTION = true;
/**
* Constructor.
*
* @param maxDepth
* Maximal depth of the tree to be trained
* @param seed
* Seed used for randomized operations
* @param useFeatureCaching
* Indicator whether feature caching should be used. Since feature
* generation is very efficient, this should be only used if the time
* series is very long
*/
public TimeSeriesTreeLearningAlgorithm(final ITimeSeriesTreeConfig config, final TimeSeriesTreeClassifier tree, final TimeSeriesDataset2 data) {
super(config, tree, data);
}
/**
* {@inheritDoc}
*/
@Override
public void registerListener(final Object listener) {
throw new UnsupportedOperationException();
}
/**
* {@inheritDoc}
*/
@Override
public int getNumCPUs() {
throw new UnsupportedOperationException();
}
/**
* {@inheritDoc}
*/
@Override
public void setNumCPUs(final int numberOfCPUs) {
throw new UnsupportedOperationException();
}
/**
* {@inheritDoc}
*/
@Override
public void setTimeout(final Timeout timeout) {
throw new UnsupportedOperationException();
}
/**
* {@inheritDoc}
*/
@Override
public Timeout getTimeout() {
throw new UnsupportedOperationException();
}
/**
* {@inheritDoc}
*/
@Override
public IAlgorithmEvent nextWithException() {
throw new UnsupportedOperationException();
}
/**
 * Training procedure constructing a time series tree using the given input
* data.
*/
@Override
public TimeSeriesTreeClassifier call() {
// Training
TimeSeriesDataset2 data = this.getInput();
if (data.isEmpty()) {
throw new IllegalArgumentException("The dataset used for training must not be null!");
}
if (data.isMultivariate()) {
throw new UnsupportedOperationException("Multivariate instances are not supported yet.");
}
double[][] dataMatrix = data.getValuesOrNull(0);
// Also check for number of instances
int n = dataMatrix.length;
if (n <= 0) {
throw new IllegalArgumentException("The traning data's matrix must contain at least one instance!");
}
// Initial prior parentEntropy value, affects the scale of delta entropy values
// in each recursion step
double parentEntropy = 2d;
// Set up feature caching
if (((ITimeSeriesTreeConfig)this.getConfig()).useFeatureCaching()) {
int q = dataMatrix[0].length;
this.transformedFeaturesCache = new HashMap<>(q * q * n);
}
// Build tree
this.tree(dataMatrix, data.getTargets(), parentEntropy, this.getClassifier().getRootNode(), 0);
return this.getClassifier();
}
/**
* {@inheritDoc}
*/
@Override
public Iterator<IAlgorithmEvent> iterator() {
throw new UnsupportedOperationException();
}
/**
* {@inheritDoc}
*/
@Override
public boolean hasNext() {
throw new UnsupportedOperationException();
}
/**
* {@inheritDoc}
*/
@Override
public IAlgorithmEvent next() {
throw new NoSuchElementException("Cannot enumerate this algorithm!");
}
/**
* {@inheritDoc}
*/
@Override
public void cancel() {
throw new UnsupportedOperationException();
}
/**
* Tree generation (cf. Algorithm 2 of original paper). Samples the intervals in
* each recursion step and calculates the features (using caches if
 * {@link ITimeSeriesTreeConfig#useFeatureCaching()} is set to true). It then
* searches for an optimal split regarding several threshold candidates for
* feature splits. The splitting criterion is based on a metric called Entrance
* gain which is a combination of the entropy induced by the class proportions
* and the feature margins to the threshold (cf. chapter 4.1 in the paper). The
* tree's recursion is stopped at a leaf node if there is no entropy gain, the
* <code>maxDepth</code> has been reached or the local entropy is zero.
*
* @param data
* The untransformed data which will be used for the split in the
* transformed feature representation
* @param targets
* The targets of the instances
* @param parentEntropy
* The parent entropy calculated in the recursion's previous step
* @param nodeToBeFilled
* The tree node which should be filled with the splitting
* information to use it for predictions
* @param depth
* The current depth to be compared to the
 *            {@link ITimeSeriesTreeConfig#maxDepth()}
*/
public void tree(final double[][] data, final int[] targets, final double parentEntropy, final TreeNode<TimeSeriesTreeNodeDecisionFunction> nodeToBeFilled, final int depth) {
int n = targets.length;
ITimeSeriesTreeConfig config = (ITimeSeriesTreeConfig)this.getConfig();
// Sample the intervals used for the feature generation
Pair<List<Integer>, List<Integer>> pairOfIntervalLists = sampleIntervals(data[0].length, config.seed());
// Transform instances
double[][][] transformedInstances = this.transformInstances(data, pairOfIntervalLists);
List<List<Double>> thresholdCandidates = generateThresholdCandidates(pairOfIntervalLists, NUM_THRESH_CANDIDATES, transformedInstances);
// Get unique classes
final List<Integer> classes = new ArrayList<>(new HashSet<>(Arrays.asList(ArrayUtils.toObject(targets))));
// Initialize solution storing variables
double deltaEntropyStar = 0;
double thresholdStar = 0;
int t1t2Star = -1;
int fStar = -1;
double[] eStarPerFeatureType = new double[NUM_FEATURE_TYPES];
for (int i = 0; i < eStarPerFeatureType.length; i++) {
eStarPerFeatureType[i] = Integer.MIN_VALUE;
}
double[] deltaEntropyStarPerFeatureType = new double[NUM_FEATURE_TYPES];
int[] t1t2StarPerFeatureType = new int[NUM_FEATURE_TYPES];
double[] thresholdStarPerFeatureType = new double[NUM_FEATURE_TYPES];
// Search for the best splitting criterion in terms of the best Entrance gain
// for each feature type due to different feature scales
List<Integer> t1 = pairOfIntervalLists.getX();
List<Integer> t2 = pairOfIntervalLists.getY();
for (int i = 0; i < t1.size(); i++) {
for (int k = 0; k < NUM_FEATURE_TYPES; k++) {
for (final double cand : thresholdCandidates.get(k)) {
// Calculate delta entropy and E for f_k(t1,t2) <= cand
double localDeltaEntropy = calculateDeltaEntropy(transformedInstances[k][i], targets, cand, classes, parentEntropy);
double localE = calculateEntrance(localDeltaEntropy, calculateMargin(transformedInstances[k][i], cand));
// Update solution if it has the best Entrance value
if (localE > eStarPerFeatureType[k]) {
eStarPerFeatureType[k] = localE;
deltaEntropyStarPerFeatureType[k] = localDeltaEntropy;
t1t2StarPerFeatureType[k] = i;
thresholdStarPerFeatureType[k] = cand;
}
}
}
}
// Set best solution among all feature types
int bestK = this.getBestSplitIndex(deltaEntropyStarPerFeatureType);
deltaEntropyStar = deltaEntropyStarPerFeatureType[bestK];
t1t2Star = t1t2StarPerFeatureType[bestK];
thresholdStar = thresholdStarPerFeatureType[bestK];
fStar = bestK;
// Check for recursion stop condition (=> leaf node condition)
if (Math.abs(deltaEntropyStar) <= PRECISION_DELTA || depth == config.maxDepth() - 1 || (depth != 0 && Math.abs(deltaEntropyStar - parentEntropy) <= PRECISION_DELTA)) {
// Label this node as a leaf and return majority class
nodeToBeFilled.getValue().classPrediction = TimeSeriesUtil.getMode(targets);
return;
}
// Update node's decision function
nodeToBeFilled.getValue().f = FeatureType.values()[fStar];
nodeToBeFilled.getValue().t1 = t1.get(t1t2Star);
nodeToBeFilled.getValue().t2 = t2.get(t1t2Star);
nodeToBeFilled.getValue().threshold = thresholdStar;
// Assign data instances and the corresponding targets to the child nodes
Pair<List<Integer>, List<Integer>> childDataIndices = getChildDataIndices(transformedInstances, n, fStar, t1t2Star, thresholdStar);
double[][] dataLeft = new double[childDataIndices.getX().size()][data[0].length];
int[] targetsLeft = new int[childDataIndices.getX().size()];
double[][] dataRight = new double[childDataIndices.getY().size()][data[0].length];
int[] targetsRight = new int[childDataIndices.getY().size()];
for (int i = 0; i < childDataIndices.getX().size(); i++) {
dataLeft[i] = data[childDataIndices.getX().get(i)];
targetsLeft[i] = targets[childDataIndices.getX().get(i)];
}
for (int i = 0; i < childDataIndices.getY().size(); i++) {
dataRight[i] = data[childDataIndices.getY().get(i)];
targetsRight[i] = targets[childDataIndices.getY().get(i)];
}
// Prepare the child nodes
TreeNode<TimeSeriesTreeNodeDecisionFunction> leftNode = nodeToBeFilled.addChild(new TimeSeriesTreeNodeDecisionFunction());
TreeNode<TimeSeriesTreeNodeDecisionFunction> rightNode = nodeToBeFilled.addChild(new TimeSeriesTreeNodeDecisionFunction());
// Recursion
this.tree(dataLeft, targetsLeft, deltaEntropyStar, leftNode, depth + 1);
this.tree(dataRight, targetsRight, deltaEntropyStar, rightNode, depth + 1);
}
/**
* Function returning the data indices assigned to the left and the right child
* of a binary tree based on the splitting criterion given by the feature type
* <code>fType</code>, the intervals index <code>t1t2</code> in the transformed
* data set <code>transformedData</code> and the <code>threshold</code>.
*
* @param transformedData
* Transformed data on which the split is calculated
* @param n
* The number of instances
* @param fType
* The feature type to be used for the split
* @param t1t2
* The interval's index in the <code>transformedData</code> to be
* used for the split
* @param threshold
* The threshold to be used for the split
* @return Returns a pair of two lists, storing the data indices for the data
* points assigned to the left child of the current node (X) and the
* data indices assigned to the right child (Y)
*/
public static Pair<List<Integer>, List<Integer>> getChildDataIndices(final double[][][] transformedData, final int n, final int fType, final int t1t2, final double threshold) {
List<Integer> leftIndices = new ArrayList<>();
List<Integer> rightIndices = new ArrayList<>();
// Check for every instance whether it should be assigned to the left or right
// child
for (int i = 0; i < n; i++) {
if (transformedData[fType][t1t2][i] <= threshold) {
leftIndices.add(i);
} else {
rightIndices.add(i);
}
}
return new Pair<>(leftIndices, rightIndices);
}
/**
 * Function returning the feature type used for the split based on the given
 * delta entropy star values. If multiple feature types have generated the same
* deltaEntropy value, a random decision is taken.
*
* @param deltaEntropyStarPerFeatureType
* The delta entropy star value per feature
* @return Returns the feature type index which has been chosen
*/
public int getBestSplitIndex(final double[] deltaEntropyStarPerFeatureType) {
if (deltaEntropyStarPerFeatureType.length != NUM_FEATURE_TYPES) {
throw new IllegalArgumentException("A delta entropy star value has to be given for each feature type!");
}
double max = Integer.MIN_VALUE;
List<Integer> maxIndexes = new ArrayList<>();
// Search for the indices storing the best value
for (int i = 0; i < deltaEntropyStarPerFeatureType.length; i++) {
if (deltaEntropyStarPerFeatureType[i] > max) {
max = deltaEntropyStarPerFeatureType[i];
maxIndexes.clear();
maxIndexes.add(i);
} else if (deltaEntropyStarPerFeatureType[i] == max) {
// Multiple best candidates
maxIndexes.add(i);
}
}
if (maxIndexes.isEmpty()) {
throw new IllegalArgumentException("Could not find any maximum delta entropy star for any feature type for the given array " + Arrays.toString(deltaEntropyStarPerFeatureType) + ".");
}
// Return random index among best ones if multiple solutions exist
if (maxIndexes.size() > 1) {
Collections.shuffle(maxIndexes, new Random(((ITimeSeriesTreeConfig)this.getConfig()).seed()));
}
return maxIndexes.get(0);
}
/**
* Function calculating the delta entropy for a given
* <code>thresholdCandidate</code> and <code>parentEntropy</code>. The values of
* the data are the feature type's values for each instance. The delta entropy
* is formed of the difference between the parent entropy and the weighted sum
* of the entropy values of the children and their instance assignments based on
* the split.
*
* @param dataValues
* The transformed feature type values for each instance
* @param targets
* The targets of each instance
* @param thresholdCandidate
* The threshold candidate to be evaluated
* @param classes
* List storing the classes whose indices can be looked up
* @param parentEntropy
* The parent entropy used for the delta calculation
* @return Returns the delta entropy for the threshold candidate of the current
* feature type
*/
public static double calculateDeltaEntropy(final double[] dataValues, final int[] targets, final double thresholdCandidate, final List<Integer> classes, final double parentEntropy) {
if (dataValues.length != targets.length) {
throw new IllegalArgumentException("The number of data values must be the same as the number of target values!");
}
// Initialization
double[] entropyValues = new double[2];
int numClasses = classes.size();
int[][] classNodeStatistic = new int[2][numClasses];
int[] intCounter = new int[2];
// Calculate class statistics based on the split
for (int i = 0; i < dataValues.length; i++) {
if (dataValues[i] <= thresholdCandidate) {
classNodeStatistic[0][classes.indexOf(targets[i])]++;
intCounter[0]++;
} else {
classNodeStatistic[1][classes.indexOf(targets[i])]++;
intCounter[1]++;
}
}
// Calculate the entropy values for each child
for (int i = 0; i < entropyValues.length; i++) {
double entropySum = 0;
for (int c = 0; c < numClasses; c++) {
double gammaC = 0;
if (intCounter[i] != 0) {
gammaC = (double) classNodeStatistic[i][c] / (double) intCounter[i];
}
entropySum += gammaC < PRECISION_DELTA ? 0 : gammaC * Math.log(gammaC);
}
entropyValues[i] = (-1) * entropySum;
}
// Get the weighted sum of the children based on the proportions of the
// instances assigned to the corresponding nodes
double weightedSum = 0;
for (int i = 0; i < entropyValues.length; i++) {
weightedSum += (double) intCounter[i] / (double) dataValues.length * entropyValues[i];
}
return parentEntropy - weightedSum;
}
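/**
 * Editorial sketch (hypothetical helper, illustration only): the per-child entropy
 * term used above, i.e. -sum_c gamma_c * ln(gamma_c) over the class proportions
 * gamma_c of a node.
 */
public static double nodeEntropy(final double[] classProportions) {
double entropySum = 0;
for (final double gammaC : classProportions) {
entropySum += gammaC < PRECISION_DELTA ? 0 : gammaC * Math.log(gammaC);
}
return (-1) * entropySum;
}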
/**
* Calculates the entrance gain specified by Deng et. al. in the paper's chapter
* 4.1.
*
* @param deltaEntropy
* The delta entropy
* @param margin
* The features margin
* @return Returns the entrance gain
*/
public static double calculateEntrance(final double deltaEntropy, final double margin) {
return deltaEntropy + ENTROPY_ALPHA * margin;
}
/**
* Function calculating the margin between the given
* <code>thresholdCandidate</code> and the nearest feature value from the given
* <code>dataValues</code>.
*
* @param dataValues
* The feature values compared to the candidate
* @param thresholdCandidate
* The threshold candidate which is assessed
* @return Returns the minimum distance among the feature values and the
* threshold candidate
*/
public static double calculateMargin(final double[] dataValues, final double thresholdCandidate) {
double min = Double.MAX_VALUE;
for (int i = 0; i < dataValues.length; i++) {
double localDist = Math.abs(dataValues[i] - thresholdCandidate);
if (localDist < min) {
min = localDist;
}
}
return min;
}
/**
* Method transforming the given <code>dataset</code> using the interval pairs
* specified in <code>T1T2</code> by calculating each {@link FeatureType} for
* every instance and interval pair.
*
* @param dataset
* The dataset which should be transformed
 * @param pairOfIntervalLists
* The start and end interval pairs (see
* {@link TimeSeriesTreeLearningAlgorithm#sampleIntervals(int, int)})
* @return Returns the transformed instances (shape: number of feature types x
* number of interval pairs x number of instances)
*/
public double[][][] transformInstances(final double[][] dataset, final Pair<List<Integer>, List<Integer>> pairOfIntervalLists) {
double[][][] result = new double[NUM_FEATURE_TYPES][pairOfIntervalLists.getX().size()][dataset.length];
int n = dataset.length;
boolean useFeatureCaching = ((ITimeSeriesTreeConfig)this.getConfig()).useFeatureCaching();
for (int i = 0; i < n; i++) {
for (int j = 0; j < pairOfIntervalLists.getX().size(); j++) {
int t1 = pairOfIntervalLists.getX().get(j);
int t2 = pairOfIntervalLists.getY().get(j);
double[] features;
// If caching is used, calculate and store the generated features
if (useFeatureCaching) {
long key = (long) i + (long) dataset[i].length * t1 + (long) dataset[i].length * dataset[i].length * t2; // widen to long to avoid int overflow for long series
if (!this.transformedFeaturesCache.containsKey(key)) {
features = getFeatures(dataset[i], t1, t2, USE_BIAS_CORRECTION);
this.transformedFeaturesCache.put(key, features);
} else {
features = this.transformedFeaturesCache.get(key);
}
} else {
features = getFeatures(dataset[i], t1, t2, USE_BIAS_CORRECTION);
}
result[0][j][i] = features[0];
result[1][j][i] = features[1];
result[2][j][i] = features[2];
}
}
return result;
}
/**
* Function generating threshold candidates for each feature type. It calculates
* the interval [min f_k(t1,t2), max f_k(t1,t2)] among all instances for every
* feature type and every possible interval and generates
* <code>numberOfCandidates</code> candidates using equal-width intervals.
*
* @param pairOfIntervalLists
* The pair of start and end interval pairs (see
* {@link TimeSeriesTreeLearningAlgorithm#sampleIntervals(int, int)})
* @param numOfCandidates
* The number of candidates to be generated per feature type
* @param transformedFeatures
* The transformed data instances
* @return Returns a list consisting of a list for each feature type storing the
* threshold candidates
*/
public static List<List<Double>> generateThresholdCandidates(final Pair<List<Integer>, List<Integer>> pairOfIntervalLists, final int numOfCandidates, final double[][][] transformedFeatures) {
if (numOfCandidates < 1) {
throw new IllegalArgumentException("At least one candidate must be calculated!");
}
List<List<Double>> result = new ArrayList<>();
int numInstances = transformedFeatures[0][0].length;
double[] min = new double[NUM_FEATURE_TYPES];
double[] max = new double[NUM_FEATURE_TYPES];
// Initialize
for (int i = 0; i < NUM_FEATURE_TYPES; i++) {
result.add(new ArrayList<>());
min[i] = Double.MAX_VALUE;
max[i] = Integer.MIN_VALUE;
}
// Find min and max
for (int i = 0; i < NUM_FEATURE_TYPES; i++) {
for (int j = 0; j < numInstances; j++) {
for (int l = 0; l < pairOfIntervalLists.getX().size(); l++) {
if (transformedFeatures[i][l][j] < min[i]) {
min[i] = transformedFeatures[i][l][j];
}
if (transformedFeatures[i][l][j] > max[i]) {
max[i] = transformedFeatures[i][l][j];
}
}
}
}
// Calculate equal-width candidate threshold
for (int i = 0; i < NUM_FEATURE_TYPES; i++) {
double width = (max[i] - min[i]) / (numOfCandidates + 1);
for (int j = 0; j < numOfCandidates; j++) {
result.get(i).add(min[i] + (j + 1) * width);
}
}
return result;
}
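/**
 * Editorial sketch (hypothetical helper, illustration only): the equal-width
 * candidate generation above for a single feature type. For example, min = 0,
 * max = 1 and numOfCandidates = 4 yield the candidates 0.2, 0.4, 0.6 and 0.8.
 */
public static double[] equalWidthCandidates(final double min, final double max, final int numOfCandidates) {
final double width = (max - min) / (numOfCandidates + 1);
final double[] candidates = new double[numOfCandidates];
for (int j = 0; j < numOfCandidates; j++) {
candidates[j] = min + (j + 1) * width;
}
return candidates;
}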
/**
* Function sampling intervals based on the length of the time series
* <code>m</code> and the given <code>seed</code>. Refers to algorithm 1 of the
* paper. The sampled intervals are stored in a pair of lists where each index
* of the first list is related to the same index in the second list. Sampling
* is done without replacement.
*
* @param m
* Number of time series attributes (steps)
* @param seed
* The seed used for the randomized sampling
* @return Returns a pair of lists consisting of the start indices (X) and the
* end indices (Y)
*/
public static Pair<List<Integer>, List<Integer>> sampleIntervals(final int m, final long seed) {
if (m < 1) {
throw new IllegalArgumentException("The series' length m must be greater than zero.");
}
List<Integer> iList1 = new ArrayList<>();
List<Integer> iList2 = new ArrayList<>();
List<Integer> bigW = randomlySampleNoReplacement(IntStream.rangeClosed(1, m).boxed().collect(Collectors.toList()), (int) Math.sqrt(m), seed);
for (int w : bigW) {
List<Integer> tmpSampling = randomlySampleNoReplacement(IntStream.rangeClosed(0, m - w).boxed().collect(Collectors.toList()), (int) Math.sqrt(m - w + 1.0), seed);
iList1.addAll(tmpSampling);
for (int t1 : tmpSampling) {
iList2.add(t1 + w - 1);
}
}
return new Pair<>(iList1, iList2);
}
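/**
 * Editorial sketch (hypothetical helper, illustration only): the number of start
 * indices sampled above for a window length w in a series of length m, i.e.
 * floor(sqrt(m - w + 1)). For m = 16 and w = 7 this yields 3 sampled start indices.
 */
public static int numStartIndicesForWindow(final int m, final int w) {
return (int) Math.sqrt(m - w + 1.0);
}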
/**
* Function sampling a given <code>list</code> randomly without replacement
* using the given <code>seed</code>. <code>sampleSize</code> many elements are
* sampled and returned.
*
* @param list
* List to be sampled from without replacement
* @param sampleSize
* Number of elements to be sampled (must be <= list.size())
* @param seed
* The seed used for the randomized sampling
* @return Returns a list of elements which have been sampled
*/
public static List<Integer> randomlySampleNoReplacement(final List<Integer> list, final int sampleSize, final long seed) {
if (list == null) {
throw new IllegalArgumentException("The list to be sampled from must not be null!");
}
if (sampleSize < 1 || sampleSize > list.size()) {
throw new IllegalArgumentException("Sample size must lower equals the size of the list to be sampled from without replacement and greater zero.");
}
final List<Integer> listCopy = new ArrayList<>(list);
Collections.shuffle(listCopy, new Random(seed));
return listCopy.subList(0, sampleSize);
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/learner/trees/package-info.java
|
/**
 * Tree-based time series classifiers and their learning algorithms.
 *
 * @author Julian Lienen
 */
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.trees;
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/singlelabel/timeseries/util/WekaTimeseriesUtil.java
|
package ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.util;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.lang3.ArrayUtils;
import org.api4.java.ai.ml.core.exception.TrainingException;
import weka.classifiers.Classifier;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
/**
 * Utility class providing conversions between simplified time series datasets and Weka instances.
 */
public class WekaTimeseriesUtil {
private static final String I_NAME = "Instances";
private WekaTimeseriesUtil() {
/* no instantiation desired */
}
/**
 * Maps a univariate simplified time series instance to a Weka instance.
*
* @param instance
* The time series instance storing the time series data
* @return Returns the Weka instance containing the time series
*/
public static Instance simplifiedTSInstanceToWekaInstance(final double[] instance) {
return new DenseInstance(1, instance);
}
/**
* Trains a given Weka <code>classifier</code> using the simplified time series
* data set <code>timeSeriesDataset</code>.
*
* @param classifier
 *            The Weka {@link weka.classifiers.Classifier} object
* @param timeSeriesDataset
* The time series data set which is transformed to Weka instances
* used for the training
* @throws TrainingException
* Throws exception if the training could not be finished
* successfully
*/
public static void buildWekaClassifierFromSimplifiedTS(final Classifier classifier, final ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2 timeSeriesDataset) throws TrainingException {
final Instances trainingInstances = simplifiedTimeSeriesDatasetToWekaInstances(timeSeriesDataset);
try {
classifier.buildClassifier(trainingInstances);
} catch (Exception e) {
throw new TrainingException(String.format("Could not train classifier %s due to a Weka exception.", classifier.getClass().getName()), e);
}
}
/**
* Converts a given simplified {@link ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2}
* object to a Weka Instances object.
*
* @param dataSet
* Data set which is transformed
* @return Transformed Weka Instances object
*/
public static Instances simplifiedTimeSeriesDatasetToWekaInstances(final ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2 dataSet) {
final int[] targets = dataSet.getTargets();
List<Integer> targetList = Arrays.asList(ArrayUtils.toObject(targets));
int min = Collections.min(targetList);
int max = Collections.max(targetList);
List<String> classValues = IntStream.rangeClosed(min, max).boxed().map(String::valueOf).collect(Collectors.toList());
return simplifiedTimeSeriesDatasetToWekaInstances(dataSet, classValues);
}
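// Editorial example: for targets {2, 4, 3} the derived nominal class values are
// "2", "3" and "4", i.e. the closed integer range between the minimum and the
// maximum target value is used as the class attribute's domain.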
/**
* Converts a given simplified {@link ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2}
* object to a Weka Instances object.
*
 * @param dataSet
 *            Data set which is transformed
 * @param classValues
 *            The nominal values used for the class attribute
 * @return Transformed Weka Instances object
*/
public static Instances simplifiedTimeSeriesDatasetToWekaInstances(final ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2 dataSet, final List<String> classValues) {
List<double[][]> matrices = new ArrayList<>();
for (int i = 0; i < dataSet.getNumberOfVariables(); i++) {
matrices.add(dataSet.getValues(i));
}
// Create attributes
final ArrayList<Attribute> attributes = new ArrayList<>();
for (int m = 0; m < matrices.size(); m++) {
double[][] matrix = matrices.get(m);
if (matrix == null) {
continue;
}
for (int i = 0; i < matrix[0].length; i++) {
final Attribute newAtt = new Attribute(String.format("val_%d_%d", m, i));
attributes.add(newAtt);
}
}
// Add class attribute
final int[] targets = dataSet.getTargets();
attributes.add(new Attribute("class", classValues));
final Instances result = new Instances(I_NAME, attributes, dataSet.getNumberOfInstances());
result.setClassIndex(result.numAttributes() - 1);
// Create instances
for (int i = 0; i < dataSet.getNumberOfInstances(); i++) {
double[] concatenatedRow = matrices.get(0)[i];
for (int j = 1; j < matrices.size(); j++) {
concatenatedRow = ArrayUtils.addAll(concatenatedRow, matrices.get(j)[i]);
}
concatenatedRow = ArrayUtils.addAll(concatenatedRow, targets[i]);
// Initialize instance
final Instance inst = new DenseInstance(1, concatenatedRow);
inst.setDataset(result);
result.add(inst);
}
return result;
}
/**
* Converts a double[][] matrix (number of instances x number of attributes) to
* Weka instances without any class attribute.
*
* @param matrix
* The double[][] matrix storing all the attribute values of the
* instances
* @return Returns the Weka Instances object consisting of all instances and the
* attribute values
*/
public static Instances matrixToWekaInstances(final double[][] matrix) {
final ArrayList<Attribute> attributes = new ArrayList<>();
for (int i = 0; i < matrix[0].length; i++) {
final Attribute newAtt = new Attribute("val" + i);
attributes.add(newAtt);
}
Instances wekaInstances = new Instances(I_NAME, attributes, matrix.length);
for (int i = 0; i < matrix.length; i++) { // iterate over the instances (rows), not over the attributes
final Instance inst = new DenseInstance(1, matrix[i]);
inst.setDataset(wekaInstances);
wekaInstances.add(inst);
}
return wekaInstances;
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/timeseries
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/timeseries/learner/package-info.java
|
/**
*
*/
package ai.libs.jaicore.ml.weka.classification.timeseries.learner;
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/timeseries/learner/ensemble/EnsembleProvider.java
|
package ai.libs.jaicore.ml.weka.classification.timeseries.learner.ensemble;
import ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.ensemble.MajorityConfidenceVote;
import weka.classifiers.Classifier;
import weka.classifiers.bayes.NaiveBayes;
import weka.classifiers.functions.Logistic;
import weka.classifiers.functions.MultilayerPerceptron;
import weka.classifiers.functions.SMO;
import weka.classifiers.functions.supportVector.PolyKernel;
import weka.classifiers.lazy.IBk;
import weka.classifiers.meta.RotationForest;
import weka.classifiers.meta.Vote;
import weka.classifiers.trees.J48;
import weka.classifiers.trees.RandomForest;
import weka.core.EuclideanDistance;
/**
* Class statically providing preconfigured ensembles as commonly used in TSC
* implementations.
*
* @author Julian Lienen
*
*/
public class EnsembleProvider {
private EnsembleProvider() {
/* no instantiation desired */
}
/**
* Initializes the CAWPE ensemble model consisting of five classifiers (SMO,
* KNN, J48, Logistic and MLP) using a majority voting strategy. The ensemble
* uses Weka classifiers. It refers to "Heterogeneous ensemble of standard
* classification algorithms" (HESCA) as described in Lines, Jason & Taylor,
* Sarah & Bagnall, Anthony. (2018). Time Series Classification with HIVE-COTE:
* The Hierarchical Vote Collective of Transformation-Based Ensembles. ACM
* Transactions on Knowledge Discovery from Data. 12. 1-35. 10.1145/3182382.
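 * <p>
 * A minimal usage sketch (variable names are illustrative):
 *
 * <pre>{@code
 * Classifier cawpe = EnsembleProvider.provideCAWPEEnsembleModel(42, 5);
 * cawpe.buildClassifier(trainInstances); // weka.core.Instances with the class index set
 * }</pre>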
*
* @param seed
* Seed used within the classifiers and the majority confidence
* voting scheme
* @param numFolds
* Number of folds used within the determination of the classifier
* weights for the {@link MajorityConfidenceVote}
* @return Returns an initialized (but untrained) ensemble model.
* @throws Exception
* Thrown when the initialization has failed
*/
public static Classifier provideCAWPEEnsembleModel(final int seed, final int numFolds) throws Exception {
Classifier[] classifiers = new Classifier[5];
Vote voter = new MajorityConfidenceVote(numFolds, seed);
SMO smo = new SMO();
smo.turnChecksOff();
smo.setBuildCalibrationModels(true);
PolyKernel kl = new PolyKernel();
kl.setExponent(1);
smo.setKernel(kl);
smo.setRandomSeed(seed);
classifiers[0] = smo;
IBk k = new IBk(100);
k.setCrossValidate(true);
EuclideanDistance ed = new EuclideanDistance();
ed.setDontNormalize(true);
k.getNearestNeighbourSearchAlgorithm().setDistanceFunction(ed);
classifiers[1] = k;
J48 c45 = new J48();
c45.setSeed(seed);
classifiers[2] = c45;
classifiers[3] = new Logistic();
classifiers[4] = new MultilayerPerceptron();
voter.setClassifiers(classifiers);
return voter;
}
/**
* Initializes the HIVE COTE ensemble consisting of 7 classifiers using a
* majority voting strategy as described in J. Lines, S. Taylor and A. Bagnall,
* "HIVE-COTE: The Hierarchical Vote Collective of Transformation-Based
* Ensembles for Time Series Classification," 2016 IEEE 16th International
* Conference on Data Mining (ICDM), Barcelona, 2016, pp. 1041-1046. doi:
* 10.1109/ICDM.2016.0133.
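 * <p>
 * A minimal usage sketch (variable names are illustrative):
 *
 * <pre>{@code
 * Classifier hiveCote = EnsembleProvider.provideHIVECOTEEnsembleModel(42L);
 * hiveCote.buildClassifier(trainInstances);
 * }</pre>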
*
 * @param seed
 *            Seed used within the classifiers and the majority confidence
 *            voting scheme (the classifier weights are internally determined
 *            using 5 folds)
* @return Returns the initialized (but untrained) HIVE COTE ensemble model.
*/
public static Classifier provideHIVECOTEEnsembleModel(final long seed) {
Classifier[] classifier = new Classifier[7];
Vote voter = new MajorityConfidenceVote(5, seed);
// SMO poly2
SMO smop = new SMO();
smop.turnChecksOff();
smop.setBuildCalibrationModels(true);
PolyKernel kernel = new PolyKernel();
kernel.setExponent(2);
smop.setKernel(kernel);
smop.setRandomSeed((int) seed);
classifier[0] = smop;
// Random Forest
RandomForest rf = new RandomForest();
rf.setSeed((int) seed);
rf.setNumIterations(500);
classifier[1] = rf;
// Rotation forest
RotationForest rotF = new RotationForest();
rotF.setSeed((int) seed);
rotF.setNumIterations(100);
classifier[2] = rotF;
// NN
IBk nn = new IBk();
classifier[3] = nn;
// Naive Bayes
NaiveBayes nb = new NaiveBayes();
classifier[4] = nb;
// C45
J48 c45 = new J48();
c45.setSeed((int) seed);
classifier[5] = c45;
// SMO linear
SMO smol = new SMO();
smol.turnChecksOff();
smol.setBuildCalibrationModels(true);
PolyKernel linearKernel = new PolyKernel();
linearKernel.setExponent(1);
smol.setKernel(linearKernel);
classifier[6] = smol;
voter.setClassifiers(classifier);
return voter;
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/timeseries/learner/ensemble/package-info.java
|
/**
*
*/
package ai.libs.jaicore.ml.weka.classification.timeseries.learner.ensemble;
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/timeseries/learner/shapelets/ShapeletTransformLearningAlgorithm.java
|
package ai.libs.jaicore.ml.weka.classification.timeseries.learner.shapelets;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.stream.Collectors;
import org.api4.java.ai.ml.core.exception.TrainingException;
import org.api4.java.algorithm.Timeout;
import org.api4.java.algorithm.events.IAlgorithmEvent;
import org.api4.java.algorithm.exceptions.AlgorithmException;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.basic.IOwnerBasedRandomizedAlgorithmConfig;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.learner.ASimplifiedTSCLearningAlgorithm;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.quality.IQualityMeasure;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.shapelets.Shapelet;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.shapelets.search.AMinimumDistanceSearchStrategy;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.shapelets.search.EarlyAbandonMinimumDistanceSearchStrategy;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.util.TimeSeriesUtil;
import ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.learner.ensemble.MajorityConfidenceVote;
import ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.util.WekaTimeseriesUtil;
import ai.libs.jaicore.ml.weka.classification.timeseries.learner.ensemble.EnsembleProvider;
import weka.classifiers.Classifier;
/**
* Algorithm training a ShapeletTransform classifier as described in Jason
* Lines, Luke M. Davis, Jon Hills, and Anthony Bagnall. 2012. A shapelet
* transform for time series classification. In Proceedings of the 18th ACM
* SIGKDD international conference on Knowledge discovery and data mining (KDD
* '12). ACM, New York, NY, USA, 289-297.
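 * <p>
 * A minimal training sketch (assuming a univariate {@link TimeSeriesDataset2} named
 * {@code trainData}):
 *
 * <pre>{@code
 * ShapeletTransformTSClassifier classifier = new ShapeletTransformTSClassifier(10, 42);
 * classifier.getLearningAlgorithm(trainData).call(); // returns the trained classifier
 * }</pre>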
*
* @author Julian Lienen
*
*/
public class ShapeletTransformLearningAlgorithm extends ASimplifiedTSCLearningAlgorithm<Integer, ShapeletTransformTSClassifier> {
public interface IShapeletTransformLearningAlgorithmConfig extends IOwnerBasedRandomizedAlgorithmConfig {
public static final String K_NUMSHAPELETS = "numshapelets";
public static final String K_NUMCLUSTERS = "numclusters";
public static final String K_CLUSTERSHAPELETS = "clustershapelets";
public static final String K_SHAPELETLENGTH_MIN = "minshapeletlength";
public static final String K_SHAPELETLENGTH_MAX = "maxshapeletlength";
public static final String K_USEHIVECOTEENSEMBLE = "usehivecoteensemble";
public static final String K_ESTIMATESHAPELETLENGTHBORDERS = "estimateshapeletlengthborders";
public static final String K_NUMFOLDS = "numfolds";
/**
* Number of shapelets extracted in the shapelet search
*/
@Key(K_NUMSHAPELETS)
@DefaultValue("10")
public int numShapelets();
/**
* Number of shapelet clusters when shapelet clustering is used.
*/
@Key(K_NUMCLUSTERS)
@DefaultValue("10")
public int numClusters();
/**
* Indicator whether clustering of shapelets should be used.
*/
@Key(K_CLUSTERSHAPELETS)
@DefaultValue("false")
public boolean clusterShapelets();
/**
* The minimum length of shapelets to be considered. Defaults to 3.
*/
@Key(K_SHAPELETLENGTH_MIN)
@DefaultValue("3")
public int minShapeletLength();
/**
 * The maximum length of shapelets to be considered. A value of -1 (the default)
 * makes the learning algorithm fall back to the time series length - 1.
 */
@Key(K_SHAPELETLENGTH_MAX)
@DefaultValue("-1")
public int maxShapeletLength();
/**
* Indicator whether the HIVE COTE ensemble should be used. If it is set to
* false, the CAWPE ensemble model will be used instead.
*/
@Key(K_USEHIVECOTEENSEMBLE)
public boolean useHIVECOTEEnsemble();
/**
 * Indicator whether the min max estimation should be performed. Defaults to false,
 * since this property is not set by the classifier's constructors.
 */
@Key(K_ESTIMATESHAPELETLENGTHBORDERS)
@DefaultValue("false")
public boolean estimateShapeletLengthBorders();
/**
* Number of folds used within the {@link MajorityConfidenceVote} scheme for the
* ensembles. Defaults to 5.
*/
@Key(K_NUMFOLDS)
@DefaultValue("5")
public int numFolds();
}
/**
 * SLF4J logger
*/
private static final Logger logger = LoggerFactory.getLogger(ShapeletTransformLearningAlgorithm.class);
/**
* Quality measure function used to assess shapelets.
*/
private final IQualityMeasure qualityMeasure;
/**
* Number of shapelets used for the min and max estimation as described in
* algorithm 4 of the original paper.
*/
private static final int MIN_MAX_ESTIMATION_SAMPLES = 10;
/**
* Static indicator whether the bias (Bessel's) correction should be used.
*/
private static final boolean USE_BIAS_CORRECTION = true;
/**
* Strategy used for the minimum distance search.
*/
private AMinimumDistanceSearchStrategy minDistanceSearchStrategy = new EarlyAbandonMinimumDistanceSearchStrategy(USE_BIAS_CORRECTION);
/**
* Exception message given when the learning algorithm has been interrupted.
*/
private static final String INTERRUPTION_MESSAGE = "Interrupted training due to timeout.";
/**
* Constructs a training algorithm for the {@link ShapeletTransformTSClassifier}
* classifier specified by the given parameters.
*
* @param qualityMeasure
* Quality measure used to assess the shapelets
*/
public ShapeletTransformLearningAlgorithm(final IShapeletTransformLearningAlgorithmConfig config, final ShapeletTransformTSClassifier classifier, final TimeSeriesDataset2 dataset, final IQualityMeasure qualityMeasure) {
super(config, classifier, dataset);
this.qualityMeasure = qualityMeasure;
}
/**
* Training procedure for {@link ShapeletTransformTSClassifier} using the
* training algorithm described in the paper.
*
* @return Returns the trained model
* @throws AlgorithmException
* Thrown if the training could not be finished
* @throws InterruptedException
*/
@Override
public ShapeletTransformTSClassifier call() throws AlgorithmException, InterruptedException {
if (this.getNumCPUs() > 1) {
logger.warn("Multithreading is not supported for LearnShapelets yet. Therefore, the number of CPUs is not considered.");
}
long beginTime = System.currentTimeMillis();
// Extract time series data and the corresponding targets
TimeSeriesDataset2 data = this.getInput();
if (data == null || data.isEmpty()) {
throw new IllegalStateException("The time series input data must not be null or empty!");
}
if (data.isMultivariate()) {
throw new UnsupportedOperationException("Multivariate datasets are not supported.");
}
final double[][] dataMatrix = data.getValuesOrNull(0);
if (dataMatrix == null) {
throw new IllegalArgumentException("Value matrix must be a valid 2D matrix containing the time series values for all instances!");
}
final int[] targetMatrix = data.getTargets();
int minShapeletLength = this.getConfig().minShapeletLength();
int maxShapeletLength = this.getConfig().maxShapeletLength();
long seed = this.getConfig().seed();
ShapeletTransformTSClassifier model = this.getClassifier();
final int timeSeriesLength = dataMatrix[0].length;
// Estimate min and max
if (this.getConfig().estimateShapeletLengthBorders()) {
logger.debug("Starting min max estimation.");
int[] minMax = this.estimateMinMax(dataMatrix, targetMatrix, beginTime);
minShapeletLength = minMax[0];
maxShapeletLength = minMax[1];
logger.debug("Finished min max estimation. min={}, max={}", minShapeletLength, maxShapeletLength);
} else {
if (maxShapeletLength == -1) {
maxShapeletLength = timeSeriesLength - 1;
}
}
if (maxShapeletLength >= timeSeriesLength) {
logger.debug("The maximum shapelet length was larger than the total time series length. Therefore, it will be set to time series length - 1.");
maxShapeletLength = timeSeriesLength - 1;
}
// Determine shapelets
logger.debug("Starting cached shapelet selection with min={}, max={} and k={}...", minShapeletLength, maxShapeletLength, this.getConfig().numShapelets());
List<Shapelet> shapelets = this.shapeletCachedSelection(dataMatrix, minShapeletLength, maxShapeletLength, this.getConfig().numShapelets(), targetMatrix, beginTime);
logger.debug("Finished cached shapelet selection. Extracted {} shapelets.", shapelets.size());
// Cluster shapelets
if (this.getConfig().clusterShapelets()) {
logger.debug("Starting shapelet clustering...");
shapelets = this.clusterShapelets(shapelets, this.getConfig().numClusters(), beginTime);
logger.debug("Finished shapelet clustering. Staying with {} shapelets.", shapelets.size());
}
model.setShapelets(shapelets);
// Transforming the data using the extracted shapelets
logger.debug("Transforming the training data using the extracted shapelets.");
TimeSeriesDataset2 transfTrainingData = shapeletTransform(data, model.getShapelets(), this.getTimeout(), beginTime, this.minDistanceSearchStrategy);
logger.debug("Finished transforming the training data.");
// Initialize Weka ensemble
logger.debug("Initializing ensemble classifier...");
Classifier classifier = null;
try {
classifier = this.getConfig().useHIVECOTEEnsemble() ? EnsembleProvider.provideHIVECOTEEnsembleModel(seed) : EnsembleProvider.provideCAWPEEnsembleModel((int) seed, this.getConfig().numFolds());
} catch (Exception e1) {
throw new AlgorithmException("Could not train model due to ensemble exception.", e1);
}
logger.debug("Initialized ensemble classifier.");
// Train Weka ensemble using the data
logger.debug("Starting ensemble training...");
try {
WekaTimeseriesUtil.buildWekaClassifierFromSimplifiedTS(classifier, transfTrainingData);
} catch (TrainingException e) {
throw new AlgorithmException("Could not train classifier due to a training exception.", e);
}
logger.debug("Finished ensemble training.");
model.setClassifier(classifier);
return model;
}
/**
* Implements the min max estimation (algorithm 4 in the paper) for an initial
* value used as parameters for the shapelet selection.
*
* @param data
* Input data which is sampled from
* @param classes
* Classes of the input data instances
* @param beginTime
* Start timer used for the timeout checks
* @return Returns an int[] object of length 2 storing the min (index 0) and the
* max (index 1) estimation
* @throws InterruptedException
*/
private int[] estimateMinMax(final double[][] data, final int[] classes, final long beginTime) throws InterruptedException {
int[] result = new int[2];
long numInstances = data.length;
List<Shapelet> shapelets = new ArrayList<>();
Random rand = new Random(this.getConfig().seed()); // create the generator once; re-seeding per iteration would draw the same sample every time
for (int i = 0; i < MIN_MAX_ESTIMATION_SAMPLES; i++) {
double[][] tmpMatrix = new double[MIN_MAX_ESTIMATION_SAMPLES][data[0].length];
int[] tmpClasses = new int[MIN_MAX_ESTIMATION_SAMPLES];
for (int j = 0; j < MIN_MAX_ESTIMATION_SAMPLES; j++) {
int nextIndex = (int) (rand.nextInt() % numInstances);
if (nextIndex < 0) {
nextIndex += numInstances;
}
tmpMatrix[j] = Arrays.copyOf(data[nextIndex], tmpMatrix[j].length); // one full-row copy suffices; the former loop over columns re-copied the same row
tmpClasses[j] = classes[nextIndex];
}
shapelets.addAll(this.shapeletCachedSelection(tmpMatrix, 3, data[0].length, 10, tmpClasses, beginTime));
}
Shapelet.sortByLengthAsc(shapelets);
logger.debug("Number of shapelets found in min/max estimation: {}", shapelets.size());
// Min: length at position 25 (the 25th percentile of the typically 100 sampled shapelets)
result[0] = shapelets.get(25).getLength();
// Max: length at position 75 (the 75th percentile)
result[1] = shapelets.get(75).getLength();
return result;
}
/**
* Clusters the given <code>shapelets</code> into <code>noClusters</code>
* clusters (cf. algorithm 6 of the original paper).
*
* @param shapelets
* Shapelets to be clustered.
* @param noClusters
 *            Number of clusters to be used, i.e., the size of the output list
* @param beginTime
* Begin time of the training execution used for the timeout checks
* @return Returns the clustered shapelets
* @throws InterruptedException
* Thrown when a timeout occurred
*/
public List<Shapelet> clusterShapelets(final List<Shapelet> shapelets, final int noClusters, final long beginTime) throws InterruptedException {
final List<List<Shapelet>> clusters = new ArrayList<>();
for (final Shapelet shapelet : shapelets) {
List<Shapelet> list = new ArrayList<>();
list.add(shapelet);
clusters.add(list);
}
// Get clusters
while (clusters.size() > noClusters) {
if ((System.currentTimeMillis() - beginTime) > this.getTimeout().milliseconds()) {
throw new InterruptedException(INTERRUPTION_MESSAGE);
}
INDArray distanceMatrix = Nd4j.create(clusters.size(), clusters.size());
for (int i = 0; i < clusters.size(); i++) {
for (int j = 0; j < clusters.size(); j++) {
double distance = 0;
int comparisons = clusters.get(i).size() * clusters.get(j).size();
for (int l = 0; l < clusters.get(i).size(); l++) {
for (int k = 0; k < clusters.get(j).size(); k++) {
Shapelet cl = clusters.get(i).get(l);
Shapelet ck = clusters.get(j).get(k);
if (cl.getLength() > ck.getLength()) {
distance += this.minDistanceSearchStrategy.findMinimumDistance(ck, cl.getData());
} else {
distance += this.minDistanceSearchStrategy.findMinimumDistance(cl, ck.getData());
}
}
}
distanceMatrix.putScalar(new int[] { i, j }, distance / comparisons);
}
}
double best = Double.MAX_VALUE;
int x = 0;
int y = 0;
for (int i = 0; i < distanceMatrix.shape()[0]; i++) {
for (int j = 0; j < distanceMatrix.shape()[1]; j++) {
if (distanceMatrix.getDouble(i, j) < best && i != j) {
x = i;
y = j;
best = distanceMatrix.getDouble(i, j);
}
}
}
final List<Shapelet> clusterUpdate = clusters.get(x);
clusterUpdate.addAll(clusters.get(y));
Shapelet maxClusterShapelet = Shapelet.getHighestQualityShapeletInList(clusterUpdate);
if (x > y) {
clusters.remove(x);
clusters.remove(y);
} else {
clusters.remove(y);
clusters.remove(x);
}
clusters.add(new ArrayList<>(Arrays.asList(maxClusterShapelet))); // use a mutable list, since this cluster may be merged into again in a later iteration
}
// Flatten list
return clusters.stream().flatMap(List::stream).collect(Collectors.toList());
}
/**
* Function implementing the shapelet cached selection described in algorithm 3
* in the original paper. The function searches for the best k shapelets based
* on the quality measure {@link ShapeletTransformLearningAlgorithm#qualityMeasure}.
*
* @param data
* The training data which is used for cache extraction and
* evaluation
* @param min
* The minimal length of the shapelets
* @param max
* The maximal length of the shapelets
* @param k
* The number of shapelets to be kept
* @param classes
* The classes of the instances
* @param beginTime
* Begin time of the training execution used for the timeout checks
* @return Returns the k best shapelets found in the search procedure
* @throws InterruptedException
* Thrown when a timeout occurred
*/
private List<Shapelet> shapeletCachedSelection(final double[][] data, final int min, final int max, final int k, final int[] classes, final long beginTime) throws InterruptedException {
List<Map.Entry<Shapelet, Double>> kShapelets = new ArrayList<>();
final int numInstances = data.length;
for (int i = 0; i < numInstances; i++) {
if ((System.currentTimeMillis() - beginTime) > this.getTimeout().milliseconds()) {
throw new InterruptedException(INTERRUPTION_MESSAGE);
}
List<Map.Entry<Shapelet, Double>> shapelets = new ArrayList<>();
for (int l = min; l < max; l++) {
Set<Shapelet> candidates = generateCandidates(data[i], l, i);
for (Shapelet s : candidates) {
List<Double> distances = this.findDistances(s, data);
double quality = this.qualityMeasure.assessQuality(distances, classes);
s.setDeterminedQuality(quality);
shapelets.add(new AbstractMap.SimpleEntry<>(s, quality));
}
}
sortByQualityDesc(shapelets);
shapelets = removeSelfSimilar(shapelets);
kShapelets = merge(k, kShapelets, shapelets);
}
return kShapelets.stream().map(Entry::getKey).collect(Collectors.toList());
}
/**
* Function merging shapelet lists based on their quality scores. Only the k
* best shapelets of the union of both lists are retained.
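 * Note that the given <code>kShapelets</code> list is sorted and truncated in place and is
 * also the list that is returned.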
*
* @param k
* Number of elements to be retained
* @param kShapelets
* The previous best shapelets
* @param shapelets
* The new shapelets to be added to the top-k list
* @return Returns the list containing the k best shapelets only
*/
public static List<Map.Entry<Shapelet, Double>> merge(final int k, final List<Map.Entry<Shapelet, Double>> kShapelets, final List<Map.Entry<Shapelet, Double>> shapelets) {
kShapelets.addAll(shapelets);
// Retain only k
sortByQualityDesc(kShapelets);
int numRemoveItems = kShapelets.size() - k;
for (int i = 0; i < numRemoveItems; i++) {
kShapelets.remove(kShapelets.size() - 1);
}
return kShapelets;
}
/**
* Sorts a list of shapelets together with their quality values descending based
* on the qualities.
*
* @param list
* The list to be sorted in place
*/
private static void sortByQualityDesc(final List<Map.Entry<Shapelet, Double>> list) {
list.sort((e1, e2) -> (-1) * e1.getValue().compareTo(e2.getValue()));
}
/**
 * Function removing self-similar shapelets from a list storing shapelets and
 * their quality entries. See
* {@link ShapeletTransformLearningAlgorithm#isSelfSimilar(Shapelet, Shapelet)}.
*
* @param shapelets
* Shapelets to be compared
* @return A sublist from the given <code>shapelets</code> list which does not
* contain any self-similar shapelets
*/
public static List<Map.Entry<Shapelet, Double>> removeSelfSimilar(final List<Map.Entry<Shapelet, Double>> shapelets) {
List<Map.Entry<Shapelet, Double>> result = new ArrayList<>();
for (final Map.Entry<Shapelet, Double> entry : shapelets) {
// Check whether there is already a self similar shapelet in the result list
boolean selfSimilarExisting = false;
for (final Map.Entry<Shapelet, Double> s : result) {
if (isSelfSimilar(entry.getKey(), s.getKey())) {
selfSimilarExisting = true;
}
}
if (!selfSimilarExisting) {
result.add(entry);
}
}
return result;
}
/**
 * Function checking whether the two given shapelets are self-similar, i.e., if
 * their index ranges overlap. Shapelets stemming from different instances are
 * never considered self-similar.
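 * For example, two shapelets from the same instance starting at indices 2 and 4 with
 * lengths 4 and 3 cover the overlapping positions 4 and 5 and are thus self-similar.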
*
* @param s1
* First shapelet to be compared
* @param s2
* Second shapelet to be compared
* @return Returns whether the indices of the given shapelets overlap
*/
private static boolean isSelfSimilar(final Shapelet s1, final Shapelet s2) {
if (s1.getInstanceIndex() == s2.getInstanceIndex()) {
return (s1.getStartIndex() < (s2.getStartIndex() + s2.getLength())) && (s2.getStartIndex() < (s1.getStartIndex() + s1.getLength()));
} else {
return false;
}
}
/**
* Function finding the minimum single squared Euclidean distance for each
* instance among all of its subsequences compared to the shapelet
* <code>s</code>.
*
* @param s
* Shapelet which is compared to the instances
* @param matrix
* Matrix storing the data instance vectors
* @return Returns the list of all minimum distances of the shapelet and all the
* instances
*/
public List<Double> findDistances(final Shapelet s, final double[][] matrix) {
List<Double> result = new ArrayList<>();
for (int i = 0; i < matrix.length; i++) {
result.add(this.minDistanceSearchStrategy.findMinimumDistance(s, matrix[i]));
}
return result;
}
/**
 * Function generating shapelet candidates for a given instance vector
* <code>data</code>, the length <code>l</code> and the candidate index which is
* used to identify the source of the shapelet's data.
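 * For example, a vector of length 4 with {@code l = 2} yields {@code 4 - 2 + 1 = 3}
 * candidates, one per possible start index.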
*
* @param data
* Data vector from which the values are extracted
* @param l
* The length of the generated candidate shapelet
* @param candidateIndex
* Instance index which is used to identify the generated shapelets
* @return Returns a set of shapelet candidates with the length <code>l</code>
*/
public static Set<Shapelet> generateCandidates(final double[] data, final int l, final int candidateIndex) {
Set<Shapelet> result = new HashSet<>();
for (int i = 0; i < data.length - l + 1; i++) {
double[] tmpData = TimeSeriesUtil.getInterval(data, i, i + l);
result.add(new Shapelet(TimeSeriesUtil.zNormalize(tmpData, USE_BIAS_CORRECTION), i, l, candidateIndex));
}
return result;
}
/**
* Performs a shapelet transform on a complete <code>dataSet</code>. See
* {@link ShapeletTransformLearningAlgorithm#shapeletTransform(double[], List)}.
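 * <p>
 * A minimal call sketch (a {@code null} timeout disables the timeout check, making the
 * begin time irrelevant):
 *
 * <pre>{@code
 * TimeSeriesDataset2 transformed = ShapeletTransformLearningAlgorithm.shapeletTransform(dataSet, shapelets, null, -1,
 * 	new EarlyAbandonMinimumDistanceSearchStrategy(true));
 * }</pre>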
*
* @param dataSet
* Data set to be transformed
* @param shapelets
* Shapelets used as new feature dimensions
* @param timeout
* Timeout compared to the current time difference to the
* <code>beginTime</code>
* @param beginTime
* System time in ms when the training algorithm has started
* @param searchStrategy
* Search strategy used to find the minimum distance from a shapelet
* to the time series
* @return Returns the transformed data set
* @throws InterruptedException
* Thrown if there was a timeout
*/
public static TimeSeriesDataset2 shapeletTransform(final TimeSeriesDataset2 dataSet, final List<Shapelet> shapelets, final Timeout timeout, final long beginTime, final AMinimumDistanceSearchStrategy searchStrategy)
throws InterruptedException {
// The original paper only considers univariate data, so univariate input is assumed here
if (dataSet.isMultivariate()) {
throw new UnsupportedOperationException("Multivariate datasets are not supported yet!");
}
double[][] timeSeries = dataSet.getValuesOrNull(0);
if (timeSeries == null) {
throw new IllegalArgumentException("Time series matrix must be a valid 2d matrix!");
}
double[][] transformedTS = new double[timeSeries.length][];
for (int i = 0; i < timeSeries.length; i++) {
if (timeout != null && (System.currentTimeMillis() - beginTime) > timeout.milliseconds()) {
throw new InterruptedException(INTERRUPTION_MESSAGE);
}
transformedTS[i] = shapeletTransform(timeSeries[i], shapelets, searchStrategy);
}
dataSet.replace(0, transformedTS, dataSet.getTimestampsOrNull(0));
return dataSet;
}
/**
* Function transforming the given <code>instance</code> into the new feature
* space spanned by the shapelets. Uses the minimum squared Euclidean distance
* of the corresponding shapelets to the instance as feature values.
*
* @param instance
* The instance to be transformed
* @param shapelets
* The shapelets to be used as new feature dimensions
* @param searchStrategy
* Search strategy used to find the minimum distance from a shapelet
* to the time series
* @return Returns the transformed instance feature vector
*/
public static double[] shapeletTransform(final double[] instance, final List<Shapelet> shapelets, final AMinimumDistanceSearchStrategy searchStrategy) {
double[] transformedTS = new double[shapelets.size()];
for (int j = 0; j < shapelets.size(); j++) {
transformedTS[j] = searchStrategy.findMinimumDistance(shapelets.get(j), instance);
}
return transformedTS;
}
/**
* Getter for {@link ShapeletTransformLearningAlgorithm#minDistanceSearchStrategy}.
*
* @return the minDistanceSearchStrategy
*/
public AMinimumDistanceSearchStrategy getMinDistanceSearchStrategy() {
return this.minDistanceSearchStrategy;
}
/**
* Setter for {@link ShapeletTransformLearningAlgorithm#minDistanceSearchStrategy}.
*
* @param minDistanceSearchStrategy
* the minDistanceSearchStrategy to set
*/
public void setMinDistanceSearchStrategy(final AMinimumDistanceSearchStrategy minDistanceSearchStrategy) {
this.minDistanceSearchStrategy = minDistanceSearchStrategy;
}
/**
* {@inheritDoc}
*/
@Override
public void registerListener(final Object listener) {
throw new UnsupportedOperationException("The operation to be performed is not supported.");
}
/**
* {@inheritDoc}
*/
@Override
public IAlgorithmEvent nextWithException() {
throw new UnsupportedOperationException("The operation to be performed is not supported.");
}
/**
* {@inheritDoc}
*/
@Override
public IShapeletTransformLearningAlgorithmConfig getConfig() {
return (IShapeletTransformLearningAlgorithmConfig) super.getConfig();
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/timeseries/learner/shapelets/ShapeletTransformTSClassifier.java
|
package ai.libs.jaicore.ml.weka.classification.timeseries.learner.shapelets;
import java.util.ArrayList;
import java.util.List;
import org.aeonbits.owner.ConfigCache;
import org.api4.java.ai.ml.core.exception.PredictionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ai.libs.jaicore.basic.IOwnerBasedRandomizedAlgorithmConfig;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.dataset.TimeSeriesDataset2;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.learner.ASimplifiedTSClassifier;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.quality.FStat;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.quality.IQualityMeasure;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.shapelets.Shapelet;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.shapelets.search.AMinimumDistanceSearchStrategy;
import ai.libs.jaicore.ml.classification.singlelabel.timeseries.shapelets.search.EarlyAbandonMinimumDistanceSearchStrategy;
import ai.libs.jaicore.ml.weka.classification.singlelabel.timeseries.util.WekaTimeseriesUtil;
import ai.libs.jaicore.ml.weka.classification.timeseries.learner.shapelets.ShapeletTransformLearningAlgorithm.IShapeletTransformLearningAlgorithmConfig;
import weka.classifiers.Classifier;
import weka.core.Instance;
import weka.core.Instances;
/**
* Class for a ShapeletTransform classifier as described in Jason Lines, Luke M.
* Davis, Jon Hills, and Anthony Bagnall. 2012. A shapelet transform for time
* series classification. In Proceedings of the 18th ACM SIGKDD international
* conference on Knowledge discovery and data mining (KDD '12). ACM, New York,
* NY, USA, 289-297.
*
* The classifier model is built of shapelets which are used for the
* transformation of instances to the new feature space built by the shapelets
* as dimensions. The feature values are the minimum distances of a time series
* to the feature dimension's shapelet. An ensemble classifier trained on the
* derived representation is then used for prediction.
*
* This classifier only supports univariate time series prediction.
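 * <p>
 * A minimal end-to-end sketch (dataset names are illustrative):
 *
 * <pre>{@code
 * ShapeletTransformTSClassifier clf = new ShapeletTransformTSClassifier(10, 42);
 * clf.getLearningAlgorithm(trainData).call(); // trains the model in place
 * List<Integer> predictions = clf.predict(testData);
 * }</pre>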
*
* @author Julian Lienen
*
*/
public class ShapeletTransformTSClassifier extends ASimplifiedTSClassifier<Integer> {
/**
* Log4j logger
*/
private static final Logger LOGGER = LoggerFactory.getLogger(ShapeletTransformTSClassifier.class);
/**
* Model shapelets used as dimensions for the transformation.
*/
private List<Shapelet> shapelets;
/**
* Weka ensemble classifier predicting the class based on the derived feature
* representation.
*/
private Classifier classifier;
/**
 * Strategy used for the minimum distance search. Initialized to the early-abandon
 * strategy with bias correction, matching the learning algorithm's default, so that
 * prediction works without further setup.
 */
private AMinimumDistanceSearchStrategy minDistanceSearchStrategy = new EarlyAbandonMinimumDistanceSearchStrategy(true);
private final IShapeletTransformLearningAlgorithmConfig config;
private final IQualityMeasure qualityMeasure;
/**
 * Constructs a Shapelet Transform classifier using <code>k</code> shapelets,
 * <code>k/2</code> clusters of the shapelets after shapelet extraction and the
* {@link FStat} quality measure.
*
* @param k
* Number of shapelets searched for and used as shapelet clustering
* input
* @param seed
* Seed for randomized operations
*/
public ShapeletTransformTSClassifier(final int k, final int seed) {
this(k, new FStat(), seed, true);
}
/**
 * Constructs a Shapelet Transform classifier using <code>k</code> shapelets,
 * <code>k/2</code> clusters of the shapelets after shapelet extraction (if
 * <code>clusterShapelets</code> is true) and the quality measure function
* <code>qm</code>.
*
* @param k
* Number of shapelets searched for and used as shapelet clustering
* input if enabled
* @param qm
* Quality measure function to be used to assess shapelets
* @param seed
 *            Seed for randomized operations
* @param clusterShapelets
* Indicator whether shapelet clustering should be used after
* extracting the best k shapelets
*/
public ShapeletTransformTSClassifier(final int k, final IQualityMeasure qm, final int seed, final boolean clusterShapelets) {
this(k, (k / 2), qm, seed, clusterShapelets, 3, -1, false, 1); // -1: derive the maximum shapelet length from the data
}
/**
 * Constructs a Shapelet Transform classifier using <code>k</code> shapelets,
 * <code>k/2</code> clusters of the shapelets after shapelet extraction (if
 * <code>clusterShapelets</code> is true) and the quality measure function
* <code>qm</code>.
*
* @param k
* Number of shapelets searched for and used as shapelet clustering
* input if enabled
* @param numClusters
* Number of clusters into which the shapelets are clustered
* @param qm
* Quality measure function to be used to assess shapelets
* @param seed
 *            Seed for randomized operations
* @param clusterShapelets
* Indicator whether shapelet clustering should be used after
* extracting the best k shapelets
*/
public ShapeletTransformTSClassifier(final int k, final int numClusters, final IQualityMeasure qm, final int seed, final boolean clusterShapelets) {
this(k, numClusters, qm, seed, clusterShapelets, 3, -1, false, 1); // -1: derive the maximum shapelet length from the data
}
/**
 * Constructs a Shapelet Transform classifier using <code>k</code> shapelets,
 * <code>k/2</code> clusters of the shapelets after shapelet extraction (if
 * <code>clusterShapelets</code> is true) and the quality measure function
* <code>qm</code>. <code>minShapeletLength</code> and
* <code>maxShapeletLength</code> specify the shapelet length borders, while
* <code>useHIVECOTEEnsemble</code> defines whether the HIVE COTE ensemble or
* the CAWPE ensemble should be used.
*
 * @param k
 *            Number of shapelets searched for and used as shapelet clustering
 *            input if enabled
 * @param numClusters
 *            Number of clusters into which the shapelets are clustered (if enabled)
* @param qm
* Quality measure function to be used to assess shapelets
* @param seed
 *            Seed for randomized operations
* @param clusterShapelets
* Indicator whether shapelet clustering should be used after
* extracting the best k shapelets
* @param minShapeletLength
* The minimal length of the shapelets
* @param maxShapeletLength
* The maximal length of the shapelets
* @param useHIVECOTEEnsemble
* Indicator whether the HIVE COTE ensemble should be used (CAWPE
* otherwise)
 * @param numFolds
 *            Number of folds used to determine the classifier weights of the
 *            ensemble (see {@link IShapeletTransformLearningAlgorithmConfig#numFolds()})
*/
public ShapeletTransformTSClassifier(final int k, final int numClusters, final IQualityMeasure qm, final int seed, final boolean clusterShapelets, final int minShapeletLength, final int maxShapeletLength,
final boolean useHIVECOTEEnsemble, final int numFolds) {
super();
this.config = ConfigCache.getOrCreate(IShapeletTransformLearningAlgorithmConfig.class);
this.config.setProperty(IShapeletTransformLearningAlgorithmConfig.K_NUMSHAPELETS, "" + k);
this.config.setProperty(IOwnerBasedRandomizedAlgorithmConfig.K_SEED, "" + seed);
this.config.setProperty(IShapeletTransformLearningAlgorithmConfig.K_CLUSTERSHAPELETS, "" + clusterShapelets);
this.config.setProperty(IShapeletTransformLearningAlgorithmConfig.K_SHAPELETLENGTH_MIN, "" + minShapeletLength);
this.config.setProperty(IShapeletTransformLearningAlgorithmConfig.K_SHAPELETLENGTH_MAX, "" + maxShapeletLength);
this.config.setProperty(IShapeletTransformLearningAlgorithmConfig.K_USEHIVECOTEENSEMBLE, "" + useHIVECOTEEnsemble);
this.config.setProperty(IShapeletTransformLearningAlgorithmConfig.K_NUMFOLDS, "" + numFolds);
this.config.setProperty(IShapeletTransformLearningAlgorithmConfig.K_NUMCLUSTERS, "" + numClusters);
this.qualityMeasure = qm;
}
/**
* Getter for {@link ShapeletTransformTSClassifier#shapelets}.
*
* @return The actual list of shapelets used for the transformation
*/
public List<Shapelet> getShapelets() {
return this.shapelets;
}
/**
* Setter for {@link ShapeletTransformTSClassifier#shapelets}.
*
* @param shapelets
* The new list of shapelets to be set
*/
public void setShapelets(final List<Shapelet> shapelets) {
this.shapelets = shapelets;
}
/**
* Setter for {@link ShapeletTransformTSClassifier#classifier}.
*
* @param classifier
* The classifier to be set
*/
public void setClassifier(final Classifier classifier) {
this.classifier = classifier;
}
/**
* {@inheritDoc}
*/
@Override
public Integer predict(final double[] univInstance) throws PredictionException {
if (!this.isTrained()) {
throw new PredictionException("Model has not been built before!");
}
double[] transformedInstance = ShapeletTransformLearningAlgorithm.shapeletTransform(univInstance, this.shapelets, this.minDistanceSearchStrategy);
Instance inst = WekaTimeseriesUtil.simplifiedTSInstanceToWekaInstance(transformedInstance);
try {
return (int) Math.round(this.classifier.classifyInstance(inst));
} catch (Exception e) {
throw new PredictionException(String.format("Could not predict Weka instance %s.", inst.toString()), e);
}
}
/**
* {@inheritDoc}
*/
@Override
public Integer predict(final List<double[]> multivInstance) throws PredictionException {
LOGGER.warn("Dataset to be predicted is multivariate but only first time series (univariate) will be considered.");
return this.predict(multivInstance.get(0));
}
/**
* {@inheritDoc}
*/
@Override
public List<Integer> predict(final TimeSeriesDataset2 dataset) throws PredictionException {
if (!this.isTrained()) {
throw new PredictionException("Model has not been built before!");
}
// Multivariate datasets are not supported by the original paper's version
if (dataset.isMultivariate()) {
LOGGER.warn("Dataset to be predicted is multivariate but only first time series (univariate) will be considered.");
}
// Transforming the dataset using the extracted shapelets
LOGGER.debug("Transforming dataset...");
TimeSeriesDataset2 transformedDataset = null;
try {
transformedDataset = ShapeletTransformLearningAlgorithm.shapeletTransform(dataset, this.shapelets, null, -1, this.minDistanceSearchStrategy);
} catch (InterruptedException e1) {
Thread.currentThread().interrupt();
return new ArrayList<>();
}
LOGGER.debug("Transformed dataset.");
double[][] timeSeries = transformedDataset.getValuesOrNull(0);
if (timeSeries == null) {
throw new IllegalArgumentException("Dataset matrix of the instances to be predicted must not be null!");
}
// Prepare transformed Weka instances to let the ensemble predict
LOGGER.debug("Converting time series dataset to Weka instances...");
Instances insts = WekaTimeseriesUtil.simplifiedTimeSeriesDatasetToWekaInstances(transformedDataset);
LOGGER.debug("Converted time series dataset to Weka instances.");
// Prediction
LOGGER.debug("Starting prediction...");
final List<Integer> predictions = new ArrayList<>();
for (final Instance inst : insts) {
try {
double prediction = this.classifier.classifyInstance(inst);
predictions.add((int) Math.round(prediction));
} catch (Exception e) {
throw new PredictionException(String.format("Could not predict Weka instance %s.", inst.toString()), e);
}
}
LOGGER.debug("Finished prediction.");
return predictions;
}
/**
* Getter for {@link ShapeletTransformTSClassifier#minDistanceSearchStrategy}.
*
* @return the minDistanceSearchStrategy
*/
public AMinimumDistanceSearchStrategy getMinDistanceSearchStrategy() {
return this.minDistanceSearchStrategy;
}
@Override
public ShapeletTransformLearningAlgorithm getLearningAlgorithm(final TimeSeriesDataset2 dataset) {
return new ShapeletTransformLearningAlgorithm(this.config, this, dataset, this.qualityMeasure);
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/timeseries/learner
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/classification/timeseries/learner/shapelets/package-info.java
|
/**
*
*/
package ai.libs.jaicore.ml.weka.classification.timeseries.learner.shapelets;
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/dataset/IWekaInstance.java
|
package ai.libs.jaicore.ml.weka.dataset;
import org.apache.commons.math3.ml.clustering.Clusterable;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance;
import org.api4.java.common.attributedobjects.IElementDecorator;
import weka.core.Instance;
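/**
 * A labeled instance backed by a Weka {@link Instance}. The instance is also exposed as a
 * {@link Clusterable} point so that it can directly be used with Apache Commons Math clustering.
 */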
public interface IWekaInstance extends ILabeledInstance, IElementDecorator<Instance>, Clusterable {
@Override
public Instance getElement();
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/dataset/IWekaInstances.java
|
package ai.libs.jaicore.ml.weka.dataset;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset;
import org.api4.java.ai.ml.core.exception.DatasetCreationException;
import weka.core.Instances;
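/**
 * A labeled dataset view that is backed by a Weka {@link Instances} object.
 */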
public interface IWekaInstances extends ILabeledDataset<IWekaInstance> {
public Instances getList();
public default Instances getInstances() {
return this.getList();
}
@Override
public IWekaInstances createEmptyCopy() throws DatasetCreationException, InterruptedException;
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/dataset/WekaInstance.java
|
package ai.libs.jaicore.ml.weka.dataset;
import static ai.libs.jaicore.ml.weka.dataset.WekaInstancesUtil.transformInstanceToWekaInstance;
import java.util.stream.IntStream;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.api4.java.ai.ml.core.dataset.schema.ILabeledInstanceSchema;
import org.api4.java.ai.ml.core.dataset.serialization.UnsupportedAttributeTypeException;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance;
import ai.libs.jaicore.basic.sets.ElementDecorator;
import ai.libs.jaicore.ml.weka.WekaUtil;
import weka.core.Attribute;
import weka.core.Instance;
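/**
 * Decorator implementing {@link IWekaInstance} on top of a Weka {@link Instance}. Numeric
 * attribute values are exposed as doubles, categorical values as ints.
 */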
public class WekaInstance extends ElementDecorator<Instance> implements IWekaInstance {
public WekaInstance(final Instance instance) {
super(instance);
}
public WekaInstance(final ILabeledInstanceSchema schema, final ILabeledInstance instance) throws UnsupportedAttributeTypeException {
super(transformInstanceToWekaInstance(schema, instance));
if (schema.getNumAttributes() != instance.getNumAttributes()) {
throw new IllegalStateException("Number of attributes in the instance deviate from those in the scheme.");
}
}
@Override
public Number getLabel() {
boolean isInteger = this.getElement().classAttribute().isNominal();
double classValue = this.getElement().classValue();
if (isInteger) {
int intClassValue = (int) classValue; // keep the explicit int variable: returning the cast inside a conditional expression together with the double branch would promote it back to double and box it as a Double instead of an Integer (standard numeric promotion, not a Java bug)
return intClassValue;
} else {
return classValue;
}
}
@Override
public Object getAttributeValue(final int pos) {
Instance inst = this.getElement();
double val = inst.value(pos);
if (inst.attribute(pos).isNumeric()) {
return val;
} else {
return (int) val; // map categorical values to ints instead of doubles
}
}
@Override
public Object[] getAttributes() {
return IntStream.range(0, this.getElement().numAttributes()).filter(x -> x != this.getElement().classIndex()).mapToObj(x -> this.getElement().attribute(x)).map(this::transformAttributeValueToData).toArray();
}
private Object transformAttributeValueToData(final Attribute att) {
double internalValue = this.getElement().value(att);
return att.isNumeric() ? internalValue : att.value((int) internalValue);
}
@Override
public double[] getPoint() {
return this.getElement().toDoubleArray();
}
@Override
public double getPointValue(final int pos) {
return this.getElement().value(pos);
}
@Override
public void removeColumn(final int columnPos) {
throw new UnsupportedOperationException("Not yet implemented!");
}
@Override
public void setLabel(final Object obj) {
if (obj instanceof String) {
this.getElement().setClassValue((String) obj);
} else if (obj instanceof Double) {
this.getElement().setClassValue((Double) obj);
} else {
throw new IllegalArgumentException("The value for the label must not be of type " + obj.getClass().getName() + ". The only valid types are Double and String.");
}
}
@Override
public void setAttributeValue(final int pos, final Object value) {
if (value instanceof String) {
this.getElement().setValue(pos, (String) value);
} else if (value instanceof Double) {
this.getElement().setValue(pos, (Double) value);
} else {
throw new IllegalArgumentException("The value for the label must not be of type " + value.getClass().getName() + ". The only valid types are Double and String.");
}
}
@Override
public int hashCode() {
return new HashCodeBuilder().append(this.getElement().toDoubleArray()).toHashCode();
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof WekaInstance)) {
return false;
}
return WekaUtil.areInstancesEqual(this.getElement(), ((WekaInstance) obj).getElement());
}
@Override
public String toString() {
return this.getElement().toString();
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/dataset/WekaInstances.java
|
package ai.libs.jaicore.ml.weka.dataset;
import static ai.libs.jaicore.ml.weka.dataset.WekaInstancesUtil.extractSchema;
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.api4.java.ai.ml.core.dataset.IDataset;
import org.api4.java.ai.ml.core.dataset.schema.ILabeledInstanceSchema;
import org.api4.java.ai.ml.core.dataset.schema.attribute.IAttribute;
import org.api4.java.ai.ml.core.dataset.serialization.UnsupportedAttributeTypeException;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance;
import org.api4.java.ai.ml.core.exception.DatasetCreationException;
import org.api4.java.common.attributedobjects.IListDecorator;
import org.api4.java.common.reconstruction.IReconstructible;
import org.api4.java.common.reconstruction.IReconstructionInstruction;
import org.api4.java.common.reconstruction.IReconstructionPlan;
import ai.libs.jaicore.basic.reconstruction.ReconstructionInstruction;
import ai.libs.jaicore.basic.reconstruction.ReconstructionPlan;
import ai.libs.jaicore.ml.weka.WekaUtil;
import weka.core.Instance;
import weka.core.Instances;
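/**
 * Dataset decorator implementing {@link IWekaInstances} on top of a Weka {@link Instances}
 * object. Other {@link ILabeledDataset} implementations are converted on construction.
 */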
public class WekaInstances implements IWekaInstances, IListDecorator<Instances, Instance, IWekaInstance>, IReconstructible {
/**
*
*/
private static final long serialVersionUID = -1980814429448333405L;
private ILabeledInstanceSchema schema;
private final List<IReconstructionInstruction> reconstructionInstructions;
private Instances dataset;
public WekaInstances(final Instances dataset) {
this(dataset, extractSchema(dataset));
}
public WekaInstances(final Instances dataset, final ILabeledInstanceSchema schema) {
this.schema = schema;
this.dataset = dataset;
this.reconstructionInstructions = new ArrayList<>();
}
public WekaInstances(final ILabeledDataset<? extends ILabeledInstance> dataset) {
this.schema = dataset.getInstanceSchema();
if (dataset instanceof WekaInstances) {
this.dataset = new Instances(((WekaInstances) dataset).dataset);
} else {
try {
this.dataset = WekaInstancesUtil.datasetToWekaInstances(dataset);
} catch (UnsupportedAttributeTypeException e) {
throw new IllegalArgumentException("Could not convert dataset to weka's Instances.", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt(); // re-interrupting the thread
throw new IllegalStateException("Could not finish instantiating weka instances.", e);
}
}
if (this.dataset.numAttributes() != dataset.getNumAttributes() + 1) {
throw new IllegalStateException("Number of attributes in the WekaInstances do not coincide. We have " + this.dataset.numAttributes() + " while given dataset had " + dataset.getNumAttributes()
+ ". There should be a difference of 1, because WEKA counts the label as an attribute.");
}
this.reconstructionInstructions = (dataset instanceof IReconstructible) ? ((ReconstructionPlan) ((IReconstructible) dataset).getConstructionPlan()).getInstructions() : null;
}
@Override
public Instances getInstances() {
return this.dataset;
}
@Override
public void removeColumn(final int columnPos) {
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public IWekaInstances createEmptyCopy() throws DatasetCreationException {
return new WekaInstances(new Instances(this.dataset, 0));
}
@Override
public int hashCode() {
HashCodeBuilder hb = new HashCodeBuilder();
for (IWekaInstance inst : this) {
hb.append(inst.hashCode());
}
return hb.toHashCode();
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (this.getClass() != obj.getClass()) {
return false;
}
WekaInstances other = (WekaInstances) obj;
int n = this.size();
for (int i = 0; i < n; i++) {
if (!this.get(i).equals(other.get(i))) {
return false;
}
}
return true;
}
public int getFrequency(final IWekaInstance instance) {
return (int) this.stream().filter(instance::equals).count();
}
@Override
public String toString() {
return "WekaInstances [schema=" + this.getInstanceSchema() + "]\n" + this.dataset;
}
@Override
public Class<IWekaInstance> getTypeOfDecoratingItems() {
return IWekaInstance.class;
}
@Override
public Class<Instance> getTypeOfDecoratedItems() {
return Instance.class;
}
@Override
public Constructor<? extends IWekaInstance> getConstructorForDecoratingItems() {
try {
return WekaInstance.class.getConstructor(this.getTypeOfDecoratedItems());
} catch (Exception e) {
throw new IllegalArgumentException("The constructor of the list class could not be invoked.");
}
}
@Override
public Instances getList() {
return this.dataset;
}
@Override
public IDataset<IWekaInstance> createCopy() throws DatasetCreationException, InterruptedException {
return new WekaInstances(this);
}
@Override
public Object[] getLabelVector() {
return WekaUtil.getClassesAsList(this.dataset).toArray();
}
@Override
public ILabeledInstanceSchema getInstanceSchema() {
return this.schema;
}
@Override
public Object[][] getFeatureMatrix() {
throw new UnsupportedOperationException();
}
@Override
public void removeColumn(final String columnName) {
throw new UnsupportedOperationException();
}
@Override
public void removeColumn(final IAttribute attribute) {
throw new UnsupportedOperationException();
}
@Override
public IReconstructionPlan getConstructionPlan() {
return new ReconstructionPlan(this.reconstructionInstructions.stream().map(i -> (ReconstructionInstruction) i).collect(Collectors.toList()));
}
@Override
public void addInstruction(final IReconstructionInstruction instruction) {
this.reconstructionInstructions.add(instruction);
}
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/dataset/WekaInstancesUtil.java
|
package ai.libs.jaicore.ml.weka.dataset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.api4.java.ai.ml.core.dataset.schema.ILabeledInstanceSchema;
import org.api4.java.ai.ml.core.dataset.schema.attribute.IAttribute;
import org.api4.java.ai.ml.core.dataset.schema.attribute.ICategoricalAttribute;
import org.api4.java.ai.ml.core.dataset.schema.attribute.ICategoricalAttributeValue;
import org.api4.java.ai.ml.core.dataset.schema.attribute.INumericAttribute;
import org.api4.java.ai.ml.core.dataset.schema.attribute.INumericAttributeValue;
import org.api4.java.ai.ml.core.dataset.serialization.UnsupportedAttributeTypeException;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance;
import ai.libs.jaicore.ml.core.dataset.schema.LabeledInstanceSchema;
import ai.libs.jaicore.ml.core.dataset.schema.attribute.IntBasedCategoricalAttribute;
import ai.libs.jaicore.ml.core.dataset.schema.attribute.NumericAttribute;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
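/**
 * Utility methods for converting between jaicore's {@link ILabeledDataset}/{@link ILabeledInstance}
 * representations and Weka's {@link Instances}/{@link Instance} types, including schema extraction.
 */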
public class WekaInstancesUtil {
private WekaInstancesUtil() {
/* Intentionally blank, hiding standard constructor for this util class. */
}
public static ILabeledInstanceSchema extractSchema(final Instances dataset) {
int targetIndex = dataset.classIndex();
if (targetIndex < 0) {
throw new IllegalArgumentException("Class index of Instances object is not set!");
}
List<IAttribute> attributes = IntStream.range(0, dataset.numAttributes()).mapToObj(dataset::attribute).map(WekaInstancesUtil::transformWEKAAttributeToAttributeType).collect(Collectors.toList());
IAttribute labelAttribute = attributes.remove(targetIndex);
return new LabeledInstanceSchema(dataset.relationName(), attributes, labelAttribute);
}
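/**
 * Converts a jaicore {@link ILabeledDataset} into a Weka {@link Instances} object. A minimal
 * usage sketch (the dataset variable is illustrative):
 *
 * <pre>{@code
 * Instances wekaData = WekaInstancesUtil.datasetToWekaInstances(myLabeledDataset);
 * }</pre>
 */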
public static Instances datasetToWekaInstances(final ILabeledDataset<? extends ILabeledInstance> dataset) throws UnsupportedAttributeTypeException, InterruptedException {
Instances wekaInstances = createDatasetFromSchema(dataset.getInstanceSchema());
int expectedAttributes = dataset.getInstanceSchema().getNumAttributes();
for (ILabeledInstance inst : dataset) {
if (Thread.interrupted()) {
throw new InterruptedException("Received interrupt.");
}
if (inst.getNumAttributes() != expectedAttributes) {
throw new IllegalStateException("Dataset scheme defines a number of " + expectedAttributes + " attributes, but instance has " + inst.getNumAttributes() + ".");
}
double[] point = inst.getPoint();
double[] pointWithLabel = Arrays.copyOf(point, point.length + 1);
DenseInstance iNew = new DenseInstance(1, pointWithLabel);
iNew.setDataset(wekaInstances);
if (dataset.getLabelAttribute() instanceof ICategoricalAttribute) {
iNew.setClassValue(((ICategoricalAttribute) dataset.getLabelAttribute()).getLabelOfCategory((int) inst.getLabel()));
} else if (inst.getLabel() != null) {
iNew.setClassValue(Double.parseDouble(inst.getLabel().toString()));
}
wekaInstances.add(iNew); // this MUST come here AFTER having set the class value; otherwise, the class is not registered correctly in the Instances object!!
}
return wekaInstances;
}
public static Instances createDatasetFromSchema(final ILabeledInstanceSchema schema) throws UnsupportedAttributeTypeException {
Objects.requireNonNull(schema);
List<Attribute> attributes = new LinkedList<>();
for (int i = 0; i < schema.getNumAttributes(); i++) {
IAttribute attType = schema.getAttributeList().get(i);
if (attType instanceof INumericAttribute) {
attributes.add(new Attribute(attType.getName()));
} else if (attType instanceof ICategoricalAttribute) {
attributes.add(new Attribute(attType.getName(), ((ICategoricalAttribute) attType).getLabels()));
} else {
throw new UnsupportedAttributeTypeException("The class attribute has an unsupported attribute type " + attType.getClass().getName() + " of attribute " + attType.getName() + ".");
}
}
IAttribute classType = schema.getLabelAttribute();
Attribute classAttribute;
if (classType instanceof INumericAttribute) {
classAttribute = new Attribute(classType.getName());
} else if (classType instanceof ICategoricalAttribute) {
classAttribute = new Attribute(classType.getName(), ((ICategoricalAttribute) classType).getLabels()); // cast to the interface, since other categorical implementations must be supported as well
} else {
throw new UnsupportedAttributeTypeException("The class attribute has an unsupported attribute type.");
}
ArrayList<Attribute> attributeList = new ArrayList<>(attributes);
attributeList.add(classAttribute);
Instances wekaInstances = new Instances("weka-instances", attributeList, 0);
wekaInstances.setClassIndex(wekaInstances.numAttributes() - 1);
return wekaInstances;
}
public static IAttribute transformWEKAAttributeToAttributeType(final Attribute att) {
String attributeName = att.name();
if (att.isNumeric()) {
return new NumericAttribute(attributeName);
} else if (att.isNominal()) {
List<String> domain = new LinkedList<>();
for (int i = 0; i < att.numValues(); i++) {
domain.add(att.value(i));
}
return new IntBasedCategoricalAttribute(attributeName, domain);
}
throw new IllegalArgumentException("Can only transform numeric or categorical attributes");
}
public static Instance transformInstanceToWekaInstance(final ILabeledInstanceSchema schema, final ILabeledInstance instance) throws UnsupportedAttributeTypeException {
if (instance.getNumAttributes() != schema.getNumAttributes()) {
throw new IllegalArgumentException("Schema and instance do not coincide. The schema defines " + schema.getNumAttributes() + " attributes but the instance has " + instance.getNumAttributes() + " attributes.");
}
if (instance instanceof WekaInstance) {
return ((WekaInstance) instance).getElement();
}
Objects.requireNonNull(schema);
Instances dataset = createDatasetFromSchema(schema);
Instance iNew = new DenseInstance(dataset.numAttributes());
iNew.setDataset(dataset);
for (int i = 0; i < instance.getNumAttributes(); i++) {
if (schema.getAttribute(i) instanceof INumericAttribute) {
INumericAttributeValue value = ((INumericAttribute) schema.getAttribute(i)).getAsAttributeValue(instance.getAttributeValue(i));
if (value != null) {
iNew.setValue(i, value.getValue());
} else {
iNew.setMissing(i);
}
} else if (schema.getAttribute(i) instanceof ICategoricalAttribute) {
ICategoricalAttributeValue value = ((ICategoricalAttribute) schema.getAttribute(i)).getAsAttributeValue(instance.getAttributeValue(i));
if (value != null) {
iNew.setValue(i, value.getValue());
} else {
iNew.setMissing(i);
}
} else {
throw new UnsupportedAttributeTypeException("Only categorical and numeric attributes are supported!");
}
}
if (schema.getLabelAttribute() instanceof INumericAttribute) {
iNew.setValue(iNew.numAttributes() - 1, ((INumericAttribute) schema.getLabelAttribute()).getAsAttributeValue(instance.getLabel()).getValue());
} else if (schema.getLabelAttribute() instanceof ICategoricalAttribute) {
iNew.setValue(iNew.numAttributes() - 1, ((ICategoricalAttribute) schema.getLabelAttribute()).getAsAttributeValue(instance.getLabel()).getValue());
} else {
throw new UnsupportedAttributeTypeException("Only categorical and numeric attributes are supported!");
}
if (iNew.numClasses() != dataset.numClasses()) {
throw new IllegalStateException("Number of classes in the transformed instance (" + iNew.numClasses() + ") does not match the dataset (" + dataset.numClasses() + ").");
}
return iNew;
}
}
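For illustration, a minimal round trip through extractSchema and createDatasetFromSchema might look as follows. This is a hypothetical sketch, not part of the library; it only assumes the WEKA classes already used above (Attribute, Instances) plus java.util.Arrays, and a context that declares the checked UnsupportedAttributeTypeException.
// Build a tiny WEKA dataset: one numeric feature and a nominal class.
ArrayList<Attribute> attrs = new ArrayList<>();
attrs.add(new Attribute("x1")); // numeric attribute
attrs.add(new Attribute("class", Arrays.asList("a", "b"))); // nominal attribute
Instances data = new Instances("demo", attrs, 0);
data.setClassIndex(data.numAttributes() - 1);
// Extract the jaicore schema and re-create an empty Instances object from it.
ILabeledInstanceSchema schema = WekaInstancesUtil.extractSchema(data);
Instances copy = WekaInstancesUtil.createDatasetFromSchema(schema);
// copy has the same attribute layout as data, with the class attribute placed last.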
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/dataset/package-info.java
|
/**
 * This package contains classes for WEKA-specific logic regarding datasets.
 * For instance, it contains a util for transforming WEKA's Instances to IDataset.
 *
 * @author mwever
 */
package ai.libs.jaicore.ml.weka.dataset;
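The transformation mentioned above can be sketched as follows (hypothetical snippet; it assumes that the WekaInstances wrapper in this package can be constructed directly from a weka.core.Instances object, and that a local ARFF file exists):
Instances wekaData = DataSource.read("data.arff"); // weka.core.converters.ConverterUtils.DataSource; assumed local file
wekaData.setClassIndex(wekaData.numAttributes() - 1);
ILabeledDataset<?> dataset = new WekaInstances(wekaData); // IDataset view of the WEKA data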
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/preprocessing/IWekaPreprocessingAlgorithm.java
|
package ai.libs.jaicore.ml.weka.preprocessing;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset;
import org.api4.java.algorithm.IAlgorithm;
/**
 * A WEKA preprocessing algorithm takes a labeled dataset and produces itself as output,
 * so that the obtained dimensionality reduction can be applied to new data.
 *
 * @author Felix Mohr
 *
 */
public interface IWekaPreprocessingAlgorithm extends IAlgorithm<ILabeledDataset<?>, IWekaPreprocessingAlgorithm> {
}
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/preprocessing/WekaPreprocessorFitter.java
|
package ai.libs.jaicore.ml.weka.preprocessing;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset;
import org.api4.java.algorithm.events.IAlgorithmEvent;
import org.api4.java.algorithm.exceptions.AlgorithmException;
import org.api4.java.algorithm.exceptions.AlgorithmExecutionCanceledException;
import org.api4.java.algorithm.exceptions.AlgorithmTimeoutedException;
import ai.libs.jaicore.basic.algorithm.AAlgorithm;
import ai.libs.jaicore.ml.weka.dataset.WekaInstances;
import weka.attributeSelection.ASEvaluation;
import weka.attributeSelection.ASSearch;
import weka.attributeSelection.AttributeSelection;
public class WekaPreprocessorFitter extends AAlgorithm<ILabeledDataset<?>, IWekaPreprocessingAlgorithm> implements IWekaPreprocessingAlgorithm {
private final ASSearch searcher;
private final ASEvaluation evaluator;
private final AttributeSelection as;
private FutureTask<?> futureTask;
public WekaPreprocessorFitter(final ILabeledDataset<?> input, final String searcher, final String evaluator) {
super(input);
try {
this.searcher = ASSearch.forName(searcher, null);
this.evaluator = ASEvaluation.forName(evaluator, null);
this.as = new AttributeSelection();
this.as.setSearch(this.searcher);
this.as.setEvaluator(this.evaluator);
} catch (Exception e) {
throw new UnsupportedOperationException("Cannot instantiate a pre-processor with " + searcher + " and " + evaluator);
}
}
@Override
public IAlgorithmEvent nextWithException() throws InterruptedException, AlgorithmExecutionCanceledException, AlgorithmTimeoutedException, AlgorithmException {
ExecutorService executor = null;
try {
switch (this.getState()) {
case CREATED:
return this.activate();
case ACTIVE:
this.futureTask = new FutureTask<>(() -> {
this.as.SelectAttributes(new WekaInstances(this.getInput()).getInstances());
return null;
});
executor = Executors.newSingleThreadExecutor();
executor.submit(this.futureTask);
this.futureTask.get(this.getRemainingTimeToDeadline().milliseconds() - 100, TimeUnit.MILLISECONDS); // leave a 100ms buffer to the deadline for a clean shutdown
return this.terminate();
default:
throw new IllegalStateException();
}
} catch (TimeoutException e) {
throw new AlgorithmTimeoutedException(0);
} catch (InterruptedException e) {
throw e;
} catch (CancellationException e) {
throw new AlgorithmExecutionCanceledException(0);
} catch (Exception e) {
throw new AlgorithmException("Could not build model.", e);
} finally {
if (executor != null) {
executor.shutdownNow();
}
}
}
@Override
public IWekaPreprocessingAlgorithm call() throws InterruptedException, AlgorithmExecutionCanceledException, AlgorithmTimeoutedException, AlgorithmException {
while (this.hasNext()) {
this.nextWithException();
}
return this;
}
public AttributeSelection getSelector() {
return this.as;
}
@Override
public void cancel() {
if (this.futureTask != null) { // guard: the task only exists once the ACTIVE state has been reached
this.futureTask.cancel(true);
}
super.cancel();
}
}
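A hypothetical usage sketch for the fitter (not part of the library): it assumes that WekaInstances can also be constructed directly from a weka.core.Instances object and that an ARFF file named iris.arff exists locally. BestFirst and CfsSubsetEval are standard WEKA attribute-selection classes, resolved via ASSearch.forName and ASEvaluation.forName.
import ai.libs.jaicore.ml.weka.dataset.WekaInstances;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;
public class PreprocessingExample {
public static void main(final String[] args) throws Exception {
Instances raw = DataSource.read("iris.arff"); // assumed local file
raw.setClassIndex(raw.numAttributes() - 1);
WekaPreprocessorFitter fitter = new WekaPreprocessorFitter(new WekaInstances(raw), "weka.attributeSelection.BestFirst", "weka.attributeSelection.CfsSubsetEval");
fitter.call(); // runs the attribute selection; the algorithm returns itself
Instances reduced = fitter.getSelector().reduceDimensionality(raw); // apply the learned reduction
System.out.println("Reduced to " + (reduced.numAttributes() - 1) + " features.");
}
}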
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/rangequery/AbstractAugmentedSpaceSampler.java
|
package ai.libs.jaicore.ml.weka.rangequery;
import java.util.List;
import java.util.Random;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
public abstract class AbstractAugmentedSpaceSampler implements IAugmentedSpaceSampler {
private final Instances preciseInsts;
private final Random rng;
public AbstractAugmentedSpaceSampler(final Instances preciseInsts, final Random rng) {
super();
this.preciseInsts = preciseInsts;
this.rng = rng;
}
/**
 * Generates a point in the augmented space from a given list of precise data points,
 * i.e., chooses the respective minimum and maximum from the given points as lower and upper bounds for each attribute.
 */
protected static Instance generateAugPoint(final List<Instance> insts) {
if (insts.isEmpty()) {
throw new IllegalArgumentException("Cannot generate augmented point from an empty list.");
}
int numAttributes = insts.get(0).numAttributes();
Instance augPoint = new DenseInstance(numAttributes * 2);
for (int i = 0; i < numAttributes; i++) {
double lowerBound = Double.POSITIVE_INFINITY;
double upperBound = Double.NEGATIVE_INFINITY;
for (Instance inst : insts) {
double attrValue = inst.value(i);
lowerBound = Math.min(lowerBound, attrValue);
upperBound = Math.max(upperBound, attrValue);
}
augPoint.setValue(2 * i, lowerBound);
augPoint.setValue((2 * i) + 1, upperBound);
}
return augPoint;
}
/**
* @return the preciseInsts
*/
public Instances getPreciseInsts() {
return this.preciseInsts;
}
/**
* @return the rng
*/
public Random getRng() {
return this.rng;
}
}
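To make the interleaved bound layout concrete, consider this hypothetical illustration (callable from a subclass or from within the same package, since generateAugPoint is protected; DenseInstance(weight, values) is the standard WEKA constructor):
// Two precise points with two attributes each ...
Instance a = new DenseInstance(1.0, new double[] { 1.0, 5.0 });
Instance b = new DenseInstance(1.0, new double[] { 3.0, 2.0 });
// ... yield the augmented point [lower_0, upper_0, lower_1, upper_1] = [1.0, 3.0, 2.0, 5.0].
Instance aug = generateAugPoint(java.util.Arrays.asList(a, b));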
|
0
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka
|
java-sources/ai/libs/jaicore-ml-weka/0.2.7/ai/libs/jaicore/ml/weka/rangequery/AugSpaceAllPairs.java
|
package ai.libs.jaicore.ml.weka.rangequery;
import java.util.ArrayList;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
public class AugSpaceAllPairs implements IAugSpaceSamplingFunction {
@Override
public Instances apply(final Instances input) {
int nPrecise = input.numInstances();
ArrayList<Attribute> augAttrs = new ArrayList<>(input.numAttributes() * 2);
for (int attr = 0; attr < input.numAttributes() - 1; attr++) {
augAttrs.add(new Attribute("x" + attr + "_lower"));
augAttrs.add(new Attribute("x" + attr + "_upper"));
}
augAttrs.add(new Attribute("y_min"));
augAttrs.add(new Attribute("y_max"));
int nAllPairs = (nPrecise * (nPrecise - 1)) / 2;
Instances augInstances = new Instances("aug_space_train", augAttrs, nAllPairs);
for (int i = 0; i < nPrecise; i++) {
for (int j = 0; j < nPrecise; j++) {
ArrayList<Instance> sampledPoints = new ArrayList<>();
Instance x1 = input.get(i);
Instance x2 = input.get(j);
// Assume last attribute is the class
int numFeatures = input.numAttributes() - 1;
for (Instance inst : input) {
boolean inInterval = true;
for (int att = 0; att < numFeatures && inInterval; att++) {
if (inst.value(att) < Math.min(x1.value(att), x2.value(att)) || inst.value(att) > Math.max(x1.value(att), x2.value(att))) {
inInterval = false;
}
}
if (inInterval) {
sampledPoints.add(inst);
}
}
augInstances.add(AbstractAugmentedSpaceSampler.generateAugPoint(sampledPoints));
}
}
return augInstances;
}
}
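Because every unordered pair of instances spans one interval, the output grows quadratically in the number of precise instances. A hypothetical invocation (the ARFF file name is an assumption):
Instances precise = DataSource.read("data.arff"); // weka.core.converters.ConverterUtils.DataSource; assumed local file
precise.setClassIndex(precise.numAttributes() - 1); // the sampler assumes the class attribute is last
Instances augmented = new AugSpaceAllPairs().apply(precise); // roughly n*(n-1)/2 augmented instances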
|