Columns: repo_id (string), file_path (string), content (string)
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/pipelinecharacterizing/IOntologyConnector.java
package ai.libs.mlplan.metamining.pipelinecharacterizing; import java.util.List; /** * Encapsulates the connection to an ontology which holds knowledge about * classifiers, searchers, evaluators, and kernel functions. * * @author Helena Graf * */ public interface IOntologyConnector { /** * Gets the ancestor concepts of an algorithm, including the algorithm itself, * from the most general to the most specific concept. The algorithmName must be the * name of an individual that is available in the ontology. * * @param algorithmName * The algorithm for which to get the ancestors * @return The ancestors of the algorithm in the ontology from the farthest ancestor * to the algorithm itself */ public List<String> getAncestorsOfAlgorithm(String algorithmName); }
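A minimal sketch of a connector backed by a hard-coded map, purely for illustration; the concept names in the ancestor chain are hypothetical, and the real implementation in this package is WEKAOntologyConnector (below), which queries an OWL ontology:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class StaticOntologyConnector implements IOntologyConnector {

	private final Map<String, List<String>> knownAncestors = new HashMap<>();

	public StaticOntologyConnector() {
		// ancestor chain ordered from the most general concept down to the algorithm itself
		// (concept names are made up for this example)
		this.knownAncestors.put("weka.classifiers.trees.J48",
				Arrays.asList("ModelingAlgorithm", "ClassificationModelingAlgorithm", "J48"));
	}

	@Override
	public List<String> getAncestorsOfAlgorithm(String algorithmName) {
		if (!this.knownAncestors.containsKey(algorithmName)) {
			throw new IllegalArgumentException(algorithmName + " is not supported by this connector.");
		}
		return this.knownAncestors.get(algorithmName);
	}
}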
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/pipelinecharacterizing/IPerformanceDecisionTreeBasedFeatureGenerator.java
package ai.libs.mlplan.metamining.pipelinecharacterizing; import java.util.Map; import ai.libs.jaicore.math.linearalgebra.Vector; import ai.libs.jaicore.ml.core.exception.TrainingException; /** * A feature generator that is based on a decision tree. Generates new features * for given features together with performance values based on paths in a * decision tree constructed from the given examples. * * @author Helena Graf * */ public interface IPerformanceDecisionTreeBasedFeatureGenerator { /** * Constructs an internal decision tree so that the feature generator can be * used in the future to predict features for some new vector * ({@link #predict(Vector)}). * * @param intermediatePipelineRepresentationsWithPerformanceValues * maps feature vectors to performance values. Should only contain * numerical features. * @throws TrainingException * if something goes wrong while constructing the tree */ void train(Map<Vector, Double> intermediatePipelineRepresentationsWithPerformanceValues) throws TrainingException; /** * Predicts a feature vector based on a path in the constructed decision tree: * Each node in the tree is given a unique index. Then, for the given vector, * the tree is traversed and a feature vector is generated based on which nodes * are encountered during the traversal. * * @param intermediatePipelineRepresentation * the feature vector for which to generate a new representation * @return the new representation of the given feature vector */ Vector predict(Vector intermediatePipelineRepresentation); }
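A hedged usage sketch of this interface; it assumes that the abstract base class wires the Map-based train method to the Instances-based one in RandomTreePerformanceBasedFeatureGenerator (below), and it uses only the Vector constructors and setters that appear elsewhere in this package:

import java.util.HashMap;
import java.util.Map;
import ai.libs.jaicore.math.linearalgebra.DenseDoubleVector;
import ai.libs.jaicore.math.linearalgebra.Vector;

public class FeatureGeneratorSketch {
	public static void main(String[] args) throws Exception {
		// two toy pipeline representations with observed performance values
		Vector pipelineA = new DenseDoubleVector(3, 0.0);
		pipelineA.setValue(0, 1.0);
		Vector pipelineB = new DenseDoubleVector(3, 0.0);
		pipelineB.setValue(1, 1.0);
		Map<Vector, Double> trainingData = new HashMap<>();
		trainingData.put(pipelineA, 0.8);
		trainingData.put(pipelineB, 0.6);

		IPerformanceDecisionTreeBasedFeatureGenerator generator = new RandomTreePerformanceBasedFeatureGenerator();
		generator.train(trainingData); // builds the internal decision tree
		Vector encoded = generator.predict(pipelineA); // node-occurrence encoding of the traversal path
		System.out.println(encoded);
	}
}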
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/pipelinecharacterizing/IPipelineCharacterizer.java
package ai.libs.mlplan.metamining.pipelinecharacterizing; import java.util.List; import ai.libs.hasco.model.ComponentInstance; import ai.libs.mlplan.multiclass.wekamlplan.weka.model.MLPipeline; /** * Finds patterns in given MLPipelines. A pipeline characterizer first has to be * built with {@link #build(List)}, where it identifies patterns in the given * database of pipelines. Subsequently, it can be used to check for these * patterns in a new pipeline. * * @author Helena Graf, Mirko Jürgens * */ public interface IPipelineCharacterizer { /** * Finds frequent patterns in the given list of pipelines. * * @param pipelines * The pipelines to search for patterns * @throws InterruptedException * if the build process is interrupted */ public void build(List<ComponentInstance> pipelines) throws InterruptedException; /** * Checks which of the found patterns (found during the training phase in * {@link IPipelineCharacterizer#build(List)}) occur in this pipeline. * * If in the returned list l, l[j]=1, pattern j occurs in this pipeline. * Otherwise l[j]=0 and pattern j doesn't occur in this pipeline. * * @param pipeline * The pipeline for which pattern occurrence is checked * @return A list representing pattern occurrences in the pipeline */ public double[] characterize(ComponentInstance pipeline); /** * For each {@link MLPipeline} that was used in the training (given by its * ComponentInstance), return which found pattern (found during the training * phase in {@link IPipelineCharacterizer#build(List)}) occurs in which * pipeline. * * If in the returned matrix m, m[i][j]=1, pattern j occurs in training pipeline * i. Otherwise m[i][j]=0 and pattern j doesn't occur in training pipeline i. * * @return A matrix representing pattern occurrences in pipelines */ public double[][] getCharacterizationsOfTrainingExamples(); /** * Returns the number of found pipeline patterns, which is the length of a * characterization. * * @return the length of any array produced by * {@link #characterize(ComponentInstance)}. */ public int getLengthOfCharacterization(); }
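A hedged sketch of the intended call sequence; trainingPipelines, newPipeline, and componentParameters are placeholders that would come from the surrounding HASCO/ML-Plan setup, and WEKAPipelineCharacterizer (below) is the concrete implementation in this package:

IPipelineCharacterizer characterizer = new WEKAPipelineCharacterizer(componentParameters);
characterizer.build(trainingPipelines); // mines frequent patterns from the training pipelines
double[] occurrences = characterizer.characterize(newPipeline);
for (int j = 0; j < occurrences.length; j++) {
	if (occurrences[j] == 1) {
		System.out.println("Pattern " + j + " occurs in the new pipeline");
	}
}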
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/pipelinecharacterizing/OntologyNotFoundException.java
package ai.libs.mlplan.metamining.pipelinecharacterizing; /** * Exception thrown when an ontology cannot be loaded. * * @author Helena Graf * */ public class OntologyNotFoundException extends RuntimeException { /** * generated id */ private static final long serialVersionUID = 1847206504544179128L; /** * Create a new generic exception. */ public OntologyNotFoundException() { super(); } /** * Create a new exception with the given message. * * @param message * a message describing the exception */ public OntologyNotFoundException(String message) { super(message); } /** * Create a new exception with the given cause. * * @param cause * the cause of the exception */ public OntologyNotFoundException(Throwable cause) { super(cause); } /** * Create a new exception with a given message and cause. * * @param message * a message describing the exception * @param cause * the cause of the exception */ public OntologyNotFoundException(String message, Throwable cause) { super(message, cause); } }
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/pipelinecharacterizing/RandomTreePerformanceBasedFeatureGenerator.java
package ai.libs.mlplan.metamining.pipelinecharacterizing; import java.util.HashMap; import java.util.Map; import ai.libs.jaicore.basic.algorithm.exceptions.AlgorithmException; import ai.libs.jaicore.math.linearalgebra.DenseDoubleVector; import ai.libs.jaicore.math.linearalgebra.Vector; import weka.classifiers.trees.RandomTree; import weka.classifiers.trees.RandomTree.Tree; import weka.core.Instances; /** * An {@link AWEKAPerformanceDecisionTreeBasedFeatureGenerator} that uses a * {@link RandomTree}. * * @author Helena Graf * */ public class RandomTreePerformanceBasedFeatureGenerator extends AWEKAPerformanceDecisionTreeBasedFeatureGenerator { private RandomTree randomTree = new RandomTree(); private Tree tree; private Map<Tree, Integer> nodesIndices = new HashMap<>(); private boolean allowUnsetValues = false; private double incomingUnsetValueValue = Double.NaN; private double outgoingUnsetValueValue = 0; private double occurenceValue = 1; private double nonOccurenceValue = -1; @Override public void train(final Instances data) throws AlgorithmException { // Step 1: Train Random Tree try { this.randomTree.buildClassifier(data); } catch(Exception e) { throw new AlgorithmException(e, "Random Tree could not be trained!"); } // Step 2: Count the nodes in the tree (DF Traversal Index Mapping) this.addIndexToMap(0, this.randomTree.getM_Tree()); this.tree = this.randomTree.getM_Tree(); } private int addIndexToMap(final int subTreeIndex, final Tree subTree) { this.nodesIndices.put(subTree, subTreeIndex); int numberOfSuccessors = 0; if (subTree.getM_Successors() != null) { for (int i = 0; i < subTree.getM_Successors().length; i++) { // Assign DFS preorder indices: offset each child by the number of nodes already indexed in the preceding sibling subtrees numberOfSuccessors += this.addIndexToMap(subTreeIndex + 1 + numberOfSuccessors, subTree.getM_Successors()[i]) + 1; } } return numberOfSuccessors; } @Override public Vector predict(final Vector intermediatePipelineRepresentation) { Vector pipelineRepresentation = new DenseDoubleVector(this.nodesIndices.size(), this.nonOccurenceValue); // Query the RandomTree Tree subTree = this.tree; while (subTree != null) { if (subTree.getM_Attribute() == -1) { // We are at a leaf node - The current node occurs pipelineRepresentation.setValue(this.nodesIndices.get(subTree), this.occurenceValue); // We are at a leaf - stop subTree = null; } else if (this.allowUnsetValues && !this.isValueUnset(intermediatePipelineRepresentation.getValue(subTree.getM_Attribute())) || !this.allowUnsetValues) { // The current node occurs pipelineRepresentation.setValue(this.nodesIndices.get(subTree), this.occurenceValue); if (intermediatePipelineRepresentation.getValue(subTree.getM_Attribute()) < subTree.getM_SplitPoint()) { // we go to the left subTree = subTree.getM_Successors()[0]; } else { // we go to the right subTree = subTree.getM_Successors()[1]; } } else { // We do allow unset values and the value is unset - set the subtree to the non-occurrence value and end the traversal this.setSubTreeToValue(subTree, this.outgoingUnsetValueValue, pipelineRepresentation); subTree = null; } } return pipelineRepresentation; } private boolean isValueUnset(final double value) { if (Double.isNaN(this.incomingUnsetValueValue)) { return Double.isNaN(value); } else { return value == this.incomingUnsetValueValue; } } private void setSubTreeToValue(final Tree subTree, final double value, final Vector featureRepresentation) { featureRepresentation.setValue(this.nodesIndices.get(subTree), value); if (subTree.getM_Successors() != null) { for (int i = 0; i < subTree.getM_Successors().length; i++) { this.setSubTreeToValue(subTree.getM_Successors()[i], value, featureRepresentation); } } } @Override public String toString() { StringBuilder builder = new StringBuilder(); try { builder.append(this.randomTree); } catch (Exception e) { builder.append("Cannot print tree"); } builder.append(System.lineSeparator()); builder.append(this.nodesIndices); builder.append(System.lineSeparator()); return builder.toString(); } /** * Get the value that is assumed to mean a missing value for incoming feature * values. Only relevant if missing values for incoming feature vectors are * allowed. * * @return the value that is assumed to mean a missing value for incoming * feature values */ public double getIncomingUnsetValueValue() { return this.incomingUnsetValueValue; } /** * Allow incoming feature vectors to have missing values. * * @param unsetValueValue * the value that marks an entry as missing in incoming feature * vectors */ public void setAllowNonOccurence(final double unsetValueValue) { this.allowUnsetValues = true; this.incomingUnsetValueValue = unsetValueValue; } /** * Disallow incoming feature vectors from having missing values. */ public void disallowNonOccurence() { this.allowUnsetValues = false; } /** * Get the value that this feature generator sets for areas of the tree that * are not encountered because an attribute that is used as a split in a node * that is encountered is not set in a given feature representation. * * @return the produced value for areas of the tree that are blocked by a * missing feature value */ public double getOutgoingUnsetValueValue() { return this.outgoingUnsetValueValue; } /** * Set the value that this feature generator sets for areas of the tree that * are not encountered because an attribute that is used as a split in a node * that is encountered is not set in a given feature representation. * * @param outgoingUnsetValueValue * the produced value for areas of the tree that are blocked by a * missing feature value */ public void setOutgoingUnsetValueValue(final double outgoingUnsetValueValue) { this.outgoingUnsetValueValue = outgoingUnsetValueValue; } /** * Get the value that this feature generator sets for nodes in the tree that are * encountered during the traversal based on a given feature vector. * * @return the value that this feature generator sets for nodes in the tree that * are encountered */ public double getOccurenceValue() { return this.occurenceValue; } /** * Set the value that this feature generator sets for nodes in the tree that are * encountered during the traversal based on a given feature vector. * * @param occurenceValue * the value that this feature generator sets for nodes in the tree * that are encountered */ public void setOccurenceValue(final double occurenceValue) { this.occurenceValue = occurenceValue; } /** * Get the value that this feature generator sets for nodes in the tree that are * not encountered during the traversal based on a given feature vector. * * @return the value that this feature generator sets for nodes in the tree that * are not encountered */ public double getNonOccurenceValue() { return this.nonOccurenceValue; } /** * Set the value that this feature generator sets for nodes in the tree that are * not encountered during the traversal based on a given feature vector. * * @param nonOccurenceValue * the value that this feature generator sets for nodes in the tree * that are not encountered */ public void setNonOccurenceValue(final double nonOccurenceValue) { this.nonOccurenceValue = nonOccurenceValue; } }
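A hedged usage sketch of this generator; trainingInstances and pipelineRepresentation are placeholders, and the construction of the weka.core.Instances (attributes plus a numeric class attribute holding the performance value) is elided since its exact schema is determined by the abstract base class:

RandomTreePerformanceBasedFeatureGenerator generator = new RandomTreePerformanceBasedFeatureGenerator();
generator.setAllowNonOccurence(Double.NaN); // treat NaN entries in incoming vectors as missing
generator.setOccurenceValue(1);             // encountered nodes are encoded as 1
generator.setNonOccurenceValue(-1);         // untouched nodes are encoded as -1
generator.train(trainingInstances);         // trainingInstances: weka.core.Instances (placeholder)
Vector encoded = generator.predict(pipelineRepresentation); // pipelineRepresentation: a Vector (placeholder)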
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/pipelinecharacterizing/WEKAOntologyConnector.java
package ai.libs.mlplan.metamining.pipelinecharacterizing; import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import org.semanticweb.owlapi.apibinding.OWLManager; import org.semanticweb.owlapi.model.ClassExpressionType; import org.semanticweb.owlapi.model.OWLClass; import org.semanticweb.owlapi.model.OWLDataFactory; import org.semanticweb.owlapi.model.OWLNamedIndividual; import org.semanticweb.owlapi.model.OWLOntology; import org.semanticweb.owlapi.model.OWLOntologyCreationException; import org.semanticweb.owlapi.model.OWLOntologyManager; /** * Represents the connection to the data mining optimization ontology (DMOP) * enriched by the WEKA implementations of algorithms. Thus, an object of * this class can be queried for WEKA classifiers as well as instances of * ASSearch and ASEvaluation. * * @author Helena Graf * */ public class WEKAOntologyConnector implements IOntologyConnector { /** * Location of the ontology used by this connector */ private static final String ONTOLOGY_FILE_NAME = "DMOP_modified.owl"; /** * IRI of the elements in this ontology */ private static final String ONTOLOGY_IRI = "http://www.e-lico.eu/ontologies/dmo/DMOP/DMOP.owl"; /** * Separator that separates the ontology IRI from the name of an ontology * element */ private static final String ONTOLOGY_IRI_SEPARATOR = "#"; /** * List of all classifiers that can be characterized by this ontology connector */ private static final List<String> classifierPortfolio = Arrays.asList("weka.classifiers.bayes.BayesNet", "weka.classifiers.bayes.NaiveBayes", "weka.classifiers.bayes.NaiveBayesMultinomial", "weka.classifiers.functions.Logistic", "weka.classifiers.functions.MultilayerPerceptron", "weka.classifiers.functions.SGD", "weka.classifiers.functions.SimpleLogistic", "weka.classifiers.functions.SMO", "weka.classifiers.functions.VotedPerceptron", "weka.classifiers.lazy.IBk", "weka.classifiers.lazy.KStar", "weka.classifiers.rules.DecisionTable", "weka.classifiers.rules.JRip", "weka.classifiers.rules.OneR", "weka.classifiers.rules.PART", "weka.classifiers.rules.ZeroR", "weka.classifiers.trees.DecisionStump", "weka.classifiers.trees.J48", "weka.classifiers.trees.LMT", "weka.classifiers.trees.RandomForest", "weka.classifiers.trees.RandomTree", "weka.classifiers.trees.REPTree", "weka.classifiers.meta.Vote", "weka.classifiers.meta.Stacking", "weka.classifiers.meta.RandomSubSpace", "weka.classifiers.meta.RandomCommittee", "weka.classifiers.meta.MultiClassClassifier", "weka.classifiers.meta.LogitBoost", "weka.classifiers.meta.ClassificationViaRegression", "weka.classifiers.meta.Bagging", "weka.classifiers.meta.AdditiveRegression", "weka.classifiers.meta.AdaBoostM1", "weka.classifiers.trees.M5P", "weka.classifiers.rules.M5Rules", "weka.classifiers.functions.SimpleLinearRegression"); /** * List of all evaluators (for a data-preprocessor) that can be characterized by * this ontology connector */ private static final List<String> evaluatorPortfolio = Arrays.asList("weka.attributeSelection.CfsSubsetEval", "weka.attributeSelection.CorrelationAttributeEval", "weka.attributeSelection.GainRatioAttributeEval", "weka.attributeSelection.InfoGainAttributeEval", "weka.attributeSelection.OneRAttributeEval", "weka.attributeSelection.PrincipalComponents", "weka.attributeSelection.ReliefFAttributeEval", "weka.attributeSelection.SymmetricalUncertAttributeEval"); /** * List of all searchers (for a data-preprocessor) that can be characterized by * this ontology connector */ private static final List<String> searcherPortfolio = Arrays.asList("weka.attributeSelection.BestFirst", "weka.attributeSelection.GreedyStepwise", "weka.attributeSelection.Ranker"); /** * List of all kernel functions that can be characterized by this ontology * connector */ private static final List<String> kernelFunctionPortfolio = Arrays.asList( "weka.classifiers.functions.supportVector.Puk", "weka.classifiers.functions.supportVector.RBFKernel", "weka.classifiers.functions.supportVector.PolyKernel", "weka.classifiers.functions.supportVector.NormalizedPolyKernel"); /** * The common ancestor of all classifiers in the ontology */ private static final String CLASSIFIER_TOP_NODE = "ModelingAlgorithm"; /** * The common ancestor of all searchers in the ontology */ private static final String SEARCHER_TOP_NODE = "DataProcessingAlgorithm"; /** * The common ancestor of all evaluators in the ontology */ private static final String EVALUTATOR_TOP_NODE = "DataProcessingAlgorithm"; /** * The common ancestor of all kernel functions in the ontology */ private static final String KERNEL_FUNCTION_TOP_NODE = "KernelFunction"; /** * The data factory used to get ontology elements from Strings */ private OWLDataFactory dataFactory; /** * The used ontology as an object */ private OWLOntology ontology; /** * Whether classes related by equivalent-classes axioms shall also be included * in the returned characterization */ private boolean includeEqualSuperClasses = true; /** * Creates an ontology connector using the standard ontology. * * @throws OWLOntologyCreationException * If the ontology cannot be created */ public WEKAOntologyConnector() throws OWLOntologyCreationException { OWLOntologyManager ontologyManager = OWLManager.createOWLOntologyManager(); dataFactory = ontologyManager.getOWLDataFactory(); InputStream inputStream = Thread.currentThread().getContextClassLoader() .getResourceAsStream(ONTOLOGY_FILE_NAME); ontology = ontologyManager.loadOntologyFromOntologyDocument(inputStream); } @Override public List<String> getAncestorsOfAlgorithm(String algorithmName) { if (classifierPortfolio.contains(algorithmName)) { return getAncestorsOfAlgorithmUntil(algorithmName, CLASSIFIER_TOP_NODE); } else if (searcherPortfolio.contains(algorithmName)) { return getAncestorsOfAlgorithmUntil(algorithmName, SEARCHER_TOP_NODE); } else if (evaluatorPortfolio.contains(algorithmName)) { return getAncestorsOfAlgorithmUntil(algorithmName, EVALUTATOR_TOP_NODE); } else if (kernelFunctionPortfolio.contains(algorithmName)) { return getAncestorsOfAlgorithmUntil(algorithmName, KERNEL_FUNCTION_TOP_NODE); } else { StringBuilder builder = new StringBuilder(); builder.append(algorithmName); builder.append(" is not supported by the used ontology."); throw new IllegalArgumentException(builder.toString()); } } /** * Get the list of ancestors of an algorithm, ordered from the most general to * the most specific concept, up to the specified top concept, including both * that concept and the algorithm itself. * * @param algorithm * The child algorithm * @param until * The highest ancestor * @return The ancestors of the child algorithm from the highest ancestor to the * child algorithm itself */ protected List<String> getAncestorsOfAlgorithmUntil(String algorithm, String until) { // Get the individual represented by the algorithm OWLNamedIndividual algorithmAsIndividual = dataFactory.getOWLNamedIndividual(getAsOntologyElement(algorithm)); // Get ancestors ArrayList<OWLClass> ancestors = new ArrayList<>(); ontology.classAssertionAxioms(algorithmAsIndividual).findFirst().ifPresent(algorithmClass -> ancestors.add(algorithmClass.getClassExpression().asOWLClass())); for (int i = 0; i < ancestors.size(); i++) { // If we have found the last element, stop if (ancestors.get(ancestors.size() - 1).getIRI().getShortForm().equals(until)) { break; } int previousAncestorSize = ancestors.size(); ontology.subClassAxiomsForSubClass(ancestors.get(i)) .filter(axiom -> axiom.getSuperClass().getClassExpressionType() == ClassExpressionType.OWL_CLASS) .forEach(axiom -> { OWLClass toAdd = axiom.getSuperClass().asOWLClass(); ancestors.add(toAdd); }); // If we have not added an element if (includeEqualSuperClasses && ancestors.size() == previousAncestorSize) { ontology.equivalentClassesAxioms(ancestors.get(i)).forEach(axiom -> axiom.classExpressions().forEach(elem -> { if (!ancestors.contains(elem.conjunctSet().findFirst().get().asOWLClass())) { ancestors.add(elem.conjunctSet().findFirst().get().asOWLClass()); } }) ); } } // Get names and invert order, collecting from the specified top node down to the most specific class List<String> ancestorNames = new ArrayList<>(); boolean withinRange = false; for (int i = ancestors.size() - 1; i >= 0; i--) { String ancestorName = ancestors.get(i).getIRI().getShortForm(); if (ancestorName.equals(until)) { withinRange = true; } if (withinRange) { ancestorNames.add(ancestorName); } } ancestorNames.add(algorithmAsIndividual.getIRI().getShortForm()); return ancestorNames; } /** * Appends the given name of an ontology element to the IRI of the used * ontology, separated by a specified separator. * * @param name * The name of the ontology element * @return The fully qualified name of the ontology element */ private String getAsOntologyElement(String name) { StringBuilder builder = new StringBuilder(); builder.append(ONTOLOGY_IRI); builder.append(ONTOLOGY_IRI_SEPARATOR); builder.append(name); return builder.toString(); } /** * Get the ontology this connector uses. * * @return The used ontology */ public OWLOntology getOntology() { return ontology; } /** * Get the fully qualified names of WEKA classifiers that this ontology * connector can be queried for. * * @return The available classifiers */ public List<String> getAvailableClassifiers() { return classifierPortfolio; } /** * Get the fully qualified names of WEKA ASSearch algorithms that this ontology * can be queried for. * * @return The available searchers */ public List<String> getAvailableSearchers() { return searcherPortfolio; } /** * Get the fully qualified names of WEKA ASEvaluation algorithms that this * ontology can be queried for. * * @return The available evaluators */ public List<String> getAvailableEvaluators() { return evaluatorPortfolio; } /** * Get the fully qualified names of kernel functions that this ontology can be * queried for. * * @return The available kernel functions */ public List<String> getAvailableKernelFunctions() { return kernelFunctionPortfolio; } /** * Get the highest common node in the ontology for all classifiers. * * @return The classifier top node */ public String getClassifierTopNode() { return CLASSIFIER_TOP_NODE; } /** * Get the highest common node in the ontology for all searchers. * * @return The searcher top node */ public String getSearcherTopNode() { return SEARCHER_TOP_NODE; } /** * Get the highest common node in the ontology for all evaluators. * * @return The evaluator top node */ public String getEvaluatorTopNode() { return EVALUTATOR_TOP_NODE; } /** * Get the highest common node in the ontology for all kernel functions. * * @return The kernel function top node */ public String getKernelFunctionTopNode() { return KERNEL_FUNCTION_TOP_NODE; } }
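A hedged usage sketch of the connector; whether J48's ancestor chain looks exactly like the commented output depends on the shipped DMOP ontology file and is not verified here:

import java.util.List;
import org.semanticweb.owlapi.model.OWLOntologyCreationException;

public class OntologyConnectorSketch {
	public static void main(String[] args) throws OWLOntologyCreationException {
		WEKAOntologyConnector connector = new WEKAOntologyConnector();
		// query an algorithm from the classifier portfolio
		List<String> ancestors = connector.getAncestorsOfAlgorithm("weka.classifiers.trees.J48");
		// expected shape (illustrative): [ModelingAlgorithm, ..., J48]
		System.out.println(ancestors);
	}
}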
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/pipelinecharacterizing/WEKAPipelineCharacterizer.java
package ai.libs.mlplan.metamining.pipelinecharacterizing; import java.io.File; import java.io.IOException; import java.net.URISyntaxException; import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Scanner; import org.semanticweb.owlapi.model.OWLOntologyCreationException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.model.Component; import ai.libs.hasco.model.ComponentInstance; import ai.libs.hasco.model.Parameter; import ai.libs.hasco.model.ParameterRefinementConfiguration; import treeminer.FrequentSubtreeFinder; import treeminer.TreeMiner; import treeminer.util.TreeRepresentationUtils; /** * A characterizer for MLPipelines. It characterizes pipelines using an ontology * and a tree mining algorithm. The ontology is used to get a characterization * of a pipeline element; from the characterization of all pipeline elements * and their parameters, a tree is then built. The trees retrieved from a number * of training examples for pipelines are then used to find frequent patterns in * the pipelines. A new pipeline is then characterized by which of these * patterns appear in it. * * @author Helena Graf * */ public class WEKAPipelineCharacterizer implements IPipelineCharacterizer { private static final Logger logger = LoggerFactory.getLogger(WEKAPipelineCharacterizer.class); /** The default path for precomputed algorithm patterns. */ private static final String ALGORITHM_PATTERNS_SUPPORT_5_PATH = "draco/patterns_support_5.csv"; /** * Maximum number of concurrent threads used by the characterizer */ private int cpus = 1; /** * The ontology connector used to characterize a single pipeline element */ private IOntologyConnector ontologyConnector; /** * The algorithm used by the pipeline characterizer to find frequent subtrees in * deduced tree representations of given pipelines */ private FrequentSubtreeFinder treeMiner; /** * The frequent patterns found in the tree representations of pipelines by the * tree mining algorithm */ private List<String> foundPipelinePatterns; /** * The minimum support required for a pattern to be considered frequent by the * tree miner */ private int patternMinSupport = 5; private Map<Component, Map<Parameter, ParameterRefinementConfiguration>> componentParameters; /** * Creates a new pipeline characterizer that uses the given descriptions of * parameters to characterize MLPipelines. * * @param componentParameters * The description of parameters in the current configuration * together with their refinements. */ public WEKAPipelineCharacterizer( Map<Component, Map<Parameter, ParameterRefinementConfiguration>> componentParameters) { TreeMiner miner = new TreeMiner(); miner.setCountMultipleOccurrences(false); miner.setOnlySearchForPatternsThatStartWithTheRoot(true); this.treeMiner = miner; // use the configured tree miner this.componentParameters = componentParameters; try { ontologyConnector = new WEKAOntologyConnector(); } catch (OWLOntologyCreationException e) { logger.error("Cannot connect to Ontology!"); throw new OntologyNotFoundException(e); } } /** * Build this pipeline characterizer from a file of patterns. The patterns need * to be UTF-8 encoded strings, and each line specifies exactly one pattern. * * @param file * the file to read from */ public void buildFromFile(File file) { List<String> foundPatterns = new ArrayList<>(); try (Scanner scanner = new Scanner(file)) { while (scanner.hasNextLine()) { String pattern = scanner.nextLine(); foundPatterns.add(pattern); } } catch (IOException e) { logger.error("Couldn't initialize pipeline characterizer", e); } this.foundPipelinePatterns = foundPatterns; } /** * Builds the pipeline characterizer with a default list of patterns, which was * generated by a random search over the algorithm space of WEKA. */ public void buildFromFile() { try { this.buildFromFile( Paths.get(getClass().getClassLoader().getResource(ALGORITHM_PATTERNS_SUPPORT_5_PATH).toURI()).toFile()); } catch (URISyntaxException e) { logger.error("Couldn't find default algorithm patterns!", e); } } @Override public void build(List<ComponentInstance> pipelines) throws InterruptedException { // Convert the pipelines to String representations logger.info("Converting training examples to trees with minimum support {}", patternMinSupport); int chunkSize = Math.floorDiv(pipelines.size(), cpus); int lastchunkSize = pipelines.size() - (chunkSize * (cpus - 1)); ComponentInstanceStringConverter[] threads = new ComponentInstanceStringConverter[cpus]; for (int i = 0; i < threads.length; i++) { threads[i] = new ComponentInstanceStringConverter(ontologyConnector, pipelines.subList(i * chunkSize, i == threads.length - 1 ? (i * chunkSize) + lastchunkSize : (i + 1) * chunkSize), componentParameters); threads[i].start(); } List<String> pipelineRepresentations = new ArrayList<>(pipelines.size()); for (int i = 0; i < threads.length; i++) { threads[i].join(); pipelineRepresentations.addAll(threads[i].getConvertedPipelines()); } // Use the tree miner to find patterns logger.info("Finding frequent subtrees"); foundPipelinePatterns = treeMiner.findFrequentSubtrees(pipelineRepresentations, patternMinSupport); } @Override public double[] characterize(ComponentInstance pipeline) { // Make tree representation from this pipeline String treeRepresentation = new ComponentInstanceStringConverter(ontologyConnector, new ArrayList<>(), componentParameters).makeStringTreeRepresentation(pipeline); // Ask the treeMiner which of the patterns are included in this pipeline double[] pipelineCharacterization = new double[foundPipelinePatterns.size()]; for (int i = 0; i < foundPipelinePatterns.size(); i++) { if (TreeRepresentationUtils.containsSubtree(treeRepresentation, foundPipelinePatterns.get(i))) { pipelineCharacterization[i] = 1; } else { pipelineCharacterization[i] = 0; } } return pipelineCharacterization; } @Override public double[][] getCharacterizationsOfTrainingExamples() { return treeMiner.getCharacterizationsOfTrainingExamples(); } /** * Returns the number of found pipeline patterns, which is the length of a * characterization. * * @return the length of any array produced by {@link #characterize(ComponentInstance)}. */ @Override public int getLengthOfCharacterization() { return this.foundPipelinePatterns.size(); } /** * Get the used ontology connector. * * @return The used ontology connector */ public IOntologyConnector getOntologyConnector() { return ontologyConnector; } /** * Set the ontology connector to be used. * * @param ontologyConnector * the ontologyConnector to be used */ public void setOntologyConnector(IOntologyConnector ontologyConnector) { this.ontologyConnector = ontologyConnector; } /** * Get the minimum support required for a pattern to be considered frequent for * the tree mining algorithm. * * @return The minimum support a tree pattern must have to be considered * frequent */ public int getMinSupport() { return patternMinSupport; } /** * Set the minimum support required for a pattern to be considered frequent for * the tree mining algorithm. * * @param minSupport * The minimum support a tree pattern must have to be considered * frequent */ public void setMinSupport(int minSupport) { this.patternMinSupport = minSupport; } /** * Inform the characterizer about the number of CPUs available to it. * * @param cpus * Maximum number of threads that will be used by the characterizer */ public void setCPUs(int cpus) { this.cpus = cpus; } /** * Get the patterns found among the given training examples. * * @return A list of patterns */ public List<String> getFoundPipelinePatterns() { return foundPipelinePatterns; } }
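A hedged sketch of how the characterizer is typically set up; componentParameters and pipelineInstance are placeholders that would come from the HASCO component framework:

WEKAPipelineCharacterizer characterizer = new WEKAPipelineCharacterizer(componentParameters);
characterizer.setMinSupport(5); // minimum support used when mining (default shown)
characterizer.setCPUs(4);       // allow up to four converter threads during build(...)
characterizer.buildFromFile();  // load the precomputed default patterns instead of mining from scratch
double[] characterization = characterizer.characterize(pipelineInstance); // one 0/1 entry per pattern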
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/pipelinecharacterizing/package-info.java
/** * Package containing classes for the characterization of MLPipelines. * * @author Helena Graf * */ package ai.libs.mlplan.metamining.pipelinecharacterizing;
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/similaritymeasures/F1Optimizer.java
package ai.libs.mlplan.metamining.similaritymeasures; import java.util.Random; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.factory.Nd4j; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import de.jungblut.math.DoubleVector; import de.jungblut.math.dense.DenseDoubleVector; import de.jungblut.math.minimize.CostFunction; import de.jungblut.math.minimize.CostGradientTuple; import de.jungblut.math.minimize.GradientDescent; public class F1Optimizer implements IHeterogenousSimilarityMeasureComputer { private Logger logger = LoggerFactory.getLogger(F1Optimizer.class); private static final double ALPHA_START = 0.000000001; // learning rate private static final double ALPHA_MAX = 1e-5; private static final int ITERATIONS_PER_PROBE = 100; private static final int LIMIT = 1; // as long as the solution improves by at least this value, continue private static final double MAX_DESIRED_ERROR = 0; private INDArray rrt; private INDArray x; private INDArray u; // the learned matrix private final Random rand = new Random(); /** * Learns a matrix U that minimizes F1 (W is ignored here). */ @Override public void build(final INDArray x, final INDArray w, final INDArray r) { this.rrt = r.mmul(r.transpose()); this.x = x; final int m = x.columns(); /* generate initial U vector */ final int numberOfImplicitFeatures = 1; double[] denseVector = new double[m * numberOfImplicitFeatures]; int c = 0; for (int i = 0; i < m; i++) { for (int j = 0; j < numberOfImplicitFeatures; j++) { denseVector[c++] = (this.rand.nextDouble() - 0.5) * 100; } } DoubleVector currentSolutionAsVector = new DenseDoubleVector(denseVector); INDArray currentSolutionAsMatrix = this.vector2matrix(currentSolutionAsVector, m, numberOfImplicitFeatures); double currentCost = this.getCost(currentSolutionAsMatrix); this.logger.debug("X = {}", x); this.logger.debug("randomly initialized U = {}", currentSolutionAsMatrix); this.logger.debug("loss of randomly initialized U: {}", currentCost); CostFunction cf = input -> { INDArray uIntermediate = this.vector2matrix(input, x.columns(), numberOfImplicitFeatures); double cost = this.getCost(uIntermediate); INDArray gradientMatrix = this.getGradientAsMatrix(uIntermediate); return new CostGradientTuple(cost, this.matrix2vector(gradientMatrix)); }; /* probe algorithm with different alphas */ double alpha = ALPHA_START; while (currentCost > MAX_DESIRED_ERROR) { double lastCost = currentCost; DoubleVector lastSolution = currentSolutionAsVector; GradientDescent gd = new GradientDescent(alpha, LIMIT); currentSolutionAsVector = gd.minimize(cf, currentSolutionAsVector, ITERATIONS_PER_PROBE, false); currentSolutionAsMatrix = this.vector2matrix(currentSolutionAsVector, m, numberOfImplicitFeatures); currentCost = this.getCost(currentSolutionAsMatrix); if (lastCost < currentCost) { currentSolutionAsVector = lastSolution; currentCost = lastCost; alpha /= 2; } else if (lastCost > currentCost) { alpha *= 2; } else { break; } alpha = Math.min(alpha, ALPHA_MAX); this.logger.debug("Current Cost {} (alpha = {})", currentCost, alpha); } this.u = currentSolutionAsMatrix; } /** * creates a matrix of the Nd4j framework from a vector of Thomas Jungblut's math framework * * @param vector the vector to convert * @param m the number of rows * @param n the number of columns * @return the resulting (m x n) matrix */ public INDArray vector2matrix(final DoubleVector vector, final int m, final int n) { double[] inputs = new double[vector.getLength()]; for (int i = 0; i < vector.getLength(); i++) { inputs[i] = vector.get(i); } return Nd4j.create(inputs, new int[] {m, n}); } /** * collapses a matrix of the Nd4j framework into a double vector of Thomas Jungblut's framework * * @param matrix the matrix to collapse * @return the resulting vector */ public DoubleVector matrix2vector(final INDArray matrix) { int m = matrix.rows(); int n = matrix.columns(); double[] denseVector = new double[m * n]; int c = 0; for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { denseVector[c++] = matrix.getDouble(i,j); } } return new DenseDoubleVector(denseVector); } /** * This evaluates F1, i.e. the squared Frobenius norm of (RR^T - XU(XU)^T). * * @param u the current candidate for U * @return the cost of u */ public double getCost(final INDArray u) { INDArray z1 = this.x.mmul(u); INDArray z2 = z1.transpose(); INDArray z = z1.mmul(z2); INDArray q = this.rrt.sub(z); double cost = 0; int n = q.columns(); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { cost += Math.pow(q.getDouble(i,j),2); } } return cost; } /** * This computes the gradient of F1 in matrix form * * @param u the current candidate for U * @return the gradient of F1 at u */ public INDArray getGradientAsMatrix(final INDArray u) { int m = this.x.columns(); int n = u.columns(); float[][] derivatives = new float[m][n]; for (int k = 0; k < m; k++) { for (int l = 0; l < n; l++) { derivatives[k][l] = this.getFirstDerivative(u, k, l); } } return Nd4j.create(derivatives); } /** * This computes the derivative of F1 for the (k,l)-th element of the U matrix * * @param u the current candidate for U * @param k the row index * @param l the column index * @return the partial derivative of F1 with respect to u[k][l] */ public float getFirstDerivative(final INDArray u, final int k, final int l) { /* compute inner product Z := XU(XU)^T */ INDArray z1 = this.x.mmul(u); INDArray z2 = z1.transpose(); INDArray z = z1.mmul(z2); /* define the difference of RR^T and Z in Q */ INDArray q = this.rrt.sub(z); /* now compute the inner product of the i-th row of X and the l-th column of U */ int n = this.x.rows(); float[] sums = new float[n]; for (int i = 0; i < n; i++) { sums[i] = this.x.getRow(i).mmul(u.getColumn(l)).getFloat(0,0); } /* now compute the actual derivative */ float derivative = 0; for (int i = 0; i < n; i++) { float xik = this.x.getFloat(i,k); for (int j = 0; j < n; j++) { float sumA = xik * sums[j]; float sumB = this.x.getFloat(j,k) * sums[i]; derivative += -2 * q.getFloat(i,j) * (sumA + sumB); } } return derivative; } @Override public double computeSimilarity(final INDArray x, final INDArray w) { // not implemented for F1; this optimizer is only used to learn U return 0; } public INDArray getX() { return this.x; } public INDArray getU() { return this.u; } }
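A hedged sketch of running the F1 optimizer on tiny random matrices; the shapes are chosen for illustration only, and the optimization may iterate for a while since it probes several learning rates:

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class F1OptimizerSketch {
	public static void main(String[] args) {
		INDArray x = Nd4j.rand(4, 3); // e.g. 4 datasets x 3 meta features
		INDArray r = Nd4j.rand(4, 5); // e.g. 4 datasets x 5 pipelines (performance/rank matrix)
		F1Optimizer optimizer = new F1Optimizer();
		optimizer.build(x, null, r);   // W is ignored by F1, so null is passed here
		INDArray u = optimizer.getU(); // learned (3 x 1) matrix of implicit features
		System.out.println(u);
	}
}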
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/similaritymeasures/F3Optimizer.java
package ai.libs.mlplan.metamining.similaritymeasures; import java.util.ArrayList; import java.util.List; import java.util.Random; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.factory.Nd4j; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.basic.sets.Pair; import de.jungblut.math.DoubleVector; import de.jungblut.math.dense.DenseDoubleVector; import de.jungblut.math.minimize.CostFunction; import de.jungblut.math.minimize.CostGradientTuple; import de.jungblut.math.minimize.GradientDescent; public class F3Optimizer implements IHeterogenousSimilarityMeasureComputer { private static final Logger logger = LoggerFactory.getLogger(F3Optimizer.class); private static final double ALPHA_START = 0.000000001; // learning rate private static final double ALPHA_MAX = 1e-6; private static final int ITERATIONS_PER_PROBE = 100; private static final int LIMIT = 1; // as long as the solution improves by at least this value, continue private static final double MAX_DESIRED_ERROR = 0; private final double mu; // regularization constant private INDArray r; private INDArray x; private INDArray w; private INDArray u; // the learned matrix U private INDArray v; // the learned matrix V private final Random rand = new Random(); public F3Optimizer(final double mu) { super(); this.mu = mu; } /** * Learns matrices U and V that minimize the regularized loss F3. */ @Override public void build(final INDArray x, final INDArray w, final INDArray r) { this.r = r; this.w = w; this.x = x; final int n = x.rows(); final int d = x.columns(); final int m = w.rows(); final int l = w.columns(); final int numberOfImplicitFeatures = 1; logger.debug("X = ( {} x {} )", n, x.columns()); logger.debug("W = ( {} x {} )", m, w.columns()); /* generate initial U and V vectors */ boolean succesfullyBooted = false; DoubleVector currentSolutionAsVector = this.getRandomInitSolution(d, l, numberOfImplicitFeatures); Pair<INDArray, INDArray> currentUAndVAsMatrix = this.vector2matrices(currentSolutionAsVector, d, numberOfImplicitFeatures, l, numberOfImplicitFeatures); logger.debug("randomly initialized U = {} ({} x {})", currentUAndVAsMatrix.getX(), d, numberOfImplicitFeatures); logger.debug("randomly initialized V = {} ({} x {})", currentUAndVAsMatrix.getY(), l, numberOfImplicitFeatures); /* determine cost */ double currentCost = this.getCost(currentUAndVAsMatrix.getX(), currentUAndVAsMatrix.getY()); logger.debug("loss of randomly initialized U and V: {}", currentCost); CostFunction cf = input -> { Pair<INDArray, INDArray> uAndV = this.vector2matrices(input, d, numberOfImplicitFeatures, l, numberOfImplicitFeatures); INDArray uIntermediate = uAndV.getX(); INDArray vIntermediate = uAndV.getY(); assert uIntermediate.rows() == d && uIntermediate.columns() == numberOfImplicitFeatures : "Incorrect shape of U: (" + uIntermediate.rows() + " x " + uIntermediate.columns() + ") instead of (" + d + " x " + numberOfImplicitFeatures + ")"; assert vIntermediate.rows() == l && vIntermediate.columns() == numberOfImplicitFeatures : "Incorrect shape of V: (" + vIntermediate.rows() + " x " + vIntermediate.columns() + ") instead of (" + l + " x " + numberOfImplicitFeatures + ")"; double cost = this.getCost(uIntermediate, vIntermediate); INDArray gradientMatrixForU = this.getGradientAsMatrix(uIntermediate, vIntermediate, true); INDArray gradientMatrixForV = this.getGradientAsMatrix(uIntermediate, vIntermediate, false); return new CostGradientTuple(cost, this.matrices2vector(gradientMatrixForU, gradientMatrixForV)); }; /* probe algorithm with different alphas */ double alpha = ALPHA_START; int turnsWithoutImprovement = 0; while (currentCost > MAX_DESIRED_ERROR) { double lastCost = currentCost; DoubleVector lastSolution = currentSolutionAsVector; GradientDescent gd = new GradientDescent(alpha, LIMIT); currentSolutionAsVector = gd.minimize(cf, currentSolutionAsVector, ITERATIONS_PER_PROBE, false); logger.debug("Produced gd solution vector {}", currentSolutionAsVector); /* if the current solution contains non-numbers, shrink alpha and use last solution again */ boolean hasNanEntry = false; for (int i = 0; i < currentSolutionAsVector.getLength(); i++) { if (Double.valueOf(currentSolutionAsVector.get(i)).equals(Double.NaN)) { hasNanEntry = true; break; } } if (hasNanEntry) { currentSolutionAsVector = lastSolution; currentCost = lastCost; if (alpha > 1e-20) { alpha /= 2; } } else { currentUAndVAsMatrix = this.vector2matrices(currentSolutionAsVector, d, numberOfImplicitFeatures, l, numberOfImplicitFeatures); currentCost = this.getCost(currentUAndVAsMatrix.getX(), currentUAndVAsMatrix.getY()); if (lastCost <= currentCost) { currentSolutionAsVector = lastSolution; currentCost = lastCost; if (lastCost == currentCost) { turnsWithoutImprovement++; alpha *= 2; } else if (alpha > 1e-20) { alpha /= 2; } if (turnsWithoutImprovement > 10) { logger.debug("No further improvement, canceling"); break; } } else { if (!succesfullyBooted) { succesfullyBooted = true; } turnsWithoutImprovement = 0; alpha *= 2; } alpha = Math.min(alpha, ALPHA_MAX); logger.debug("Current cost: {} (alpha = {})", currentCost, alpha); } /* if we have not successfully booted yet, draw a new random initialization */ if (!succesfullyBooted) { currentSolutionAsVector = this.getRandomInitSolution(d, l, numberOfImplicitFeatures); currentUAndVAsMatrix = this.vector2matrices(currentSolutionAsVector, d, numberOfImplicitFeatures, l, numberOfImplicitFeatures); currentCost = this.getCost(currentUAndVAsMatrix.getX(), currentUAndVAsMatrix.getY()); alpha = ALPHA_START; logger.info("Rebooting approach with solution vector {} that has cost {}", currentSolutionAsVector, currentCost); } } this.u = currentUAndVAsMatrix.getX(); this.v = currentUAndVAsMatrix.getY(); logger.info("Finished learning"); logger.debug("U = {}", this.u); logger.debug("V = {}", this.v); } private DoubleVector getRandomInitSolution(final int d, final int l, final int numberOfImplicitFeatures) { double[] denseVector = new double[(d + l) * numberOfImplicitFeatures]; int c = 0; for (int i = 0; i < d; i++) { for (int j = 0; j < numberOfImplicitFeatures; j++) { denseVector[c++] = (this.rand.nextDouble() - 0.5) * 100; } } for (int i = 0; i < l; i++) { for (int j = 0; j < numberOfImplicitFeatures; j++) { denseVector[c++] = (this.rand.nextDouble() - 0.5) * 100; } } return new DenseDoubleVector(denseVector); } /** * creates a matrix of the Nd4j framework from a vector of Thomas Jungblut's * math framework * * @param vector the vector to convert * @param m the number of rows * @param n the number of columns * @return the resulting (m x n) matrix */ public INDArray vector2matrix(final DoubleVector vector, final int m, final int n) { double[] inputs = new double[vector.getLength()]; for (int i = 0; i < vector.getLength(); i++) { inputs[i] = vector.get(i); } return Nd4j.create(inputs, new int[] { m, n }); } public Pair<INDArray, INDArray> vector2matrices(final DoubleVector vector, final int n, final int d, final int m, final int l) { DoubleVector inputForU = vector.sliceByLength(0, n * d); DoubleVector inputForV = vector.sliceByLength(n * d, vector.getLength() - inputForU.getLength()); INDArray uIntermediate = this.vector2matrix(inputForU, n, d); INDArray vIntermediate = this.vector2matrix(inputForV, m, l); return new Pair<>(uIntermediate, vIntermediate); } /** * collapses a matrix of the Nd4j framework into a double vector of Thomas * Jungblut's framework * * @param matrix the matrix to collapse * @return the resulting vector */ public DoubleVector matrix2vector(final INDArray matrix) { int m = matrix.rows(); int n = matrix.columns(); double[] denseVector = new double[m * n]; int c = 0; for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { denseVector[c++] = matrix.getDouble(i, j); } } return new DenseDoubleVector(denseVector); } public DoubleVector matrices2vector(final INDArray... matrices) { List<DoubleVector> vectors = new ArrayList<>(); int length = 0; for (INDArray matrix : matrices) { DoubleVector vector = this.matrix2vector(matrix); vectors.add(vector); length += vector.getLength(); } double[] collapsed = new double[length]; int c = 0; for (DoubleVector vector : vectors) { for (int i = 0; i < vector.getLength(); i++) { collapsed[c++] = vector.get(i); } } return new DenseDoubleVector(collapsed); } /** * This evaluates the regularized cost F3, i.e. the squared Frobenius norm of * (R - XUV^TW^T) plus mu times the squared Frobenius norms of U and V. * * @param u the current candidate for U * @param v the current candidate for V * @return the cost of the pair (u, v) */ public double getCost(final INDArray u, final INDArray v) { INDArray z1 = this.x.mmul(u); INDArray z2 = this.w.mmul(v).transpose(); INDArray z = z1.mmul(z2); INDArray q = this.r.sub(z); return this.getSquaredFrobeniusNorm(q) + this.mu * (this.getSquaredFrobeniusNorm(u) + this.getSquaredFrobeniusNorm(v)); } public double getSquaredFrobeniusNorm(final INDArray matrix) { double norm = 0; int m = matrix.rows(); int n = matrix.columns(); for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { norm += Math.pow(matrix.getDouble(i, j), 2); } } return norm; } /** * This computes the gradient of F3 in matrix form * * @param u the current candidate for U * @param v the current candidate for V * @param computeDerivationsOfU whether to derive for the elements of U (true) or of V (false) * @return the gradient of F3 with respect to U or V */ public INDArray getGradientAsMatrix(final INDArray u, final INDArray v, final boolean computeDerivationsOfU) { if (computeDerivationsOfU) { int m = u.rows(); int n = u.columns(); float[][] derivatives = new float[m][n]; for (int s = 0; s < m; s++) { for (int t = 0; t < n; t++) { derivatives[s][t] = this.getFirstDerivative(u, v, s, t, true); } } return Nd4j.create(derivatives); } else { int m = v.rows(); int n = v.columns(); float[][] derivatives = new float[m][n]; for (int s = 0; s < m; s++) { for (int t = 0; t < n; t++) { derivatives[s][t] = this.getFirstDerivative(u, v, s, t, false); } } return Nd4j.create(derivatives); } } /** * This computes the derivative of F3 for the (s,t)-th element of the U or V matrix * * @param u the current candidate for U * @param v the current candidate for V * @param s the row index * @param t the column index * @param deriveForU whether to derive for an element of U (true) or of V (false) * @return the partial derivative */ public float getFirstDerivative(final INDArray u, final INDArray v, final int s, final int t, final boolean deriveForU) { /* compute inner product Z := XUV^TW^T */ INDArray z1 = this.x.mmul(u); INDArray z2 = this.w.mmul(v).transpose(); INDArray z = z1.mmul(z2); /* define the difference of R and Z in Q */ INDArray q = this.r.sub(z); float derivative = 0; int n = q.rows(); int m = q.columns(); assert m == this.w.rows() : "W has " + this.w.rows() + " but is expected to have m = " + m + " rows"; if (t >= v.columns()) { throw new IllegalArgumentException("V has only " + v.columns() + " but would have to have " + (t + 1) + " columns to proceed! I.e. deriving a derivative for t = " + t + " is not possible."); } /* compute derivative based on whether it is for a u-element or a v-element */ if (deriveForU) { for (int i = 0; i < n; i++) { float xis = this.x.getFloat(i, s); for (int j = 0; j < m; j++) { double factor1 = q.getFloat(i, j); double factor2 = xis; double scalarProduct = this.w.getRow(j).mmul(v.getColumn(t)).getDouble(0, 0); derivative -= 2 * factor1 * factor2 * scalarProduct; } } derivative += 2 * this.mu * u.getDouble(s,t); } else { for (int i = 0; i < n; i++) { // scalar product of the i-th row of X and the t-th column of U, i.e. (XU)_it double scalarProduct = this.x.getRow(i).mmul(u.getColumn(t)).getDouble(0, 0); for (int j = 0; j < m; j++) { double factor1 = q.getFloat(i, j); double wjs = this.w.getFloat(j, s); derivative -= 2 * factor1 * wjs * scalarProduct; } } derivative += 2 * this.mu * v.getDouble(s,t); } return derivative; } @Override public double computeSimilarity(final INDArray x, final INDArray w) { return x.mmul(this.u).mmul(this.v.transpose()).mmul(w.transpose()).getDouble(0); } }
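A hedged sketch for the F3 optimizer, which additionally takes the pipeline characterizations W and a regularization constant mu; the shapes and the mu value are illustrative only:

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class F3OptimizerSketch {
	public static void main(String[] args) {
		INDArray x = Nd4j.rand(4, 3); // 4 datasets x 3 meta features
		INDArray w = Nd4j.rand(5, 6); // 5 pipelines x 6 characterization features
		INDArray r = Nd4j.rand(4, 5); // relative rank of pipeline j on dataset i
		F3Optimizer optimizer = new F3Optimizer(0.1); // mu = 0.1 (regularization)
		optimizer.build(x, w, r);
		// similarity of a (dataset, pipeline) pair; here the first training rows are reused
		double similarity = optimizer.computeSimilarity(x.getRow(0), w.getRow(0));
		System.out.println(similarity);
	}
}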
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/similaritymeasures/IHeterogenousSimilarityMeasureComputer.java
package ai.libs.mlplan.metamining.similaritymeasures; import org.nd4j.linalg.api.ndarray.INDArray; /** * Encapsulates a model that is trained to compute the similarity between two * multidimensional measures, e.g. data set meta features, algorithm meta * features and algorithm performance on a data set. * * @author Helena Graf * */ public interface IHeterogenousSimilarityMeasureComputer { /** * Build a model based on training data that can then be used to estimate the * similarity of two measures for a new problem. * * @param x * Feature values for instances of the first measure (One row = * features of one instance, e.g. meta features of a data set) * @param w * Feature values for instances of the second measure (One row = * features of one instance, e.g. a characterization of a machine * learning pipeline) * @param r * A matrix giving an indication of how good of a match a specific * instance of the first measure is to a specific instance of the * second measure, i.e. how well a pipeline performs on a data set */ public void build(INDArray x, INDArray w, INDArray r); /** * Compute the 'quality of the match' of given feature values for a new problem * instance based on the training. * * @param x * Feature values for the instance for the first measure (e.g. meta * data of a new data set) * @param w * Feature values for the instance for the second measure (e.g. a * characterization of machine learning pipeline) * @return The quality of the match, or similarity for the given vectors */ public double computeSimilarity(INDArray x, INDArray w); }
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/similaritymeasures/IRelativeRankMatrixComputer.java
package ai.libs.mlplan.metamining.similaritymeasures; import org.nd4j.linalg.api.ndarray.INDArray; /** * Represents an algorithm that, presented with the performance values of * pipelines on data sets, computes a relative rank of each pipeline's * performance on a data set compared to the other pipelines' performance on * the same data set. * * @author Helena Graf * */ public interface IRelativeRankMatrixComputer { /** * Computes the relative rank matrix for the given performance values of * pipelines on datasets. * * @param performanceValues * The results of pipelines on datasets: rows: data sets, columns: * pipelines, entries: array of results of pipeline on data set * @return The converted matrix as an INDArray for more efficient computation */ INDArray computeRelativeRankMatrix(double[][][] performanceValues); }
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/similaritymeasures/ISignificanceTest.java
package ai.libs.mlplan.metamining.similaritymeasures; /** * Represents a significance test that can be used to decide whether one of two * algorithms is better on a specific data set. * * @author Helena Graf * */ public interface ISignificanceTest { /** * Computes the significance of whether the first array of given performance * values is considered to be better than the second. * * @param performanceValues1 * The first array of performance values to compare * @param performanceValues2 * The second array of performance values to compare * @return The resulting significance level of the test */ public double computeSignificance(double[] performanceValues1, double[] performanceValues2); }
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/similaritymeasures/MannWhitneyUSignificance.java
package ai.libs.mlplan.metamining.similaritymeasures; import org.apache.commons.math3.stat.inference.MannWhitneyUTest; /** * A wrapper for the Mann-Whitney U Significance test. * * @author Helena Graf * */ public class MannWhitneyUSignificance implements ISignificanceTest { /** * The test object used to compute the significance for new given values */ private MannWhitneyUTest significanceTest = new MannWhitneyUTest(); @Override public double computeSignificance(final double[] performanceValues1, final double[] performanceValues2) { return this.significanceTest.mannWhitneyUTest(performanceValues1, performanceValues2); } }
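A small, hedged example of the wrapper; mannWhitneyUTest from Apache Commons Math returns the asymptotic two-sided p-value, so small values indicate a significant difference between the two performance samples:

public class SignificanceSketch {
	public static void main(String[] args) {
		ISignificanceTest test = new MannWhitneyUSignificance();
		double[] accuraciesA = { 0.81, 0.83, 0.85, 0.80, 0.84 };
		double[] accuraciesB = { 0.70, 0.72, 0.69, 0.74, 0.71 };
		double pValue = test.computeSignificance(accuraciesA, accuraciesB);
		System.out.println("p-value: " + pValue); // e.g. p < 0.05 suggests a real difference
	}
}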
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/similaritymeasures/RelativeRankMatricComputer.java
package ai.libs.mlplan.metamining.similaritymeasures; import java.util.Arrays; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.factory.Nd4j; /** * Uses a given significance test to compute the relative rank matrix for a * given matrix of absolute performance values of pipelines on datasets. * * @author Helena Graf * */ public class RelativeRankMatricComputer implements IRelativeRankMatrixComputer { /** * The used significance test */ private ISignificanceTest significanceTest; /** * Creates a new RelativeRankMatricComputer using a Mann-Whitney U significance test. */ public RelativeRankMatricComputer() { this(new MannWhitneyUSignificance()); } /** * Creates a new RelativeRankMatricComputer using the given significance test. * * @param significanceTest * the significance test to use for pairwise comparisons */ public RelativeRankMatricComputer(ISignificanceTest significanceTest) { this.significanceTest = significanceTest; } @Override public INDArray computeRelativeRankMatrix(double[][][] performanceValues) { double[][] significances = new double[performanceValues.length][performanceValues[0].length]; // For all datasets for (int i = 0; i < performanceValues.length; i++) { // For all pipelines for (int j = 0; j < performanceValues[i].length; j++) { double score = 0; if (performanceValues[i][j] != null && performanceValues[i][j].length > 0) { // Compared with all other workflows, compute a score for (int k = 0; k < performanceValues[i].length; k++) { if (performanceValues[i][k] != null && performanceValues[i][k].length > 0) { double significance = significanceTest.computeSignificance(performanceValues[i][j], performanceValues[i][k]); if (significance < 0.05) { double mean1 = Arrays.stream(performanceValues[i][j]).average().orElse(Double.NaN); double mean2 = Arrays.stream(performanceValues[i][k]).average().orElse(Double.NaN); if (mean1 > mean2) { score++; } } else { score += 0.5; } } else { score++; } } } significances[i][j] = score; } } return Nd4j.create(significances); } }
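A hedged worked example of the rank computation: for every dataset, a pipeline scores one point per pipeline it significantly outperforms (p < 0.05 and higher mean) and half a point per insignificant comparison, including the comparison with itself:

import org.nd4j.linalg.api.ndarray.INDArray;

public class RelativeRankSketch {
	public static void main(String[] args) {
		// 2 datasets x 2 pipelines, each entry a sample of repeated accuracy measurements
		double[][][] performanceValues = {
			{ { 0.90, 0.91, 0.89, 0.92, 0.90 }, { 0.70, 0.72, 0.71, 0.69, 0.73 } }, // dataset 0
			{ { 0.60, 0.61, 0.62, 0.59, 0.60 }, { 0.80, 0.79, 0.81, 0.82, 0.80 } }  // dataset 1
		};
		INDArray ranks = new RelativeRankMatricComputer().computeRelativeRankMatrix(performanceValues);
		System.out.println(ranks); // entry (i, j): relative score of pipeline j on dataset i
	}
}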
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/similaritymeasures/package-info.java
/** * Package containing classes that determine the similarity of pipelines. * * @author Helena Graf * */ package ai.libs.mlplan.metamining.similaritymeasures;
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/MLPlanClassifierConfig.java
package ai.libs.mlplan.multiclass; import java.io.File; import org.aeonbits.owner.Config.Sources; import ai.libs.hasco.variants.forwarddecomposition.twophase.TwoPhaseHASCOConfig; @Sources({ "file:conf/mlplan.properties" }) public interface MLPlanClassifierConfig extends TwoPhaseHASCOConfig { public static final String PREFERRED_COMPONENTS = "mlplan.preferredComponents"; public static final String SELECTION_PORTION = "mlplan.selection.mccvPortion"; @Key(SELECTION_PORTION) @DefaultValue("0.3") public double dataPortionForSelection(); @Key(PREFERRED_COMPONENTS) @DefaultValue("conf/mlplan/precedenceList.txt") public File preferredComponents(); }
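The config interface reads its values from conf/mlplan.properties. A hedged sketch of such a file, using the keys from this interface with the default values declared above (illustrative, not prescriptive):

mlplan.preferredComponents = conf/mlplan/precedenceList.txt
mlplan.selection.mccvPortion = 0.3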
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/IClassifierFactory.java
package ai.libs.mlplan.multiclass.wekamlplan; import ai.libs.hasco.optimizingfactory.BaseFactory; import weka.classifiers.Classifier; public interface IClassifierFactory extends BaseFactory<Classifier> { }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/MLPlanWekaClassifier.java
package ai.libs.mlplan.multiclass.wekamlplan; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Enumeration; import java.util.List; import java.util.Objects; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.gui.civiewplugin.TFDNodeAsCIViewInfoGenerator; import ai.libs.hasco.gui.statsplugin.HASCOModelStatisticsPlugin; import ai.libs.hasco.model.Component; import ai.libs.jaicore.basic.ILoggingCustomizable; import ai.libs.jaicore.basic.TimeOut; import ai.libs.jaicore.basic.events.IEventEmitter; import ai.libs.jaicore.graphvisualizer.plugin.graphview.GraphViewPlugin; import ai.libs.jaicore.graphvisualizer.plugin.nodeinfo.NodeInfoGUIPlugin; import ai.libs.jaicore.graphvisualizer.plugin.solutionperformanceplotter.SolutionPerformanceTimelinePlugin; import ai.libs.jaicore.graphvisualizer.window.AlgorithmVisualizationWindow; import ai.libs.jaicore.ml.evaluation.IInstancesClassifier; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNodeInfoGenerator; import ai.libs.jaicore.search.gui.plugins.rollouthistograms.SearchRolloutHistogramPlugin; import ai.libs.jaicore.search.model.travesaltree.JaicoreNodeInfoGenerator; import ai.libs.mlplan.core.AbstractMLPlanBuilder; import ai.libs.mlplan.core.MLPlan; import ai.libs.mlplan.multiclass.MLPlanClassifierConfig; import javafx.application.Platform; import javafx.embed.swing.JFXPanel; import weka.classifiers.Classifier; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.CapabilitiesHandler; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; /** * A WEKA classifier wrapping the functionality of ML-Plan, where the object * constructed by ML-Plan is itself a WEKA classifier. * * It implements the algorithm interface with itself (with modified state) as the * output. * * @author wever, fmohr * */ @SuppressWarnings("serial") public class MLPlanWekaClassifier implements Classifier, CapabilitiesHandler, OptionHandler, ILoggingCustomizable, IInstancesClassifier, IEventEmitter { /* Logger for controlled output. */ private transient Logger logger = LoggerFactory.getLogger(MLPlanWekaClassifier.class); private String loggerName; private boolean visualizationEnabled = false; /* MLPlan Builder and the instance of mlplan */ private final transient AbstractMLPlanBuilder builder; /* The timeout for selecting a classifier. */ private TimeOut timeout; /* The output of mlplan, i.e., the selected classifier and the internal validation error measured on the given data. */ private Classifier classifierFoundByMLPlan; private double internalValidationErrorOfSelectedClassifier; private final transient List<Object> listeners = new ArrayList<>(); public MLPlanWekaClassifier(final AbstractMLPlanBuilder builder) { this.builder = builder; this.timeout = builder.getTimeOut(); } @Override public void buildClassifier(final Instances data) throws Exception { Objects.requireNonNull(this.timeout, "Timeout must be set before running ML-Plan."); MLPlan mlplan = new MLPlan(this.builder, data); this.listeners.forEach(mlplan::registerListener); mlplan.setTimeout(this.timeout); if (this.loggerName != null) { mlplan.setLoggerName(this.loggerName + "."
+ "mlplan"); } if (this.visualizationEnabled) { new JFXPanel(); AlgorithmVisualizationWindow window = new AlgorithmVisualizationWindow(mlplan, new GraphViewPlugin(), new NodeInfoGUIPlugin<>(new TFDNodeAsCIViewInfoGenerator(this.builder.getComponents())), new NodeInfoGUIPlugin<>(new JaicoreNodeInfoGenerator<>(new TFDNodeInfoGenerator())), new SearchRolloutHistogramPlugin<>(), new SolutionPerformanceTimelinePlugin(), new HASCOModelStatisticsPlugin()); Platform.runLater(window); } this.classifierFoundByMLPlan = mlplan.call(); } @Override public double[] classifyInstances(final Instances instances) throws Exception { /* If the selected classifier can handle batch classification, use this feature. */ if (this.getSelectedClassifier() instanceof IInstancesClassifier) { return ((IInstancesClassifier) this.getSelectedClassifier()).classifyInstances(instances); } double[] predictions = new double[instances.size()]; for (int i = 0; i < instances.size(); i++) { predictions[i] = this.getSelectedClassifier().classifyInstance(instances.get(i)); } return predictions; } @Override public double classifyInstance(final Instance instance) throws Exception { if (this.classifierFoundByMLPlan == null) { throw new IllegalStateException("Classifier has not been built yet."); } return this.classifierFoundByMLPlan.classifyInstance(instance); } @Override public double[] distributionForInstance(final Instance instance) throws Exception { if (this.classifierFoundByMLPlan == null) { throw new IllegalStateException("Classifier has not been built yet."); } return this.classifierFoundByMLPlan.distributionForInstance(instance); } @Override public Capabilities getCapabilities() { Capabilities result = new Capabilities(this); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.STRING_ATTRIBUTES); result.enable(Capability.RELATIONAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(1); return result; } @Override public Enumeration<Option> listOptions() { /* As there are no options, simply return null. */ return null; } @Override public void setOptions(final String[] options) throws Exception { /* Intentionally left blank. */ } @Override public String[] getOptions() { /* As there are no options, simply return an empty array. */ return new String[] {}; } public void setTimeout(final TimeOut timeout) { this.timeout = timeout; } public MLPlanClassifierConfig getMLPlanConfig() { return this.builder.getAlgorithmConfig(); } public Collection<Component> getComponents() throws IOException { return this.builder.getComponents(); } /** * Enables the GUI of the MLPlanWekaClassifier if set to true. By default the visualization is deactivated. * The flag needs to be set before buildClassifier is called. * * @param visualizationEnabled Flag whether the visualization is enabled or not. */ public void setVisualizationEnabled(final boolean visualizationEnabled) { this.visualizationEnabled = visualizationEnabled; } /** * @return An object of the classifier ML-Plan has selected during the build. */ public Classifier getSelectedClassifier() { return this.classifierFoundByMLPlan; } /** * @return The internal validation error (during selection phase) of the selected classifier. 
*/ public double getInternalValidationErrorOfSelectedClassifier() { return this.internalValidationErrorOfSelectedClassifier; } @Override public void setLoggerName(final String name) { this.loggerName = name; this.logger.info("Switching logger name to {}", name); this.logger = LoggerFactory.getLogger(name); this.logger.info("Switched ML-Plan logger to {}", name); } @Override public String getLoggerName() { return this.loggerName; } @Override public void registerListener(final Object listener) { this.listeners.add(listener); } }
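A minimal training sketch for the classifier above (not part of the repository; the ARFF path is hypothetical and the TimeOut constructor signature is assumed):

package ai.libs.mlplan.multiclass.wekamlplan;

import java.util.concurrent.TimeUnit;

import ai.libs.jaicore.basic.TimeOut;
import ai.libs.mlplan.core.MLPlanWekaBuilder;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class MLPlanWekaClassifierExample {
    public static void main(final String[] args) throws Exception {
        Instances data = DataSource.read("path/to/dataset.arff"); // hypothetical path
        data.setClassIndex(data.numAttributes() - 1);
        MLPlanWekaClassifier mlplan = new MLPlanWekaClassifier(new MLPlanWekaBuilder());
        mlplan.setTimeout(new TimeOut(300, TimeUnit.SECONDS)); // assumed constructor
        mlplan.buildClassifier(data); // runs the ML-Plan search and stores the selected classifier
        System.out.println("Selected classifier: " + mlplan.getSelectedClassifier());
    }
}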
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sklearn/MLPlanScikitLearnClassifierConfig.java
package ai.libs.mlplan.multiclass.wekamlplan.sklearn; import org.aeonbits.owner.Config.LoadPolicy; import org.aeonbits.owner.Config.LoadType; import org.aeonbits.owner.Config.Sources; import ai.libs.hasco.variants.forwarddecomposition.twophase.TwoPhaseHASCOConfig; @LoadPolicy(LoadType.MERGE) @Sources({ "file:conf/hasco/hasco.properties", "file:conf/mlplan/scikitlearn.properties" }) public interface MLPlanScikitLearnClassifierConfig extends TwoPhaseHASCOConfig { }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sklearn/SKLearnClassifierFactory.java
package ai.libs.mlplan.multiclass.wekamlplan.sklearn; import java.io.IOException; import java.util.Arrays; import java.util.HashSet; import java.util.Map.Entry; import java.util.Set; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.exceptions.ComponentInstantiationFailedException; import ai.libs.hasco.model.CategoricalParameterDomain; import ai.libs.hasco.model.ComponentInstance; import ai.libs.hasco.model.Parameter; import ai.libs.jaicore.basic.ILoggingCustomizable; import ai.libs.jaicore.ml.scikitwrapper.ScikitLearnWrapper; import ai.libs.mlplan.multiclass.wekamlplan.IClassifierFactory; import weka.classifiers.Classifier; /** * The SKLearnClassifierFactory takes a ground component instance and parses it into a <code>ScikitLearnWrapper</code> as defined in the project jaicore-ml. * This factory may be used in the context of HASCO, especially for ML-Plan. * * @author wever */ public class SKLearnClassifierFactory implements IClassifierFactory, ILoggingCustomizable { private static final CategoricalParameterDomain BOOL_DOMAIN = new CategoricalParameterDomain(Arrays.asList("True", "False")); private Logger logger = LoggerFactory.getLogger(SKLearnClassifierFactory.class); private String loggerName; @Override public Classifier getComponentInstantiation(final ComponentInstance groundComponent) throws ComponentInstantiationFailedException { this.logger.info("Parse ground component instance {} to ScikitLearnWrapper object.", groundComponent); StringBuilder constructInstruction = new StringBuilder(); Set<String> importSet = new HashSet<>(); constructInstruction.append(this.extractSKLearnConstructInstruction(groundComponent, importSet)); StringBuilder imports = new StringBuilder(); importSet.forEach(imports::append); try { return new ScikitLearnWrapper(constructInstruction.toString(), imports.toString(), true); } catch (IOException e) { this.logger.error("Could not create sklearn wrapper for construction {} and imports {}.", constructInstruction, imports); return null; } } public String extractSKLearnConstructInstruction(final ComponentInstance groundComponent, final Set<String> importSet) { StringBuilder sb = new StringBuilder(); if (groundComponent.getComponent().getName().startsWith("mlplan.util.model.make_forward")) { sb.append(this.extractSKLearnConstructInstruction(groundComponent.getSatisfactionOfRequiredInterfaces().get("source"), importSet)); sb.append(","); sb.append(this.extractSKLearnConstructInstruction(groundComponent.getSatisfactionOfRequiredInterfaces().get("base"), importSet)); return sb.toString(); } String[] packagePathSplit = groundComponent.getComponent().getName().split("\\."); StringBuilder fromSB = new StringBuilder(); fromSB.append(packagePathSplit[0]); for (int i = 1; i < packagePathSplit.length - 1; i++) { fromSB.append("." 
+ packagePathSplit[i]); } String className = packagePathSplit[packagePathSplit.length - 1]; importSet.add("from " + fromSB.toString() + " import " + className + "\n"); if (groundComponent.getComponent().getName().startsWith("sklearn.feature_selection.f_classif")) { sb.append("f_classif(features, targets)"); return sb.toString(); } sb.append(className); sb.append("("); if (groundComponent.getComponent().getName().contains("make_pipeline")) { sb.append(this.extractSKLearnConstructInstruction(groundComponent.getSatisfactionOfRequiredInterfaces().get("preprocessor"), importSet)); sb.append(","); sb.append(this.extractSKLearnConstructInstruction(groundComponent.getSatisfactionOfRequiredInterfaces().get("classifier"), importSet)); } else if (groundComponent.getComponent().getName().contains("make_union")) { sb.append(this.extractSKLearnConstructInstruction(groundComponent.getSatisfactionOfRequiredInterfaces().get("p1"), importSet)); sb.append(","); sb.append(this.extractSKLearnConstructInstruction(groundComponent.getSatisfactionOfRequiredInterfaces().get("p2"), importSet)); } else { boolean first = true; for (Entry<String, String> parameterValue : groundComponent.getParameterValues().entrySet()) { if (first) { first = false; } else { sb.append(","); } Parameter param = groundComponent.getComponent().getParameterWithName(parameterValue.getKey()); sb.append(parameterValue.getKey() + "="); if (param.isNumeric()) { sb.append(parameterValue.getValue()); } else if (param.isCategorical() && BOOL_DOMAIN.subsumes(param.getDefaultDomain())) { sb.append(parameterValue.getValue()); } else { try { sb.append(Integer.parseInt(parameterValue.getValue())); } catch (Exception e) { try { sb.append(Double.parseDouble(parameterValue.getValue())); } catch (Exception e1) { sb.append("\"" + parameterValue.getValue() + "\""); } } } } for (Entry<String, ComponentInstance> satReqI : groundComponent.getSatisfactionOfRequiredInterfaces().entrySet()) { if (first) { first = false; } else { sb.append(","); } sb.append(satReqI.getKey() + "="); sb.append(this.extractSKLearnConstructInstruction(satReqI.getValue(), importSet)); } } sb.append(")"); return sb.toString(); } @Override public String getLoggerName() { return this.loggerName; } @Override public void setLoggerName(final String name) { this.loggerName = name; this.logger.debug("Switching logger name to {}", name); this.logger = LoggerFactory.getLogger(name); this.logger.debug("Switched SKLearnClassifierFactory logger to {}", name); } }
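A sketch of what the string assembly above produces (the component name and parameter are made up):

// For a ground component "sklearn.tree.DecisionTreeClassifier" with the categorical parameter criterion=entropy:
//   added to the import set:  from sklearn.tree import DecisionTreeClassifier
//   construct instruction:    DecisionTreeClassifier(criterion="entropy")
// Values that are neither numeric nor boolean are tried as int, then as double, and only then quoted.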
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sklearn/SKLearnMLPlanWekaClassifier.java
package ai.libs.mlplan.multiclass.wekamlplan.sklearn; import java.io.IOException; import ai.libs.mlplan.core.AbstractMLPlanBuilder; import ai.libs.mlplan.multiclass.wekamlplan.MLPlanWekaClassifier; public class SKLearnMLPlanWekaClassifier extends MLPlanWekaClassifier { public SKLearnMLPlanWekaClassifier(final AbstractMLPlanBuilder builder) throws IOException { super(builder); } public SKLearnMLPlanWekaClassifier() throws IOException { super(AbstractMLPlanBuilder.forSKLearn()); } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sophisticated/FeaturePreprocessor.java
package ai.libs.mlplan.multiclass.wekamlplan.sophisticated; import weka.core.Instance; import weka.core.Instances; public interface FeaturePreprocessor { public void prepare(Instances data) throws Exception; public Instance apply(Instance data) throws Exception; public Instances apply(Instances data) throws Exception; public boolean isPrepared(); }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sophisticated/MLSophisticatedPipeline.java
package ai.libs.mlplan.multiclass.wekamlplan.sophisticated; import java.io.Serializable; import java.util.ArrayList; import java.util.List; import ai.libs.jaicore.ml.WekaUtil; import ai.libs.mlplan.multiclass.wekamlplan.sophisticated.featuregen.FeatureGenerator; import weka.classifiers.Classifier; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; /** * * @author Felix Mohr * */ @SuppressWarnings("serial") public class MLSophisticatedPipeline implements Classifier, FeatureGenerator, Serializable { private final List<FeatureGenerator> featureGenerators = new ArrayList<>(); private final List<FeaturePreprocessor> featurePreprocessors = new ArrayList<>(); private final List<FeaturePreprocessor> featureSelectors = new ArrayList<>(); private final Classifier classifier; private boolean trained = false; private long timeForTrainingPreprocessors; private long timeForTrainingClassifier; private long timeForExecutingPreprocessor; private long timeForExecutingClassifier; private Instances emptyReferenceDataset; public MLSophisticatedPipeline(final List<FeatureGenerator> featureGenerators, final List<FeaturePreprocessor> preprocessors, final List<FeaturePreprocessor> featureSelectors, final Classifier baseClassifier) { super(); if (baseClassifier == null) { throw new IllegalArgumentException("Base classifier must not be null!"); } this.featureGenerators.addAll(featureGenerators); this.featurePreprocessors.addAll(preprocessors); this.featureSelectors.addAll(featureSelectors); this.classifier = baseClassifier; } @Override public void buildClassifier(Instances data) throws Exception { /* determine features to be created */ long start; Instances mergedInstances = new Instances(data); int f = data.numAttributes(); /* generate features */ for (FeatureGenerator pp : this.featureGenerators) { /* if the filter has not been trained yet, do so now and store it */ if (!pp.isPrepared()) { start = System.currentTimeMillis(); pp.prepare(data); this.timeForTrainingPreprocessors = System.currentTimeMillis() - start; } Instances modifiedInstances = pp.apply(data); if (modifiedInstances == null) { throw new IllegalStateException("Feature Generator " + pp + " has generated a null-dataset!"); } /* now apply the attribute selector */ for (int i = 0; i < modifiedInstances.numAttributes(); i++) { modifiedInstances.renameAttribute(modifiedInstances.attribute(i), "f" + (f++)); } mergedInstances = Instances.mergeInstances(mergedInstances, modifiedInstances); mergedInstances.setClassIndex(data.classIndex()); } data = mergedInstances; /* preprocess features */ for (FeaturePreprocessor pp : this.featurePreprocessors) { pp.prepare(data); data = pp.apply(data); if (data.classIndex() < 0) { throw new IllegalStateException("Preprocessor " + pp + " has removed class index!"); } } /* feature selection */ for (FeaturePreprocessor pp : this.featureSelectors) { pp.prepare(data); data = pp.apply(data); if (data.classIndex() < 0) { throw new IllegalStateException("Preprocessor " + pp + " has removed class index!"); } } /* build classifier based on reduced data */ this.emptyReferenceDataset = new Instances(data); this.emptyReferenceDataset.clear(); start = System.currentTimeMillis(); this.classifier.buildClassifier(data); this.timeForTrainingClassifier = System.currentTimeMillis() - start; this.trained = true; } private Instance applyPreprocessors(Instance data) throws Exception { long start = System.currentTimeMillis(); /* create features */ 
Instance mergedInstance = new DenseInstance(data); mergedInstance.setDataset(data.dataset()); for (FeatureGenerator pp : this.featureGenerators) { Instances mergedDatasetA = new Instances(mergedInstance.dataset()); mergedDatasetA.clear(); mergedDatasetA.add(mergedInstance); Instance modifiedInstance = pp.apply(data); if (modifiedInstance.dataset() == null) { throw new IllegalStateException("Instance was detached from dataset by " + pp); } Instances mergedDatasetB = modifiedInstance.dataset(); Instances mergedDataset = Instances.mergeInstances(mergedDatasetA, mergedDatasetB); mergedDataset.setClassIndex(mergedDatasetA.classIndex()); mergedInstance = mergedInstance.mergeInstance(modifiedInstance); mergedInstance.setDataset(mergedDataset); this.timeForExecutingPreprocessor = System.currentTimeMillis() - start; } data = mergedInstance; /* preprocess features */ for (FeaturePreprocessor pp : this.featurePreprocessors) { data = pp.apply(data); } /* feature selection */ for (FeaturePreprocessor pp : this.featureSelectors) { data = pp.apply(data); } return data; } @Override public double classifyInstance(Instance arg0) throws Exception { if (!this.trained) { throw new IllegalStateException("Cannot make predictions on untrained pipeline!"); } arg0 = this.applyPreprocessors(arg0); long start = System.currentTimeMillis(); double result = this.classifier.classifyInstance(arg0); this.timeForExecutingClassifier = System.currentTimeMillis() - start; return result; } @Override public double[] distributionForInstance(Instance arg0) throws Exception { if (!this.trained) { throw new IllegalStateException("Cannot make predictions on untrained pipeline!"); } if (arg0 == null) { throw new IllegalArgumentException("Cannot make predictions for null-instance"); } arg0 = this.applyPreprocessors(arg0); if (arg0 == null) { throw new IllegalStateException("The filter has turned the instance into NULL"); } long start = System.currentTimeMillis(); double[] result = this.classifier.distributionForInstance(arg0); this.timeForExecutingClassifier = System.currentTimeMillis() - start; return result; } @Override public Capabilities getCapabilities() { return this.classifier.getCapabilities(); } public Classifier getBaseClassifier() { return this.classifier; } public long getTimeForTrainingPreprocessor() { return this.timeForTrainingPreprocessors; } public long getTimeForTrainingClassifier() { return this.timeForTrainingClassifier; } public long getTimeForExecutingPreprocessor() { return this.timeForExecutingPreprocessor; } public long getTimeForExecutingClassifier() { return this.timeForExecutingClassifier; } @Override public void prepare(final Instances data) throws Exception { this.buildClassifier(data); } private Instances getEmptyProbingResultDataset() { if (!this.isPrepared()) { throw new IllegalStateException("Cannot determine empty dataset, because the pipeline has not been trained yet."); } ArrayList<Attribute> atts = new ArrayList<>(); List<String> attributeValues = WekaUtil.getClassesDeclaredInDataset(this.emptyReferenceDataset); for (String att : attributeValues) { atts.add(new Attribute("probe_classprob_" + att + "_" + this)); } return new Instances("probing", atts, 0); } @Override public Instance apply(final Instance data) throws Exception { double[] classProbs = this.distributionForInstance(data); Instance newInst = new DenseInstance(classProbs.length); Instances dataset = this.getEmptyProbingResultDataset(); dataset.add(newInst); newInst.setDataset(dataset); for (int i = 0; i < classProbs.length; i++) { 
newInst.setValue(i, classProbs[i]); } return newInst; } @Override public Instances apply(final Instances data) throws Exception { Instances probingResults = new Instances(this.getEmptyProbingResultDataset()); for (Instance inst : data) { Instance probedInst = this.apply(inst); probedInst.setDataset(probingResults); probingResults.add(probedInst); } return probingResults; } @Override public boolean isPrepared() { return this.trained; } }
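A usage sketch for the pipeline above (not part of the repository; the dataset path is hypothetical). With empty generator, preprocessor, and selector lists, the pipeline degenerates to its base classifier.

package ai.libs.mlplan.multiclass.wekamlplan.sophisticated;

import java.util.ArrayList;

import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class MLSophisticatedPipelineExample {
    public static void main(final String[] args) throws Exception {
        Instances data = DataSource.read("path/to/dataset.arff"); // hypothetical path
        data.setClassIndex(data.numAttributes() - 1);
        MLSophisticatedPipeline pipeline = new MLSophisticatedPipeline(new ArrayList<>(), new ArrayList<>(), new ArrayList<>(), new J48());
        pipeline.buildClassifier(data);
        System.out.println("First prediction: " + pipeline.classifyInstance(data.firstInstance()));
    }
}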
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sophisticated
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sophisticated/featuregen/FeatureGenerator.java
package ai.libs.mlplan.multiclass.wekamlplan.sophisticated.featuregen; import ai.libs.mlplan.multiclass.wekamlplan.sophisticated.FeaturePreprocessor; public interface FeatureGenerator extends FeaturePreprocessor { }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sophisticated
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sophisticated/featuregen/FeatureGeneratorTree.java
package ai.libs.mlplan.multiclass.wekamlplan.sophisticated.featuregen; import java.util.ArrayList; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import weka.core.Instance; import weka.core.Instances; public class FeatureGeneratorTree implements FeatureGenerator { private final Logger logger = LoggerFactory.getLogger(FeatureGeneratorTree.class); private final FeatureGenerator root; private final List<FeatureGeneratorTree> children = new ArrayList<>(); private boolean prepared; public FeatureGeneratorTree(FeatureGenerator root) { this.root = root; } public void addChild(FeatureGenerator child) { children.add(new FeatureGeneratorTree(child)); } public void removeChild(FeatureGeneratorTree child) { /* compare the wrapped feature generators; comparing the root against the tree node itself could never match */ children.removeIf(c -> c.root.equals(child.root)); } public FeatureGenerator getRoot() { return root; } @Override public void prepare(Instances data) throws Exception { logger.info("Starting preparation of FeatureGeneratorTree ({}) for {}x{}-matrix.", root.getClass().getName(), data.size(), data.numAttributes()); /* prepare children and apply them in order to get the data necessary to prepare the local feature generator */ for (FeatureGeneratorTree child : children) child.prepare(data); Instances mergedInstances = new Instances(data); for (FeatureGeneratorTree child : children) { Instances instancesGeneratedByChild = child.apply(data); mergedInstances = Instances.mergeInstances(mergedInstances, instancesGeneratedByChild); } /* prepare local feature generator */ this.root.prepare(mergedInstances); Instances result = apply(data); logger.info("Preparation of FeatureGeneratorTree ({}) ready. Result will be a {}x{}-matrix", root.getClass().getName(), result.size(), result.numAttributes()); prepared = true; } @Override public Instance apply(Instance data) throws Exception { Instances instances = new Instances(data.dataset()); instances.clear(); instances.add(data); return apply(instances).firstInstance(); } @Override public Instances apply(Instances data) throws Exception { Instances mergedInstances = new Instances(data); for (FeatureGeneratorTree child : children) mergedInstances = Instances.mergeInstances(mergedInstances, child.apply(data)); return root.apply(mergedInstances); } @Override public boolean isPrepared() { return prepared; } }
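A small composition sketch (not part of the repository): a tree whose root is the PolynomialFeatures generator and whose child is the InteractingFeatures generator, both defined later in this package; the child's output is merged into the data before the root is prepared and applied.

package ai.libs.mlplan.multiclass.wekamlplan.sophisticated.featuregen;

import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class FeatureGeneratorTreeExample {
    public static void main(final String[] args) throws Exception {
        Instances data = DataSource.read("path/to/dataset.arff"); // hypothetical path
        data.setClassIndex(data.numAttributes() - 1);
        FeatureGeneratorTree tree = new FeatureGeneratorTree(new PolynomialFeatures());
        tree.addChild(new InteractingFeatures());
        tree.prepare(data); // prepares the child first, then the root on the merged data
        Instances generated = tree.apply(data);
        System.out.println("Generated " + generated.numAttributes() + " features.");
    }
}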
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sophisticated
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sophisticated/featuregen/InteractingFeatures.java
package ai.libs.mlplan.multiclass.wekamlplan.sophisticated.featuregen; import java.util.ArrayList; import java.util.List; import ai.libs.jaicore.basic.sets.Pair; import ai.libs.jaicore.basic.sets.SetUtil; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; public class InteractingFeatures implements FeatureGenerator { private boolean isPrepared; private List<Integer> indicesToInteract = new ArrayList<>(); @Override public void prepare(final Instances data) throws Exception { ArrayList<Attribute> attributes = new ArrayList<>(); this.indicesToInteract.clear(); for (int i = 0; i < data.numAttributes(); i++) { if (data.attribute(i).isNumeric()) { attributes.add(new weka.core.Attribute("q" + i, false)); this.indicesToInteract.add(i); } } // Instances squares = new Instances("squares", attributes, data.size()); this.isPrepared = true; } private Instances getEmptyDataset() { if (!this.isPrepared) { throw new IllegalStateException("Cannot get empty dataset before preparation"); } ArrayList<Attribute> attributes = new ArrayList<>(); for (Pair<Integer, Integer> pair : SetUtil.cartesianProduct(this.indicesToInteract, this.indicesToInteract)) { if (pair.getX() < pair.getY()) { attributes.add(new Attribute("interaction_" + pair.getX() + "_" + pair.getY(), false)); } } return new Instances("interaction", attributes, 0); } @Override public Instance apply(final Instance data) throws Exception { Instance newInstance = new DenseInstance(((int) Math.pow(this.indicesToInteract.size(), 2) - this.indicesToInteract.size()) / 2); int index = 0; for (Pair<Integer, Integer> pair : SetUtil.cartesianProduct(this.indicesToInteract, this.indicesToInteract)) { if (pair.getX() < pair.getY()) { newInstance.setValue(index++, data.value(pair.getX()) * data.value(pair.getY())); } } Instances dataset = this.getEmptyDataset(); dataset.add(newInstance); newInstance.setDataset(dataset); return newInstance; } @Override public Instances apply(final Instances data) throws Exception { Instances newDataset = this.getEmptyDataset(); for (Instance inst : data) { Instance modInst = this.apply(inst); newDataset.add(modInst); modInst.setDataset(newDataset); } return newDataset; } @Override public boolean isPrepared() { return this.isPrepared; } }
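A worked example for the generator above (values made up): an instance with three numeric attributes valued (2, 3, 5) yields (3*3 - 3) / 2 = 3 interaction features with the values 2*3 = 6, 2*5 = 10 and 3*5 = 15.

package ai.libs.mlplan.multiclass.wekamlplan.sophisticated.featuregen;

import java.util.ArrayList;

import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instances;

public class InteractingFeaturesExample {
    public static void main(final String[] args) throws Exception {
        ArrayList<Attribute> atts = new ArrayList<>();
        for (String name : new String[] { "x1", "x2", "x3" }) {
            atts.add(new Attribute(name));
        }
        Instances data = new Instances("toy", atts, 1);
        data.add(new DenseInstance(1.0, new double[] { 2.0, 3.0, 5.0 }));
        InteractingFeatures generator = new InteractingFeatures();
        generator.prepare(data);
        System.out.println(generator.apply(data)); // the three pairwise products
    }
}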
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sophisticated
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sophisticated/featuregen/PCA.java
package ai.libs.mlplan.multiclass.wekamlplan.sophisticated.featuregen; import ai.libs.mlplan.multiclass.wekamlplan.weka.model.SuvervisedFilterPreprocessor; import weka.attributeSelection.PrincipalComponents; import weka.attributeSelection.Ranker; @SuppressWarnings("serial") public class PCA extends SuvervisedFilterPreprocessor implements FeatureGenerator { public PCA() { super(new Ranker(), new PrincipalComponents()); } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sophisticated
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sophisticated/featuregen/PolynomialFeatures.java
package ai.libs.mlplan.multiclass.wekamlplan.sophisticated.featuregen; import java.util.ArrayList; import java.util.List; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; public class PolynomialFeatures implements FeatureGenerator { private boolean isPrepared; private int potence = 2; private List<Integer> indicesToSquare = new ArrayList<>(); @Override public void prepare(Instances data) throws Exception { ArrayList<Attribute> attributes = new ArrayList<>(); indicesToSquare.clear(); for (int i = 0; i < data.numAttributes(); i++) { if (data.attribute(i).isNumeric()) { attributes.add(new weka.core.Attribute("q" + i, false)); indicesToSquare.add(i); } } // Instances squares = new Instances("squares", attributes, data.size()); isPrepared = true; } private Instances getEmptyDataset() { if (!isPrepared) throw new IllegalStateException("Cannot get empty dataset before preparation"); ArrayList<Attribute> attributes = new ArrayList<>(); for (int indexToSquare : indicesToSquare) { attributes.add(new Attribute("pow_" + potence + "_" + indexToSquare, false)); } return new Instances("potences", attributes, 0); } @Override public Instance apply(Instance data) throws Exception { Instance copy = new DenseInstance(indicesToSquare.size()); int i = 0; for (int index : indicesToSquare) { copy.setValue(i++,Math.pow(data.value(index), potence)); } Instances dataset = getEmptyDataset(); dataset.add(copy); copy.setDataset(dataset); return copy; } @Override public Instances apply(Instances data) throws Exception { Instances copy = getEmptyDataset(); for (Instance inst : data) { Instance modInst = apply(inst); copy.add(modInst); modInst.setDataset(copy); } return copy; } @Override public boolean isPrepared() { return isPrepared; } public int getPotence() { return potence; } public void setPotence(int potence) { this.potence = potence; } }
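A brief usage note for the generator above (values made up): the exponent defaults to 2 and can be changed via setPotence, so each numeric attribute x is mapped to x^potence.

package ai.libs.mlplan.multiclass.wekamlplan.sophisticated.featuregen;

import java.util.ArrayList;

import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instances;

public class PolynomialFeaturesExample {
    public static void main(final String[] args) throws Exception {
        ArrayList<Attribute> atts = new ArrayList<>();
        atts.add(new Attribute("x"));
        Instances data = new Instances("toy", atts, 1);
        data.add(new DenseInstance(1.0, new double[] { 4.0 }));
        PolynomialFeatures generator = new PolynomialFeatures();
        generator.setPotence(3);
        generator.prepare(data);
        System.out.println(generator.apply(data)); // contains 4^3 = 64
    }
}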
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sophisticated
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/sophisticated/featurepre/Normalization.java
package ai.libs.mlplan.multiclass.wekamlplan.sophisticated.featurepre; import ai.libs.mlplan.multiclass.wekamlplan.sophisticated.FeaturePreprocessor; import weka.core.Instance; import weka.core.Instances; import weka.filters.Filter; public class Normalization implements FeaturePreprocessor { private weka.filters.unsupervised.attribute.Normalize norm = new weka.filters.unsupervised.attribute.Normalize(); private boolean prepared; @Override public void prepare(Instances data) throws Exception { norm.setInputFormat(data); Filter.useFilter(data, norm); prepared = true; } @Override public Instance apply(Instance data) throws Exception { norm.input(data); return norm.output(); } @Override public Instances apply(Instances data) throws Exception { Instances newInstances = new Instances(data); newInstances.clear(); for (Instance i : data) { newInstances.add(apply(i)); } return newInstances; } @Override public boolean isPrepared() { return prepared; } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/weka/MLPipelineComponentInstanceFactory.java
package ai.libs.mlplan.multiclass.wekamlplan.weka; import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.Map; import org.aeonbits.owner.util.Collections; import org.apache.commons.lang3.math.NumberUtils; import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; import ai.libs.hasco.model.Component; import ai.libs.hasco.model.ComponentInstance; import ai.libs.hasco.serialization.ComponentNotFoundException; import ai.libs.hasco.serialization.ComponentUtils; import ai.libs.mlplan.multiclass.wekamlplan.weka.model.MLPipeline; import ai.libs.mlplan.multiclass.wekamlplan.weka.model.SupervisedFilterSelector; import weka.core.OptionHandler; /** * A factory that provides the ability to wrap a given MLPipeline in a * ComponentInstance * * @author Helena Graf * */ public class MLPipelineComponentInstanceFactory { private Collection<Component> components; /** * Creates a new factory object using the given components * * @param components * the loaded components * @throws IOException */ public MLPipelineComponentInstanceFactory(Collection<Component> components) throws IOException { // TODO possibly change this to get loaded components so that components don't // have to be loaded twice this.components = components; } /** * Converts the given MLPipeline object to a ComponentInstance. * * @param pipeline * The pipeline to convert * @return The converted pipeline as a ComponentInstance * @throws ComponentNotFoundException * When the pipeline contains elements that are not in the loaded * configuration */ @SuppressWarnings("unchecked") public ComponentInstance convertToComponentInstance(MLPipeline pipeline) throws ComponentNotFoundException { if (pipeline.getPreprocessors() != null && pipeline.getPreprocessors().size() > 0) { // Pipeline has preprocessor SupervisedFilterSelector preprocessor = pipeline.getPreprocessors().get(0); // CI for searcher ComponentInstance searcherCI = getComponentInstanceForPipelineElement(preprocessor.getSearcher()); // CI for evaluator ComponentInstance evaluatorCI = getComponentInstanceForPipelineElement(preprocessor.getEvaluator()); // CI for preprocessor ComponentInstance preprocessorCI = getComponentInstanceForPipelineElement(preprocessor.getSelector(), new ImmutablePair<>("eval", evaluatorCI), new ImmutablePair<>("search", searcherCI)); // CI for classifier ComponentInstance classifierCI = getComponentInstanceForPipelineElement(pipeline.getBaseClassifier()); // Pipeline HashMap<String, ComponentInstance> satisfactionOfRequiredInterfaces = new HashMap<String, ComponentInstance>(); satisfactionOfRequiredInterfaces.put("preprocessor", preprocessorCI); satisfactionOfRequiredInterfaces.put("classifier", classifierCI); return new ComponentInstance(ComponentUtils.getComponentByName("pipeline", components), new HashMap<String, String>(), satisfactionOfRequiredInterfaces); } else { // Pipeline is only classifier ComponentInstance classifierCI = new ComponentInstance( ComponentUtils.getComponentByName(pipeline.getBaseClassifier().getClass().getName(), components), getParametersForPipelineElement(pipeline.getBaseClassifier()), new HashMap<String, ComponentInstance>()); return classifierCI; } } /** * Converts a single element of the pipeline to a ComponentInstance, e.g. a * classifier.
* * @param pipelineElement * The pipeline element to convert * @param satisfactionOfRegquiredInterfaces * If the element has this component, it must be included, otherwise * it is left out * @return The converted ComponentInstance * @throws ComponentNotFoundException * If the pipeline element contains elements that are not in the * loaded configuration */ private ComponentInstance getComponentInstanceForPipelineElement(Object pipelineElement, @SuppressWarnings("unchecked") Pair<String, ComponentInstance>... satisfactionOfRegquiredInterfaces) throws ComponentNotFoundException { HashMap<String, ComponentInstance> satisfactionOfRequiredInterfaces = new HashMap<String, ComponentInstance>(); Arrays.stream(satisfactionOfRegquiredInterfaces).forEach(entry -> { satisfactionOfRequiredInterfaces.put(entry.getKey(), entry.getValue()); }); return new ComponentInstance(ComponentUtils.getComponentByName(pipelineElement.getClass().getName(), components), getParametersForPipelineElement(pipelineElement), satisfactionOfRequiredInterfaces); } /** * Gets the parameters for the given pipeline element as a map from parameter * name to value * * @param classifier * The classifier for which to get the parameters * @return The parameter map */ private Map<String, String> getParametersForPipelineElement(Object classifier) { if (classifier instanceof OptionHandler) { OptionHandler handler = (OptionHandler) classifier; HashMap<String, String> parametersWithValues = new HashMap<String, String>(handler.getOptions().length); String optionName = null; boolean previousStringWasAValue = true; for (String option : handler.getOptions()) { if (option.equals("--")) { // TODO here all classifier parameters (i.e. for meta classifiers and such) are // skipped! Might want to include that in the future break; } if (previousStringWasAValue || (!(NumberUtils.isCreatable(option) || NumberUtils.isParsable(option)) && option.startsWith("-"))) { // Current String is option if (!previousStringWasAValue) { parametersWithValues.put(optionName, "true"); } previousStringWasAValue = false; optionName = option.equals("") ? option : option.substring(1, option.length()); } else { // Current String is value previousStringWasAValue = true; parametersWithValues.put(optionName, option); } } if (!previousStringWasAValue) { parametersWithValues.put(optionName, Collections.list(handler.getOptions()).get(handler.getOptions().length - 1)); } return parametersWithValues; } return new HashMap<String, String>(0); } }
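A conversion sketch (not part of the repository): the component collection is assumed to be loaded elsewhere and to contain entries for "pipeline", the searcher, the evaluator, and the classifier used here.

package ai.libs.mlplan.multiclass.wekamlplan.weka;

import java.util.Collection;

import ai.libs.hasco.model.Component;
import ai.libs.hasco.model.ComponentInstance;
import ai.libs.mlplan.multiclass.wekamlplan.weka.model.MLPipeline;
import weka.attributeSelection.BestFirst;
import weka.attributeSelection.CfsSubsetEval;
import weka.classifiers.trees.J48;

public class ComponentInstanceConversionExample {
    public static ComponentInstance convert(final Collection<Component> components) throws Exception {
        MLPipelineComponentInstanceFactory factory = new MLPipelineComponentInstanceFactory(components);
        MLPipeline pipeline = new MLPipeline(new BestFirst(), new CfsSubsetEval(), new J48());
        return factory.convertToComponentInstance(pipeline);
    }
}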
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/weka/PreferenceBasedNodeEvaluator.java
package ai.libs.mlplan.multiclass.wekamlplan.weka; import java.util.ArrayList; import java.util.Collection; import java.util.LinkedList; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.core.Util; import ai.libs.hasco.model.Component; import ai.libs.hasco.model.ComponentInstance; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNode; import ai.libs.jaicore.search.algorithms.standard.bestfirst.nodeevaluation.INodeEvaluator; import ai.libs.jaicore.search.model.travesaltree.Node; public class PreferenceBasedNodeEvaluator implements INodeEvaluator<TFDNode, Double> { private final Collection<Component> components; private final List<String> orderingOfComponents; private final static Logger logger = LoggerFactory.getLogger(PreferenceBasedNodeEvaluator.class); private boolean sentLogMessageForHavingEnteredSecondSubPhase = false; private String methodPrefix = "resolveAbstractClassifierWith"; public PreferenceBasedNodeEvaluator(final Collection<Component> components, final List<String> orderingOfComponents) { super(); this.components = components; this.orderingOfComponents = orderingOfComponents; } public PreferenceBasedNodeEvaluator(final Collection<Component> components) { this(components, new ArrayList<>()); } public PreferenceBasedNodeEvaluator(final Collection<Component> components, final List<String> orderingOfComponents, final String methodPrefix) { this(components, orderingOfComponents); this.methodPrefix = methodPrefix; } @Override public Double f(final Node<TFDNode, ?> n) { List<String> appliedMethods = new LinkedList<>(); for (TFDNode x : n.externalPath()) { if (x.getAppliedMethodInstance() != null) { appliedMethods.add(x.getAppliedMethodInstance().getMethod().getName()); } } /* get partial component */ ComponentInstance instance = Util.getSolutionCompositionFromState(this.components, n.getPoint().getState(), false); boolean isPipeline = appliedMethods.stream().anyMatch(x -> x.toLowerCase().contains("pipeline")); boolean lastMethod = false; String classifierName = null; Double score = 0.0; if (instance != null) { if (instance.getComponent().getName().toLowerCase().contains("pipeline")) { lastMethod = lastMethod || appliedMethods.get(appliedMethods.size() - 1).startsWith("resolveBaseClassifierWith"); if (instance.getSatisfactionOfRequiredInterfaces().containsKey("classifier")) { classifierName = instance.getSatisfactionOfRequiredInterfaces().get("classifier").getComponent().getName(); } else { return 0.0; } } else { classifierName = instance.getComponent().getName(); lastMethod = lastMethod || appliedMethods.get(appliedMethods.size() - 1).startsWith(this.methodPrefix); } if (lastMethod) { if (isPipeline) { score += this.orderingOfComponents.size() + 1; } score += (this.orderingOfComponents.contains(classifierName) ? this.orderingOfComponents.indexOf(classifierName) + 1 : this.orderingOfComponents.size() + 1); score *= 1.0e-10; } else { score = null; if (!this.sentLogMessageForHavingEnteredSecondSubPhase) { if ((Double) n.getParent().getInternalLabel() > 1.0e-6) { this.sentLogMessageForHavingEnteredSecondSubPhase = true; logger.info("Entering phase 1b! Breadth first search ends here, because the search is asking for the f-value of a node whose parent has been truly evaluated with an f-value of {}", n.getParent().getInternalLabel()); } } } } return score; } @Override public String toString() { return "PreferenceBasedNodeEvaluator [ORDERING_OF_CLASSIFIERS=" + this.orderingOfComponents + "]"; } }
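To make the scoring above concrete (with a made-up ordering): given orderingOfComponents = [NaiveBayes, RandomForest], a fully resolved pipeline whose classifier is RandomForest scores (2 + 1) for being a pipeline plus (1 + 1) for the classifier's rank, i.e. 5, scaled by 1.0e-10 to 5.0e-10. Classifiers missing from the ordering are penalized with orderingOfComponents.size() + 1, and nodes whose classifier choice is not yet complete receive a null f-value, so the search keeps expanding beneath them.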
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/weka/WEKAPipelineFactory.java
package ai.libs.mlplan.multiclass.wekamlplan.weka; import java.util.LinkedList; import java.util.List; import java.util.Map.Entry; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.exceptions.ComponentInstantiationFailedException; import ai.libs.hasco.model.ComponentInstance; import ai.libs.mlplan.multiclass.wekamlplan.IClassifierFactory; import ai.libs.mlplan.multiclass.wekamlplan.weka.model.MLPipeline; import weka.attributeSelection.ASEvaluation; import weka.attributeSelection.ASSearch; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.MultipleClassifiersCombiner; import weka.classifiers.SingleClassifierEnhancer; import weka.classifiers.functions.SMO; import weka.classifiers.functions.supportVector.Kernel; import weka.core.OptionHandler; public class WEKAPipelineFactory implements IClassifierFactory { private Logger logger = LoggerFactory.getLogger(WEKAPipelineFactory.class); private static final String L_CLASSIFIER = "classifier"; @Override public Classifier getComponentInstantiation(final ComponentInstance groundComponent) throws ComponentInstantiationFailedException { try { if (groundComponent.getComponent().getName().equals("pipeline")) { ComponentInstance preprocessorCI = null; /* Retrieve component instances of pipeline */ preprocessorCI = groundComponent.getSatisfactionOfRequiredInterfaces().get("preprocessor"); ComponentInstance evaluatorCI = preprocessorCI.getSatisfactionOfRequiredInterfaces().get("eval"); ComponentInstance searcherCI = preprocessorCI.getSatisfactionOfRequiredInterfaces().get("search"); ASEvaluation eval = ASEvaluation.forName(evaluatorCI.getComponent().getName(), this.getParameterList(evaluatorCI).toArray(new String[0])); ASSearch search = ASSearch.forName(searcherCI.getComponent().getName(), this.getParameterList(searcherCI).toArray(new String[0])); Classifier c = this.getComponentInstantiation(groundComponent.getSatisfactionOfRequiredInterfaces().get(L_CLASSIFIER)); return new MLPipeline(search, eval, c); } else { Classifier c = AbstractClassifier.forName(groundComponent.getComponent().getName(), this.getParameterList(groundComponent).toArray(new String[0])); List<String> options = this.getParameterList(groundComponent); options.add("-do-not-check-capabilities"); if (c instanceof OptionHandler) { ((OptionHandler) c).setOptions(options.toArray(new String[0])); } for (Entry<String, ComponentInstance> reqI : groundComponent.getSatisfactionOfRequiredInterfaces().entrySet()) { switch (reqI.getKey()) { case "W": if (c instanceof SingleClassifierEnhancer) { ((SingleClassifierEnhancer) c).setClassifier(this.getComponentInstantiation(reqI.getValue())); } else { this.logger.error("Got required interface W but classifier {} is not single classifier enhancer", c.getClass().getName()); } break; case "K": if (c instanceof SMO) { Kernel k = (Kernel) Class.forName(reqI.getValue().getComponent().getName()).newInstance(); k.setOptions(this.getParameterList(reqI.getValue()).toArray(new String[0])); ((SMO) c).setKernel(k); } else { this.logger.error("Got required interface K but classifier {} is not SMO", c.getClass().getName()); } break; case "B": List<Classifier> baseLearnerList = this.getListOfBaseLearners(reqI.getValue()); if (c instanceof MultipleClassifiersCombiner) { ((MultipleClassifiersCombiner) c).setClassifiers(baseLearnerList.toArray(new Classifier[0])); } else { this.logger.error("Got required interface B but classifier {} is not MultipleClassifiersCombiner", c.getClass().getName()); } 
break; default: this.logger.error("Got required interface {} for classifier {}. Don't know what to do with it...", reqI.getKey(), c.getClass().getName()); break; } } return c; } } catch (Exception e) { throw new ComponentInstantiationFailedException(e, "Could not instantiate component."); } } private List<Classifier> getListOfBaseLearners(final ComponentInstance ci) throws ComponentInstantiationFailedException { List<Classifier> baseLearnerList = new LinkedList<>(); if (ci.getComponent().getName().equals("MultipleBaseLearnerListElement")) { baseLearnerList.add(this.getComponentInstantiation(ci.getSatisfactionOfRequiredInterfaces().get(L_CLASSIFIER))); } else if (ci.getComponent().getName().equals("MultipleBaseLearnerListChain")) { baseLearnerList.add(this.getComponentInstantiation(ci.getSatisfactionOfRequiredInterfaces().get(L_CLASSIFIER))); baseLearnerList.addAll(this.getListOfBaseLearners(ci.getSatisfactionOfRequiredInterfaces().get("chain"))); } return baseLearnerList; } private List<String> getParameterList(final ComponentInstance ci) { List<String> parameters = new LinkedList<>(); for (Entry<String, String> parameterValues : ci.getParameterValues().entrySet()) { if (parameterValues.getKey().toLowerCase().endsWith("activator") || parameterValues.getValue().equals("REMOVED")) { continue; } if (!parameterValues.getValue().equals("false")) { parameters.add("-" + parameterValues.getKey()); } if (parameterValues.getValue() != null && !parameterValues.getValue().equals("") && !parameterValues.getValue().equals("true") && !parameterValues.getValue().equals("false")) { parameters.add(parameterValues.getValue()); } } return parameters; } }
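To illustrate getParameterList above (parameter values made up): a component instance with parameter values {C=0.25, U=true, S=false} is rendered as the WEKA option list ["-C", "0.25", "-U"]. Values of "false" drop both flag and value, "true" produces only the flag, and parameters whose name ends with "activator" or whose value is "REMOVED" are skipped entirely.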
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/weka/WekaMLPlanWekaClassifier.java
package ai.libs.mlplan.multiclass.wekamlplan.weka; import java.io.IOException; import ai.libs.mlplan.core.MLPlanWekaBuilder; import ai.libs.mlplan.multiclass.wekamlplan.MLPlanWekaClassifier; public class WekaMLPlanWekaClassifier extends MLPlanWekaClassifier { /** * Automatically generated version uid for serialization. */ private static final long serialVersionUID = 985257791846750757L; public WekaMLPlanWekaClassifier(final MLPlanWekaBuilder builder) { super(builder); } public WekaMLPlanWekaClassifier() throws IOException { this(new MLPlanWekaBuilder()); } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/weka/WekaPipelineValidityCheckingNodeEvaluator.java
package ai.libs.mlplan.multiclass.wekamlplan.weka; import java.util.Arrays; import java.util.Collection; import ai.libs.hasco.core.Util; import ai.libs.hasco.model.Component; import ai.libs.hasco.model.ComponentInstance; import ai.libs.jaicore.ml.WekaUtil; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNode; import ai.libs.jaicore.search.algorithms.standard.bestfirst.exceptions.ControlledNodeEvaluationException; import ai.libs.jaicore.search.model.travesaltree.Node; import ai.libs.mlplan.core.PipelineValidityCheckingNodeEvaluator; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; public class WekaPipelineValidityCheckingNodeEvaluator extends PipelineValidityCheckingNodeEvaluator { /* the predicates of the dataset */ private boolean propertiesDetermined; private boolean binaryClass; private boolean multiClass; private boolean regression; private boolean multiValuedNominalAttributes; private boolean containsNegativeValues; public WekaPipelineValidityCheckingNodeEvaluator() { super(); } public WekaPipelineValidityCheckingNodeEvaluator(final Collection<Component> components, final Instances data) { super(components, data); } private boolean multiValuedNominalAttributesExist() { for (int i = 0; i < this.getData().numAttributes(); i++) { Attribute att = this.getData().attribute(i); if (att != this.getData().classAttribute() && att.isNominal() && att.numValues() > 2) { return true; } } return false; } private synchronized void extractDatasetProperties() { if (!this.propertiesDetermined) { /* compute binary class predicate */ this.binaryClass = this.getData().classAttribute().isNominal() && this.getData().classAttribute().numValues() == 2; this.multiClass = this.getData().classAttribute().isNominal() && this.getData().classAttribute().numValues() > 2; this.regression = this.getData().classAttribute().isNumeric(); /* determine whether the dataset is multi-valued nominal */ this.multiValuedNominalAttributes = this.multiValuedNominalAttributesExist(); this.containsNegativeValues = false; for (Instance i : this.getData()) { this.containsNegativeValues = this.containsNegativeValues || Arrays.stream(i.toDoubleArray()).anyMatch(x -> x < 0); } this.propertiesDetermined = true; } } @Override public Double f(final Node<TFDNode, ?> n) throws ControlledNodeEvaluationException { if (!this.propertiesDetermined) { this.extractDatasetProperties(); } /* get partial component */ ComponentInstance instance = Util.getSolutionCompositionFromState(this.getComponents(), n.getPoint().getState(), false); if (instance != null) { /* check invalid preprocessor combinations */ ComponentInstance pp = instance.getSatisfactionOfRequiredInterfaces().get("preprocessor"); if (pp != null && pp.getComponent().getName().contains("AttributeSelection")) { ComponentInstance search = pp.getSatisfactionOfRequiredInterfaces().get("search"); ComponentInstance eval = pp.getSatisfactionOfRequiredInterfaces().get("eval"); if (search != null && eval != null && !WekaUtil.isValidPreprocessorCombination(search.getComponent().getName(), eval.getComponent().getName())) { throw new ControlledNodeEvaluationException("The given combination of searcher and evaluator cannot be benchmarked since they are incompatible."); } } /* check invalid classifiers for this kind of dataset */ ComponentInstance classifier; if (instance.getComponent().getName().toLowerCase().contains("pipeline")) { classifier = instance.getSatisfactionOfRequiredInterfaces().get("classifier"); } else { 
classifier = instance; } if (classifier != null) { this.checkValidity(classifier.getComponent().getName().toLowerCase()); } } return null; } private void checkValidity(final String classifierName) throws ControlledNodeEvaluationException { /* forbid M5regression algorithms on non-binary classes */ boolean binaryClassifierMatch = classifierName.matches("(.*)(additiveregression|simplelinearregression|m5rules|votedperceptron|m5p)(.*)"); if (!this.binaryClass && binaryClassifierMatch) { throw new ControlledNodeEvaluationException(classifierName + " cannot be adopted on non-binary datasets."); } boolean noBinaryClassifierMatch = classifierName.matches("(.*)(additiveregression|m5p|m5rules|simplelinearregression)(.*)"); if (this.binaryClass && noBinaryClassifierMatch) { throw new ControlledNodeEvaluationException(classifierName + " cannot be adopted for binary classification tasks."); } /* forbid NaiveBayesMultinomial on multi-valued nominal attributes */ if (this.multiValuedNominalAttributes && (classifierName.matches("(.*)(naivebayesmultinomial|simplelinearregression)(.*)"))) { throw new ControlledNodeEvaluationException(classifierName + " cannot be adopted on datasets with multi-valued nominal attributes."); } boolean noMulticlassClassifierMatch = classifierName.matches("(.*)(votedperceptron)(.*)"); if (this.multiClass && noMulticlassClassifierMatch) { throw new ControlledNodeEvaluationException(classifierName + " cannot be adopted on multinomial classification dataset."); } if (this.regression && !classifierName.matches("(.*)(additiveregression|m5p|m5rules|simplelinearregression)(.*)")) { throw new ControlledNodeEvaluationException(classifierName + " cannot be adopted on regression problems."); } if (this.containsNegativeValues && classifierName.matches("(.*)(naivebayesmultinomial)(.*)")) { throw new ControlledNodeEvaluationException("Negative numeric attribute values are not supported by the classifier."); } } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/weka
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/weka/model/MLPipeline.java
package ai.libs.mlplan.multiclass.wekamlplan.weka.model;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import ai.libs.jaicore.ml.WekaUtil;
import weka.attributeSelection.ASEvaluation;
import weka.attributeSelection.ASSearch;
import weka.attributeSelection.AttributeSelection;
import weka.classifiers.Classifier;
import weka.classifiers.SingleClassifierEnhancer;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;

/**
 * @author Felix Mohr
 */
@SuppressWarnings("serial")
public class MLPipeline extends SingleClassifierEnhancer implements Classifier, Serializable {

    private static final Logger logger = LoggerFactory.getLogger(MLPipeline.class);

    private final List<SupervisedFilterSelector> preprocessors = new ArrayList<>();
    private boolean trained = false;
    private int timeForTrainingPreprocessors;
    private int timeForTrainingClassifier;
    private DescriptiveStatistics timeForExecutingPreprocessors;
    private DescriptiveStatistics timeForExecutingClassifier;

    public MLPipeline(final List<SupervisedFilterSelector> preprocessors, final Classifier baseClassifier) {
        super();
        if (baseClassifier == null) {
            throw new IllegalArgumentException("Base classifier must not be null!");
        }
        this.preprocessors.addAll(preprocessors);
        super.setClassifier(baseClassifier);
    }

    public MLPipeline(final ASSearch searcher, final ASEvaluation evaluator, final Classifier baseClassifier) {
        super();
        if (baseClassifier == null) {
            throw new IllegalArgumentException("Base classifier must not be null!");
        }
        if (searcher != null && evaluator != null) {
            AttributeSelection selector = new AttributeSelection();
            selector.setSearch(searcher);
            selector.setEvaluator(evaluator);
            this.preprocessors.add(new SupervisedFilterSelector(searcher, evaluator, selector));
        }
        super.setClassifier(baseClassifier);
    }

    @Override
    public void buildClassifier(Instances data) throws Exception {

        /* reduce dimensionality */
        long start;
        int numAttributesBefore = data.numAttributes();
        logger.info("Starting to build the preprocessors of the pipeline.");
        for (SupervisedFilterSelector pp : this.preprocessors) {

            /* if the filter has not been trained yet, do so now and store it */
            if (!pp.isPrepared()) {
                try {
                    start = System.currentTimeMillis();
                    pp.prepare(data);
                    this.timeForTrainingPreprocessors = (int) (System.currentTimeMillis() - start);
                    int newNumberOfClasses = pp.apply(data).numClasses();
                    if (data.numClasses() != newNumberOfClasses) {
                        logger.info("{} changed number of classes from {} to {}", pp.getSelector(), data.numClasses(), newNumberOfClasses);
                    }
                } catch (NullPointerException e) {
                    logger.error("Could not apply preprocessor", e);
                }
            }

            /* now apply the attribute selector */
            data = pp.apply(data);
        }
        logger.info("Reduced number of attributes from {} to {}", numAttributesBefore, data.numAttributes());

        /* build classifier based on reduced data */
        start = System.currentTimeMillis();
        super.getClassifier().buildClassifier(data);
        this.timeForTrainingClassifier = (int) (System.currentTimeMillis() - start);
        this.trained = true;
        this.timeForExecutingPreprocessors = new DescriptiveStatistics();
        this.timeForExecutingClassifier = new DescriptiveStatistics();
    }

    private Instance applyPreprocessors(Instance data) throws Exception {
        long start = System.currentTimeMillis();
        for (SupervisedFilterSelector pp : this.preprocessors) {
            data = pp.apply(data);
        }
        this.timeForExecutingPreprocessors.addValue((int) (System.currentTimeMillis() - start));
        return data;
    }

    @Override
    public double classifyInstance(Instance arg0) throws Exception {
        if (!this.trained) {
            throw new IllegalStateException("Cannot make predictions on untrained pipeline!");
        }
        int numAttributesBefore = arg0.numAttributes();
        arg0 = this.applyPreprocessors(arg0);
        if (numAttributesBefore != arg0.numAttributes()) {
            logger.info("Reduced number of attributes from {} to {}", numAttributesBefore, arg0.numAttributes());
        }
        long start = System.currentTimeMillis();
        double result = super.getClassifier().classifyInstance(arg0);
        this.timeForExecutingClassifier.addValue((System.currentTimeMillis() - start));
        return result;
    }

    public double[] classifyInstances(final Instances arg0) throws Exception {
        int n = arg0.size();
        double[] answers = new double[n];
        for (int i = 0; i < n; i++) {
            answers[i] = this.classifyInstance(arg0.get(i));
        }
        return answers;
    }

    @Override
    public double[] distributionForInstance(Instance arg0) throws Exception {
        if (!this.trained) {
            throw new IllegalStateException("Cannot make predictions on untrained pipeline!");
        }
        if (arg0 == null) {
            throw new IllegalArgumentException("Cannot make predictions for null-instance");
        }
        arg0 = this.applyPreprocessors(arg0);
        if (arg0 == null) {
            throw new IllegalStateException("The filter has turned the instance into NULL");
        }
        long start = System.currentTimeMillis();
        double[] result = super.getClassifier().distributionForInstance(arg0);
        this.timeForExecutingClassifier.addValue((int) (System.currentTimeMillis() - start));
        return result;
    }

    @Override
    public Capabilities getCapabilities() {
        return super.getClassifier().getCapabilities();
    }

    public Classifier getBaseClassifier() {
        return super.getClassifier();
    }

    public List<SupervisedFilterSelector> getPreprocessors() {
        return this.preprocessors;
    }

    @Override
    public String toString() {
        return this.getPreprocessors() + " (preprocessors), " + WekaUtil.getClassifierDescriptor(this.getBaseClassifier()) + " (classifier)";
    }

    public long getTimeForTrainingPreprocessor() {
        return this.timeForTrainingPreprocessors;
    }

    public long getTimeForTrainingClassifier() {
        return this.timeForTrainingClassifier;
    }

    public DescriptiveStatistics getTimeForExecutingPreprocessor() {
        return this.timeForExecutingPreprocessors;
    }

    public DescriptiveStatistics getTimeForExecutingClassifier() {
        return this.timeForExecutingClassifier;
    }
}
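A minimal usage sketch for MLPipeline, not part of the library itself: it loads an ARFF file from a hypothetical path, wires a Ranker/InfoGain attribute selection in front of a J48 base classifier, and classifies the first instance. Ranker, InfoGainAttributeEval, and J48 are standard WEKA classes; the file path is an assumption.

import ai.libs.mlplan.multiclass.wekamlplan.weka.model.MLPipeline;
import weka.attributeSelection.InfoGainAttributeEval;
import weka.attributeSelection.Ranker;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class MLPipelineExample {
    public static void main(final String[] args) throws Exception {
        Instances data = new DataSource("data/iris.arff").getDataSet(); // hypothetical path
        data.setClassIndex(data.numAttributes() - 1);

        // attribute selection (Ranker + InfoGain) in front of a J48 base classifier
        MLPipeline pipeline = new MLPipeline(new Ranker(), new InfoGainAttributeEval(), new J48());
        pipeline.buildClassifier(data);

        // prediction runs the trained preprocessors before invoking the base classifier
        System.out.println("Predicted class index: " + pipeline.classifyInstance(data.firstInstance()));
    }
}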
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/weka
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/weka/model/SupervisedFilterSelector.java
package ai.libs.mlplan.multiclass.wekamlplan.weka.model;

import java.io.Serializable;

import ai.libs.mlplan.multiclass.wekamlplan.sophisticated.FeaturePreprocessor;
import weka.attributeSelection.ASEvaluation;
import weka.attributeSelection.ASSearch;
import weka.attributeSelection.AttributeSelection;
import weka.core.Instance;
import weka.core.Instances;

@SuppressWarnings("serial")
public class SupervisedFilterSelector implements Serializable, FeaturePreprocessor {

    private final ASSearch searcher;
    private final ASEvaluation evaluator;
    private final AttributeSelection selector;
    private boolean prepared;

    public SupervisedFilterSelector(ASSearch searcher, ASEvaluation evaluator) {
        super();
        this.searcher = searcher;
        this.evaluator = evaluator;
        this.selector = new AttributeSelection();
        this.selector.setSearch(searcher);
        this.selector.setEvaluator(evaluator);
    }

    public SupervisedFilterSelector(ASSearch searcher, ASEvaluation evaluator, AttributeSelection selector) {
        super();
        this.searcher = searcher;
        this.evaluator = evaluator;
        this.selector = selector;
    }

    public ASSearch getSearcher() {
        return searcher;
    }

    public ASEvaluation getEvaluator() {
        return evaluator;
    }

    public AttributeSelection getSelector() {
        return selector;
    }

    public void prepare(Instances data) throws Exception {
        selector.SelectAttributes(data);
        prepared = true;
    }

    public Instance apply(Instance data) throws Exception {
        if (!prepared) {
            throw new IllegalStateException("Cannot apply preprocessor before it has been prepared!");
        }
        return selector.reduceDimensionality(data);
    }

    public Instances apply(Instances data) throws Exception {
        if (!prepared) {
            throw new IllegalStateException("Cannot apply preprocessor before it has been prepared!");
        }
        return selector.reduceDimensionality(data);
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((evaluator == null) ? 0 : evaluator.hashCode());
        result = prime * result + ((searcher == null) ? 0 : searcher.hashCode());
        result = prime * result + ((selector == null) ? 0 : selector.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        SupervisedFilterSelector other = (SupervisedFilterSelector) obj;
        if (evaluator == null) {
            if (other.evaluator != null) {
                return false;
            }
        } else if (!evaluator.equals(other.evaluator)) {
            return false;
        }
        if (searcher == null) {
            if (other.searcher != null) {
                return false;
            }
        } else if (!searcher.equals(other.searcher)) {
            return false;
        }
        if (selector == null) {
            if (other.selector != null) {
                return false;
            }
        } else if (!selector.equals(other.selector)) {
            return false;
        }
        return true;
    }

    public boolean isPrepared() {
        return prepared;
    }

    public void setPrepared(boolean prepared) {
        this.prepared = prepared;
    }

    @Override
    public String toString() {
        return "SupervisedFilterSelector [searcher=" + searcher.getClass().getName() + ", evaluator=" + evaluator.getClass().getName() + "]";
    }
}
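A short sketch of the prepare/apply contract: prepare must be called once on labeled training data before apply may reduce dimensionality. This snippet is assumed to run inside a method that declares throws Exception and has a class-labeled weka.core.Instances object named data in scope; Ranker and InfoGainAttributeEval are standard WEKA classes.

SupervisedFilterSelector selector = new SupervisedFilterSelector(new Ranker(), new InfoGainAttributeEval());
selector.prepare(data);                   // runs attribute selection on the training data
Instances reduced = selector.apply(data); // only now is dimensionality reduction permitted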
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/weka
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclass/wekamlplan/weka/model/SuvervisedFilterPreprocessor.java
package ai.libs.mlplan.multiclass.wekamlplan.weka.model;

import java.io.Serializable;

import ai.libs.jaicore.ml.WekaUtil;
import ai.libs.mlplan.multiclass.wekamlplan.sophisticated.FeaturePreprocessor;
import weka.attributeSelection.ASEvaluation;
import weka.attributeSelection.ASSearch;
import weka.attributeSelection.AttributeSelection;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;

@SuppressWarnings("serial")
public class SuvervisedFilterPreprocessor implements Serializable, FeaturePreprocessor {

    private final ASSearch searcher;
    private final ASEvaluation evaluator;
    private final AttributeSelection selector;
    private boolean prepared;

    public SuvervisedFilterPreprocessor(ASSearch searcher, ASEvaluation evaluator) {
        super();
        this.searcher = searcher;
        this.evaluator = evaluator;
        this.selector = new AttributeSelection();
        this.selector.setSearch(searcher);
        this.selector.setEvaluator(evaluator);
    }

    public SuvervisedFilterPreprocessor(ASSearch searcher, ASEvaluation evaluator, AttributeSelection selector) {
        super();
        this.searcher = searcher;
        this.evaluator = evaluator;
        this.selector = selector;
    }

    public ASSearch getSearcher() {
        return searcher;
    }

    public ASEvaluation getEvaluator() {
        return evaluator;
    }

    public AttributeSelection getSelector() {
        return selector;
    }

    public void prepare(Instances data) throws Exception {
        selector.SelectAttributes(data);
        prepared = true;
    }

    public Instance apply(Instance data) throws Exception {
        if (!prepared) {
            throw new IllegalStateException("Cannot apply preprocessor before it has been prepared!");
        }
        Instance inst = selector.reduceDimensionality(data);
        if (inst.dataset().classIndex() >= 0) {
            inst = WekaUtil.removeClassAttribute(inst);
        }
        for (int i = 0; i < inst.dataset().numAttributes(); i++) {
            Attribute a = inst.dataset().attribute(i);
            inst.dataset().renameAttribute(a, this.getClass().getSimpleName() + "_" + a.name());
        }
        return inst;
    }

    public Instances apply(Instances data) throws Exception {
        if (!prepared) {
            throw new IllegalStateException("Cannot apply preprocessor before it has been prepared!");
        }
        Instances inst = selector.reduceDimensionality(data);
        if (inst.classIndex() >= 0) {
            inst = WekaUtil.removeClassAttribute(inst);
        }
        for (int i = 0; i < inst.numAttributes(); i++) {
            Attribute a = inst.attribute(i);
            inst.renameAttribute(a, this.getClass().getSimpleName() + "_" + a.name());
        }
        return inst;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((evaluator == null) ? 0 : evaluator.hashCode());
        result = prime * result + ((searcher == null) ? 0 : searcher.hashCode());
        result = prime * result + ((selector == null) ? 0 : selector.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        SuvervisedFilterPreprocessor other = (SuvervisedFilterPreprocessor) obj;
        if (evaluator == null) {
            if (other.evaluator != null) {
                return false;
            }
        } else if (!evaluator.equals(other.evaluator)) {
            return false;
        }
        if (searcher == null) {
            if (other.searcher != null) {
                return false;
            }
        } else if (!searcher.equals(other.searcher)) {
            return false;
        }
        if (selector == null) {
            if (other.selector != null) {
                return false;
            }
        } else if (!selector.equals(other.selector)) {
            return false;
        }
        return true;
    }

    public boolean isPrepared() {
        return prepared;
    }

    public void setPrepared(boolean prepared) {
        this.prepared = prepared;
    }
}
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclasswithreduction/ClassSplit.java
package ai.libs.mlplan.multiclasswithreduction;

import java.util.ArrayList;
import java.util.Collection;

import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;

public class ClassSplit<T> {

    private final Collection<T> classes;
    private final Collection<T> l;
    private final Collection<T> r;

    public ClassSplit(final Collection<T> classes, final Collection<T> l, final Collection<T> r) {
        super();
        this.classes = classes;
        this.l = l;
        this.r = r;
    }

    public ClassSplit(final ClassSplit<T> split) {
        this(split.getClasses(), new ArrayList<>(split.getL()), new ArrayList<>(split.getR()));
    }

    public Collection<T> getClasses() {
        return this.classes;
    }

    public Collection<T> getL() {
        return this.l;
    }

    public Collection<T> getR() {
        return this.r;
    }

    @Override
    public int hashCode() {
        return new HashCodeBuilder().append(this.classes).append(this.l).append(this.r).toHashCode();
    }

    @Override
    public boolean equals(final Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null) {
            return false;
        }
        if (!(obj instanceof ClassSplit)) {
            return false;
        }
        ClassSplit<?> other = (ClassSplit<?>) obj;
        return new EqualsBuilder().append(other.classes, this.classes).append(other.l, this.l).append(other.r, this.r).isEquals();
    }

    @Override
    public String toString() {
        return "ClassSplit [classes=" + this.classes + ", l=" + this.l + ", r=" + this.r + "]";
    }
}
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclasswithreduction/NestedDichotomyUtil.java
package ai.libs.mlplan.multiclasswithreduction;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Random;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import ai.libs.jaicore.basic.sets.SetUtil;
import ai.libs.jaicore.ml.WekaUtil;
import ai.libs.jaicore.ml.classification.multiclass.reduction.splitters.RPNDSplitter;
import ai.libs.mlplan.multiclass.wekamlplan.weka.model.MLPipeline;
import weka.attributeSelection.InfoGainAttributeEval;
import weka.attributeSelection.Ranker;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.core.Instance;
import weka.core.Instances;

public class NestedDichotomyUtil {

    private NestedDichotomyUtil() {
        /* Intentionally left blank. Only prevents instantiation. */
    }

    private static final Logger logger = LoggerFactory.getLogger(NestedDichotomyUtil.class);

    public static ClassSplit<String> createGeneralRPNDBasedSplit(final Collection<String> classes, final Random rand, final String classifierName, final Instances data) throws InterruptedException {
        if (classes.size() < 2) {
            throw new IllegalArgumentException("Cannot compute split for less than two classes!");
        }
        try {
            RPNDSplitter splitter = new RPNDSplitter(rand, new MLPipeline(new Ranker(), new InfoGainAttributeEval(), AbstractClassifier.forName(classifierName, null)));
            Collection<Collection<String>> splitAsCollection = null;
            splitAsCollection = splitter.split(data);
            Iterator<Collection<String>> it = splitAsCollection.iterator();
            return new ClassSplit<>(classes, it.next(), it.next());
        } catch (InterruptedException e) {
            throw e;
        } catch (Exception e) {
            logger.error("Unexpected exception occurred while creating an RPND split", e);
            return null;
        }
    }

    public static ClassSplit<String> createGeneralRPNDBasedSplit(final Collection<String> classes, final Collection<String> s1, final Collection<String> s2, final Random rand, final String classifierName, final Instances data) {
        try {
            RPNDSplitter splitter = new RPNDSplitter(rand, AbstractClassifier.forName(classifierName, new String[] {}));
            Collection<Collection<String>> splitAsCollection = null;
            splitAsCollection = splitter.split(classes, s1, s2, data);
            Iterator<Collection<String>> it = splitAsCollection.iterator();
            return new ClassSplit<>(classes, it.next(), it.next());
        } catch (Exception e) {
            logger.error("Unexpected exception occurred while creating an RPND split", e);
        }
        return null;
    }

    public static ClassSplit<String> createUnaryRPNDBasedSplit(final Collection<String> classes, final Random rand, final String classifierName, final Instances data) {

        /* 2. if we have a leaf node, abort */
        if (classes.size() == 1) {
            return new ClassSplit<>(classes, null, null);
        }

        /* 3a. otherwise select two classes at random */
        List<String> copy = new ArrayList<>(classes);
        Collections.shuffle(copy, rand);
        String c1 = copy.get(0);
        String c2 = copy.get(1);
        Collection<String> s1 = new HashSet<>();
        s1.add(c1);
        Collection<String> s2 = new HashSet<>();
        s2.add(c2);

        /* 3b. and 3c. train a binary classifier for c1 vs c2 */
        Instances reducedData = WekaUtil.mergeClassesOfInstances(data, s1, s2);
        Classifier c = null;
        try {
            c = AbstractClassifier.forName(classifierName, new String[] {});
        } catch (Exception e1) {
            logger.error("Could not get object of classifier with name {}", classifierName, e1);
            return null;
        }
        try {
            c.buildClassifier(reducedData);
        } catch (Exception e) {
            logger.error("Could not train classifier", e);
        }

        /* 3d. insert the remaining classes */
        List<String> remainingClasses = new ArrayList<>(SetUtil.difference(SetUtil.difference(classes, s1), s2));
        int o1 = 0;
        int o2 = 0;
        for (int i = 0; i < remainingClasses.size(); i++) {
            String className = remainingClasses.get(i);
            Instances testData = WekaUtil.getInstancesOfClass(data, className);
            for (Instance inst : testData) {
                try {
                    double prediction = c.classifyInstance(WekaUtil.getRefactoredInstance(inst));
                    if (prediction == 0) {
                        o1++;
                    } else {
                        o2++;
                    }
                } catch (Exception e) {
                    logger.error("Could not get prediction for some instance to assign it to a meta-class", e);
                }
            }
        }
        if (o1 > o2) {
            s1.addAll(remainingClasses);
        } else {
            s2.addAll(remainingClasses);
        }
        return new ClassSplit<>(classes, s1, s2);
    }
}
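A hedged sketch of computing an RPND split over all classes of a dataset; the dataset path and the choice of J48 as the inner classifier are assumptions, and the snippet is assumed to run inside a method that declares throws Exception.

Instances data = new DataSource("data/segment.arff").getDataSet(); // hypothetical path
data.setClassIndex(data.numAttributes() - 1);

// collect the class names from the class attribute
List<String> classes = new ArrayList<>();
for (int i = 0; i < data.numClasses(); i++) {
    classes.add(data.classAttribute().value(i));
}

ClassSplit<String> split = NestedDichotomyUtil.createGeneralRPNDBasedSplit(classes, new Random(42), "weka.classifiers.trees.J48", data);
System.out.println(split.getL() + " vs " + split.getR());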
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multiclasswithreduction/RPNDOracleTaskSolver.java
package ai.libs.mlplan.multiclasswithreduction;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import ai.libs.jaicore.basic.sets.Pair;
import ai.libs.jaicore.basic.sets.SetUtil;
import ai.libs.jaicore.logic.fol.structure.ConstantParam;
import ai.libs.jaicore.logic.fol.structure.Literal;
import ai.libs.jaicore.logic.fol.structure.Monom;
import ai.libs.jaicore.logic.fol.structure.VariableParam;
import ai.libs.jaicore.logic.fol.theories.set.SetTheoryUtil;
import ai.libs.jaicore.planning.classical.problems.ceoc.CEOCAction;
import ai.libs.jaicore.planning.classical.problems.ceoc.CEOCOperation;
import ai.libs.jaicore.planning.core.Action;
import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.ceociptfd.OracleTaskResolver;
import ai.libs.jaicore.planning.hierarchical.problems.ceocipstn.CEOCIPSTNPlanningProblem;
import weka.core.Instances;

public class RPNDOracleTaskSolver implements OracleTaskResolver {

    private static final Logger logger = LoggerFactory.getLogger(RPNDOracleTaskSolver.class);
    private final Random rand;
    private final String classifierName;
    private final Instances data;
    private CEOCOperation configChildNodesOp;

    public RPNDOracleTaskSolver(final Random rand, final String classifierName, final Instances data, final CEOCIPSTNPlanningProblem problem) {
        super();
        this.rand = rand;
        this.classifierName = classifierName;
        this.data = data;
        for (CEOCOperation op : problem.getDomain().getOperations()) {
            if (op.getName().equals("configChildNodes")) {
                this.configChildNodesOp = op;
                break;
            }
        }
        if (this.configChildNodesOp == null) {
            throw new IllegalArgumentException("Domain has no operation with name \"configChildNodes\"");
        }
    }

    private interface Splitter {
        Split split(Collection<String> set) throws InterruptedException;
    }

    @SuppressWarnings("serial")
    class Split extends Pair<Set<String>, Set<String>> {

        public Split(final Set<String> x, final Set<String> y) {
            super(x, y);
        }

        @Override
        public String toString() {
            return "Split [getX()=" + this.getX() + ", getY()=" + this.getY() + "]";
        }
    }

    @SuppressWarnings("serial")
    class SplitException extends Exception {
        public SplitException(final Exception e) {
            super(e);
        }
    }

    private class RPNDSplitter implements Splitter {

        private final Instances data;

        public RPNDSplitter(final Instances data) {
            super();
            this.data = data;
        }

        @Override
        public Split split(final Collection<String> set) throws InterruptedException {
            ClassSplit<String> split = NestedDichotomyUtil.createGeneralRPNDBasedSplit(set, RPNDOracleTaskSolver.this.rand, RPNDOracleTaskSolver.this.classifierName, this.data);
            return new Split(new HashSet<>(split.getL()), new HashSet<>(split.getR()));
        }
    }

    @Override
    public Collection<List<Action>> getSubSolutions(final Monom state, final Literal task) throws Exception {

        /* prepare template grounding for actions */
        String nameOfParent = task.getConstantParams().get(0).getName();
        String nameOfLC = task.getConstantParams().get(1).getName();
        String nameOfRC = task.getConstantParams().get(2).getName();
        Map<VariableParam, ConstantParam> groundingTemplate = new HashMap<>();
        groundingTemplate.put(new VariableParam("p"), new ConstantParam(nameOfParent));
        groundingTemplate.put(new VariableParam("lc"), new ConstantParam(nameOfLC));
        groundingTemplate.put(new VariableParam("rc"), new ConstantParam(nameOfRC));

        List<String> set = new ArrayList<>(SetTheoryUtil.getObjectsInSet(state, nameOfParent));
        logger.info("Compute RPND split for {}", set);
        if (set.size() <= 1) {
            return new ArrayList<>();
        }

        /* if no decision is to be made, return the single possible solution */
        if (set.size() == 2) {

            /* determine subsolutions */
            Collection<List<Action>> subsolutions = new ArrayList<>();
            Map<VariableParam, ConstantParam> grounding = new HashMap<>(groundingTemplate);
            grounding.put(new VariableParam("ss"), new ConstantParam("{" + set.get(0) + "}"));
            List<Action> subsolution = new ArrayList<>();
            subsolution.add(new CEOCAction(this.configChildNodesOp, grounding));
            subsolutions.add(subsolution);
            return subsolutions;
        }

        List<Splitter> splitters = new ArrayList<>();
        int max = 1;
        logger.info("Make {} suggestions for {} classes", max, set.size());
        for (int i = 0; i < max; i++) {
            splitters.add(new RPNDSplitter(this.data));
        }

        /* determine subsolutions */
        Collection<List<Action>> subsolutions = new ArrayList<>();
        for (Splitter splitter : splitters) {
            logger.info("Compute next split");
            Split split = splitter.split(set);
            logger.info("Split computed: {}", split);
            Map<VariableParam, ConstantParam> grounding = new HashMap<>(groundingTemplate);
            grounding.put(new VariableParam("ss"), new ConstantParam(SetUtil.serializeAsSet(split.getX())));
            List<Action> subsolution = new ArrayList<>();
            subsolution.add(new CEOCAction(this.configChildNodesOp, grounding));
            subsolutions.add(subsolution);
        }
        logger.info("Ready with RPND computation");
        return subsolutions;
    }
}
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/multilabel/MekaPipelineFactory.java
package ai.libs.mlplan.multilabel;

import java.util.LinkedList;
import java.util.List;
import java.util.Map.Entry;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import ai.libs.hasco.exceptions.ComponentInstantiationFailedException;
import ai.libs.hasco.model.ComponentInstance;
import ai.libs.hasco.model.NumericParameterDomain;
import ai.libs.jaicore.basic.sets.SetUtil;
import ai.libs.mlplan.multiclass.wekamlplan.IClassifierFactory;
import meka.classifiers.multilabel.MultiLabelClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.MultipleClassifiersCombiner;
import weka.classifiers.SingleClassifierEnhancer;
import weka.classifiers.functions.SMO;
import weka.classifiers.functions.supportVector.Kernel;
import weka.core.OptionHandler;

/**
 * A pipeline factory that converts a given ComponentInstance, consisting of
 * components that correspond to MEKA algorithms, into a MultiLabelClassifier.
 */
public class MekaPipelineFactory implements IClassifierFactory {

    private static final String PARAMETER_NAME_WITH_DASH_WARNING = "Required interface of component {} has dash or underscore in interface id {}";

    /* logging */
    private static final Logger logger = LoggerFactory.getLogger(MekaPipelineFactory.class);

    @Override
    public Classifier getComponentInstantiation(final ComponentInstance ci) throws ComponentInstantiationFailedException {
        MultiLabelClassifier instance = null;
        try {
            instance = (MultiLabelClassifier) this.getClassifier(ci);
            return instance;
        } catch (Exception e) {
            throw new ComponentInstantiationFailedException(e, "Could not instantiate " + ci.getComponent().getName());
        }
    }

    private Classifier getClassifier(final ComponentInstance ci) throws Exception {
        Classifier c = (Classifier) Class.forName(ci.getComponent().getName()).newInstance();
        List<String> optionsList = this.getOptionsForParameterValues(ci);
        for (Entry<String, ComponentInstance> reqI : ci.getSatisfactionOfRequiredInterfaces().entrySet()) {
            if (reqI.getKey().startsWith("-") || reqI.getKey().startsWith("_")) {
                logger.warn(PARAMETER_NAME_WITH_DASH_WARNING, ci.getComponent(), reqI.getKey());
            }
            if (!reqI.getKey().equals("B") && !(c instanceof SingleClassifierEnhancer) && !(reqI.getKey().equals("K") && ci.getComponent().getName().endsWith("SMO"))) {
                logger.warn("Classifier {} is not a single classifier enhancer and still has an unexpected required interface: {}. Try to set this configuration in the form of options.", ci.getComponent().getName(), reqI);
                optionsList.add("-" + reqI.getKey());
                optionsList.add(reqI.getValue().getComponent().getName());
                if (!reqI.getValue().getParameterValues().isEmpty() || !reqI.getValue().getSatisfactionOfRequiredInterfaces().isEmpty()) {
                    optionsList.add("--");
                    optionsList.addAll(this.getOptionsRecursively(reqI.getValue()));
                }
            }
        }
        if (c instanceof OptionHandler) {
            ((OptionHandler) c).setOptions(optionsList.toArray(new String[0]));
        }
        for (Entry<String, ComponentInstance> reqI : ci.getSatisfactionOfRequiredInterfaces().entrySet()) {
            if (reqI.getKey().startsWith("-") || reqI.getKey().startsWith("_")) {
                logger.warn(PARAMETER_NAME_WITH_DASH_WARNING, ci.getComponent(), reqI.getKey());
            }
            if (reqI.getKey().equals("K") && ci.getComponent().getName().endsWith("SMO")) {
                ComponentInstance kernelCI = reqI.getValue();
                logger.debug("Set kernel for SMO to be {}", kernelCI.getComponent().getName());
                Kernel k = (Kernel) Class.forName(kernelCI.getComponent().getName()).newInstance();
                k.setOptions(this.getOptionsForParameterValues(kernelCI).toArray(new String[0]));
                if (c instanceof SMO) {
                    /* attach the configured kernel to the SMO classifier */
                    ((SMO) c).setKernel(k);
                }
            } else if (reqI.getKey().equals("B") && (c instanceof MultipleClassifiersCombiner)) {
                Classifier[] classifiers = this.getListOfBaseLearners(reqI.getValue()).toArray(new Classifier[0]);
                ((MultipleClassifiersCombiner) c).setClassifiers(classifiers);
            } else if (reqI.getKey().equals("W") && (c instanceof SingleClassifierEnhancer)) {
                if (logger.isTraceEnabled()) {
                    logger.trace("Set {} as a base classifier for {}", reqI.getValue().getComponent().getName(), ci.getComponent().getName());
                }
                ((SingleClassifierEnhancer) c).setClassifier(this.getClassifier(reqI.getValue()));
            }
        }
        return c;
    }

    private List<Classifier> getListOfBaseLearners(final ComponentInstance ci) throws Exception {
        List<Classifier> baseLearnerList = new LinkedList<>();
        if (ci.getComponent().getName().equals("MultipleBaseLearnerListElement")) {
            baseLearnerList.add(this.getClassifier(ci.getSatisfactionOfRequiredInterfaces().get("classifier")));
        } else if (ci.getComponent().getName().equals("MultipleBaseLearnerListChain")) {
            baseLearnerList.add(this.getClassifier(ci.getSatisfactionOfRequiredInterfaces().get("classifier")));
            baseLearnerList.addAll(this.getListOfBaseLearners(ci.getSatisfactionOfRequiredInterfaces().get("chain")));
        }
        return baseLearnerList;
    }

    private List<String> getOptionsForParameterValues(final ComponentInstance ci) {
        List<String> optionsList = new LinkedList<>();
        for (Entry<String, String> parameterValue : ci.getParameterValues().entrySet()) {
            if (parameterValue.getKey().startsWith("-") || parameterValue.getKey().startsWith("_")) {
                logger.warn(PARAMETER_NAME_WITH_DASH_WARNING, ci.getComponent(), parameterValue);
            }
            if (parameterValue.getValue().equals("true")) {
                optionsList.add("-" + parameterValue.getKey());
            } else if (parameterValue.getKey().toLowerCase().contains("activator") || parameterValue.getValue().equals("false")) {
                // ignore this parameter
            } else {
                optionsList.add("-" + parameterValue.getKey());
                if (ci.getComponent().getParameterWithName(parameterValue.getKey()).isNumeric()) {
                    NumericParameterDomain numDom = (NumericParameterDomain) ci.getComponent().getParameterWithName(parameterValue.getKey()).getDefaultDomain();
                    if (numDom.isInteger()) {
                        optionsList.add(((int) Double.parseDouble(parameterValue.getValue())) + "");
                    } else {
                        optionsList.add(parameterValue.getValue());
                    }
                } else {
                    optionsList.add(parameterValue.getValue());
                }
            }
        }
        return optionsList;
    }

    private List<String> getOptionsRecursively(final ComponentInstance ci) {
        List<String> optionsList = this.getOptionsForParameterValues(ci);
        for (Entry<String, ComponentInstance> reqI : ci.getSatisfactionOfRequiredInterfaces().entrySet()) {
            if (reqI.getKey().startsWith("-") || reqI.getKey().startsWith("_")) {
                logger.warn(PARAMETER_NAME_WITH_DASH_WARNING, ci.getComponent(), reqI.getKey());
            }
            optionsList.add("-" + reqI.getKey());
            if (reqI.getKey().equals("B") || reqI.getKey().equals("K")) {
                List<String> valueList = new LinkedList<>();
                valueList.add(reqI.getValue().getComponent().getName());
                valueList.addAll(this.getOptionsRecursively(reqI.getValue()));
                optionsList.add(SetUtil.implode(valueList, " "));
            } else {
                optionsList.add(reqI.getValue().getComponent().getName());
                if (!reqI.getValue().getParameterValues().isEmpty() || !reqI.getValue().getSatisfactionOfRequiredInterfaces().isEmpty()) {
                    optionsList.add("--");
                    optionsList.addAll(this.getOptionsRecursively(reqI.getValue()));
                }
            }
        }
        return optionsList;
    }
}
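A heavily hedged sketch of driving the factory from a hand-built component description. The Component(String name) and ComponentInstance(component, parameterValues, requiredInterfaces) constructors used here are assumptions about the HASCO model API, and the choice of MEKA's BR wrapped around J48 is illustrative only; the snippet is assumed to run inside a method that declares throws Exception.

// hypothetical component description: MEKA Binary Relevance with a J48 base learner
Component brComponent = new Component("meka.classifiers.multilabel.BR");   // assumed constructor
Component j48Component = new Component("weka.classifiers.trees.J48");

Map<String, ComponentInstance> satisfaction = new HashMap<>();
satisfaction.put("W", new ComponentInstance(j48Component, new HashMap<>(), new HashMap<>())); // assumed constructor
ComponentInstance ci = new ComponentInstance(brComponent, new HashMap<>(), satisfaction);

Classifier c = new MekaPipelineFactory().getComponentInstantiation(ci);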
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/Util.java
package ai.libs.reduction;

import java.io.BufferedReader;
import java.io.FileReader;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import ai.libs.jaicore.ml.WekaUtil;
import ai.libs.jaicore.ml.classification.multiclass.reduction.MCTreeNodeReD;
import ai.libs.jaicore.ml.classification.multiclass.reduction.splitters.RPNDSplitter;
import ai.libs.reduction.ensemble.simple.EnsembleOfSimpleOneStepReductionsExperiment;
import ai.libs.reduction.single.ReductionExperiment;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.meta.Vote;
import weka.core.Instances;

public class Util {

    private static final Logger logger = LoggerFactory.getLogger(Util.class);
    private static final String LABEL_TRAIN_TIME = "trainTime";

    private Util() {
        /* Left blank to prevent instantiation of this class. */
    }

    public static List<Map<String, Object>> conductSingleOneStepReductionExperiment(final ReductionExperiment experiment) throws Exception {

        /* load data */
        Instances data = new Instances(new BufferedReader(new FileReader(experiment.getDataset())));
        data.setClassIndex(data.numAttributes() - 1);

        /* prepare basis for experiments */
        int seed = experiment.getSeed();
        Classifier classifierForRPNDSplit = AbstractClassifier.forName(experiment.getNameOfInnerClassifier(), null);
        Classifier leftClassifier = AbstractClassifier.forName(experiment.getNameOfLeftClassifier(), null);
        Classifier innerClassifier = AbstractClassifier.forName(experiment.getNameOfInnerClassifier(), null);
        Classifier rightClassifier = AbstractClassifier.forName(experiment.getNameOfRightClassifier(), null);
        RPNDSplitter splitter = new RPNDSplitter(new Random(seed), classifierForRPNDSplit);

        /* conduct experiments */
        List<Map<String, Object>> results = new ArrayList<>();
        for (int k = 0; k < 10; k++) {
            List<Collection<String>> classSplit;
            try {
                classSplit = new ArrayList<>(splitter.split(data));
            } catch (Exception e) {
                throw new RuntimeException("Could not create RPND split.", e);
            }
            MCTreeNodeReD classifier = new MCTreeNodeReD(innerClassifier, classSplit.get(0), leftClassifier, classSplit.get(1), rightClassifier);
            long start = System.currentTimeMillis();
            Map<String, Object> result = new HashMap<>();
            List<Instances> dataSplit = WekaUtil.getStratifiedSplit(data, (seed + k), .7);
            classifier.buildClassifier(dataSplit.get(0));
            long time = System.currentTimeMillis() - start;
            Evaluation eval = new Evaluation(dataSplit.get(0));
            eval.evaluateModel(classifier, dataSplit.get(1));
            double loss = (100 - eval.pctCorrect()) / 100f;
            logger.info("Conducted experiment {} with split {}/{}. Loss: {}. Time: {}ms.", k, classSplit.get(0), classSplit.get(1), loss, time);
            result.put("errorRate", loss);
            result.put(LABEL_TRAIN_TIME, time);
            results.add(result);
        }
        return results;
    }

    public static List<Map<String, Object>> conductEnsembleOfOneStepReductionsExperiment(final EnsembleOfSimpleOneStepReductionsExperiment experiment) throws Exception {

        /* load data */
        Instances data = new Instances(new BufferedReader(new FileReader(experiment.getDataset())));
        data.setClassIndex(data.numAttributes() - 1);

        /* prepare basis for experiments */
        int seed = experiment.getSeed();
        String classifier = experiment.getNameOfClassifier();
        RPNDSplitter splitter = new RPNDSplitter(new Random(seed), AbstractClassifier.forName(classifier, null));

        /* conduct experiments */
        List<Map<String, Object>> results = new ArrayList<>();
        for (int k = 0; k < 10; k++) {
            Vote ensemble = new Vote();
            ensemble.setOptions(new String[] { "-R", "MAJ" });
            long start = System.currentTimeMillis();
            List<Instances> dataSplit = WekaUtil.getStratifiedSplit(data, (seed + k), .7);
            for (int i = 0; i < experiment.getNumberOfStumps(); i++) {
                List<Collection<String>> classSplit;
                classSplit = new ArrayList<>(splitter.split(data));
                MCTreeNodeReD tree = new MCTreeNodeReD(classifier, classSplit.get(0), classifier, classSplit.get(1), classifier);
                tree.buildClassifier(dataSplit.get(0));
                ensemble.addPreBuiltClassifier(tree);
            }
            Map<String, Object> result = new HashMap<>();
            result.put(LABEL_TRAIN_TIME, System.currentTimeMillis() - start);

            /* now evaluate the ensemble */
            ensemble.buildClassifier(data);
            Evaluation eval = new Evaluation(dataSplit.get(0));
            eval.evaluateModel(ensemble, dataSplit.get(1));
            double loss = (100 - eval.pctCorrect()) / 100f;
            logger.info("Conducted experiment {}. Loss: {}. Time: {}ms.", k, loss, result.get(LABEL_TRAIN_TIME));
            result.put("errorRate", loss);
            results.add(result);
        }
        return results;
    }
}
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/ensemble
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/ensemble/simple/EnsembleOfSimpleOneStepReductionsExperiment.java
package ai.libs.reduction.ensemble.simple;

public class EnsembleOfSimpleOneStepReductionsExperiment {

    private final int seed;
    private final String dataset;
    private final String nameOfClassifier;
    private final int numberOfStumps;
    private double errorRate;
    private String exception;

    public EnsembleOfSimpleOneStepReductionsExperiment(int seed, String dataset, String nameOfClassifier, int numberOfStumps) {
        super();
        this.seed = seed;
        this.dataset = dataset;
        this.nameOfClassifier = nameOfClassifier;
        this.numberOfStumps = numberOfStumps;
    }

    public EnsembleOfSimpleOneStepReductionsExperiment(int seed, String dataset, String nameOfClassifier, int numberOfStumps, double errorRate, String exception) {
        this(seed, dataset, nameOfClassifier, numberOfStumps);
        this.errorRate = errorRate;
        this.exception = exception;
    }

    public int getSeed() {
        return seed;
    }

    public String getDataset() {
        return dataset;
    }

    public double getErrorRate() {
        return errorRate;
    }

    public void setErrorRate(double errorRate) {
        this.errorRate = errorRate;
    }

    public String getException() {
        return exception;
    }

    public void setException(String exception) {
        this.exception = exception;
    }

    public String getNameOfClassifier() {
        return nameOfClassifier;
    }

    public int getNumberOfStumps() {
        return numberOfStumps;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((dataset == null) ? 0 : dataset.hashCode());
        result = prime * result + ((nameOfClassifier == null) ? 0 : nameOfClassifier.hashCode());
        result = prime * result + numberOfStumps;
        result = prime * result + seed;
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        EnsembleOfSimpleOneStepReductionsExperiment other = (EnsembleOfSimpleOneStepReductionsExperiment) obj;
        if (dataset == null) {
            if (other.dataset != null)
                return false;
        } else if (!dataset.equals(other.dataset))
            return false;
        if (nameOfClassifier == null) {
            if (other.nameOfClassifier != null)
                return false;
        } else if (!nameOfClassifier.equals(other.nameOfClassifier))
            return false;
        if (numberOfStumps != other.numberOfStumps)
            return false;
        if (seed != other.seed)
            return false;
        return true;
    }
}
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/ensemble
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/ensemble/simple/MySQLEnsembleOfSimpleOneStepReductionsExperiment.java
package ai.libs.reduction.ensemble.simple;

public class MySQLEnsembleOfSimpleOneStepReductionsExperiment {

    private final int id;
    private final EnsembleOfSimpleOneStepReductionsExperiment experiment;

    public MySQLEnsembleOfSimpleOneStepReductionsExperiment(int id, EnsembleOfSimpleOneStepReductionsExperiment experiment) {
        super();
        this.id = id;
        this.experiment = experiment;
    }

    public int getId() {
        return id;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((experiment == null) ? 0 : experiment.hashCode());
        result = prime * result + id;
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        MySQLEnsembleOfSimpleOneStepReductionsExperiment other = (MySQLEnsembleOfSimpleOneStepReductionsExperiment) obj;
        if (experiment == null) {
            if (other.experiment != null)
                return false;
        } else if (!experiment.equals(other.experiment))
            return false;
        if (id != other.id)
            return false;
        return true;
    }

    public EnsembleOfSimpleOneStepReductionsExperiment getExperiment() {
        return experiment;
    }
}
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/ensemble
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/ensemble/simple/MySQLEnsembleOfSimpleOneStepReductionsExperimentRunner.java
package ai.libs.reduction.ensemble.simple;

import java.io.File;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;

import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import ai.libs.jaicore.basic.SQLAdapter;
import ai.libs.reduction.Util;

public class MySQLEnsembleOfSimpleOneStepReductionsExperimentRunner {

    private static final String KEY_ERROR_RATE = "errorRate";
    private static final String TABLE_NAME = "homogeneousensemblesofreductionstumps";
    private final SQLAdapter adapter;
    private final Collection<MySQLEnsembleOfSimpleOneStepReductionsExperiment> knownExperiments = new HashSet<>();
    private final Logger logger = LoggerFactory.getLogger(MySQLEnsembleOfSimpleOneStepReductionsExperimentRunner.class);

    public MySQLEnsembleOfSimpleOneStepReductionsExperimentRunner(final String host, final String user, final String password, final String database) throws SQLException {
        this.adapter = new SQLAdapter(host, user, password, database);
        this.knownExperiments.addAll(this.getConductedExperiments());
    }

    public Collection<MySQLEnsembleOfSimpleOneStepReductionsExperiment> getConductedExperiments() throws SQLException {
        Collection<MySQLEnsembleOfSimpleOneStepReductionsExperiment> experiments = new HashSet<>();
        ResultSet rs = this.adapter.getRowsOfTable(TABLE_NAME);
        while (rs.next()) {
            experiments.add(new MySQLEnsembleOfSimpleOneStepReductionsExperiment(rs.getInt("evaluation_id"),
                    new EnsembleOfSimpleOneStepReductionsExperiment(rs.getInt("seed"), rs.getString("dataset"), rs.getString("classifier"), rs.getInt("size"), rs.getDouble(KEY_ERROR_RATE), rs.getString("exception"))));
        }
        return experiments;
    }

    public MySQLEnsembleOfSimpleOneStepReductionsExperiment createAndGetExperimentIfNotConducted(final int seed, final File dataFile, final String nameOfClassifier, final int size) throws SQLException {

        /* first check whether exactly the same experiment (with the same seed) has been conducted previously */
        EnsembleOfSimpleOneStepReductionsExperiment exp = new EnsembleOfSimpleOneStepReductionsExperiment(seed, dataFile.getAbsolutePath(), nameOfClassifier, size);
        Optional<MySQLEnsembleOfSimpleOneStepReductionsExperiment> existingExperiment = this.knownExperiments.stream().filter(e -> e.getExperiment().equals(exp)).findAny();
        if (existingExperiment.isPresent()) {
            return null;
        }

        /* otherwise, check if the same classifier combination has been tried before */
        if (this.canInfeasibilityBeDerived(this.knownExperiments, exp)) {
            return null;
        }

        Map<String, Object> map = new HashMap<>();
        map.put("seed", String.valueOf(seed));
        map.put("dataset", dataFile.getAbsolutePath());
        map.put("classifier", nameOfClassifier);
        map.put("size", size);
        int id = this.adapter.insert(TABLE_NAME, map);
        return new MySQLEnsembleOfSimpleOneStepReductionsExperiment(id, exp);
    }

    private void updateExperiment(final MySQLEnsembleOfSimpleOneStepReductionsExperiment exp, final Map<String, ? extends Object> values) throws SQLException {
        Map<String, String> where = new HashMap<>();
        where.put("evaluation_id", String.valueOf(exp.getId()));
        this.adapter.update(TABLE_NAME, values, where);
    }

    public void conductExperiment(final MySQLEnsembleOfSimpleOneStepReductionsExperiment exp) throws Exception {
        List<Map<String, Object>> mccvResults = Util.conductEnsembleOfOneStepReductionsExperiment(exp.getExperiment());
        DescriptiveStatistics errorRate = new DescriptiveStatistics();
        DescriptiveStatistics runtime = new DescriptiveStatistics();
        for (Map<String, Object> result : mccvResults) {
            errorRate.addValue((double) result.get(KEY_ERROR_RATE));
            runtime.addValue((long) result.get("trainTime"));
        }

        /* prepare values for experiment update */
        Map<String, Object> values = new HashMap<>();
        values.put(KEY_ERROR_RATE, errorRate.getMean());
        this.updateExperiment(exp, values);
    }

    public void markExperimentAsUnsolvable(final MySQLEnsembleOfSimpleOneStepReductionsExperiment exp) throws SQLException {
        Map<String, String> values = new HashMap<>();
        for (String key : new String[] { KEY_ERROR_RATE }) {
            values.put(key, "-1");
        }
        this.updateExperiment(exp, values);
    }

    public void associateExperimentWithException(final MySQLEnsembleOfSimpleOneStepReductionsExperiment exp, final Throwable e) throws SQLException {
        Map<String, String> values = new HashMap<>();
        for (String key : new String[] { KEY_ERROR_RATE }) {
            values.put(key, "-1");
        }
        values.put("exception", e.getClass().getName() + "\n" + e.getMessage());
        this.updateExperiment(exp, values);
    }

    private boolean canInfeasibilityBeDerived(final Collection<MySQLEnsembleOfSimpleOneStepReductionsExperiment> experimentsWithResults, final EnsembleOfSimpleOneStepReductionsExperiment experimentInQuestion) {
        for (MySQLEnsembleOfSimpleOneStepReductionsExperiment knownExperiment : experimentsWithResults) {
            if (!knownExperiment.getExperiment().getDataset().equals(experimentInQuestion.getDataset())) {
                continue;
            }
            EnsembleOfSimpleOneStepReductionsExperiment re = knownExperiment.getExperiment();
            if (re.getException() != null && re.getNameOfClassifier().equals(experimentInQuestion.getNameOfClassifier())) {
                this.logger.debug("Skipping because {} is known to be problematic as classifier on {} due to {}", experimentInQuestion.getNameOfClassifier(), re.getDataset(), re.getException());
                return true;
            }
        }
        return false;
    }
}
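A hedged usage sketch of the runner; the connection parameters are placeholders, the table homogeneousensemblesofreductionstumps is expected to exist, and the snippet is assumed to run inside a method that declares throws Exception.

MySQLEnsembleOfSimpleOneStepReductionsExperimentRunner runner =
        new MySQLEnsembleOfSimpleOneStepReductionsExperimentRunner("localhost", "user", "password", "experiments"); // placeholder credentials
MySQLEnsembleOfSimpleOneStepReductionsExperiment exp =
        runner.createAndGetExperimentIfNotConducted(0, new File("data/iris.arff"), "weka.classifiers.trees.J48", 10); // hypothetical setup
if (exp != null) { // null means the experiment was conducted before or is known to be infeasible
    try {
        runner.conductExperiment(exp);
    } catch (Exception e) {
        runner.associateExperimentWithException(exp, e);
    }
}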
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single/BestOfKAtRandomExperiment.java
package ai.libs.reduction.single;

public class BestOfKAtRandomExperiment extends ReductionExperiment {

    private final int k;
    private final int mccvRepeats;

    public BestOfKAtRandomExperiment(final int seed, final String dataset, final String nameOfLeftClassifier, final String nameOfInnerClassifier, final String nameOfRightClassifier, final int k, final int mccvRepeats) {
        super(seed, dataset, nameOfLeftClassifier, nameOfInnerClassifier, nameOfRightClassifier);
        this.k = k;
        this.mccvRepeats = mccvRepeats;
    }

    public int getK() {
        return this.k;
    }

    public int getMccvRepeats() {
        return this.mccvRepeats;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = super.hashCode();
        result = prime * result + this.k;
        result = prime * result + this.mccvRepeats;
        return result;
    }

    @Override
    public boolean equals(final Object obj) {
        if (this == obj) {
            return true;
        }
        if (!super.equals(obj)) {
            return false;
        }
        if (this.getClass() != obj.getClass()) {
            return false;
        }
        BestOfKAtRandomExperiment other = (BestOfKAtRandomExperiment) obj;
        if (this.k != other.k) {
            return false;
        }
        return this.mccvRepeats == other.mccvRepeats;
    }

    @Override
    public String toString() {
        return "BestOfKAtRandomExperiment [k=" + this.k + ", mccvRepeats=" + this.mccvRepeats + "]";
    }
}
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single/ExperimentRunner.java
package ai.libs.reduction.single;

import java.io.BufferedReader;
import java.io.FileReader;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import ai.libs.jaicore.experiments.exceptions.ExperimentEvaluationFailedException;
import ai.libs.jaicore.ml.WekaUtil;
import ai.libs.jaicore.ml.classification.multiclass.reduction.MCTreeNodeReD;
import ai.libs.jaicore.ml.classification.multiclass.reduction.splitters.ISplitter;
import ai.libs.jaicore.ml.classification.multiclass.reduction.splitters.ISplitterFactory;
import ai.libs.jaicore.ml.core.evaluation.measure.singlelabel.ZeroOneLoss;
import ai.libs.jaicore.ml.evaluation.evaluators.weka.FixedSplitClassifierEvaluator;
import ai.libs.jaicore.ml.evaluation.evaluators.weka.MonteCarloCrossValidationEvaluator;
import ai.libs.jaicore.ml.evaluation.evaluators.weka.splitevaluation.SimpleSLCSplitBasedClassifierEvaluator;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.core.Instances;

public class ExperimentRunner<T extends ISplitter> {

    private final int k;
    private final int mccvRepeats;
    private final ISplitterFactory<T> splitterFactory;
    private final Logger logger = LoggerFactory.getLogger(ExperimentRunner.class);

    public ExperimentRunner(final int k, final int mccvRepeats, final ISplitterFactory<T> splitterFactory) {
        super();
        this.k = k;
        this.mccvRepeats = mccvRepeats;
        this.splitterFactory = splitterFactory;
    }

    public Map<String, Object> conductSingleOneStepReductionExperiment(final ReductionExperiment experiment) throws Exception {

        /* load data */
        Instances data = new Instances(new BufferedReader(new FileReader(experiment.getDataset())));
        data.setClassIndex(data.numAttributes() - 1);

        /* prepare basis for experiments */
        int seed = experiment.getSeed();
        Classifier leftClassifier = AbstractClassifier.forName(experiment.getNameOfLeftClassifier(), null);
        Classifier innerClassifier = AbstractClassifier.forName(experiment.getNameOfInnerClassifier(), null);
        Classifier rightClassifier = AbstractClassifier.forName(experiment.getNameOfRightClassifier(), null);

        List<Instances> outerSplit = WekaUtil.getStratifiedSplit(data, experiment.getSeed(), .7);
        MonteCarloCrossValidationEvaluator mccv = new MonteCarloCrossValidationEvaluator(new SimpleSLCSplitBasedClassifierEvaluator(new ZeroOneLoss()), this.mccvRepeats, outerSplit.get(0), .7, seed);
        ISplitter splitter = this.splitterFactory.getSplitter(seed);

        /* compute best of k splits */
        MCTreeNodeReD bestFoundClassifier = null;
        double bestFoundScore = Double.MAX_VALUE;
        for (int i = 0; i < this.k; i++) {
            List<Collection<String>> classSplit;
            try {
                classSplit = new ArrayList<>(splitter.split(outerSplit.get(0)));
            } catch (Exception e) {
                throw new ExperimentEvaluationFailedException("Could not create a split.", e);
            }
            MCTreeNodeReD classifier = new MCTreeNodeReD(innerClassifier, classSplit.get(0), leftClassifier, classSplit.get(1), rightClassifier);
            double loss = mccv.evaluate(classifier);
            this.logger.info("\t\t\tComputed loss {}", loss);
            if (loss < bestFoundScore) {
                bestFoundScore = loss;
                bestFoundClassifier = classifier;
            }
        }

        /* train classifier on all data */
        double loss = new FixedSplitClassifierEvaluator(outerSplit.get(0), outerSplit.get(1)).evaluate(bestFoundClassifier);
        Map<String, Object> result = new HashMap<>();
        this.logger.info("\t\t\tBest previously observed loss was {}. The retrained classifier achieves {} on the full data.", bestFoundScore, loss);
        result.put("errorRate", loss);
        return result;
    }
}
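A hedged sketch of wiring the runner with an RPND splitter factory. The lambda assumes ISplitterFactory declares a single method getSplitter(int seed), matching the call above; the dataset path and classifier choices are assumptions, and the snippet is assumed to run inside a method that declares throws Exception.

// assumed: ISplitterFactory<T> is a functional interface with T getSplitter(int seed)
ISplitterFactory<RPNDSplitter> factory = seed -> new RPNDSplitter(new Random(seed), new weka.classifiers.trees.J48());
ExperimentRunner<RPNDSplitter> runner = new ExperimentRunner<>(5, 3, factory); // best of k=5 splits, 3 MCCV repeats
Map<String, Object> result = runner.conductSingleOneStepReductionExperiment(
        new ReductionExperiment(0, "data/vehicle.arff", // hypothetical path
                "weka.classifiers.trees.J48", "weka.classifiers.trees.J48", "weka.classifiers.trees.J48"));
System.out.println(result.get("errorRate"));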
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single/MySQLReductionExperiment.java
package ai.libs.reduction.single;

import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang3.builder.EqualsBuilder;

public class MySQLReductionExperiment {

    private final int id;
    private final ReductionExperiment experiment;

    public MySQLReductionExperiment(final int id, final ReductionExperiment experiment) {
        super();
        this.id = id;
        this.experiment = experiment;
    }

    public int getId() {
        return this.id;
    }

    public ReductionExperiment getExperiment() {
        return this.experiment;
    }

    @Override
    public int hashCode() {
        return new HashCodeBuilder().append(this.experiment).append(this.id).toHashCode();
    }

    @Override
    public boolean equals(final Object obj) {
        if (!(obj instanceof MySQLReductionExperiment)) {
            return false;
        }
        MySQLReductionExperiment other = (MySQLReductionExperiment) obj;
        return new EqualsBuilder().append(other.id, this.id).append(other.experiment, this.experiment).isEquals();
    }

    @Override
    public String toString() {
        return "MySQLReductionExperiment [id=" + this.id + ", experiment=" + this.experiment + "]";
    }
}
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single/ReductionExperiment.java
package ai.libs.reduction.single;

public class ReductionExperiment {

    private final int seed;
    private final String dataset;
    private final String nameOfLeftClassifier;
    private final String nameOfInnerClassifier;
    private final String nameOfRightClassifier;
    private String exceptionLeft;
    private String exceptionInner;
    private String exceptionRight;

    public ReductionExperiment(final int seed, final String dataset, final String nameOfLeftClassifier, final String nameOfInnerClassifier, final String nameOfRightClassifier) {
        super();
        this.seed = seed;
        this.dataset = dataset;
        this.nameOfLeftClassifier = nameOfLeftClassifier;
        this.nameOfInnerClassifier = nameOfInnerClassifier;
        this.nameOfRightClassifier = nameOfRightClassifier;
    }

    public ReductionExperiment(final int seed, final String dataset, final String nameOfLeftClassifier, final String nameOfInnerClassifier, final String nameOfRightClassifier, final String exceptionLeft, final String exceptionInner,
            final String exceptionRight) {
        this(seed, dataset, nameOfLeftClassifier, nameOfInnerClassifier, nameOfRightClassifier);
        this.exceptionLeft = exceptionLeft;
        this.exceptionInner = exceptionInner;
        this.exceptionRight = exceptionRight;
    }

    public int getSeed() {
        return this.seed;
    }

    public String getDataset() {
        return this.dataset;
    }

    public String getNameOfLeftClassifier() {
        return this.nameOfLeftClassifier;
    }

    public String getNameOfInnerClassifier() {
        return this.nameOfInnerClassifier;
    }

    public String getNameOfRightClassifier() {
        return this.nameOfRightClassifier;
    }

    public String getExceptionLeft() {
        return this.exceptionLeft;
    }

    public void setExceptionLeft(final String exceptionLeft) {
        this.exceptionLeft = exceptionLeft;
    }

    public String getExceptionInner() {
        return this.exceptionInner;
    }

    public void setExceptionInner(final String exceptionInner) {
        this.exceptionInner = exceptionInner;
    }

    public String getExceptionRight() {
        return this.exceptionRight;
    }

    public void setExceptionRight(final String exceptionRight) {
        this.exceptionRight = exceptionRight;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((this.dataset == null) ? 0 : this.dataset.hashCode());
        result = prime * result + ((this.exceptionInner == null) ? 0 : this.exceptionInner.hashCode());
        result = prime * result + ((this.exceptionLeft == null) ? 0 : this.exceptionLeft.hashCode());
        result = prime * result + ((this.exceptionRight == null) ? 0 : this.exceptionRight.hashCode());
        result = prime * result + ((this.nameOfInnerClassifier == null) ? 0 : this.nameOfInnerClassifier.hashCode());
        result = prime * result + ((this.nameOfLeftClassifier == null) ? 0 : this.nameOfLeftClassifier.hashCode());
        result = prime * result + ((this.nameOfRightClassifier == null) ? 0 : this.nameOfRightClassifier.hashCode());
        result = prime * result + this.seed;
        return result;
    }

    @Override
    public boolean equals(final Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null) {
            return false;
        }
        if (this.getClass() != obj.getClass()) {
            return false;
        }
        ReductionExperiment other = (ReductionExperiment) obj;
        if (this.dataset == null) {
            if (other.dataset != null) {
                return false;
            }
        } else if (!this.dataset.equals(other.dataset)) {
            return false;
        }
        if (this.exceptionInner == null) {
            if (other.exceptionInner != null) {
                return false;
            }
        } else if (!this.exceptionInner.equals(other.exceptionInner)) {
            return false;
        }
        if (this.exceptionLeft == null) {
            if (other.exceptionLeft != null) {
                return false;
            }
        } else if (!this.exceptionLeft.equals(other.exceptionLeft)) {
            return false;
        }
        if (this.exceptionRight == null) {
            if (other.exceptionRight != null) {
                return false;
            }
        } else if (!this.exceptionRight.equals(other.exceptionRight)) {
            return false;
        }
        if (this.nameOfInnerClassifier == null) {
            if (other.nameOfInnerClassifier != null) {
                return false;
            }
        } else if (!this.nameOfInnerClassifier.equals(other.nameOfInnerClassifier)) {
            return false;
        }
        if (this.nameOfLeftClassifier == null) {
            if (other.nameOfLeftClassifier != null) {
                return false;
            }
        } else if (!this.nameOfLeftClassifier.equals(other.nameOfLeftClassifier)) {
            return false;
        }
        if (this.nameOfRightClassifier == null) {
            if (other.nameOfRightClassifier != null) {
                return false;
            }
        } else if (!this.nameOfRightClassifier.equals(other.nameOfRightClassifier)) {
            return false;
        }
        return this.seed == other.seed;
    }

    @Override
    public String toString() {
        return "ReductionExperiment [seed=" + this.seed + ", dataset=" + this.dataset + ", nameOfLeftClassifier=" + this.nameOfLeftClassifier + ", nameOfInnerClassifier=" + this.nameOfInnerClassifier + ", nameOfRightClassifier="
                + this.nameOfRightClassifier + ", exceptionLeft=" + this.exceptionLeft + ", exceptionInner=" + this.exceptionInner + ", exceptionRight=" + this.exceptionRight + "]";
    }
}
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single/confusion/AConfusionBasedAlgorithm.java
package ai.libs.reduction.single.confusion;

import java.util.ArrayList;
import java.util.Collection;

public class AConfusionBasedAlgorithm {

    protected Collection<Integer> incrementCluster(final Collection<Integer> cluster, final double[][] confusionMatrix, final Collection<Integer> blackList) {
        int leastSeenPenalty = Integer.MAX_VALUE;
        int choice = -1;
        for (int cId = 0; cId < confusionMatrix.length; cId++) {
            if (cluster.contains(cId) || blackList.contains(cId)) {
                continue;
            }
            int addedPenalty = 0;
            for (int i = 0; i < confusionMatrix.length; i++) {
                addedPenalty += confusionMatrix[i][cId];
                addedPenalty += confusionMatrix[cId][i];
            }
            if (addedPenalty < leastSeenPenalty) {
                leastSeenPenalty = addedPenalty;
                choice = cId;
            }
        }
        Collection<Integer> newCluster = new ArrayList<>(cluster);
        if (choice < 0) {
            return newCluster;
        }
        newCluster.add(choice);
        return newCluster;
    }

    protected int getPenaltyOfCluster(final Collection<Integer> cluster, final double[][] confusionMatrix) {
        int sum = 0;
        for (int i : cluster) {
            for (int j : cluster) {
                if (i != j) {
                    sum += confusionMatrix[i][j];
                }
            }
        }
        return sum;
    }

    protected int getLeastConflictingClass(final double[][] confusionMatrix, final Collection<Integer> blackList) {

        /* compute least conflicting class */
        int leastConflictingClass = -1;
        int leastKnownScore = Integer.MAX_VALUE;
        for (int i = 0; i < confusionMatrix.length; i++) {
            if (blackList.contains(i)) {
                continue;
            }
            int sum = 0;
            for (int j = 0; j < confusionMatrix.length; j++) {
                if (i != j) {
                    sum += confusionMatrix[i][j];
                }
            }
            if (sum < leastKnownScore) {
                leastKnownScore = sum;
                leastConflictingClass = i;
            }
        }
        return leastConflictingClass;
    }
}
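A small worked example of the penalty notion (the subclass is hypothetical): the penalty of a cluster is the sum of all confusion-matrix entries between distinct classes inside the cluster, so a zero-penalty cluster is never confused internally.

class PenaltyDemo extends AConfusionBasedAlgorithm {
    int demo() {
        // rows/columns are classes 0..2; off-diagonal entries are confusions
        double[][] cm = { { 10, 2, 0 }, { 1, 12, 0 }, { 0, 0, 9 } };
        // cluster {0, 1}: penalty = cm[0][1] + cm[1][0] = 2 + 1 = 3
        return this.getPenaltyOfCluster(java.util.Arrays.asList(0, 1), cm);
    }
}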
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single/confusion/ConfusionBasedAlgorithm.java
package ai.libs.reduction.single.confusion;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.stream.Collectors;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import ai.libs.jaicore.basic.sets.SetUtil;
import ai.libs.jaicore.ml.WekaUtil;
import ai.libs.jaicore.ml.classification.multiclass.reduction.MCTreeNodeReD;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.core.Instances;

public class ConfusionBasedAlgorithm extends AConfusionBasedAlgorithm {

  private Logger logger = LoggerFactory.getLogger(ConfusionBasedAlgorithm.class);

  public MCTreeNodeReD buildClassifier(final Instances data, final Collection<String> pClassifierNames) throws Exception {
    if (this.logger.isInfoEnabled()) {
      this.logger.info("START: {}", data.relationName());
    }
    int seed = 0;
    Map<String, double[][]> confusionMatrices = new HashMap<>();
    int numClasses = data.numClasses();
    this.logger.info("Computing confusion matrices ...");
    for (int i = 0; i < 10; i++) {
      List<Instances> split = WekaUtil.getStratifiedSplit(data, seed, .7f);

      /* compute confusion matrices for each classifier */
      for (String classifier : pClassifierNames) {
        try {
          Classifier c = AbstractClassifier.forName(classifier, null);
          c.buildClassifier(split.get(0));
          Evaluation eval = new Evaluation(split.get(0));
          eval.evaluateModel(c, split.get(1));
          if (!confusionMatrices.containsKey(classifier)) {
            confusionMatrices.put(classifier, new double[numClasses][numClasses]);
          }
          double[][] currentCM = confusionMatrices.get(classifier);
          double[][] addedCM = eval.confusionMatrix();
          for (int j = 0; j < numClasses; j++) {
            for (int k = 0; k < numClasses; k++) {
              currentCM[j][k] += addedCM[j][k];
            }
          }
        } catch (Exception e) {
          this.logger.error("Unexpected exception has been thrown", e);
        }
      }
    }
    this.logger.info("done");

    /* compute zero-conflict sets for each classifier */
    Map<String, Collection<Collection<Integer>>> zeroConflictSets = new HashMap<>();
    for (Entry<String, double[][]> entry : confusionMatrices.entrySet()) {
      zeroConflictSets.put(entry.getKey(), this.getZeroConflictSets(entry.getValue()));
    }

    /* greedily identify the best left and right pair (that make least mistakes) */
    Collection<List<String>> classifierPairs = SetUtil.cartesianProduct(confusionMatrices.keySet(), 2);
    String bestLeft = null;
    String bestRight = null;
    String bestInner = null;
    Collection<Integer> bestLeftClasses = null;
    Collection<Integer> bestRightClasses = null;
    for (List<String> classifierPair : classifierPairs) {
      String c1 = classifierPair.get(0);
      String c2 = classifierPair.get(1);
      Collection<Collection<Integer>> z1 = zeroConflictSets.get(c1);
      Collection<Collection<Integer>> z2 = zeroConflictSets.get(c2);

      /* create candidate split */
      int sizeOfBestCombo = 0;
      for (Collection<Integer> zeroSet1 : z1) {
        for (Collection<Integer> zeroSet2 : z2) {
          Collection<Integer> coveredClassesOfThisPair = SetUtil.union(zeroSet1, zeroSet2);
          if (coveredClassesOfThisPair.size() > sizeOfBestCombo) {
            bestLeft = c1;
            bestRight = c2;
            sizeOfBestCombo = coveredClassesOfThisPair.size();
            bestLeftClasses = zeroSet1;
            bestRightClasses = zeroSet2;
          }
        }
      }
    }

    /* greedily complete the best candidates */
    double[][] cm1 = confusionMatrices.get(bestLeft);
    double[][] cm2 = confusionMatrices.get(bestRight);
    for (int cId = 0; cId < numClasses; cId++) {
      if (!bestLeftClasses.contains(cId) && !bestRightClasses.contains(cId)) {

        /* compute effect of adding this class to the respective clusters */
        Collection<Integer> newBestZ1 = new ArrayList<>(bestLeftClasses);
        newBestZ1.add(cId);
        int p1 = this.getPenaltyOfCluster(newBestZ1, cm1);
        Collection<Integer> newBestZ2 = new ArrayList<>(bestRightClasses);
        newBestZ2.add(cId);
        int p2 = this.getPenaltyOfCluster(newBestZ2, cm2);
        if (p1 < p2) {
          bestLeftClasses = newBestZ1;
        } else {
          bestRightClasses = newBestZ2;
        }
      }
    }
    int p1 = this.getPenaltyOfCluster(bestLeftClasses, cm1);
    int p2 = this.getPenaltyOfCluster(bestRightClasses, cm2);

    /* create the split problem */
    Map<String, String> classMap = new HashMap<>();
    for (int i1 : bestLeftClasses) {
      classMap.put(data.classAttribute().value(i1), "l");
    }
    for (int i2 : bestRightClasses) {
      classMap.put(data.classAttribute().value(i2), "r");
    }
    Instances newData = WekaUtil.getRefactoredInstances(data, classMap);
    List<Instances> binaryInnerSplit = WekaUtil.getStratifiedSplit(newData, seed, .7f);

    /* now identify the classifier that can best separate these two clusters */
    int leastSeenMistakes = Integer.MAX_VALUE;
    for (String classifier : pClassifierNames) {
      try {
        Classifier c = AbstractClassifier.forName(classifier, null);
        c.buildClassifier(binaryInnerSplit.get(0));
        Evaluation eval = new Evaluation(newData);
        eval.evaluateModel(c, binaryInnerSplit.get(1));
        int mistakes = (int) eval.incorrect();
        int overallMistakes = p1 + p2 + mistakes;
        if (overallMistakes < leastSeenMistakes) {
          leastSeenMistakes = overallMistakes;
          this.logger.info("New best system: {}/{}/{} with {}", bestLeft, bestRight, classifier, leastSeenMistakes);
          bestInner = classifier;
        }
      } catch (Exception e) {
        this.logger.error("Exception has been thrown unexpectedly.", e);
      }
    }
    if (bestInner == null) {
      throw new IllegalStateException("No best inner has been chosen!");
    }

    /* now create MCTreeNode with choices */
    MCTreeNodeReD tree = new MCTreeNodeReD(bestInner, bestLeftClasses.stream().map(i -> data.classAttribute().value(i)).collect(Collectors.toList()), bestLeft,
        bestRightClasses.stream().map(i -> data.classAttribute().value(i)).collect(Collectors.toList()), bestRight);
    tree.buildClassifier(data);
    return tree;
  }

  private Collection<Collection<Integer>> getZeroConflictSets(final double[][] confusionMatrix) {
    Collection<Integer> blackList = new ArrayList<>();
    Collection<Collection<Integer>> partitions = new ArrayList<>();
    int leastConflictingClass = -1;
    do {
      leastConflictingClass = this.getLeastConflictingClass(confusionMatrix, blackList);
      if (leastConflictingClass >= 0) {
        Collection<Integer> cluster = new ArrayList<>();
        cluster.add(leastConflictingClass);
        do {
          Collection<Integer> newCluster = this.incrementCluster(cluster, confusionMatrix, blackList);
          if (newCluster.size() == cluster.size()) {
            break;
          }
          cluster = newCluster;
          if (cluster.contains(-1)) {
            throw new IllegalStateException("Computed illegal cluster: " + cluster);
          }
        } while (this.getPenaltyOfCluster(cluster, confusionMatrix) == 0 && cluster.size() < confusionMatrix.length);
        blackList.addAll(cluster);
        partitions.add(cluster);
      }
    } while (leastConflictingClass >= 0 && blackList.size() < confusionMatrix.length);
    return partitions;
  }
}
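For orientation, here is a minimal usage sketch for the class above. Only the call to buildClassifier(Instances, Collection<String>) comes from the source; the ARFF path and the candidate classifier names are placeholders chosen for illustration, not values from this repository.

package ai.libs.reduction.single.confusion;

// Hypothetical driver; dataset path and classifier names are illustrative only.
import java.io.FileReader;
import java.util.Arrays;
import java.util.List;

import ai.libs.jaicore.ml.classification.multiclass.reduction.MCTreeNodeReD;
import weka.core.Instances;

public class ConfusionBasedAlgorithmDemo {
  public static void main(String[] args) throws Exception {
    Instances data = new Instances(new FileReader("datasets/vehicle.arff")); // assumed local ARFF file
    data.setClassIndex(data.numAttributes() - 1);
    List<String> candidates = Arrays.asList("weka.classifiers.trees.J48", "weka.classifiers.functions.SMO");
    MCTreeNodeReD stump = new ConfusionBasedAlgorithm().buildClassifier(data, candidates);
    System.out.println("Reduction stump built: " + stump);
  }
}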
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single/confusion/ConfusionBasedGreedyOptimizingAlgorithm.java
package ai.libs.reduction.single.confusion;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.stream.Collectors;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import ai.libs.jaicore.basic.sets.SetUtil;
import ai.libs.jaicore.ml.WekaUtil;
import ai.libs.jaicore.ml.classification.multiclass.reduction.MCTreeNodeReD;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.core.Instances;

public class ConfusionBasedGreedyOptimizingAlgorithm extends AConfusionBasedAlgorithm {

  private static Logger logger = LoggerFactory.getLogger(ConfusionBasedGreedyOptimizingAlgorithm.class);

  public MCTreeNodeReD buildClassifier(final Instances data, final Collection<String> pClassifierNames) throws Exception {
    if (logger.isInfoEnabled()) {
      logger.info("START: {}", data.relationName());
    }
    int seed = 0;
    List<Instances> split = WekaUtil.getStratifiedSplit(data, seed, .7f);
    int numClasses = data.numClasses();

    /* compute confusion matrices for each classifier */
    logger.info("Computing confusion matrices ...");
    Map<String, double[][]> confusionMatrices = new HashMap<>();
    for (String classifier : pClassifierNames) {
      logger.info("\t{} ...", classifier);
      try {
        Classifier c = AbstractClassifier.forName(classifier, null);
        c.buildClassifier(split.get(0));
        Evaluation eval = new Evaluation(split.get(0));
        eval.evaluateModel(c, split.get(1));
        confusionMatrices.put(classifier, eval.confusionMatrix());
      } catch (Exception e) {
        logger.error("Could not train classifier", e);
      }
    }
    logger.info("done");

    /* compute zero-conflict sets for each classifier */
    Map<String, Collection<Collection<Integer>>> zeroConflictSets = new HashMap<>();
    for (Entry<String, double[][]> entry : confusionMatrices.entrySet()) {
      zeroConflictSets.put(entry.getKey(), this.getZeroConflictSets(entry.getValue()));
    }

    /* greedily identify triplets */
    Collection<List<String>> classifierPairs = SetUtil.cartesianProduct(confusionMatrices.keySet(), 2);
    int leastSeenMistakes = Integer.MAX_VALUE;
    String bestLeft = null;
    String bestRight = null;
    String bestInner = null;
    Collection<Integer> bestLeftClasses = null;
    Collection<Integer> bestRightClasses = null;
    int numPair = 0;
    for (List<String> classifierPair : classifierPairs) {
      numPair++;
      String c1 = classifierPair.get(0);
      String c2 = classifierPair.get(1);
      logger.info("\tConsidering {}/{} ({}/{})", c1, c2, numPair, classifierPairs.size());
      double[][] cm1 = confusionMatrices.get(c1);
      double[][] cm2 = confusionMatrices.get(c2);
      Collection<Collection<Integer>> z1 = zeroConflictSets.get(c1);
      Collection<Collection<Integer>> z2 = zeroConflictSets.get(c2);

      /* create candidate split */
      int sizeOfBestCombo = 0;
      Collection<Integer> bestZ1 = null;
      Collection<Integer> bestZ2 = null;
      for (Collection<Integer> zeroSet1 : z1) {
        for (Collection<Integer> zeroSet2 : z2) {
          Collection<Integer> coveredClassesOfThisPair = SetUtil.union(zeroSet1, zeroSet2);
          if (coveredClassesOfThisPair.size() > sizeOfBestCombo) {
            sizeOfBestCombo = coveredClassesOfThisPair.size();
            bestZ1 = zeroSet1;
            bestZ2 = zeroSet2;
          }
        }
      }

      /* greedily complete these candidates */
      for (int cId = 0; cId < numClasses; cId++) {
        if (!bestZ1.contains(cId) && !bestZ2.contains(cId)) {

          /* compute effect of adding this class to the respective clusters */
          Collection<Integer> newBestZ1 = new ArrayList<>(bestZ1);
          newBestZ1.add(cId);
          int p1 = this.getPenaltyOfCluster(newBestZ1, cm1);
          Collection<Integer> newBestZ2 = new ArrayList<>(bestZ2);
          newBestZ2.add(cId);
          int p2 = this.getPenaltyOfCluster(newBestZ2, cm2);
          if (p1 < p2) {
            bestZ1 = newBestZ1;
          } else {
            bestZ2 = newBestZ2;
          }
        }
      }
      int p1 = this.getPenaltyOfCluster(bestZ1, cm1);
      int p2 = this.getPenaltyOfCluster(bestZ2, cm2);

      /* create the split problem */
      Map<String, String> classMap = new HashMap<>();
      for (int i1 : bestZ1) {
        classMap.put(data.classAttribute().value(i1), "l");
      }
      for (int i2 : bestZ2) {
        classMap.put(data.classAttribute().value(i2), "r");
      }
      Instances newData = WekaUtil.getRefactoredInstances(data, classMap);
      List<Instances> binaryInnerSplit = WekaUtil.getStratifiedSplit(newData, seed, .7f);

      /* now identify the classifier that can best separate these two clusters */
      for (String classifier : pClassifierNames) {
        try {
          logger.info("\t\tConsidering {}/{}/{}", c1, c2, classifier);
          Classifier c = AbstractClassifier.forName(classifier, null);
          c.buildClassifier(binaryInnerSplit.get(0));
          Evaluation eval = new Evaluation(newData);
          eval.evaluateModel(c, binaryInnerSplit.get(1));
          int mistakes = (int) eval.incorrect();
          int overallMistakes = p1 + p2 + mistakes;
          if (overallMistakes < leastSeenMistakes) {
            leastSeenMistakes = overallMistakes;
            logger.info("New best system: {}/{}/{} with {}", c1, c2, classifier, leastSeenMistakes);
            bestLeftClasses = bestZ1;
            bestRightClasses = bestZ2;
            bestLeft = c1;
            bestRight = c2;
            bestInner = classifier;
          }
        } catch (Exception e) {
          logger.error("Encountered error", e);
        }
      }
    }
    if (bestLeftClasses == null) {
      throw new IllegalStateException("Best left classes must not be null");
    }

    /* now create MCTreeNode with choices */
    MCTreeNodeReD tree = new MCTreeNodeReD(bestInner, bestLeftClasses.stream().map(i -> data.classAttribute().value(i)).collect(Collectors.toList()), bestLeft,
        bestRightClasses.stream().map(i -> data.classAttribute().value(i)).collect(Collectors.toList()), bestRight);
    tree.buildClassifier(data);
    return tree;
  }

  private Collection<Collection<Integer>> getZeroConflictSets(final double[][] confusionMatrix) {
    Collection<Integer> blackList = new ArrayList<>();
    Collection<Collection<Integer>> partitions = new ArrayList<>();
    int leastConflictingClass = -1;
    do {
      leastConflictingClass = this.getLeastConflictingClass(confusionMatrix, blackList);
      if (leastConflictingClass >= 0) {
        Collection<Integer> cluster = new ArrayList<>();
        cluster.add(leastConflictingClass);
        do {
          cluster = this.incrementCluster(cluster, confusionMatrix, blackList);
          if (cluster.contains(-1)) {
            throw new IllegalStateException("Computed illegal cluster: " + cluster);
          }
        } while (this.getPenaltyOfCluster(cluster, confusionMatrix) == 0);
        blackList.addAll(cluster);
        partitions.add(cluster);
      }
    } while (leastConflictingClass >= 0);
    return partitions;
  }
}
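The helpers getLeastConflictingClass, incrementCluster, and getPenaltyOfCluster live in AConfusionBasedAlgorithm, which is not part of this dump. One reading consistent with the accounting above (overallMistakes = p1 + p2 + inner mistakes) is that a cluster's penalty is the confusion mass among its own classes, since cross-cluster confusion is the inner classifier's job. The sketch below implements that reading; it is an interpretation, not the library's verified code.

// Sketch (assumed semantics): penalty of a cluster = instances of a cluster
// class that the classifier confused with another class of the same cluster.
import java.util.Arrays;
import java.util.Collection;

final class ClusterPenaltySketch {
  static double penalty(Collection<Integer> cluster, double[][] cm) {
    double p = 0;
    for (int i : cluster) {
      for (int j : cluster) {
        if (i != j) {
          p += cm[i][j]; // intra-cluster confusion cannot be fixed by the inner split
        }
      }
    }
    return p;
  }

  public static void main(String[] args) {
    double[][] cm = { { 10, 1, 0 }, { 2, 8, 0 }, { 0, 0, 9 } };
    System.out.println(penalty(Arrays.asList(0, 1), cm)); // 3.0: classes 0 and 1 confuse each other
  }
}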
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single/heterogeneous
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single/heterogeneous/bestofkrandom/MySQLReductionExperimentRunnerWrapper.java
package ai.libs.reduction.single.heterogeneous.bestofkrandom;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Random;

import ai.libs.jaicore.basic.SQLAdapter;
import ai.libs.jaicore.ml.classification.multiclass.reduction.splitters.RandomSplitter;
import ai.libs.reduction.single.BestOfKAtRandomExperiment;
import ai.libs.reduction.single.ExperimentRunner;
import ai.libs.reduction.single.MySQLReductionExperiment;

public class MySQLReductionExperimentRunnerWrapper {

  private static final String TABLE_NAME = "reductionstumps_heterogeneous_random_bestofk";
  private final SQLAdapter adapter;
  private final Collection<MySQLReductionExperiment> knownExperiments = new HashSet<>();
  private final int k;
  private final int mccvrepeats;

  public MySQLReductionExperimentRunnerWrapper(String host, String user, String password, String database, int k, int mccvRepeats) {
    adapter = new SQLAdapter(host, user, password, database);
    this.k = k;
    this.mccvrepeats = mccvRepeats;
    try {
      knownExperiments.addAll(getConductedExperiments());
    } catch (SQLException e) {
      e.printStackTrace();
    }
  }

  public Collection<MySQLReductionExperiment> getConductedExperiments() throws SQLException {
    Collection<MySQLReductionExperiment> experiments = new HashSet<>();
    ResultSet rs = adapter.getRowsOfTable(TABLE_NAME);
    while (rs.next()) {
      experiments.add(new MySQLReductionExperiment(rs.getInt("evaluation_id"), new BestOfKAtRandomExperiment(rs.getInt("seed"), rs.getString("dataset"), rs.getString("left_classifier"),
          rs.getString("inner_classifier"), rs.getString("right_classifier"), rs.getInt("k"), rs.getInt("mccvrepeats"))));
    }
    return experiments;
  }

  public MySQLReductionExperiment createAndGetExperimentIfNotConducted(int seed, File dataFile, String nameOfLeftClassifier, String nameOfInnerClassifier, String nameOfRightClassifier)
      throws FileNotFoundException, IOException {

    /* first check whether exactly the same experiment (with the same seed) has been conducted previously */
    BestOfKAtRandomExperiment exp = new BestOfKAtRandomExperiment(seed, dataFile.getAbsolutePath(), nameOfLeftClassifier, nameOfInnerClassifier, nameOfRightClassifier, k, mccvrepeats);
    Optional<MySQLReductionExperiment> existingExperiment = knownExperiments.stream().filter(e -> e.getExperiment().equals(exp)).findAny();
    if (existingExperiment.isPresent()) {
      return null;
    }

    Map<String, Object> map = new HashMap<>();
    map.put("seed", seed);
    map.put("dataset", dataFile.getAbsolutePath());
    map.put("left_classifier", nameOfLeftClassifier);
    map.put("inner_classifier", nameOfInnerClassifier);
    map.put("right_classifier", nameOfRightClassifier);
    map.put("k", k);
    map.put("mccvrepeats", mccvrepeats);
    try {
      int id = adapter.insert(TABLE_NAME, map);
      return new MySQLReductionExperiment(id, exp);
    } catch (SQLException e) {
      System.err.println(e.getMessage());
      return null;
    }
  }

  private void updateExperiment(MySQLReductionExperiment exp, Map<String, ? extends Object> values) throws SQLException {
    Map<String, String> where = new HashMap<>();
    where.put("evaluation_id", String.valueOf(exp.getId()));
    adapter.update(TABLE_NAME, values, where);
  }

  public void conductExperiment(MySQLReductionExperiment exp) throws Exception {
    ExperimentRunner<RandomSplitter> runner = new ExperimentRunner<RandomSplitter>(k, mccvrepeats, (seed) -> new RandomSplitter(new Random(seed)));
    Map<String, Object> results = runner.conductSingleOneStepReductionExperiment(exp.getExperiment());
    updateExperiment(exp, results);
  }

  public void markExperimentAsUnsolvable(MySQLReductionExperiment exp) throws SQLException {
    Map<String, String> values = new HashMap<>();
    values.put("errorRate", "-1");
    updateExperiment(exp, values);
  }

  public void associateExperimentWithException(MySQLReductionExperiment exp, Throwable e) throws SQLException {
    Map<String, String> values = new HashMap<>();
    values.put("errorRate", "-1");
    values.put("exception", e.getClass().getName() + "\n" + e.getMessage());
    updateExperiment(exp, values);
  }
}
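A hypothetical driver for the wrapper above: host, credentials, database name, and the data file are placeholders, and the backing table reductionstumps_heterogeneous_random_bestofk is assumed to exist in the target schema.

package ai.libs.reduction.single.heterogeneous.bestofkrandom;

import java.io.File;

import ai.libs.reduction.single.MySQLReductionExperiment;

public class BestOfKRandomDriver {
  public static void main(String[] args) throws Exception {
    // connection parameters and k/mccvRepeats below are illustrative only
    MySQLReductionExperimentRunnerWrapper runner = new MySQLReductionExperimentRunnerWrapper("localhost", "user", "password", "experiments", 10, 5);
    MySQLReductionExperiment exp = runner.createAndGetExperimentIfNotConducted(0, new File("datasets/iris.arff"),
        "weka.classifiers.trees.J48", "weka.classifiers.functions.SMO", "weka.classifiers.bayes.NaiveBayes");
    if (exp != null) { // null signals that this configuration was already in the table
      try {
        runner.conductExperiment(exp);
      } catch (Exception e) {
        runner.associateExperimentWithException(exp, e); // record the failure in the row
      }
    }
  }
}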
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single/heterogeneous
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single/heterogeneous/simplerpnd/MySQLExperimentRunner.java
package ai.libs.reduction.single.heterogeneous.simplerpnd;

import java.io.File;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;

import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;

import ai.libs.jaicore.basic.SQLAdapter;
import ai.libs.reduction.Util;
import ai.libs.reduction.single.MySQLReductionExperiment;
import ai.libs.reduction.single.ReductionExperiment;

public class MySQLExperimentRunner {

  private static final String ERROR_RATE_MIN = "error_rate_min";
  private static final String ERROR_RATE_MAX = "error_rate_max";
  private static final String ERROR_RATE_MEAN = "error_rate_mean";
  private static final String ERROR_RATE_STD = "error_rate_std";
  private static final String RUNTIME_MIN = "runtime_min";
  private static final String RUNTIME_MAX = "runtime_max";
  private static final String RUNTIME_MEAN = "runtime_mean";
  private static final String RUNTIME_STD = "runtime_std";
  private static final String TABLE_NAME = "reductionstumps";

  private final SQLAdapter adapter;
  private final Collection<MySQLReductionExperiment> knownExperiments = new HashSet<>();

  public MySQLExperimentRunner(final String host, final String user, final String password, final String database) throws SQLException {
    this.adapter = new SQLAdapter(host, user, password, database);
    this.knownExperiments.addAll(this.getConductedExperiments());
  }

  public Collection<MySQLReductionExperiment> getConductedExperiments() throws SQLException {
    Collection<MySQLReductionExperiment> experiments = new HashSet<>();
    ResultSet rs = this.adapter.getRowsOfTable(TABLE_NAME);
    while (rs.next()) {
      experiments.add(new MySQLReductionExperiment(rs.getInt("evaluation_id"), new ReductionExperiment(rs.getInt("seed"), rs.getString("dataset"), rs.getString("left_classifier"),
          rs.getString("inner_classifier"), rs.getString("right_classifier"), rs.getString("exception_left"), rs.getString("exception_inner"), rs.getString("exception_right"))));
    }
    return experiments;
  }

  public MySQLReductionExperiment createAndGetExperimentIfNotConducted(final int seed, final File dataFile, final String nameOfLeftClassifier, final String nameOfInnerClassifier,
      final String nameOfRightClassifier) throws SQLException {

    /* first check whether exactly the same experiment (with the same seed) has been conducted previously */
    ReductionExperiment exp = new ReductionExperiment(seed, dataFile.getAbsolutePath(), nameOfLeftClassifier, nameOfInnerClassifier, nameOfRightClassifier);
    Optional<MySQLReductionExperiment> existingExperiment = this.knownExperiments.stream().filter(e -> e.getExperiment().equals(exp)).findAny();
    if (existingExperiment.isPresent()) {
      return null;
    }

    Map<String, String> map = new HashMap<>();
    map.put("seed", String.valueOf(seed));
    map.put("dataset", dataFile.getAbsolutePath());
    map.put("rpnd_classifier", nameOfInnerClassifier);
    map.put("left_classifier", nameOfLeftClassifier);
    map.put("inner_classifier", nameOfInnerClassifier);
    map.put("right_classifier", nameOfRightClassifier);
    int id = this.adapter.insert(TABLE_NAME, map);
    return new MySQLReductionExperiment(id, exp);
  }

  private void updateExperiment(final MySQLReductionExperiment exp, final Map<String, ? extends Object> values) throws SQLException {
    Map<String, String> where = new HashMap<>();
    where.put("evaluation_id", String.valueOf(exp.getId()));
    this.adapter.update(TABLE_NAME, values, where);
  }

  public void conductExperiment(final MySQLReductionExperiment exp) throws Exception {
    List<Map<String, Object>> mccvResults = Util.conductSingleOneStepReductionExperiment(exp.getExperiment());
    DescriptiveStatistics errorRate = new DescriptiveStatistics();
    DescriptiveStatistics runtime = new DescriptiveStatistics();
    for (Map<String, Object> result : mccvResults) {
      errorRate.addValue((double) result.get("errorRate"));
      runtime.addValue((long) result.get("trainTime"));
    }

    /* prepare values for experiment update */
    Map<String, Object> values = new HashMap<>();
    values.put(ERROR_RATE_MIN, errorRate.getMin());
    values.put(ERROR_RATE_MAX, errorRate.getMax());
    values.put(ERROR_RATE_MEAN, errorRate.getMean());
    values.put(ERROR_RATE_STD, errorRate.getStandardDeviation());
    values.put(RUNTIME_MIN, runtime.getMin());
    values.put(RUNTIME_MAX, runtime.getMax());
    values.put(RUNTIME_MEAN, runtime.getMean());
    values.put(RUNTIME_STD, runtime.getStandardDeviation());
    this.updateExperiment(exp, values);
  }

  public void markExperimentAsUnsolvable(final MySQLReductionExperiment exp) throws SQLException {
    Map<String, String> values = new HashMap<>();
    for (String key : new String[] { ERROR_RATE_MIN, ERROR_RATE_MAX, ERROR_RATE_MEAN, ERROR_RATE_STD, RUNTIME_MIN, RUNTIME_MAX, RUNTIME_MEAN, RUNTIME_STD }) {
      values.put(key, "-1");
    }
    this.updateExperiment(exp, values);
  }

  public void associateExperimentWithException(final MySQLReductionExperiment exp, final String classifier, final Throwable e) throws SQLException {
    Map<String, String> values = new HashMap<>();
    for (String key : new String[] { ERROR_RATE_MIN, ERROR_RATE_MAX, ERROR_RATE_MEAN, ERROR_RATE_STD, RUNTIME_MIN, RUNTIME_MAX, RUNTIME_MEAN, RUNTIME_STD }) {
      values.put(key, "-1");
    }
    values.put("exception_" + classifier, e.getClass().getName() + "\n" + e.getMessage());
    this.updateExperiment(exp, values);
  }
}
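conductExperiment above folds the per-repeat MCCV results into summary statistics before updating the database row. The self-contained sketch below replays that aggregation with invented numbers, using the same commons-math type.

// Minimal illustration of the min/max/mean/std aggregation; the error rates
// are made up for demonstration, one value per MCCV repeat.
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;

public class AggregationSketch {
  public static void main(String[] args) {
    DescriptiveStatistics errorRate = new DescriptiveStatistics();
    for (double e : new double[] { 0.12, 0.10, 0.15 }) {
      errorRate.addValue(e);
    }
    System.out.printf("min=%.2f max=%.2f mean=%.3f std=%.4f%n",
        errorRate.getMin(), errorRate.getMax(), errorRate.getMean(), errorRate.getStandardDeviation());
  }
}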
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single/homogeneous
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/reduction/single/homogeneous/bestofkatrandom/MySQLReductionExperimentRunnerWrapper.java
package ai.libs.reduction.single.homogeneous.bestofkatrandom;

import java.io.File;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Random;

import ai.libs.jaicore.basic.SQLAdapter;
import ai.libs.jaicore.ml.classification.multiclass.reduction.splitters.RandomSplitter;
import ai.libs.reduction.single.BestOfKAtRandomExperiment;
import ai.libs.reduction.single.ExperimentRunner;
import ai.libs.reduction.single.MySQLReductionExperiment;

public class MySQLReductionExperimentRunnerWrapper {

  private static final String KEY_CLASSIFIER = "classifier";
  private static final String TABLE_NAME = "reductionstumps_homogeneous_random_bestofk";
  private final SQLAdapter adapter;
  private final Collection<MySQLReductionExperiment> knownExperiments = new HashSet<>();
  private final int k;
  private final int mccvrepeats;

  public MySQLReductionExperimentRunnerWrapper(final String host, final String user, final String password, final String database, final int k, final int mccvRepeats) throws SQLException {
    this.adapter = new SQLAdapter(host, user, password, database);
    this.k = k;
    this.mccvrepeats = mccvRepeats;
    this.knownExperiments.addAll(this.getConductedExperiments());
  }

  public Collection<MySQLReductionExperiment> getConductedExperiments() throws SQLException {
    Collection<MySQLReductionExperiment> experiments = new HashSet<>();
    ResultSet rs = this.adapter.getRowsOfTable(TABLE_NAME);
    while (rs.next()) {
      experiments.add(new MySQLReductionExperiment(rs.getInt("evaluation_id"), new BestOfKAtRandomExperiment(rs.getInt("seed"), rs.getString("dataset"), rs.getString(KEY_CLASSIFIER),
          rs.getString(KEY_CLASSIFIER), rs.getString(KEY_CLASSIFIER), rs.getInt("k"), rs.getInt("mccvrepeats"))));
    }
    return experiments;
  }

  public MySQLReductionExperiment createAndGetExperimentIfNotConducted(final int seed, final File dataFile, final String nameOfClassifier) throws SQLException {

    /* first check whether exactly the same experiment (with the same seed) has been conducted previously */
    BestOfKAtRandomExperiment exp = new BestOfKAtRandomExperiment(seed, dataFile.getAbsolutePath(), nameOfClassifier, nameOfClassifier, nameOfClassifier, this.k, this.mccvrepeats);
    Optional<MySQLReductionExperiment> existingExperiment = this.knownExperiments.stream().filter(e -> e.getExperiment().equals(exp)).findAny();
    if (existingExperiment.isPresent()) {
      return null;
    }

    Map<String, Object> map = new HashMap<>();
    map.put("seed", seed);
    map.put("dataset", dataFile.getAbsolutePath());
    map.put(KEY_CLASSIFIER, nameOfClassifier);
    map.put("k", this.k);
    map.put("mccvrepeats", this.mccvrepeats);
    int id = this.adapter.insert(TABLE_NAME, map);
    return new MySQLReductionExperiment(id, exp);
  }

  private void updateExperiment(final MySQLReductionExperiment exp, final Map<String, ? extends Object> values) throws SQLException {
    Map<String, String> where = new HashMap<>();
    where.put("evaluation_id", String.valueOf(exp.getId()));
    this.adapter.update(TABLE_NAME, values, where);
  }

  public void conductExperiment(final MySQLReductionExperiment exp) throws Exception {
    ExperimentRunner<RandomSplitter> runner = new ExperimentRunner<>(this.k, this.mccvrepeats, seed -> new RandomSplitter(new Random(seed)));
    Map<String, Object> results = runner.conductSingleOneStepReductionExperiment(exp.getExperiment());
    this.updateExperiment(exp, results);
  }

  public void markExperimentAsUnsolvable(final MySQLReductionExperiment exp) throws SQLException {
    Map<String, String> values = new HashMap<>();
    values.put("errorRate", "-1");
    this.updateExperiment(exp, values);
  }

  public void associateExperimentWithException(final MySQLReductionExperiment exp, final Throwable e) throws SQLException {
    Map<String, String> values = new HashMap<>();
    values.put("errorRate", "-1");
    values.put("exception", e.getClass().getName() + "\n" + e.getMessage());
    this.updateExperiment(exp, values);
  }
}
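The deduplication in createAndGetExperimentIfNotConducted relies on value equality of the experiment record, so BestOfKAtRandomExperiment must implement equals/hashCode over all defining fields. The sketch below mimics that contract with a simplified stand-in type; the real class has more fields.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;

public class DedupSketch {
  static final class Experiment { // simplified stand-in for BestOfKAtRandomExperiment
    final int seed;
    final String dataset;
    Experiment(int seed, String dataset) { this.seed = seed; this.dataset = dataset; }
    @Override public boolean equals(Object o) {
      return o instanceof Experiment && ((Experiment) o).seed == seed && Objects.equals(((Experiment) o).dataset, dataset);
    }
    @Override public int hashCode() { return Objects.hash(seed, dataset); }
  }

  public static void main(String[] args) {
    Set<Experiment> known = new HashSet<>(Arrays.asList(new Experiment(0, "iris.arff")));
    Experiment candidate = new Experiment(0, "iris.arff");
    Optional<Experiment> existing = known.stream().filter(e -> e.equals(candidate)).findAny();
    System.out.println(existing.isPresent() ? "already conducted, skip" : "new experiment");
  }
}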
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/AbstractMultiSearch.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * MultiSearch.java
 * Copyright (C) 2008-2017 University of Waikato, Hamilton, New Zealand
 */

package meka.classifiers;

import meka.core.multisearch.MekaEvaluationFactory;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.RandomizableSingleClassifierEnhancer;
import weka.classifiers.meta.multisearch.AbstractEvaluationFactory;
import weka.classifiers.meta.multisearch.AbstractEvaluationMetrics;
import weka.classifiers.meta.multisearch.AbstractSearch;
import weka.classifiers.meta.multisearch.AbstractSearch.SearchResult;
import weka.classifiers.meta.multisearch.DefaultSearch;
import weka.classifiers.meta.multisearch.MultiSearchCapable;
import weka.classifiers.meta.multisearch.Performance;
import weka.classifiers.meta.multisearch.PerformanceComparator;
import weka.classifiers.meta.multisearch.TraceableOptimizer;
import weka.core.AdditionalMeasureProducer;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Debug;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.SelectedTag;
import weka.core.SetupGenerator;
import weka.core.Summarizable;
import weka.core.Tag;
import weka.core.Utils;
import weka.core.setupgenerator.AbstractParameter;
import weka.core.setupgenerator.ParameterGroup;
import weka.core.setupgenerator.Point;
import weka.core.setupgenerator.Space;

import java.io.File;
import java.io.Serializable;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.Vector;

/**
 <!-- globalinfo-start -->
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 <!-- options-end -->
 *
 * @author fracpete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 4521 $
 */
public abstract class AbstractMultiSearch extends RandomizableSingleClassifierEnhancer
  implements MultiSearchCapable, AdditionalMeasureProducer, Summarizable, TraceableOptimizer {

  /** for serialization. */
  private static final long serialVersionUID = -5129316523575906233L;

  /** the Classifier with the best setup. */
  protected SearchResult m_BestClassifier;

  /** the evaluation factory to use. */
  protected AbstractEvaluationFactory m_Factory;

  /** the metrics to use. */
  protected AbstractEvaluationMetrics m_Metrics;

  /** the type of evaluation. */
  protected int m_Evaluation;

  /** the log file to use. */
  protected File m_LogFile = new File(".");

  /** the default parameters. */
  protected AbstractParameter[] m_DefaultParameters;

  /** the parameters. */
  protected AbstractParameter[] m_Parameters;

  /** the search algorithm. */
  protected AbstractSearch m_Algorithm;

  /** the current setup generator. */
  protected SetupGenerator m_Generator;

  /** for tracking the setups. */
  protected List<Entry<Integer, Performance>> m_Trace;

  /**
   * the default constructor.
   */
  public AbstractMultiSearch() {
    super();

    m_Factory = newFactory();
    m_Metrics = m_Factory.newMetrics();
    m_Evaluation = m_Metrics.getDefaultMetric();
    m_Classifier = defaultClassifier();
    m_DefaultParameters = defaultSearchParameters();
    m_Parameters = defaultSearchParameters();
    m_Algorithm = defaultAlgorithm();
    m_Trace = new ArrayList<Entry<Integer, Performance>>();

    try {
      m_BestClassifier = new SearchResult();
      m_BestClassifier.classifier = AbstractClassifier.makeCopy(m_Classifier);
    } catch (Exception e) {
      System.err.println("Failed to create copy of default classifier!");
      e.printStackTrace();
    }
  }

  /**
   * Returns a string describing classifier.
   *
   * @return a description suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "Performs a search of an arbitrary number of parameters of a classifier "
      + "and chooses the best combination found.\n"
      + "The properties being explored are totally up to the user.\n"
      + "\n"
      + "E.g., if you have a FilteredClassifier selected as base classifier, "
      + "sporting a PLSFilter and you want to explore the number of PLS components, "
      + "then your property will be made up of the following components:\n"
      + " - filter: referring to the FilteredClassifier's property (= PLSFilter)\n"
      + " - numComponents: the actual property of the PLSFilter that we want to modify\n"
      + "And assembled, the property looks like this:\n"
      + " filter.numComponents\n"
      + "\n"
      + "\n"
      + "The best classifier setup can be accessed after the buildClassifier "
      + "call via the getBestClassifier method.\n"
      + "\n"
      + "The trace of setups evaluated can be accessed after the buildClassifier "
      + "call as well, using the following methods:\n"
      + "- getTrace()\n"
      + "- getTraceSize()\n"
      + "- getTraceValue(int)\n"
      + "- getTraceFolds(int)\n"
      + "- getTraceClassifierAsCli(int)\n"
      + "- getTraceParameterSettings(int)\n"
      + "\n"
      + "Using the " + ParameterGroup.class.getName() + " parameter, it is "
      + "possible to group dependent parameters. In this case, all top-level "
      + "parameters must be of type " + ParameterGroup.class.getName() + ".";
  }

  /**
   * String describing default classifier.
   *
   * @return the classname of the default classifier
   */
  @Override
  protected String defaultClassifierString() {
    return defaultClassifier().getClass().getName();
  }

  /**
   * Returns the default classifier to use.
   *
   * @return the default classifier
   */
  protected abstract Classifier defaultClassifier();

  /**
   * Returns the default search parameters.
   *
   * @return the parameters
   */
  protected abstract AbstractParameter[] defaultSearchParameters();

  /**
   * Gets an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  @Override
  public Enumeration listOptions() {
    Vector result;
    Enumeration en;
    String desc;
    SelectedTag tag;
    int i;

    result = new Vector();

    desc = "";
    for (i = 0; i < m_Metrics.getTags().length; i++) {
      tag = new SelectedTag(m_Metrics.getTags()[i].getID(), m_Metrics.getTags());
      desc += "\t" + tag.getSelectedTag().getIDStr() + " = " + tag.getSelectedTag().getReadable() + "\n";
    }
    result.addElement(new Option(
        "\tDetermines the parameter used for evaluation:\n"
            + desc
            + "\t(default: " + new SelectedTag(m_Metrics.getDefaultMetric(), m_Metrics.getTags()) + ")",
        "E", 1, "-E " + Tag.toOptionList(m_Metrics.getTags())));

    result.addElement(new Option(
        "\tA property search setup.\n",
        "search", 1, "-search \"<classname options>\""));

    result.addElement(new Option(
        "\tA search algorithm.\n",
        "algorithm", 1, "-algorithm \"<classname options>\""));

    result.addElement(new Option(
        "\tThe log file to log the messages to.\n"
            + "\t(default: none)",
        "log-file", 1, "-log-file <filename>"));

    en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement(en.nextElement());

    return result.elements();
  }

  /**
   * returns the options of the current setup.
   *
   * @return the current options
   */
  @Override
  public String[] getOptions() {
    int i;
    Vector<String> result;
    String[] options;

    result = new Vector<String>();

    result.add("-E");
    result.add("" + getEvaluation());

    for (i = 0; i < getSearchParameters().length; i++) {
      result.add("-search");
      result.add(getCommandline(getSearchParameters()[i]));
    }

    result.add("-algorithm");
    result.add(getCommandline(m_Algorithm));

    result.add("-log-file");
    result.add("" + getLogFile());

    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);

    return result.toArray(new String[result.size()]);
  }

  /**
   * Parses the options for this object.
   *
   * @param options the options to use
   * @throws Exception if setting of options fails
   */
  @Override
  public void setOptions(String[] options) throws Exception {
    String tmpStr;
    String[] tmpOptions;
    Vector<String> search;
    int i;
    AbstractParameter[] params;

    tmpStr = Utils.getOption('E', options);
    if (tmpStr.length() != 0)
      setEvaluation(new SelectedTag(tmpStr, m_Metrics.getTags()));
    else
      setEvaluation(new SelectedTag(m_Metrics.getDefaultMetric(), m_Metrics.getTags()));

    search = new Vector<String>();
    do {
      tmpStr = Utils.getOption("search", options);
      if (tmpStr.length() > 0)
        search.add(tmpStr);
    } while (tmpStr.length() > 0);
    if (search.size() == 0) {
      for (i = 0; i < m_DefaultParameters.length; i++)
        search.add(getCommandline(m_DefaultParameters[i]));
    }
    params = new AbstractParameter[search.size()];
    for (i = 0; i < search.size(); i++) {
      tmpOptions = Utils.splitOptions(search.get(i));
      tmpStr = tmpOptions[0];
      tmpOptions[0] = "";
      params[i] = (AbstractParameter) Utils.forName(AbstractParameter.class, tmpStr, tmpOptions);
    }
    setSearchParameters(params);

    tmpStr = Utils.getOption("algorithm", options);
    if (!tmpStr.isEmpty()) {
      tmpOptions = Utils.splitOptions(tmpStr);
      tmpStr = tmpOptions[0];
      tmpOptions[0] = "";
      setAlgorithm((AbstractSearch) Utils.forName(AbstractSearch.class, tmpStr, tmpOptions));
    } else {
      setAlgorithm(new DefaultSearch());
    }

    tmpStr = Utils.getOption("log-file", options);
    if (tmpStr.length() != 0)
      setLogFile(new File(tmpStr));
    else
      setLogFile(new File(System.getProperty("user.dir")));

    super.setOptions(options);
  }

  /**
   * Set the base learner.
   *
   * @param newClassifier the classifier to use.
   */
  @Override
  public void setClassifier(Classifier newClassifier) {
    super.setClassifier(newClassifier);

    try {
      m_BestClassifier.classifier = AbstractClassifier.makeCopy(m_Classifier);
    } catch (Exception e) {
      e.printStackTrace();
    }
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String searchParametersTipText() {
    return "Defines the search parameters.";
  }

  /**
   * Sets the search parameters.
   *
   * @param value the parameters
   */
  public void setSearchParameters(AbstractParameter[] value) {
    m_Parameters = value;
  }

  /**
   * Returns the search parameters.
   *
   * @return the parameters
   */
  public AbstractParameter[] getSearchParameters() {
    return m_Parameters;
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String algorithmTipText() {
    return "Defines the search algorithm.";
  }

  /**
   * Sets the search algorithm.
   *
   * @param value the algorithm
   */
  public void setAlgorithm(AbstractSearch value) {
    m_Algorithm = value;
  }

  /**
   * Returns the search algorithm.
   *
   * @return the algorithm
   */
  public AbstractSearch getAlgorithm() {
    return m_Algorithm;
  }

  /**
   * Creates the default search algorithm.
   *
   * @return the algorithm
   */
  public AbstractSearch defaultAlgorithm() {
    DefaultSearch result;

    result = new DefaultSearch();

    return result;
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String evaluationTipText() {
    return "Sets the criterion for evaluating the classifier performance and "
        + "choosing the best one.";
  }

  /**
   * Returns the underlying tags.
   *
   * @return the tags
   */
  public Tag[] getMetricsTags() {
    return m_Metrics.getTags();
  }

  /**
   * Sets the criterion to use for evaluating the classifier performance.
   *
   * @param value the evaluation criterion
   */
  public void setEvaluation(SelectedTag value) {
    if (value.getTags() == m_Metrics.getTags()) {
      m_Evaluation = value.getSelectedTag().getID();
    }
  }

  /**
   * Gets the criterion used for evaluating the classifier performance.
   *
   * @return the current evaluation criterion.
   */
  public SelectedTag getEvaluation() {
    return new SelectedTag(m_Evaluation, m_Metrics.getTags());
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String logFileTipText() {
    return "The log file to log the messages to.";
  }

  /**
   * Gets current log file.
   *
   * @return the log file.
   */
  public File getLogFile() {
    return m_LogFile;
  }

  /**
   * Sets the log file to use.
   *
   * @param value the log file.
   */
  public void setLogFile(File value) {
    m_LogFile = value;
  }

  /**
   * Returns the integer index.
   *
   * @param upper the maximum to use
   * @return always 0
   */
  @Override
  public int getClassLabelIndex(int upper) {
    return 0;
  }

  /**
   * returns the best Classifier setup.
   *
   * @return the best Classifier setup
   */
  public Classifier getBestClassifier() {
    return m_BestClassifier.classifier;
  }

  /**
   * Returns the setup generator.
   *
   * @return the generator
   */
  public SetupGenerator getGenerator() {
    return m_Generator;
  }

  /**
   * Returns an enumeration of the measure names.
   *
   * @return an enumeration of the measure names
   */
  public Enumeration enumerateMeasures() {
    Vector result;
    int i;

    result = new Vector();
    if (getBestValues() != null) {
      for (i = 0; i < getBestValues().dimensions(); i++) {
        if (getBestValues().getValue(i) instanceof Double)
          result.add("measure-" + i);
      }
    }

    return result.elements();
  }

  /**
   * Returns the value of the named measure.
   *
   * @param measureName the name of the measure to query for its value
   * @return the value of the named measure
   */
  public double getMeasure(String measureName) {
    if (measureName.startsWith("measure-"))
      return (Double) getBestValues().getValue(Integer.parseInt(measureName.replace("measure-", "")));
    else
      throw new IllegalArgumentException("Measure '" + measureName + "' not supported!");
  }

  /**
   * Returns the evaluation factory to use.
   *
   * @return the factory
   */
  protected AbstractEvaluationFactory newFactory() {
    return new MekaEvaluationFactory();
  }

  /**
   * Returns the factory instance.
   *
   * @return the factory
   */
  public AbstractEvaluationFactory getFactory() {
    return m_Factory;
  }

  /**
   * Returns the evaluation metrics.
   *
   * @return the metrics
   */
  public AbstractEvaluationMetrics getMetrics() {
    return m_Metrics;
  }

  /**
   * returns the parameter values that were found to work best.
   *
   * @return the best parameter combination
   */
  public Point<Object> getBestValues() {
    return m_BestClassifier.values;
  }

  /**
   * returns the points that were found to work best.
   *
   * @return the best points
   */
  public Point<Object> getBestCoordinates() {
    return m_BestClassifier.performance.getValues();
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  @Override
  public Capabilities getCapabilities() {
    Capabilities result;
    Capabilities classes;
    Iterator iter;
    Capability capab;

    result = super.getCapabilities();

    // only nominal and numeric classes allowed
    classes = result.getClassCapabilities();
    iter = classes.capabilities();
    while (iter.hasNext()) {
      capab = (Capability) iter.next();
      if ((capab != Capability.BINARY_CLASS)
          && (capab != Capability.NOMINAL_CLASS)
          && (capab != Capability.NUMERIC_CLASS)
          && (capab != Capability.DATE_CLASS))
        result.disable(capab);
    }

    // set dependencies
    for (Capability cap : Capability.values())
      result.enableDependency(cap);

    if (result.getMinimumNumberInstances() < 1)
      result.setMinimumNumberInstances(1);

    result.setOwner(this);

    return result;
  }

  /**
   * Returns the commandline of the given object.
   *
   * @param obj the object to create the commandline for
   * @return the commandline
   */
  public String getCommandline(Object obj) {
    String result;

    result = obj.getClass().getName();
    if (obj instanceof OptionHandler)
      result += " " + Utils.joinOptions(((OptionHandler) obj).getOptions());

    return result.trim();
  }

  /**
   * prints the specified message to stdout if debug is on and can also dump
   * the message to a log file.
   *
   * @param message the message to print or store in a log file
   */
  public void log(String message) {
    log(message, false);
  }

  /**
   * prints the specified message to stdout if debug is on and can also dump
   * the message to a log file.
   *
   * @param message the message to print or store in a log file
   * @param onlyLog if true the message will only be put into the log file
   *                but not to stdout
   */
  public void log(String message, boolean onlyLog) {
    // print to stdout?
    if (getDebug() && (!onlyLog))
      System.out.println(message);

    // log file?
    if (!getLogFile().isDirectory())
      Debug.writeToFile(getLogFile().getAbsolutePath(), message, true);
  }

  /**
   * generates a table string for all the performances in the space and returns
   * that.
   *
   * @param space        the current space to align the performances to
   * @param performances the performances to align
   * @param type         the type of performance
   * @return the table string
   */
  public String logPerformances(Space space, Vector<Performance> performances, Tag type) {
    StringBuffer result;
    int i;

    result = new StringBuffer(type.getReadable() + ":\n");
    result.append(space.toString());
    result.append("\n");

    for (i = 0; i < performances.size(); i++) {
      result.append(performances.get(i).getPerformance(type.getID()));
      result.append("\n");
    }
    result.append("\n");

    return result.toString();
  }

  /**
   * aligns all performances in the space and prints those tables to the log
   * file.
   *
   * @param space        the current space to align the performances to
   * @param performances the performances to align
   */
  public void logPerformances(Space space, Vector<Performance> performances) {
    int i;

    for (i = 0; i < m_Metrics.getTags().length; i++)
      log("\n" + logPerformances(space, performances, m_Metrics.getTags()[i]), true);
  }

  /**
   * Returns the size of m_Trace, which is technically the amount of
   * setups that were tested in order to find the best.
   */
  public int getTraceSize() {
    return m_Trace.size();
  }

  /**
   * Returns the CLI string of a given item in the trace.
   *
   * @param index the index of the trace item to obtain
   */
  public String getTraceClassifierAsCli(int index) {
    return getCommandline(m_Trace.get(index).getValue().getClassifier());
  }

  /**
   * Returns the performance score of a given item in the trace.
   *
   * @param index the index of the trace item to obtain
   */
  public Double getTraceValue(int index) {
    return m_Trace.get(index).getValue().getPerformance();
  }

  /**
   * Returns the parameter settings in structured way
   *
   * @param index the index of the trace item to obtain
   * @return the parameter settings
   */
  public List<Entry<String, Object>> getTraceParameterSettings(int index) {
    List<Entry<String, Object>> parameterSettings = new ArrayList<Entry<String, Object>>();
    List<String> dimensions = m_Algorithm.getSearchDimensions();
    for (int i = 0; i < dimensions.size(); ++i) {
      String parameter = dimensions.get(i);
      Object value = m_Trace.get(index).getValue().getValues().getValue(i);
      Entry<String, Object> current = new AbstractMap.SimpleEntry<String, Object>(parameter, value);
      parameterSettings.add(i, current);
    }
    return parameterSettings;
  }

  /**
   * Returns the folds of a given item in the trace.
   *
   * @param index the index of the trace item to obtain
   */
  public Integer getTraceFolds(int index) {
    return m_Trace.get(index).getKey();
  }

  /**
   * Returns the full trace.
   */
  public List<Entry<Integer, Performance>> getTrace() {
    return m_Trace;
  }

  /**
   * Groups the parameters, i.e., when using ParameterGroup objects.
   *
   * @return the groups
   */
  protected List<AbstractParameter[]> groupParameters() {
    List<AbstractParameter[]> result;
    int groupCount;
    int i;

    result = new ArrayList<AbstractParameter[]>();
    groupCount = 0;
    for (i = 0; i < m_Parameters.length; i++) {
      if (m_Parameters[i] instanceof ParameterGroup)
        groupCount++;
    }
    if ((groupCount > 0) && (m_Parameters.length != groupCount))
      throw new IllegalStateException(
          "Cannot mix " + ParameterGroup.class.getName() + " with other parameter types!");

    if (groupCount > 0) {
      for (i = 0; i < m_Parameters.length; i++)
        result.add(((ParameterGroup) m_Parameters[i]).getParameters());
    } else {
      result.add(m_Parameters);
    }

    return result;
  }

  /**
   * TestCapabilities.
   * Make sure the training data is suitable.
   * @param D the data
   */
  public void testCapabilities(Instances D) throws Exception {
    // get the classifier's capabilities, enable all class attributes and do the usual test
    Capabilities cap = getCapabilities();
    cap.enableAllClasses();
    //getCapabilities().testWithFail(D);
    // get the capabilities again, test class attributes individually
    int L = D.classIndex();
    for (int j = 0; j < L; j++) {
      Attribute c = D.attribute(j);
      cap.testWithFail(c, true);
    }
  }

  /**
   * builds the classifier.
   *
   * @param data the training instances
   * @throws Exception if something goes wrong
   */
  public void buildClassifier(Instances data) throws Exception {
    int i;
    SearchResult result;
    List<AbstractParameter[]> groups;
    List<SearchResult> results;
    PerformanceComparator comp;

    // can classifier handle the data?
    testCapabilities(data);

    // remove instances with missing class
    data = new Instances(data);
    data.deleteWithMissingClass();

    m_Trace.clear();
    groups = groupParameters();
    results = new ArrayList<SearchResult>();
    for (i = 0; i < groups.size(); i++) {
      if (groups.size() > 1)
        log("\n---> group #" + (i + 1));
      m_Generator = new SetupGenerator();
      m_Generator.setBaseObject(this);
      m_Generator.setParameters(groups.get(i).clone());
      m_Generator.setBaseObject((Serializable) getClassifier());
      m_Algorithm.setOwner(this);
      result = m_Algorithm.search(data);
      results.add(result);
      m_Trace.addAll(m_Algorithm.getTrace());
    }

    // find best classifier among groups
    result = results.get(0);
    if (results.size() > 1) {
      comp = new PerformanceComparator(getEvaluation().getSelectedTag().getID(), getMetrics());
      for (i = 1; i < results.size(); i++) {
        if (comp.compare(results.get(i).performance, result.performance) < 0)
          result = results.get(i);
      }
    }
    m_BestClassifier = result;

    // train classifier
    log("\n---> train best - start");
    log(Utils.toCommandLine(m_BestClassifier));
    m_Classifier = AbstractClassifier.makeCopy(m_BestClassifier.classifier);
    m_Classifier.buildClassifier(data);
    log("\n---> train best - end");

    if (m_Debug) {
      log("\n---> Trace (format: #. folds/performance - setup)");
      for (i = 0; i < getTraceSize(); i++)
        log((i + 1) + ". " + getTraceFolds(i) + "/" + getTraceValue(i) + " - " + getTraceClassifierAsCli(i));
    }
  }

  /**
   * Returns the distribution for the given instance.
   *
   * @param instance the test instance
   * @return the distribution array
   * @throws Exception if distribution can't be computed successfully
   */
  @Override
  public double[] distributionForInstance(Instance instance) throws Exception {
    return m_Classifier.distributionForInstance(instance);
  }

  /**
   * returns a string representation of the classifier.
   *
   * @return a string representation of the classifier
   */
  @Override
  public String toString() {
    StringBuilder result;
    int i;

    result = new StringBuilder();

    if (getBestValues() == null) {
      result.append("No search performed yet.");
    } else {
      result.append(this.getClass().getName() + ":\n"
          + "Classifier: " + getCommandline(getBestClassifier()) + "\n\n");
      for (i = 0; i < m_Parameters.length; i++)
        result.append((i + 1) + ". parameter: " + m_Parameters[i] + "\n");
      result.append("Evaluation: " + getEvaluation().getSelectedTag().getReadable() + "\n"
          + "Coordinates: " + getBestCoordinates() + "\n");
      result.append("Values: " + getBestValues() + "\n\n" + m_Classifier.toString());

      if (m_Debug) {
        result.append("\n\nTrace (format: #. folds/performance - setup):\n");
        for (i = 0; i < getTraceSize(); i++) {
          result.append("\n" + (i + 1) + ". " + getTraceFolds(i) + "/" + getTraceValue(i) + " - " + getTraceClassifierAsCli(i));
        }
      }
    }

    return result.toString();
  }

  /**
   * Returns a string that summarizes the object.
   *
   * @return the object summarized as a string
   */
  public String toSummaryString() {
    String result;

    result = "Best classifier: " + getCommandline(getBestClassifier());

    return result;
  }

  /**
   * Returns a string representation of the model.
   *
   * @return the model
   */
  public String getModel() {
    return toString();
  }
}
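Since only defaultClassifier() and defaultSearchParameters() are abstract, a concrete subclass is small. The sketch below is an assumption-laden illustration: the MathParameter setters follow the multisearch package's documented API as I recall it, and the property path "ridge" (relative to the base classifier, which buildClassifier above installs as the setup generator's base object) should be verified before use.

package meka.classifiers;

// Hypothetical concrete subclass exploring LinearRegression's ridge constant
// over powers of ten; parameter bounds are illustrative only.
import weka.classifiers.Classifier;
import weka.classifiers.functions.LinearRegression;
import weka.core.setupgenerator.AbstractParameter;
import weka.core.setupgenerator.MathParameter;

public class RidgeMultiSearch extends AbstractMultiSearch {

  private static final long serialVersionUID = 1L;

  @Override
  protected Classifier defaultClassifier() {
    return new LinearRegression();
  }

  @Override
  protected AbstractParameter[] defaultSearchParameters() {
    MathParameter ridge = new MathParameter();
    ridge.setProperty("ridge"); // assumed path; relative to the base classifier
    ridge.setBase(10);
    ridge.setMin(-5);
    ridge.setMax(3);
    ridge.setStep(1);
    ridge.setExpression("pow(BASE,I)"); // evaluates to 1e-5 ... 1e3
    return new AbstractParameter[] { ridge };
  }
}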
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/MultiXClassifier.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * MultiXClassifier.java
 * Copyright (C) 2015 University of Waikato, Hamilton, NZ
 */

package meka.classifiers;

import weka.classifiers.Classifier;
import weka.core.OptionHandler;

/**
 * Interface for multi-label classifiers.
 *
 * @author Joerg Wicker
 * @version $Revision$
 */
public interface MultiXClassifier extends Classifier, OptionHandler {

  /**
   * Set debugging mode.
   *
   * @param debug true if debug output should be printed
   */
  public void setDebug(boolean debug);

  /**
   * Get whether debugging is turned on.
   *
   * @return true if debugging output is on
   */
  public boolean getDebug();

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String debugTipText();

  /**
   * Returns a string representation of the model.
   *
   * @return the model
   */
  public String getModel();
}
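The interface above is purely a contract; the sketch below exercises its debug-flag portion. It assumes meka.classifiers.multilabel.BR implements MultiXClassifier in this MEKA version, which is plausible given the hierarchy but not verified from this dump.

package meka.classifiers;

import meka.classifiers.multilabel.BR;

public class DebugContractDemo {
  public static void main(String[] args) {
    MultiXClassifier h = new BR(); // assumption: BR implements MultiXClassifier here
    h.setDebug(true);
    System.out.println(h.debugTipText()); // tip text shown in the explorer/experimenter GUI
    System.out.println("debug enabled: " + h.getDebug());
  }
}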
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/incremental/IncrementalEvaluation.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package meka.classifiers.incremental;

import meka.classifiers.MultiXClassifier;
import meka.core.*;
import weka.classifiers.UpdateableClassifier;
import weka.core.*;
import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.classifiers.multitarget.MultiTargetClassifier;
import meka.classifiers.multilabel.Evaluation;
import weka.core.converters.AbstractFileSaver;
import weka.core.converters.ArffSaver;
import weka.core.converters.ConverterUtils;

import java.io.File;
import java.util.Arrays;
import java.util.HashMap;
import java.util.ArrayList;
import java.util.Enumeration;

/**
 * IncrementalEvaluation.java - For Evaluating Incremental (Updateable) Classifiers.
 * @author Jesse Read
 * @version September 2015
 */
public class IncrementalEvaluation {

  /**
   * RunExperiment - Build and evaluate a model with command-line options.
   * @param h    a multi-label updateable classifier
   * @param args classifier + dataset options
   */
  public static void runExperiment(MultiXClassifier h, String args[]) {
    try {
      h.setOptions(args);
      Result avg = IncrementalEvaluation.evaluateModel(h, args);
      if (avg != null)
        System.out.println(avg);
    } catch (Exception e) {
      System.err.println("Evaluation exception (" + e + "); failed to run experiment");
      e.printStackTrace();
      printOptions(h.listOptions());
    }
  }

  /**
   * EvaluateModel - Build and evaluate.
   * @param h       a multi-label Updateable classifier
   * @param options dataset options (classifier options should already be set)
   * @return The evaluation Result, null if none produced (eg when just outputting predictions)
   */
  public static Result evaluateModel(MultiXClassifier h, String options[]) throws Exception {

    boolean needPrebuiltModel = false;

    // Load data for non-incremental testing
    Instances test = null;
    if (Utils.getOptionPos('T', options) > -1) {
      test = Evaluation.loadDataset(options, 'T');
      MLUtils.prepareData(test);
      needPrebuiltModel = true;
    }

    // Load training data
    Instances train = null;
    if (Utils.getOptionPos('t', options) > -1) {
      train = Evaluation.loadDataset(options, 't');
      MLUtils.prepareData(train);
      needPrebuiltModel = false; // we can build a model with training data
    }

    // output predictions to file?
    String predictions = Utils.getOption("predictions", options);

    // suppress evaluation?
    boolean doEval = !Utils.getFlag("no-eval", options);

    // Set the number of windows (batches) @todo move below combining options?
    int nWin = OptionUtils.parse(options, 'x', 10);

    // Set the size of the initial training
    int nInit = OptionUtils.parse(options, "split-percentage", 10);

    // Partially labelled ?
    double rLabeled = OptionUtils.parse(options, "supervision", 1.);

    // Get Threshold
    String Top = OptionUtils.parse(options, "threshold", "0.5");

    // Get Verbosity (do we want to see everything?)
    String Vop = OptionUtils.parse(options, "verbosity", "3");

    // Dump for later?
    String dname = null;
    if (Utils.getOptionPos('d', options) >= 0)
      dname = Utils.getOption('d', options);

    // Load from file?
    String lname = null;
    Instances dataHeader = null;
    if (Utils.getOptionPos('l', options) >= 0) {
      lname = Utils.getOption('l', options);
      Object[] data = SerializationHelper.readAll(lname);
      MultiLabelClassifier h2 = (MultiLabelClassifier) data[0];
      if (h.getClass() != h2.getClass())
        throw new IllegalArgumentException("Classifier stored in '" + lname + "' is not a " + h.getClass().getName());
      if (data.length > 1) {
        dataHeader = (Instances) data[1];
        String msg;
        if (train != null) {
          msg = train.equalHeadersMsg(dataHeader);
          if (msg != null)
            throw new IllegalArgumentException("New training data is not compatible with training header stored in '" + lname + "':\n" + msg);
        }
        if (test != null) {
          msg = test.equalHeadersMsg(dataHeader);
          if (msg != null)
            throw new IllegalArgumentException("New test data is not compatible with training header stored in '" + lname + "':\n" + msg);
        }
      }
      h = h2;
      needPrebuiltModel = false; // successfully loaded prebuilt model
    }

    if (needPrebuiltModel)
      throw new IllegalArgumentException("Options require a prebuilt model, but none available!");

    if (h.getDebug()) {
      if (train != null)
        System.out.println(":- Dataset -: " + MLUtils.getDatasetName(train) + "\tL=" + train.classIndex() + "");
      if (test != null)
        System.out.println(":- Test -: " + MLUtils.getDatasetName(test) + "\tL=" + test.classIndex() + "");
    }

    Utils.checkForRemainingOptions(options);

    Result result = null;
    if (train != null) {
      if (doEval) {
        result = evaluateModelPrequentialBasic(h, train, nWin, rLabeled, Top, Vop);
      } else {
        Instances init = new Instances(train, 0, nWin); // initial window
        h.buildClassifier(init);
        for (Instance inst : train)
          ((UpdateableClassifier) h).updateClassifier(inst);
      }
    }

    if (test != null && doEval) {
      if (h.getDebug())
        System.out.println("Non-incremental evaluation on provided test set");
      result = Evaluation.evaluateModel(h, test, Top, Vop);
    }

    if (dname != null) {
      dataHeader = new Instances(train, 0);
      SerializationHelper.writeAll(dname, new Object[]{h, dataHeader});
    }

    // predictions
    if (!predictions.isEmpty()) {
      if (test == null) {
        System.err.println("No test set provided, cannot make predictions!");
      } else {
        Instances predicted = new Instances(test, 0);
        for (int i = 0; i < test.numInstances(); i++) {
          double pred[] = h.distributionForInstance(test.instance(i));
          // Cut off any [no-longer-needed] probabilistic information from MT classifiers.
          if (h instanceof MultiTargetClassifier)
            pred = Arrays.copyOf(pred, test.classIndex());
          Instance predInst = (Instance) test.instance(i).copy();
          for (int j = 0; j < pred.length; j++)
            predInst.setValue(j, pred[j]);
          predicted.add(predInst);
        }
        AbstractFileSaver saver = ConverterUtils.getSaverForFile(predictions);
        if (saver == null) {
          System.err.println("Failed to determine saver for '" + predictions + "', using " + ArffSaver.class.getName());
          saver = new ArffSaver();
        }
        saver.setFile(new File(predictions));
        saver.setInstances(predicted);
        saver.writeBatch();
        System.out.println("Predictions saved to: " + predictions);
      }
    }

    return result;
  }

  private static String measures[] = new String[]{"Accuracy", "Exact match", "Hamming score"};

  /**
   * EvaluateModel - over 20 windows.
   */
  public static Result evaluateModel(MultiLabelClassifier h, Instances D) throws Exception {
    return evaluateModelPrequentialBasic(h, D, 20, 1.0, "PCut1", "3");
  }

  /**
   * EvaluateModelBatchWindow - Evaluate a multi-label data-stream model over windows.
   * @param h          Multilabel Classifier
   * @param D          stream
   * @param numWindows number of windows
   * @param rLabeled   labelled-ness (1.0 by default)
   * @param Top        threshold option
   * @param Vop        verbosity option
   * @return The Result on the final window (but it contains samples of all the other evaluated windows).
   *         The window is sampled every N/numWindows instances, for a total of numWindows windows.
   */
  public static Result evaluateModelBatchWindow(MultiLabelClassifier h, Instances D, int numWindows, double rLabeled, String Top, String Vop) throws Exception {

    if (h.getDebug())
      System.out.println(":- Classifier -: " + h.getClass().getName() + ": " + Arrays.toString(h.getOptions()));

    int N = D.numInstances();
    int L = D.classIndex();

    // the Result to use
    Result result = null;
    // the samples of all windows
    ArrayList<HashMap<String, Object>> samples = new ArrayList<HashMap<String, Object>>();

    long train_time = 0;
    long test_time = 0;

    int windowSize = (int) Math.floor(D.numInstances() / (double) numWindows);

    if (rLabeled * windowSize < 1.)
      throw new Exception("[Error] The ratio of labelled instances (" + rLabeled + ") is too small given the window size!");

    double nth = 1. / rLabeled; // label every nth example

    Instances D_init = new Instances(D, 0, windowSize); // initial window

    if (h.getDebug()) {
      System.out.println("Training classifier on initial window ...");
    }
    train_time = System.currentTimeMillis();
    h.buildClassifier(D_init); // initial classifier
    train_time = System.currentTimeMillis() - train_time;
    if (h.getDebug()) {
      System.out.println("Done (in " + (train_time / 1000.0) + " s)");
    }
    D = new Instances(D, windowSize, D.numInstances() - windowSize); // the rest (after the initial window)

    double t[] = new double[L];
    Arrays.fill(t, 0.5);

    int V = MLUtils.getIntegerOption(Vop, 3);

    if (h.getDebug()) {
      System.out.println("--------------------------------------------------------------------------------");
      System.out.print("#" + Utils.padLeft("w", 6) + " " + Utils.padLeft("n", 6));
      for (String m : measures) {
        System.out.print(" ");
        System.out.print(Utils.padLeft(m, 12));
      }
      System.out.println("");
      System.out.println("--------------------------------------------------------------------------------");
    }

    int i = 0;
    for (int w = 0; w < numWindows - 1; w++) { // For each evaluation window ...

      result = new Result(L);
      result.setInfo("Supervision", String.valueOf(rLabeled));
      result.setInfo("Type", "MLi");

      int n = 0;
      test_time = 0;
      train_time = 0;
      for (int c = 0; i < (w * windowSize) + windowSize; i++) { // For each instance in the evaluation window ...

        Instance x = D.instance(i);
        AbstractInstance x_ = (AbstractInstance) ((AbstractInstance) x).copy(); // copy

        // (we can't clear the class values because certain classifiers need to know how well they're doing -- just trust that there's no cheating!)
        //for(int j = 0; j < L; j++)
        //	x_.setValue(j,0.0);

        if (rLabeled < 0.5 && (i % (int) (1 / rLabeled) == 0) || (rLabeled >= 0.5 && (i % (int) (1. / (1. - rLabeled)) != 0))) {
          // LABELLED - Test & record prediction
          long before_test = System.currentTimeMillis();
          double y[] = h.distributionForInstance(x_);
          long after_test = System.currentTimeMillis();
          test_time += (after_test - before_test); // was +=
          result.addResult(y, x);
          n++;
        } else {
          // UNLABELLED
          x = MLUtils.setLabelsMissing(x, L);
        }

        // Update the classifier. (The classifier will have to decide if it wants to deal with unlabelled instances.)
        long before = System.currentTimeMillis();
        ((UpdateableClassifier) h).updateClassifier(x);
        long after = System.currentTimeMillis();
        train_time += (after - before); // was +=
      }

      // calculate results
      result.setInfo("Threshold", Arrays.toString(t));
      result.output = Result.getStats(result, Vop);
      result.setMeasurement("Test time", (test_time) / 1000.0);
      result.setMeasurement("Build time", (train_time) / 1000.0);
      result.setMeasurement("Total time", (test_time + train_time) / 1000.0);
      result.setMeasurement("Threshold", (double) t[0]);
      result.setMeasurement("Instances", (double) i);
      result.setMeasurement("Samples", (double) (samples.size() + 1));
      samples.add(result.output);

      // Display results (to CLI)
      if (h.getDebug()) {
        System.out.print("#" + Utils.doubleToString((double) w + 1, 6, 0) + " " + Utils.doubleToString((double) n, 6, 0));
        n = 0;
        for (String m : measures) {
          System.out.print(" ");
          System.out.print(Utils.doubleToString((Double) result.getMeasurement(m), 12, 4));
        }
        System.out.println("");
      }

      // Calibrate threshold for next window
      if (Top.equals("PCutL")) {
        t = ThresholdUtils.calibrateThresholds(result.predictions, MLUtils.labelCardinalities(result.actuals));
      } else {
        Arrays.fill(t, ThresholdUtils.calibrateThreshold(result.predictions, MLUtils.labelCardinality(result.allTrueValues())));
      }
    }

    if (h.getDebug()) {
      System.out.println("--------------------------------------------------------------------------------");
    }

    // This is the last Result; prepare it for evaluation output.
    result.setInfo("Classifier", h.getClass().getName());
    result.vals.put("Test time", (test_time) / 1000.0);
    result.vals.put("Build time", (train_time) / 1000.0);
    result.vals.put("Total time", (test_time + train_time) / 1000.0);
    result.vals.put("Total instances tested", (double) i);
    result.vals.put("Initial instances for training", (double) windowSize);
    result.setInfo("Options", Arrays.toString(h.getOptions()));
    result.setInfo("Additional Info", h.toString());
    result.setInfo("Dataset", MLUtils.getDatasetName(D));
    result.output = Result.getStats(result, Vop);
    result.setMeasurement("Results sampled over time", Result.getResultsAsInstances(samples));

    return result;
  }

  /*
   * EvaluateModelPrequentialWindow - Evaluate a multi-label data-stream model over a moving window.
  public static Result[] evaluateModelPrequentialWindow(MultilabelClassifier h, Instances D, int windowSize, double rLabeled) throws Exception {

    if (h.getDebug())
      System.out.println(":- Classifier -: "+h.getClass().getName()+": "+Arrays.toString(h.getOptions()));

    int L = D.classIndex();
    Result result = new Result();
    long train_time = 0;
    long test_time = 0;
    double nth = 1. / rLabeled; // label every nth example
    results.setInfo("Supervision",String.valueOf(rLabeled));

    Instances D_init = new Instances(D,0,windowSize); // initial window
    if (h.getDebug()) {
      System.out.println("Training classifier on initial window (of size "+windowSize+") ...");
    }
    train_time = System.currentTimeMillis();
    h.buildClassifier(D_init); // initial classifier
    train_time = System.currentTimeMillis() - train_time;
    D = new Instances(D,windowSize,D.numInstances()-windowSize); // the rest (after the initial window)
    if (h.getDebug()) {
      System.out.println("Proceeding to Test/Label/Update cycle on remaining ("+D.numInstances()+") instances ...");
    }
    for(int i = 0; i < D.numInstances(); i++) {
      test_time = 0;
      train_time = 0;
      Instance x = D.instance(i);
      AbstractInstance x_ = (AbstractInstance)((AbstractInstance) x).copy(); // copy
      * TEST
      long before_test = System.currentTimeMillis();
      double y[] = h.distributionForInstance(x_);
      long after_test = System.currentTimeMillis();
      test_time += (after_test-before_test);
      result.addResult(y,x);
      * LABEL BECOMES AVAILABLE ?
      if ( rLabeled >= 0.5 ) {
        x = MLUtils.setLabelsMissing(x,L);
      }
      * UPDATE
      * (The classifier will have to decide if it wants to deal with unlabelled instances.)
      long before = System.currentTimeMillis();
      ((UpdateableClassifier)h).updateClassifier(x);
      long after = System.currentTimeMillis();
      train_time += (after-before);
      // calculate results
      result.output = Result.getStats(results[w],Vop);
    }
    result.setInfo("Classifier",h.getClass().getName());
    result.setInfo("Options",Arrays.toString(h.getOptions()));
    result.setInfo("Additional Info",h.toString());
    result.setInfo("Dataset",MLUtils.getDatasetName(D));
    result.setInfo("Type","MLi");
    double t = 0.5;
    try {
      t = Double.parseDouble(Top);
    } catch(Exception e) {
      System.err.println("[WARNING] Only a single threshold can be chosen for this kind of evaluation; Using "+t);
    }
    result.setInfo("Threshold", t);
    result.vals.put("Test time",(test_time)/1000.0);
    result.vals.put("Build time",(train_time)/1000.0);
    result.vals.put("Total time",(test_time+train_time)/1000.0);
    return result;
  }
  */

  /**
   * Prequential Evaluation - Accuracy since the start of evaluation.
   * @param h          Multilabel Classifier
   * @param D          stream
   * @param windowSize sampling frequency (of evaluation statistics)
   * @param rLabeled   labelled-ness (1.0 by default)
   * @param Top        threshold option
   * @param Vop        verbosity option
   * The window is sampled every N/numWindows instances, for a total of numWindows windows.
   */
  public static Result evaluateModelPrequentialBasic(MultiXClassifier h, Instances D, int windowSize, double rLabeled, String Top, String Vop) throws Exception {

    if (h.getDebug())
      System.out.println(":- Classifier -: " + h.getClass().getName() + ": " + Arrays.toString(h.getOptions()));

    int L = D.classIndex();
    Result result = new Result();
    long train_time = 0;
    long test_time = 0;
    double nth = 1.
/ rLabeled; // label every nth example

result.setInfo("Supervision",String.valueOf(rLabeled));

Instances D_init = new Instances(D,0,windowSize); // initial window

if (h.getDebug()) {
	System.out.println("Training classifier on initial window (of size "+windowSize+") ...");
}
train_time = System.currentTimeMillis();
h.buildClassifier(D_init); // initial classifier
train_time = System.currentTimeMillis() - train_time;

D = new Instances(D,windowSize,D.numInstances()-windowSize); // the rest (after the initial window)

if (h.getDebug()) {
	System.out.println("Proceeding to Test/Label/Update cycle on remaining ("+D.numInstances()+") instances ...");
}

result.setInfo("Classifier",h.getClass().getName());
result.setInfo("Options",Arrays.toString(h.getOptions()));
result.setInfo("Additional Info",h.toString());
result.setInfo("Dataset",MLUtils.getDatasetName(D));
result.setInfo("Verbosity",Vop);

if (h instanceof MultiTargetClassifier || Evaluation.isMT(D)) {
	result.setInfo("Type","MT");
} else {
	result.setInfo("Type","ML");
	double t = 0.5;
	try {
		t = Double.parseDouble(Top);
	} catch(Exception e) {
		System.err.println("[WARNING] Only a single threshold can be chosen for this kind of evaluation; Using "+t);
	}
	result.setInfo("Threshold", String.valueOf(t));
}

ArrayList<HashMap<String,Object>> samples = new ArrayList<HashMap<String,Object>>();

for(int i = 0; i < D.numInstances(); i++) {

	Instance x = D.instance(i);
	Instance x_ = (Instance)x.copy(); // copy

	/*
	 * TEST
	 */
	long before_test = System.currentTimeMillis();
	double y[] = h.distributionForInstance(x_);
	long after_test = System.currentTimeMillis();
	test_time += (after_test-before_test);
	result.addResult(y,x);

	/*
	 * LABEL BECOMES AVAILABLE ?
	 */
	if ( rLabeled >= 0.5 ) {
		x = MLUtils.setLabelsMissing(x,L);
	}

	/*
	 * UPDATE
	 * (The classifier will have to decide if it wants to deal with unlabelled instances.)
	 */
	long before = System.currentTimeMillis();
	((UpdateableClassifier)h).updateClassifier(x);
	long after = System.currentTimeMillis();
	train_time += (after-before);

	/*
	 * RECORD MEASUREMENT
	 */
	if (i % windowSize == (windowSize-1)) {
		HashMap<String,Object> eval_sample = Result.getStats(result,Vop);
		eval_sample.put("Test time",(test_time)/1000.0);
		eval_sample.put("Build time",(train_time)/1000.0);
		eval_sample.put("Total time",(test_time+train_time)/1000.0);
		eval_sample.put("Instances",(double)i);
		eval_sample.put("Samples",(double)(samples.size()+1));
		samples.add(eval_sample);
		System.out.println("Sample (#"+samples.size()+") of performance at "+i+"/"+D.numInstances()+" instances.");
	}
}

result.output = Result.getStats(result,Vop);
result.setMeasurement("Results sampled over time", Result.getResultsAsInstances(samples));
result.vals.put("Test time",(test_time)/1000.0);
result.vals.put("Build time",(train_time)/1000.0);
result.vals.put("Total time",(test_time+train_time)/1000.0);

return result;
}

public static void printOptions(Enumeration e) {

	// Evaluation Options
	StringBuffer text = new StringBuffer();
	text.append("\n\nEvaluation Options:\n\n");
	text.append("-t\n");
	text.append("\tSpecify the dataset (required)\n");
	text.append("-T <name of test file>\n");
	text.append("\tSets test file (will be used for making predictions).\n");
	text.append("-predictions <name of output file for predictions>\n");
	text.append("\tSets the file to store the predictions in (does not work with cross-validation).\n");
	//text.append("-split-percentage <percentage>\n");
	//text.append("\tSets the percentage of data to use for the initial training, e.g., 10.\n");
	text.append("-x <number of windows>\n");
	text.append("\tSets the number of samples to take (at evenly spaced intervals); default: 10.\n");
	text.append("-no-eval\n");
	text.append("\tSkips evaluation, e.g., used when test set contains no class labels.\n");
	text.append("-supervision <ratio labelled>\n");
	text.append("\tSets the ratio of labelled instances; default: 1.\n");
	text.append("-threshold <threshold>\n");
	text.append("\tSets the threshold to use.\n");
	text.append("-verbosity <verbosity level>\n");
	text.append("\tSpecify more/less evaluation output.\n");

	// Multilabel Options
	text.append("\n\nClassifier Options:\n\n");
	while (e.hasMoreElements()) {
		Option o = (Option) (e.nextElement());
		text.append("-"+o.name()+'\n');
		text.append(""+o.description()+'\n');
	}

	System.out.println(text);
}
}
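The evaluation methods above can also be driven programmatically rather than via a classifier's main(). The following is a minimal, hedged sketch (not part of the source above): the dataset path is hypothetical, and the BRUpdateable import assumes the stock MEKA package layout for updateable classifiers; adjust both to your setup.

// Hedged usage sketch: prequential evaluation of an updateable multi-label classifier.
import meka.classifiers.incremental.IncrementalEvaluation;
import meka.classifiers.multilabel.incremental.BRUpdateable; // assumed package (stock MEKA)
import meka.core.MLUtils;
import meka.core.Result;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class PrequentialDemo {
	public static void main(String[] args) throws Exception {
		Instances D = DataSource.read("data/Music.arff"); // hypothetical MEKA ARFF path
		MLUtils.prepareData(D); // sets the class index from the "-C" option in the relation name
		BRUpdateable h = new BRUpdateable();
		// Sample statistics every 100 instances, full supervision (rLabeled = 1.0),
		// threshold option "PCut1", verbosity "3" -- the defaults used by evaluateModel(...) above.
		Result r = IncrementalEvaluation.evaluateModelPrequentialBasic(h, D, 100, 1.0, "PCut1", "3");
		System.out.println(r);
	}
}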
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/AbstractMultiLabelClassifier.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package meka.classifiers.multilabel;

import meka.classifiers.incremental.IncrementalEvaluation;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.UpdateableClassifier;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.SerializedObject;

/**
 * A Multilabel Classifier.
 * @author Jesse Read
 * @version Jan 2015
 */
public abstract class AbstractMultiLabelClassifier extends AbstractClassifier implements MultiLabelClassifier {

	/** for serialization. */
	private static final long serialVersionUID = 1713843369736127215L;

	/**
	 * Description to display in the GUI.
	 *
	 * @return the description
	 */
	public String globalInfo() {
		// highly recommended to overwrite this method!
		return "A multi-label classifier";
	}

	@Override
	public String toString() {
		return "";
	}

	/**
	 * Returns a string representation of the model.
	 *
	 * @return the model
	 */
	public String getModel() {
		return "";
	}

	/**
	 * TestCapabilities.
	 * Make sure the training data is suitable.
	 * @param D the data
	 */
	public void testCapabilities(Instances D) throws Exception {
		// get the classifier's capabilities, enable all class attributes and do the usual test
		Capabilities cap = getCapabilities();
		cap.enableAllClasses();
		//getCapabilities().testWithFail(D);
		// get the capabilities again, test class attributes individually
		int L = D.classIndex();
		for(int j = 0; j < L; j++) {
			Attribute c = D.attribute(j);
			cap.testWithFail(c,true);
		}
	}

	@Override
	public abstract void buildClassifier(Instances trainingSet) throws Exception;

	@Override
	public abstract double[] distributionForInstance(Instance i) throws Exception;

	/**
	 * Creates a given number of deep copies of the given multi-label classifier using serialization.
	 *
	 * @param model the classifier to copy
	 * @param num the number of classifier copies to create.
	 * @return an array of classifiers.
	 * @exception Exception if an error occurs
	 */
	public static MultiLabelClassifier[] makeCopies(MultiLabelClassifier model, int num) throws Exception {
		if (model == null) {
			throw new Exception("No model classifier set");
		}
		MultiLabelClassifier classifiers[] = new MultiLabelClassifier[num];
		SerializedObject so = new SerializedObject(model);
		for(int i = 0; i < classifiers.length; i++) {
			classifiers[i] = (MultiLabelClassifier) so.getObject();
		}
		return classifiers;
	}

	/**
	 * Called by classifier's main() method upon initialisation from the command line.
	 * TODO: In the future, use runClassifier(h,args) directly, and deprecate this function.
	 * @param h A classifier
	 * @param args Command-line options.
	 */
	public static void evaluation(MultiLabelClassifier h, String args[]) {
		runClassifier(h,args);
	}

	/**
	 * Called by classifier's main() method upon initialisation from the command line.
	 * @param h A classifier
	 * @param args Command-line options.
*/ public static void runClassifier(MultiLabelClassifier h, String args[]) { if (h instanceof UpdateableClassifier) { try { IncrementalEvaluation.runExperiment(h,args); } catch(Exception e) { System.err.println("\n"+e); //e.printStackTrace(); IncrementalEvaluation.printOptions(h.listOptions()); } } else { try { Evaluation.runExperiment(h,args); } catch(Exception e) { System.err.println("\n"+e); //e.printStackTrace(); Evaluation.printOptions(h.listOptions()); } } } }
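To make the abstract contract above concrete, here is a hedged, illustrative-only subclass (not part of the library). It predicts each label's training-set frequency, and assumes MEKA's convention that the first L attributes are binary {0,1} labels, so value(j) doubles as the label indicator.

// Illustrative sketch only: the smallest useful AbstractMultiLabelClassifier subclass.
package meka.classifiers.multilabel;

import weka.core.Instance;
import weka.core.Instances;

public class LabelPriorClassifier extends AbstractMultiLabelClassifier {

	private double[] prior;

	@Override
	public void buildClassifier(Instances D) throws Exception {
		testCapabilities(D);
		int L = D.classIndex();
		prior = new double[L];
		for (int i = 0; i < D.numInstances(); i++)
			for (int j = 0; j < L; j++)
				prior[j] += D.instance(i).value(j) / D.numInstances(); // assumes {0,1} label coding
	}

	@Override
	public double[] distributionForInstance(Instance x) {
		return prior.clone(); // the same per-label "confidence" for every test instance
	}

	public static void main(String[] args) {
		runClassifier(new LabelPriorClassifier(), args); // dispatch defined above
	}
}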
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/BCC.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package meka.classifiers.multilabel;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.List;
import java.util.Random;
import java.util.Vector;

import meka.classifiers.multilabel.cc.CNode;
import meka.core.A;
import meka.core.MatrixUtils;
import meka.core.OptionUtils;
import meka.core.StatUtils;
import mst.Edge;
import mst.EdgeWeightedGraph;
import mst.KruskalMST;
import weka.core.Instances;
import weka.core.Option;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.Utils;

/**
 * BCC.java - Bayesian Classifier Chains. Probably would be more aptly called Bayesian Classifier
 * Tree. Creates a maximum spanning tree based on marginal label dependence; then employs a CC
 * classifier. The original paper used Naive Bayes as a base classifier, hence the name. <br>
 * See Zaragoza et al. "Bayesian Chain Classifiers for Multidimensional Classification", IJCAI 2011.
 * <br>
 *
 * @author Jesse Read
 * @version June 2013
 */
public class BCC extends CC {

	private static final long serialVersionUID = 585507197229071545L;

	/**
	 * Description to display in the GUI.
	 *
	 * @return the description
	 */
	@Override
	public String globalInfo() {
		return "Bayesian Classifier Chains (BCC).\n" + "Creates a maximum spanning tree based on marginal label dependence. Then employs CC.\n" + "For more information see:\n"
				+ this.getTechnicalInformation().toString();
	}

	@Override
	public TechnicalInformation getTechnicalInformation() {
		TechnicalInformation result;
		result = new TechnicalInformation(Type.INPROCEEDINGS);
		result.setValue(Field.AUTHOR, "Julio H. Zaragoza et al.");
		result.setValue(Field.TITLE, "Bayesian Chain Classifiers for Multidimensional Classification");
		result.setValue(Field.BOOKTITLE, "IJCAI'11: International Joint Conference on Artificial Intelligence.");
		result.setValue(Field.YEAR, "2011");
		return result;
	}

	@Override
	public void buildClassifier(final Instances D) throws Exception {
		this.testCapabilities(D);

		this.m_R = new Random(this.getSeed());
		int L = D.classIndex();
		int d = D.numAttributes() - L;

		/*
		 * Measure [un]conditional label dependencies (frequencies).
		 */
		if (this.getDebug()) {
			System.out.println("Get unconditional dependencies ...");
		}
		double CD[][] = null;
		if (this.m_DependencyType.equals("L")) {
			// New Option
			if (this.getDebug()) {
				System.out.println("The 'LEAD' method for finding conditional dependence.");
			}
			CD = StatUtils.LEAD(D, this.getClassifier(), this.m_R);
		} else {
			// Old/default Option
			if (this.getDebug()) {
				System.out.println("The Frequency method for finding marginal dependence.");
			}
			CD = StatUtils.margDepMatrix(D, this.m_DependencyType);
		}

		if (this.getDebug()) {
			System.out.println(MatrixUtils.toString(CD));
		}

		/*
		 * Make a fully connected graph, each edge represents the dependence measured between the pair of
		 * labels.
		 */
		CD = MatrixUtils.multiply(CD, -1); // because we want a *maximum* spanning tree
		if (this.getDebug()) {
			System.out.println("Make a graph ...");
		}
		EdgeWeightedGraph G = new EdgeWeightedGraph(L);
		for (int i = 0; i < L; i++) {
			for (int j = i + 1; j < L; j++) {
				if (Thread.currentThread().isInterrupted()) {
					throw new InterruptedException("Thread has been interrupted.");
				}
				Edge e = new Edge(i, j, CD[i][j]);
				G.addEdge(e);
			}
		}

		/*
		 * Run an off-the-shelf MST algorithm to get a MST.
		 */
		if (this.getDebug()) {
			System.out.println("Get an MST ...");
		}
		KruskalMST mst = new KruskalMST(G);

		/*
		 * Define graph connections based on the MST.
		 */
		int paM[][] = new int[L][L];
		for (Edge e : mst.edges()) {
			if (Thread.currentThread().isInterrupted()) {
				throw new InterruptedException("Thread has been interrupted.");
			}
			int j = e.either();
			int k = e.other(j);
			paM[j][k] = 1;
			paM[k][j] = 1;
			// StdOut.println(e);
		}
		if (this.getDebug()) {
			System.out.println(MatrixUtils.toString(paM));
		}

		/*
		 * Turn the DAG into a Tree with the m_Seed-th node as root
		 */
		int root = this.getSeed();
		if (this.getDebug()) {
			System.out.println("Make a Tree from Root " + root);
		}
		int paL[][] = new int[L][0];
		int visited[] = new int[L];
		Arrays.fill(visited, -1);
		visited[root] = 0;
		this.treeify(root, paM, paL, visited);
		if (this.getDebug()) {
			for (int i = 0; i < L; i++) {
				System.out.println("pa_" + i + " = " + Arrays.toString(paL[i]));
			}
		}
		this.m_Chain = Utils.sort(visited);
		if (this.getDebug()) {
			System.out.println("sequence: " + Arrays.toString(this.m_Chain));
		}

		/*
		 * Build a classifier 'tree' based on the Tree
		 */
		if (this.getDebug()) {
			System.out.println("Build Classifier Tree ...");
		}
		this.nodes = new CNode[L];
		for (int j : this.m_Chain) {
			if (Thread.currentThread().isInterrupted()) {
				throw new InterruptedException("Thread has been interrupted.");
			}
			if (this.getDebug()) {
				System.out.println("\t node h_" + j + " : P(y_" + j + " | x_[1:" + d + "], y_" + Arrays.toString(paL[j]) + ")");
			}
			this.nodes[j] = new CNode(j, null, paL[j]);
			this.nodes[j].build(D, this.m_Classifier);
		}
		if (this.getDebug()) {
			System.out.println(" * DONE * ");
		}

		/*
		 * Notes ... paL[j] = new int[]{}; // <-- BR !! paL[j] = MLUtils.gen_indices(j); // <-- CC !!
		 */
	}

	/**
	 * Treeify - make a tree given the structure defined in paM[][], using the root-th node as root.
* * @throws InterruptedException */ private void treeify(final int root, final int paM[][], final int paL[][], final int visited[]) throws InterruptedException { int children[] = new int[] {}; for (int j = 0; j < paM[root].length; j++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } if (paM[root][j] == 1) { if (visited[j] < 0) { children = A.append(children, j); paL[j] = A.append(paL[j], root); visited[j] = visited[Utils.maxIndex(visited)] + 1; } // set as visited // paM[root][j] = 0; } } // go through again for (int child : children) { this.treeify(child, paM, paL, visited); } } /* * TODO: Make a generic abstract -dependency_user- class that has this option, and extend it here */ String m_DependencyType = "Ibf"; public void setDependencyType(final String value) { this.m_DependencyType = value; } public String getDependencyType() { return this.m_DependencyType; } public String dependencyTypeTipText() { return "XXX"; } @Override public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option("\tThe way to measure dependencies.\n\tdefault: " + this.m_DependencyType + " (frequencies only)", "X", 1, "-X <value>")); OptionUtils.add(result, super.listOptions()); return OptionUtils.toEnumeration(result); } @Override public void setOptions(final String[] options) throws Exception { this.setDependencyType(OptionUtils.parse(options, 'X', "Ibf")); super.setOptions(options); } @Override public String[] getOptions() { List<String> result = new ArrayList<>(); OptionUtils.add(result, 'X', this.getDependencyType()); OptionUtils.add(result, super.getOptions()); return OptionUtils.toArray(result); } public static void main(final String args[]) { ProblemTransformationMethod.evaluation(new BCC(), args); } }
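A hedged usage sketch for the class above (not library code): configure BCC roughly as in the original paper. The dataset loading is hypothetical; setClassifier and setSeed come from the MEKA superclasses (ProblemTransformationMethod / CC).

// Hedged usage sketch: BCC with Naive Bayes as base classifier.
import meka.classifiers.multilabel.BCC;
import meka.core.MLUtils;
import weka.classifiers.bayes.NaiveBayes;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class BCCDemo {
	public static void main(String[] args) throws Exception {
		Instances D = DataSource.read("data/Music.arff"); // hypothetical path
		MLUtils.prepareData(D);
		BCC h = new BCC();
		h.setClassifier(new NaiveBayes()); // Naive Bayes, as in Zaragoza et al.
		h.setDependencyType("Ibf");        // frequency-based marginal dependence (default); "L" uses LEAD
		h.setSeed(0);                      // note: the seed also picks the root of the tree (see buildClassifier)
		h.buildClassifier(D);
		double[] y = h.distributionForInstance(D.instance(0)); // per-label confidences along the chain
		System.out.println(java.util.Arrays.toString(y));
	}
}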
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/BPNN.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.util.Arrays; import java.util.Random; import Jama.Matrix; import meka.classifiers.multilabel.NN.AbstractNeuralNet; import meka.core.MLUtils; import meka.core.MatrixUtils; import weka.core.Instance; import weka.core.Instances; /** * @TEMP */ /** * BPNN.java - Back Propagation Neural Network. This is a standard back-propagated Neural Network * with multiple outputs that correspond to multiple labels.<br> * If trained 'from scratch' only 1 layer is possible, but if you initialise it (from another * method) with pre-trained weight matrices, the number of layers is inferred from that. * * @author Jesse Read * @version March 2013 */ public class BPNN extends AbstractNeuralNet { private static final long serialVersionUID = -4568680054917021671L; /** Weight Matrix */ public Matrix W[] = null; protected Random r = null; protected Matrix dW_[] = null; public BPNN() { // different default for now this.m_E = 100; } @Override public void buildClassifier(final Instances D) throws Exception { this.testCapabilities(D); double X_[][] = MLUtils.getXfromD(D); double Y_[][] = MLUtils.getYfromD(D); this.r = new Random(this.m_Seed); if (this.W == null) { if (this.getDebug()) { System.out.println("initialize weights ..."); } int h[] = new int[] { this.m_H }; // TODO: parameterize this int d = X_[0].length; int L = D.classIndex(); this.initWeights(d, L, h); } // else ... probably pre-initialized, continue ... else if (this.getDebug()) { System.out.println("weights already preset, continue ..."); } this.train(X_, Y_, this.m_E); } @Override public double[] distributionForInstance(final Instance xy) throws Exception { double x[] = MLUtils.getxfromInstance(xy); return this.popy(x); } /** * Preset Weights - Initialize a BPNN with (pre-trained) weight matrices W (which also determines X * dimensions). * * @param W * pre-trained weight matrix (should include bias weights, assume W[-1]-1 hidden units in * penultimate layer not including bias]) * @param L * the number of labels (for making the final matrix) */ public void presetWeights(final Matrix W[], final int L) throws Exception { this.r = new Random(0); this.W = new Matrix[W.length + 1]; for (int l = 0; l < W.length; l++) { this.W[l] = W[l]; } int h = W[1].getRowDimension() - 1; this.W[W.length] = MatrixUtils.randomn(h + 1, L, this.r).timesEquals(0.1); this.makeMomentumMatrices(); } private void makeMomentumMatrices() { this.dW_ = new Matrix[this.W.length]; // weight deltas for (int i = 0; i < this.dW_.length; i++) { this.dW_[i] = new Matrix(this.W[i].getRowDimension(), this.W[i].getColumnDimension(), 0.0); } } /** * InitWeights - Initialize a BPNN of H.length hidden layers with H[0], H[1], etc hidden units in * each layer (W will be random, and of the corresponding dimensions). 
	 *
	 * @param d
	 *          number of visible units
	 * @param L
	 *          number of labels (output units)
	 * @param H
	 *          number of units in hidden layers, H.length = number of hidden layers. CURRENTLY LIMITED
	 *          TO 1.
	 */
	public void initWeights(final int d, final int L, int H[]) throws Exception {
		int numHidden = H.length;
		if (this.getDebug()) {
			System.out.println("Initializing " + (H.length) + " hidden layers ...");
			System.out.println("d = " + d);
			System.out.println("L = " + L);
		}

		// We need weights for Z to Y, as well as from X to Z
		Matrix W[] = new Matrix[H.length + 1];
		int h = H[0];
		H = new int[] { d, h, L };

		// Hidden layers
		System.out.println("" + Arrays.toString(H));
		for (int n = 0; n < H.length - 1; n++) {
			W[n] = MatrixUtils.randomn(H[n] + 1, H[n + 1], this.r).timesEquals(0.1);
			if (this.getDebug()) {
				System.out.println("W[" + n + "] = " + (H[n] + 1) + " x " + H[n + 1]);
			}
		}
		// setWeights(W, L);
		this.W = W;
		this.makeMomentumMatrices();
	}

	public double train(final double X_[][], final double Y_[][]) throws Exception {
		return this.train(X_, Y_, this.m_E);
	}

	/**
	 * Train - Train for I iterations. I is not necessarily m_E (yet)!
	 */
	public double train(final double[][] X_, final double[][] Y_, int I) throws Exception {
		if (this.getDebug()) {
			System.out.println("BPNN train; For " + I + " epochs ...");
		}
		int N = X_.length;
		boolean breakEarly = (I < 0);
		I = Math.abs(I);
		double E_ = Double.MAX_VALUE;
		double E = 0.0;
		for (int e = 0; e < I; e++) {
			if (Thread.currentThread().isInterrupted()) {
				throw new InterruptedException("Thread has been interrupted.");
			}
			E = this.update(X_, Y_);
			if (breakEarly && E > E_) {
				if (this.getDebug()) {
					System.out.println(" early stopped at epoch " + e + " ... ");
				}
				break; // positive gradient
			}
			E_ = E;
		}
		if (this.getDebug()) {
			System.out.println("Done.");
		}
		return E;
	}

	/**
	 * Update - A single training epoch.
	 */
	public double update(final double X_[][], final double Y_[][]) throws Exception {
		int N = X_.length;
		double E = 0.0;
		for (int i = 0; i < N; i++) {
			if (Thread.currentThread().isInterrupted()) {
				throw new InterruptedException("Thread has been interrupted.");
			}
			E += this.backPropagate(new double[][] { X_[i] }, new double[][] { Y_[i] });
		}
		return E;
	}

	/**
	 * Forward Pass - Given input x_, get output y_.
	 *
	 * @param x_
	 *          input
	 * @return y_ output
	 * @throws InterruptedException
	 */
	public double[] popy(final double x_[]) throws InterruptedException {
		return this.popY(new double[][] { x_ })[0];
	}

	/**
	 * Forward Pass - Given input X_, get output Y_.
	 *
	 * @param X_
	 *          input
	 * @return Y_ output
	 * @throws InterruptedException
	 */
	public double[][] popY(final double X_[][]) throws InterruptedException {
		Matrix Z[] = this.forwardPass(X_);
		int n = Z.length - 1;
		return Z[n].getArray();
	}

	/**
	 * Forward Pass - Given input X_, get output of all layers Z[0]...
* * @param X_ * input (no bias included) * @return output Z[] = {X,Z1,Z2,...,Y} * @throws InterruptedException */ public Matrix[] forwardPass(final double X_[][]) throws InterruptedException { int numW = this.W.length; // number of weight matrices Matrix Z[] = new Matrix[numW + 1]; // input activations Z[0] = new Matrix(MatrixUtils.addBias(X_)); // hidden layer(s) int i = 1; for (i = 1; i < numW; i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } if (this.getDebug()) { System.out.print("DO: [" + i + "] " + MatrixUtils.getDim(Z[i - 1].getArray()) + " * " + MatrixUtils.getDim(this.W[i - 1].getArray()) + " => "); } Matrix A_z = Z[i - 1].times(this.W[i - 1]); // A = X * W1 = Z[n-1] * W[n-1] Z[i] = MatrixUtils.sigma(A_z); Z[i] = MatrixUtils.addBias(Z[i]); // ACTIVATIONS Z[n] = sigma(A) = if (this.getDebug()) { System.out.println("==: " + MatrixUtils.getDim(A_z.getArray())); } } // output layer if (this.getDebug()) { System.out.print("DX: [" + i + "] " + MatrixUtils.getDim(Z[i - 1].getArray()) + " * " + MatrixUtils.getDim(this.W[i - 1].getArray()) + " => "); } Matrix A_y = Z[i - 1].times(this.W[i - 1]); // A = X * W1 = Z[n-1] * W[n-1] if (this.getDebug()) { System.out.println("==: " + MatrixUtils.getDim(A_y.getArray())); } Z[numW] = MatrixUtils.sigma(A_y); // ACTIVATIONS Z[n] = sigma(A) = return Z; } /** * Back Propagate - Do one round of Back Propagation on batch X_,Y_. * * @param X_ * input * @param Y_ * teacher values */ public double backPropagate(final double[][] X_, final double[][] Y_) throws Exception { int N = X_.length; // batch size int L = Y_[0].length; // num. of labels int nW = this.W.length; // num. of weight matrices Matrix T = new Matrix(Y_); // TARGETS /* * 1. FORWARD PROPAGATION. Forward-propagate X through the neural net to produce Z_1, Z_2, ..., Y. */ // Matrix X = new Matrix(M.addBias(X_)); // INPUT Matrix Z[] = this.forwardPass(X_); // ALL LAYERS /* * 2. BACKWARD PROPAGATION. Propagate the errors backward through the neural net. */ Matrix dZ[] = new Matrix[nW + 1]; // *new* // Error terms (output) Matrix E_y = T.minus(Z[nW]); // ERROR dZ[nW] = MatrixUtils.dsigma(Z[nW]).arrayTimes(E_y); // Error terms (hidden) *NEW* for (int i = nW - 1; i > 0; i--) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } Matrix E = dZ[i + 1].times(this.W[i].transpose()); dZ[i] = MatrixUtils.dsigma(Z[i]).arrayTimes(E); dZ[i] = new Matrix(MatrixUtils.removeBias(dZ[i].getArray())); } // Error terms (hidden) // Matrix E_z = dY.times(W[1].transpose()); // Matrix dZ = M.dsigma(Z[1]).arrayTimes(E_z); // dZ = new Matrix(M.removeBias(dZ.getArray())); // Weight derivatives Matrix dW[] = new Matrix[nW]; for (int i = 0; i < nW; i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } dW[i] = (Z[i].transpose().times(this.m_R).times(dZ[i + 1])).plus(this.dW_[i].times(this.m_M)); } // Weight update for (int i = 0; i < nW; i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } // W[i] = W[i].plusEquals(dW[i]); this.W[i].plusEquals(dW[i]); } // Update momentum records this.dW_ = dW; // double SSE = (E_y.transpose().times(E_y)).trace(); // SUM of SQUARE ERROR (faster?) 
		double SSE = E_y.normF(); // SQRT of SUM of SQUARE ERROR (note: the sqrt is not strictly necessary, so the commented line above should also suffice)
		return SSE;
	}

	public static void main(final String args[]) throws Exception {
		ProblemTransformationMethod.evaluation(new BPNN(), args);
	}
}
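A hedged usage sketch for BPNN (not library code): train the one-hidden-layer network via buildClassifier. The data path is hypothetical; the hidden-layer size and epoch count are the defaults set in this class (m_E = 100) and its AbstractNeuralNet superclass.

// Hedged usage sketch: BPNN trained from scratch on a MEKA dataset.
import meka.classifiers.multilabel.BPNN;
import meka.core.MLUtils;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class BPNNDemo {
	public static void main(String[] args) throws Exception {
		Instances D = DataSource.read("data/Music.arff"); // hypothetical path
		MLUtils.prepareData(D);
		BPNN nn = new BPNN();
		nn.setDebug(true);      // prints per-layer dimensions during the forward pass (see forwardPass)
		nn.buildClassifier(D);  // random weight init, then m_E epochs of backpropagation
		double[] y = nn.distributionForInstance(D.instance(0)); // sigmoid outputs, one per label
		System.out.println(java.util.Arrays.toString(y));
	}
}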
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/BR.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package meka.classifiers.multilabel;

import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.IntStream;

import meka.core.F;
import meka.core.MLUtils;
import meka.core.MultiLabelDrawable;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.core.Drawable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.NominalToBinary;

/**
 * BR.java - The Binary Relevance Method.
 * The standard baseline Binary Relevance method (BR) -- creates a binary problem for each label and learns a model for each of them individually.
 * See also <i>BR</i> from the <a href=http://mulan.sourceforge.net>MULAN</a> framework.
 * @author Jesse Read (jmr30@cs.waikato.ac.nz)
 */
public class BR extends ProblemTransformationMethod implements MultiLabelDrawable {

	/** for serialization. */
	private static final long serialVersionUID = -5390512540469007904L;

	protected Classifier m_MultiClassifiers[] = null;
	protected Instances m_preFilterInstancesTemplates[] = null;
	protected Instances m_InstancesTemplates[] = null;
	protected NominalToBinary m_NominalToBinary[] = null;

	private int numThreads = 1;

	/**
	 * Description to display in the GUI.
* * @return the description */ @Override public String globalInfo() { return "The Binary Relevance Method.\n" + "See also MULAN framework:\n" + "http://mulan.sourceforge.net"; } public void setNumThreads(final int numThreads) { this.numThreads = numThreads; } @Override public void buildClassifier(final Instances D) throws Exception { this.testCapabilities(D); int L = D.classIndex(); if (this.getDebug()) { System.out.print("Creating " + L + " models (" + this.m_Classifier.getClass().getName() + "): "); } this.m_MultiClassifiers = AbstractClassifier.makeCopies(this.m_Classifier, L); this.m_InstancesTemplates = new Instances[L]; this.m_preFilterInstancesTemplates = new Instances[L]; this.m_NominalToBinary = new NominalToBinary[L]; final Lock lock = new ReentrantLock(); IntStream.range(0, L).forEach(x -> this.m_NominalToBinary[x] = new NominalToBinary()); Semaphore sem = new Semaphore(0); AtomicBoolean buildFailed = new AtomicBoolean(false); List<Throwable> exception = Collections.synchronizedList(new LinkedList<>()); List<Runnable> runnables = new LinkedList<>(); IntStream.range(0, L).forEach(j -> { runnables.add(new Runnable() { @Override public void run() { // Select only class attribute 'j' try { Instances D_j = F.keepLabels(new Instances(D), L, new int[] { j }); D_j.setClassIndex(0); lock.lock(); try { BR.this.m_preFilterInstancesTemplates[j] = new Instances(D_j, 0); } finally { lock.unlock(); } BR.this.m_NominalToBinary[j].setInputFormat(D_j); D_j = Filter.useFilter(D_j, BR.this.m_NominalToBinary[j]); lock.lock(); try { BR.this.m_InstancesTemplates[j] = new Instances(D_j, 0); } finally { lock.unlock(); } // Build the classifier for that class BR.this.m_MultiClassifiers[j].buildClassifier(D_j); if (BR.this.getDebug()) { System.out.println(Thread.currentThread().getName() + ": " + (D_j.classAttribute().name())); } sem.release(); } catch (Throwable e) { exception.add(e); buildFailed.set(true); sem.release(L); } } }); }); if (this.numThreads > 1) { ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(this.numThreads); runnables.stream().forEach(executor::submit); sem.acquire(L); if (buildFailed.get()) { executor.shutdownNow(); throw new Exception(exception.get(0)); } else { executor.shutdown(); executor.awaitTermination(24, TimeUnit.HOURS); } } else { for (Runnable run : runnables) { run.run(); if (buildFailed.get()) { throw new Exception(exception.get(0)); } } } // sanity check for (Instances temp : this.m_InstancesTemplates) { if (temp == null) { throw new Exception("Not all instances templates are filled."); } } } @Override public double[] distributionForInstance(final Instance x) throws Exception { int L = x.classIndex(); double y[] = new double[L]; for (int j = 0; j < L; j++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } Instance x_j = (Instance) x.copy(); x_j.setDataset(null); x_j = MLUtils.keepAttributesAt(x_j, new int[] { j }, L); Instances schema = new Instances(this.m_preFilterInstancesTemplates[j], 0); schema.add(x_j); x_j = Filter.useFilter(schema, this.m_NominalToBinary[j]).get(0); x_j.setDataset(this.m_InstancesTemplates[j]); // y[j] = m_MultiClassifiers[j].classifyInstance(x_j); y[j] = this.m_MultiClassifiers[j].distributionForInstance(x_j)[1]; } return y; } /** * Returns the type of graph representing the object. 
* * @return the type of graph representing the object (label index as key) */ @Override public Map<Integer, Integer> graphType() { Map<Integer, Integer> result; int i; result = new HashMap<>(); if (this.m_MultiClassifiers != null) { for (i = 0; i < this.m_MultiClassifiers.length; i++) { if (this.m_MultiClassifiers[i] instanceof Drawable) { result.put(i, ((Drawable) this.m_MultiClassifiers[i]).graphType()); } } } return result; } /** * Returns a string that describes a graph representing the object. The string should be in XMLBIF * ver. 0.3 format if the graph is a BayesNet, otherwise it should be in dotty format. * * @return the graph described by a string (label index as key) * @throws Exception * if the graph can't be computed */ @Override public Map<Integer, String> graph() throws Exception { Map<Integer, String> result; int i; result = new HashMap<>(); if (this.m_MultiClassifiers != null) { for (i = 0; i < this.m_MultiClassifiers.length; i++) { if (this.m_MultiClassifiers[i] instanceof Drawable) { result.put(i, ((Drawable) this.m_MultiClassifiers[i]).graph()); } } } return result; } @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9117 $"); } public static void main(final String args[]) { BR br = new BR(); br.setNumThreads(4); ProblemTransformationMethod.evaluation(br, args); } }
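A hedged usage sketch for BR (not library code), using the parallel build shown in buildClassifier above. Everything except setNumThreads is standard MEKA API; the dataset path is hypothetical.

// Hedged usage sketch: BR with four worker threads.
import meka.classifiers.multilabel.BR;
import meka.core.MLUtils;
import weka.classifiers.functions.Logistic;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class BRDemo {
	public static void main(String[] args) throws Exception {
		Instances D = DataSource.read("data/Music.arff"); // hypothetical path
		MLUtils.prepareData(D);
		BR h = new BR();
		h.setClassifier(new Logistic());
		h.setNumThreads(4); // build the L binary models on a fixed thread pool (see buildClassifier)
		h.buildClassifier(D);
		double[] y = h.distributionForInstance(D.instance(0)); // y[j] = P(label j = 1 | x)
		System.out.println(java.util.Arrays.toString(y));
	}
}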
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/BRq.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.util.ArrayList; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.Vector; import meka.core.OptionUtils; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.Randomizable; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Remove; /** * BRq.java - Random Subspace ('quick') Version. Like BR, but randomly samples the attribute space * for each binary model. Intended for use in an ensemble (but will work in a standalone fashion * also). <br> * See: Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank. <i>Classifier Chains for * Multi-label Classification</i>. Machine Learning Journal. Springer. Vol. 85(3), pp 333-359. (May * 2011). <br> * * @see BR * @author Jesse Read (jmr30@cs.waikato.ac.nz) * @version January 2009 */ public class BRq extends ProblemTransformationMethod implements Randomizable, TechnicalInformationHandler { /** for serialization. */ private static final long serialVersionUID = 398261703726763108L; /** The downsample ratio */ protected double m_DownSampleRatio = 0.75; /** The random generator */ protected int m_S = 0; protected Random m_Random = new Random(this.m_S); protected Classifier m_MultiClassifiers[] = null; /** * Description to display in the GUI. 
* * @return the description */ @Override public String globalInfo() { return "The Binary Relevance Method - Random Subspace ('quick') Version.\n" + "This version is able to downsample the number of instances across the binary models.\n" + "For more information see:\n" + this.getTechnicalInformation().toString(); } @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank"); result.setValue(Field.TITLE, "Classifier Chains for Multi-label Classification"); result.setValue(Field.JOURNAL, "Machine Learning Journal"); result.setValue(Field.YEAR, "2011"); result.setValue(Field.VOLUME, "85"); result.setValue(Field.NUMBER, "3"); result.setValue(Field.PAGES, "333-359"); return result; } @Override public void buildClassifier(final Instances data) throws Exception { this.testCapabilities(data); int c = data.classIndex(); if (this.getDebug()) { System.out.print("-: Creating " + c + " models (" + this.m_Classifier.getClass().getName() + "): "); } this.m_MultiClassifiers = AbstractClassifier.makeCopies(this.m_Classifier, c); Instances sub_data = null; for (int i = 0; i < c; i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } int indices[][] = new int[c][c - 1]; for (int j = 0, k = 0; j < c; j++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } if (j != i) { indices[i][k++] = j; } } // Select only class attribute 'i' Remove FilterRemove = new Remove(); FilterRemove.setAttributeIndicesArray(indices[i]); FilterRemove.setInputFormat(data); FilterRemove.setInvertSelection(true); sub_data = Filter.useFilter(data, FilterRemove); sub_data.setClassIndex(0); /* BEGIN downsample for this link */ sub_data.randomize(this.m_Random); int numToRemove = sub_data.numInstances() - (int) Math.round(sub_data.numInstances() * this.m_DownSampleRatio); for (int m = 0, removed = 0; m < sub_data.numInstances(); m++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } if (sub_data.instance(m).classValue() <= 0.0) { sub_data.instance(m).setClassMissing(); if (++removed >= numToRemove) { break; } } } sub_data.deleteWithMissingClass(); /* END downsample for this link */ // Build the classifier for that class this.m_MultiClassifiers[i].buildClassifier(sub_data); if (this.getDebug()) { System.out.print(" " + (i + 1)); } } if (this.getDebug()) { System.out.println(" :-"); } this.m_InstancesTemplate = new Instances(sub_data, 0); } protected Instance[] convertInstance(final Instance instance, final int c) { Instance FilteredInstances[] = new Instance[c]; // for each 'i' classifiers for (int i = 0; i < c; i++) { // remove all except 'i' FilteredInstances[i] = (Instance) instance.copy(); FilteredInstances[i].setDataset(null); for (int j = 0, offset = 0; j < c; j++) { if (j == i) { offset = 1; } else { FilteredInstances[i].deleteAttributeAt(offset); } } FilteredInstances[i].setDataset(this.m_InstancesTemplate); } return FilteredInstances; } @Override public double[] distributionForInstance(final Instance instance) throws Exception { int c = instance.classIndex(); double result[] = new double[c]; Instance finstances[] = this.convertInstance(instance, c); for (int i = 0; i < c; i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been 
interrupted."); } result[i] = this.m_MultiClassifiers[i].classifyInstance(finstances[i]); // result[i] = m_MultiClassifiers[i].distributionForInstance(finstances[i])[1]; } return result; } @Override public void setSeed(final int s) { this.m_S = s; this.m_Random = new Random(this.m_S); } @Override public int getSeed() { return this.m_S; } public String seedTipText() { return "The seed value for randomizing the data."; } public void setDownSampleRatio(final double value) { this.m_DownSampleRatio = value; } public double getDownSampleRatio() { return this.m_DownSampleRatio; } public String downSampleRatioTipText() { return "The down sample ratio."; } @Override public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option("\tSets the downsampling ratio\n\tdefault: 0.75\n\t(% of original)", "P", 1, "-P <value>")); result.addElement(new Option("\tThe seed value for randomization\n\tdefault: 0", "S", 1, "-S <value>")); OptionUtils.add(result, super.listOptions()); return OptionUtils.toEnumeration(result); } @Override public void setOptions(final String[] options) throws Exception { this.setDownSampleRatio(OptionUtils.parse(options, 'P', 0.75)); this.setSeed(OptionUtils.parse(options, 'S', 0)); super.setOptions(options); } @Override public String[] getOptions() { List<String> result = new ArrayList<>(); OptionUtils.add(result, 'P', this.getDownSampleRatio()); OptionUtils.add(result, 'S', this.getSeed()); OptionUtils.add(result, super.getOptions()); return OptionUtils.toArray(result); } @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9117 $"); } public static void main(final String args[]) { ProblemTransformationMethod.evaluation(new BRq(), args); } }
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/CC.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package meka.classifiers.multilabel;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Vector;

import meka.classifiers.multilabel.cc.CNode;
import meka.core.A;
import meka.core.MultiLabelDrawable;
import meka.core.OptionUtils;
import weka.core.Drawable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Randomizable;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;

/**
 * CC.java - The Classifier Chains Method. Like BR, but label outputs become new inputs for the next
 * classifiers in the chain. <br>
 * See: Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank. <i>Classifier Chains for
 * Multi-label Classification</i>. Machine Learning Journal. Springer. Vol. 85(3), pp 333-359. (May
 * 2011). <br>
 * See: Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank. <i>Classifier Chains for
 * Multi-label Classification</i>. In Proc. of 20th European Conference on Machine Learning (ECML
 * 2009). Bled, Slovenia, September 2009. <br>
 *
 * Note that the code was reorganised substantially since earlier versions, to accommodate additional
 * functionality needed for e.g., MCC, PCC.
 *
 * @author Jesse Read
 * @version December 2013
 */
public class CC extends ProblemTransformationMethod implements Randomizable, TechnicalInformationHandler, MultiLabelDrawable {

	private static final long serialVersionUID = -4115294965331340629L;

	protected CNode nodes[] = null;

	protected int m_S = this.getDefaultSeed();

	protected Random m_R = null;

	protected int m_Chain[] = null;

	/**
	 * Prepare a Chain. One of the following:<br>
	 * - Use pre-set chain. If there is none, then <br>
	 * - Use default chain (1,2,...,L). Unless a different random seed has been set, then <br>
	 * - Use a random chain.
	 *
	 * @param L
	 *          number of labels
	 */
	protected void prepareChain(final int L) {
		int chain[] = this.retrieveChain();
		// if it has not yet been manually chosen ...
		if (chain == null) {
			// create the standard order (1,2,...,L) ..
			chain = A.make_sequence(L);
			// and shuffle if a non-default seed was set (m_S != 0)
			if (this.m_S != 0) {
				this.m_R = new Random(this.m_S);
				A.shuffle(chain, this.m_R);
			}
		}
		// set it
		this.prepareChain(chain);
	}

	/**
	 * Prepare a Chain. Set the specified 'chain'.
It must contain all indices [0,...,L-1] (but in any * order) * * @param chain * a specified chain */ public void prepareChain(final int chain[]) { this.m_Chain = Arrays.copyOf(chain, chain.length); if (this.getDebug()) { System.out.println("Chain s=" + Arrays.toString(this.m_Chain)); } } public int[] retrieveChain() { return this.m_Chain; } @Override public void buildClassifier(final Instances D) throws Exception { this.testCapabilities(D); int L = D.classIndex(); this.prepareChain(L); /* * make a classifier node for each label, taking the parents of all previous nodes */ if (this.getDebug()) { System.out.print(":- Chain ("); } this.nodes = new CNode[L]; int pa[] = new int[] {}; for (int j : this.m_Chain) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } if (this.getDebug()) { System.out.print(" " + D.attribute(j).name()); } this.nodes[j] = new CNode(j, null, pa); this.nodes[j].build(D, this.m_Classifier); pa = A.append(pa, j); } if (this.getDebug()) { System.out.println(" ) -:"); } // to store posterior probabilities (confidences) this.confidences = new double[L]; } protected double confidences[] = null; /** * GetConfidences - get the posterior probabilities of the previous prediction (after calling * distributionForInstance(x)). */ public double[] getConfidences() { return this.confidences; } @Override public double[] distributionForInstance(final Instance x) throws Exception { int L = x.classIndex(); double y[] = new double[L]; for (int j : this.m_Chain) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } // h_j : x,pa_j -> y_j y[j] = this.nodes[j].classify((Instance) x.copy(), y); } return y; } /** * SampleForInstance. predict y[j] stochastically rather than deterministically (as with * distributionForInstance(Instance x)). * * @param x * test Instance * @param r * Random &lt;- TODO probably can use this.m_R instead */ public double[] sampleForInstance(final Instance x, final Random r) throws Exception { int L = x.classIndex(); double y[] = new double[L]; for (int j : this.m_Chain) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } double p[] = this.nodes[j].distribution(x, y); y[j] = A.samplePMF(p, r); this.confidences[j] = p[(int) y[j]]; } return y; } /** * GetTransformTemplates - pre-transform the instance x, to make things faster. * * @return the templates */ public Instance[] getTransformTemplates(final Instance x) throws Exception { int L = x.classIndex(); Instance t_[] = new Instance[L]; double ypred[] = new double[L]; for (int j : this.m_Chain) { t_[j] = this.nodes[j].transform(x, ypred); } return t_; } /** * SampleForInstance - given an Instance template for each label, and a Random. * * @param t_ * Instance templates (pre-transformed) using #getTransformTemplates(x) */ public double[] sampleForInstanceFast(final Instance t_[], final Random r) throws Exception { int L = t_.length; double y[] = new double[L]; for (int j : this.m_Chain) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } double p[] = this.nodes[j].distribution(t_[j], y); // e.g., [0.4, 0.6] y[j] = A.samplePMF(p, r); // e.g., 0 this.confidences[j] = p[(int) y[j]]; // e.g., 0.4 this.nodes[j].updateTransform(t_[j], y); // need to update the transform #SampleForInstance(x,r) } return y; } /** * TransformInstances - this function is DEPRECATED. 
this function preloads the instances with the * correct class labels ... to make the chain much faster, but CNode does not yet have this * functionality ... need to do something about this! */ public Instance[] transformInstance(final Instance x) throws Exception { return null; /* * //System.out.println("CHAIN : "+Arrays.toString(this.getChain())); int L = x.classIndex(); * Instance x_copy[] = new Instance[L]; root.transform(x,x_copy); return x_copy; */ } /** * ProbabilityForInstance - Force our way down the imposed 'path'. <br> * TODO rename distributionForPath ? and simplify like distributionForInstance ? <br> * For example p (y=1010|x) = [0.9,0.8,0.1,0.2]. If the product = 1, this is probably the correct * path! * * @param x * test Instance * @param path * the path we want to go down * @return the probabilities associated with this path: [p(Y_1==path[1]|x),...,p(Y_L==path[L]|x)] */ public double[] probabilityForInstance(final Instance x, final double path[]) throws Exception { int L = x.classIndex(); double p[] = new double[L]; for (int j : this.m_Chain) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } // h_j : x,pa_j -> y_j double d[] = this.nodes[j].distribution((Instance) x.copy(), path); // <-- posterior distribution int k = (int) Math.round(path[j]); // <-- value of interest p[j] = d[k]; // <-- p(y_j==k) i.e., 'confidence' // y[j] = path[j]; } return p; } /** * Rebuild - NOT YET IMPLEMENTED. For efficiency reasons, we may want to rebuild part of the chain. * If chain[] = [1,2,3,4] and new_chain[] = [1,2,4,3] we only need to rebuild the final two links. * * @param new_chain * the new chain * @param D * the original training data */ public void rebuildClassifier(final int new_chain[], final Instances D) throws Exception { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } } public int getDefaultSeed() { return 0; } @Override public int getSeed() { return this.m_S; } @Override public void setSeed(final int s) { this.m_S = s; } public String seedTipText() { return "The seed value for randomizing the data."; } @Override public Enumeration listOptions() { Vector result = new Vector(); OptionUtils.addOption(result, this.seedTipText(), "" + this.getDefaultSeed(), 'S'); OptionUtils.add(result, super.listOptions()); return OptionUtils.toEnumeration(result); } @Override public void setOptions(final String[] options) throws Exception { this.setSeed(OptionUtils.parse(options, 'S', this.getDefaultSeed())); super.setOptions(options); } @Override public String[] getOptions() { List<String> result = new ArrayList<>(); OptionUtils.add(result, 'S', this.getSeed()); OptionUtils.add(result, super.getOptions()); return OptionUtils.toArray(result); } /** * Description to display in the GUI. * * @return the description */ @Override public String globalInfo() { return "Classifier Chains. 
" + "For more information see:\n" + this.getTechnicalInformation().toString(); } @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank"); result.setValue(Field.TITLE, "Classifier Chains for Multi-label Classification"); result.setValue(Field.JOURNAL, "Machine Learning Journal"); result.setValue(Field.YEAR, "2011"); result.setValue(Field.VOLUME, "85"); result.setValue(Field.NUMBER, "3"); result.setValue(Field.PAGES, "333-359"); additional = new TechnicalInformation(Type.INPROCEEDINGS); additional.setValue(Field.AUTHOR, "Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank"); additional.setValue(Field.TITLE, "Classifier Chains for Multi-label Classification"); additional.setValue(Field.BOOKTITLE, "20th European Conference on Machine Learning (ECML 2009). Bled, Slovenia, September 2009"); additional.setValue(Field.YEAR, "2009"); result.add(additional); return result; } /** * Returns the type of graph representing the object. * * @return the type of graph representing the object (label index as key) */ @Override public Map<Integer, Integer> graphType() { Map<Integer, Integer> result; int i; result = new HashMap<>(); if (this.nodes != null) { for (i = 0; i < this.nodes.length; i++) { if (this.nodes[i].getClassifier() instanceof Drawable) { result.put(i, ((Drawable) this.nodes[i].getClassifier()).graphType()); } } } return result; } /** * Returns a string that describes a graph representing the object. The string should be in XMLBIF * ver. 0.3 format if the graph is a BayesNet, otherwise it should be in dotty format. * * @return the graph described by a string (label index as key) * @throws Exception * if the graph can't be computed */ @Override public Map<Integer, String> graph() throws Exception { Map<Integer, String> result; int i; result = new HashMap<>(); if (this.nodes != null) { for (i = 0; i < this.nodes.length; i++) { if (this.nodes[i].getClassifier() instanceof Drawable) { result.put(i, ((Drawable) this.nodes[i].getClassifier()).graph()); } } } return result; } /** * Returns a string representation of the model. * * @return the model */ @Override public String getModel() { StringBuilder result; int i; if (this.nodes == null) { return "No model built yet"; } result = new StringBuilder(); for (i = 0; i < this.nodes.length; i++) { if (i > 0) { result.append("\n\n"); } result.append(this.getClass().getName() + ": Node #" + (i + 1) + "\n\n"); result.append(this.nodes[i].getClassifier().toString()); } return result.toString(); } @Override public String toString() { return Arrays.toString(this.retrieveChain()); } public static void main(final String args[]) { ProblemTransformationMethod.evaluation(new CC(), args); } }
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/CCq.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.util.ArrayList; import java.util.Arrays; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.Vector; import meka.core.MLUtils; import meka.core.OptionUtils; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.Randomizable; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; /** * The Classifier Chains Method - Random Subspace ('quick') Version. This version is able to * downsample the number of training instances across the binary models.<br> * See: Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank. <i>Classifier Chains for * Multi-label Classification</i>. Machine Learning Journal. Springer. Vol. 85(3), pp 333-359. (May * 2011). * * @author Jesse Read (jesse@tsc.uc3m.es) * @version January 2009 */ public class CCq extends ProblemTransformationMethod implements Randomizable, TechnicalInformationHandler { /** for serialization. 
*/ private static final long serialVersionUID = 7881602808389873411L; /** The downsample ratio */ protected double m_DownSampleRatio = 0.75; /** The random generator */ protected int m_S = 0; protected Random m_Random = new Random(this.m_S); /** The number of classes */ protected int m_NumClasses = -1; protected QLink root = null; protected class QLink { private QLink next = null; private Classifier classifier = null; public Instances _template = null; private int index = -1; private int excld[]; // to contain the indices to delete private int j = 0; // @temp public QLink(final int chain[], int j, final Instances train) throws Exception { this.j = j; this.index = chain[j]; // sort out excludes [4|5,1,0,2,3] this.excld = Arrays.copyOfRange(chain, j + 1, chain.length); // sort out excludes [0,1,2,3,5] Arrays.sort(this.excld); this.classifier = AbstractClassifier.forName(CCq.this.getClassifier().getClass().getName(), ((AbstractClassifier) CCq.this.getClassifier()).getOptions()); Instances new_train = new Instances(train); // delete all except one (leaving a binary problem) if (CCq.this.getDebug()) { System.out.print(" " + this.index); } new_train.setClassIndex(-1); // delete all the attributes (and track where our index ends up) int c_index = chain[j]; for (int i = this.excld.length - 1; i >= 0; i--) { new_train.deleteAttributeAt(this.excld[i]); if (this.excld[i] < this.index) { c_index--; } } new_train.setClassIndex(c_index); /* BEGIN downsample for this link */ new_train.randomize(CCq.this.m_Random); int numToRemove = new_train.numInstances() - (int) Math.round(new_train.numInstances() * CCq.this.m_DownSampleRatio); for (int i = 0, removed = 0; i < new_train.numInstances(); i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } if (new_train.instance(i).classValue() <= 0.0) { new_train.instance(i).setClassMissing(); if (++removed >= numToRemove) { break; } } } new_train.deleteWithMissingClass(); /* END downsample for this link */ this._template = new Instances(new_train, 0); this.classifier.buildClassifier(new_train); new_train = null; if (j + 1 < chain.length) { this.next = new QLink(chain, ++j, train); } } private void classify(final Instance test) throws Exception { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } // copy Instance copy = (Instance) test.copy(); copy.setDataset(null); // delete attributes we don't need for (int i = this.excld.length - 1; i >= 0; i--) { copy.deleteAttributeAt(this.excld[i]); } // set template copy.setDataset(this._template); // set class test.setValue(this.index, (int) (this.classifier.classifyInstance(copy))); // carry on if (this.next != null) { this.next.classify(test); } } @Override public String toString() { return (this.next == null) ? String.valueOf(this.index) : String.valueOf(this.index) + ">" + this.next.toString(); } } /** * Description to display in the GUI. * * @return the description */ @Override public String globalInfo() { return "The Classifier Chains Method - Random Subspace ('quick') Version.\n" + "This version is able to downsample the number of training instances across the binary models." 
+ "For more information see:\n" + this.getTechnicalInformation().toString(); } @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank"); result.setValue(Field.TITLE, "Classifier Chains for Multi-label Classification"); result.setValue(Field.JOURNAL, "Machine Learning Journal"); result.setValue(Field.YEAR, "2011"); result.setValue(Field.VOLUME, "85"); result.setValue(Field.NUMBER, "3"); result.setValue(Field.PAGES, "333-359"); return result; } @Override public int getSeed() { return this.m_S; } @Override public void setSeed(final int s) { this.m_S = s; this.m_Random = new Random(this.m_S); } public String seedTipText() { return "The seed value for randomization."; } /** Set the downsample ratio */ public void setDownSampleRatio(final double r) { this.m_DownSampleRatio = r; } /** Get the downsample ratio */ public double getDownSampleRatio() { return this.m_DownSampleRatio; } public String downSampleRatioTipText() { return "The down sample ratio (0-1)."; } @Override public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option("\tSets the downsampling ratio \n\tdefault: 0.75\t(of original)", "P", 1, "-P <value>")); result.addElement(new Option("\tThe seed value for randomization\n\tdefault: 0", "S", 1, "-S <value>")); OptionUtils.add(result, super.listOptions()); return OptionUtils.toEnumeration(result); } @Override public void setOptions(final String[] options) throws Exception { this.setDownSampleRatio(OptionUtils.parse(options, 'P', 0.75)); this.setSeed(OptionUtils.parse(options, 'S', 0)); super.setOptions(options); } @Override public String[] getOptions() { List<String> result = new ArrayList<>(); OptionUtils.add(result, 'P', this.getDownSampleRatio()); OptionUtils.add(result, 'S', this.getSeed()); OptionUtils.add(result, super.getOptions()); return OptionUtils.toArray(result); } @Override public void buildClassifier(final Instances Train) throws Exception { this.testCapabilities(Train); this.m_NumClasses = Train.classIndex(); int indices[] = MLUtils.gen_indices(this.m_NumClasses); MLUtils.randomize(indices, new Random(this.m_S)); if (this.getDebug()) { System.out.print(":- Chain ("); } this.root = new QLink(indices, 0, Train); if (this.getDebug()) { System.out.println(" ) -:"); } } @Override public double[] distributionForInstance(final Instance test) throws Exception { this.root.classify(test); return MLUtils.toDoubleArray(test, this.m_NumClasses); } @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9117 $"); } public static void main(final String args[]) { ProblemTransformationMethod.evaluation(new CCq(), args); } }
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/CDN.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.Vector; import meka.core.A; import meka.core.OptionUtils; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.Randomizable; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** * CDN.java - Conditional Dependency Networks. A fully connected undirected network, each node * (label) is connected to each other node (label). Each node is a binary classifier that predicts * p(y_j|x,y_1,...,y_{j-1},y_{j+1},...,y_L). Inference is done using Gibbs sampling over I * iterations. The final I_c iterations are used to collect the marginal probabilities, which * become the prediction y[]. <br> * See: Yuhong Guo and Suicheng Gu. <i>Multi-Label Classification Using Conditional Dependency * Networks</i>. IJCAI '11. 2011. <br> * * @author Jesse Read * @version November 2012 */ public class CDN extends ProblemTransformationMethod implements Randomizable, TechnicalInformationHandler { /** for serialization. */ private static final long serialVersionUID = -4571133392057899417L; protected Classifier h[] = null; protected Random m_R = null; protected Instances D_templates[]; protected int I = 1000; // total iterations protected int I_c = 100; // collection iterations @Override public void buildClassifier(final Instances D) throws Exception { this.testCapabilities(D); int N = D.numInstances(); int L = D.classIndex(); this.h = new Classifier[L]; this.m_R = new Random(this.m_S); this.D_templates = new Instances[L]; // Build L probabilistic models, each to predict Y_j | X, Y_{-j}; save the templates. for (int j = 0; j < L; j++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } // X = [Y[0],...,Y[j-1],Y[j+1],...,Y[L],X] this.D_templates[j] = new Instances(D); this.D_templates[j].setClassIndex(j); // train H[j] : X -> Y this.h[j] = AbstractClassifier.forName(this.getClassifier().getClass().getName(), ((AbstractClassifier) this.getClassifier()).getOptions()); this.h[j].buildClassifier(this.D_templates[j]); } } /* * Discrete Classification. Use Gibbs sampling. public double[] distributionForInstance(Instance x) * throws Exception { int L = x.classIndex(); int r[] = MLUtils.gen_indices(L); * Collections.shuffle(Arrays.asList(r)); for(int i = 0; i < I; i++) { for(int j : r) { * x.setDataset(D_templates[j]); // set target att. 
to j x.setValue(j,h[j].classifyInstance(x)); // * y_j = h_j(x) } System.out.println(""+MLUtils.toBitString(x,L)); } double y[] = new double[L]; * for(int j : r) { y[j] = x.value(j); } System.out.println(""+Arrays.toString(y)); return y; } */ @Override public double[] distributionForInstance(final Instance x) throws Exception { int L = x.classIndex(); // ArrayList<double[]> collection = new ArrayList<double[]>(100); double y[] = new double[L]; // for collecting marginals int sequence[] = A.make_sequence(L); double likelihood[] = new double[L]; for (int i = 0; i < this.I; i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } Collections.shuffle(Arrays.asList(sequence)); for (int j : sequence) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } // x = [x,y[1],...,y[j-1],y[j+1],...,y[L]] x.setDataset(this.D_templates[j]); // q = h_j(x) i.e. p(y_j | x) double dist[] = this.h[j].distributionForInstance(x); int k = A.samplePMF(dist, this.m_R); x.setValue(j, k); likelihood[j] = dist[k]; // likelihood of the sampled value double s = Utils.sum(likelihood); // overall likelihood of the current state (currently unused) // collect marginals once burn-in is complete if (i > (this.I - this.I_c)) { y[j] += x.value(j); } // else still burning in } } // finish, calculate marginals for (int j = 0; j < L; j++) { y[j] /= this.I_c; } return y; } protected int m_S = 0; @Override public void setSeed(final int s) { this.m_S = s; } @Override public int getSeed() { return this.m_S; } public String seedTipText() { return "The seed value for randomization."; } /** * GetI - Get the number of iterations. */ public int getI() { return this.I; } /** * SetI - Sets the number of iterations. */ public void setI(final int i) { this.I = i; } public String iTipText() { return "The total number of iterations."; } /** * GetIc - Get the number of collection iterations. */ public int getIc() { return this.I_c; } /** * SetIc - Sets the number of collection iterations. */ public void setIc(final int ic) { this.I_c = ic; } public String icTipText() { return "The number of collection iterations."; } @Override public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option("\t" + this.iTipText() + "\n\tdefault: 1000", "I", 1, "-I <value>")); result.addElement(new Option("\t" + this.icTipText() + "\n\tdefault: 100", "Ic", 1, "-Ic <value>")); result.addElement(new Option("\t" + this.seedTipText(), "S", 1, "-S <value>")); OptionUtils.add(result, super.listOptions()); return OptionUtils.toEnumeration(result); } @Override public void setOptions(final String[] options) throws Exception { this.setI(OptionUtils.parse(options, 'I', 1000)); this.setIc(OptionUtils.parse(options, "Ic", 100)); this.setSeed(OptionUtils.parse(options, 'S', 0)); super.setOptions(options); } @Override public String[] getOptions() { List<String> result = new ArrayList<>(); OptionUtils.add(result, 'I', this.getI()); OptionUtils.add(result, "Ic", this.getIc()); OptionUtils.add(result, 'S', this.getSeed()); OptionUtils.add(result, super.getOptions()); return OptionUtils.toArray(result); } public static void main(final String args[]) { ProblemTransformationMethod.evaluation(new CDN(), args); } /** * Description to display in the GUI. * * @return the description */ @Override public String globalInfo() { return "A Conditional Dependency Network. 
" + "For more information see:\n" + this.getTechnicalInformation().toString(); } @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Yuhong Guoand and Suicheng Gu"); result.setValue(Field.TITLE, "Multi-Label Classification Using Conditional Dependency Networks"); result.setValue(Field.BOOKTITLE, "IJCAI '11"); result.setValue(Field.YEAR, "2011"); return result; } @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9117 $"); } }
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/CDT.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.Vector; import meka.classifiers.multilabel.cc.CNode; import meka.classifiers.multilabel.cc.Trellis; import meka.core.A; import meka.core.OptionUtils; import meka.core.StatUtils; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; /** * CDT.java - Conditional Dependency Trellis. Like CDN, but with a trellis structure (like CT) * rather than a fully connected network. * * @see CDN * @see CT * @author Jesse Read * @version January 2014 */ public class CDT extends CDN { private static final long serialVersionUID = -1237783546336254364L; protected int m_Width = -1; protected int m_Density = 1; protected String m_DependencyMetric = "None"; Trellis trel = null; protected CNode nodes[] = null; @Override public void buildClassifier(final Instances D) throws Exception { this.testCapabilities(D); int L = D.classIndex(); int d = D.numAttributes() - L; this.m_R = new Random(this.getSeed()); int width = this.m_Width; if (this.m_Width < 0) { width = (int) Math.sqrt(L); } else if (this.m_Width == 0) { width = L; } this.nodes = new CNode[L]; /* * Make the Trellis. 
*/ if (this.getDebug()) { System.out.println("Make Trellis of width " + width); } int indices[] = A.make_sequence(L); A.shuffle(indices, new Random(this.getSeed())); this.trel = new Trellis(indices, width, this.m_Density); if (this.getDebug()) { System.out.println("==>\n" + this.trel.toString()); } /* Rearrange the Trellis */ if (!this.m_DependencyMetric.equals("None")) { this.trel = CT.orderTrellis(this.trel, StatUtils.margDepMatrix(D, this.m_DependencyMetric), this.m_R); } /* * Build Trellis */ if (this.getDebug()) { System.out.println("Build Trellis"); } if (this.getDebug()) { System.out.println("nodes: " + Arrays.toString(this.trel.indices)); } for (int j = 0; j < L; j++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } int jv = this.trel.indices[j]; if (this.getDebug()) { System.out.println("Build Node h_[" + jv + "] : P(y_" + jv + " | x_[1:d], y_" + Arrays.toString(this.trel.getNeighbours(j)) + ")"); } this.nodes[jv] = new CNode(jv, null, this.trel.getNeighbours(j)); this.nodes[jv].build(D, this.m_Classifier); } } @Override public double[] distributionForInstance(final Instance x) throws Exception { int L = x.classIndex(); double y[] = new double[L]; // for sampling double y_marg[] = new double[L]; // for collecting marginals int sequence[] = A.make_sequence(L); double likelihood[] = new double[L]; for (int i = 0; i < this.I; i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } Collections.shuffle(Arrays.asList(sequence)); for (int j : sequence) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } // sample y[j] = this.nodes[j].sample(x, y, this.m_R); // collect marginals if (i > (this.I - this.I_c)) { y_marg[j] += y[j]; } // else still burning in } } // finish, calculate marginals for (int j = 0; j < L; j++) { y_marg[j] /= this.I_c; } return y_marg; } /* NOTE: these options are in common with CT */ @Override public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option("\t" + this.widthTipText(), "H", 1, "-H <value>")); result.addElement(new Option("\t" + this.densityTipText(), "L", 1, "-L <value>")); result.addElement(new Option("\t" + this.dependencyMetricTipText(), "X", 1, "-X <value>")); OptionUtils.add(result, super.listOptions()); return OptionUtils.toEnumeration(result); } @Override public void setOptions(final String[] options) throws Exception { this.setWidth(OptionUtils.parse(options, 'H', -1)); this.setDensity(OptionUtils.parse(options, 'L', 1)); this.setDependencyMetric(OptionUtils.parse(options, 'X', "None")); super.setOptions(options); } @Override public String[] getOptions() { List<String> result = new ArrayList<>(); OptionUtils.add(result, 'H', this.getWidth()); OptionUtils.add(result, 'L', this.getDensity()); OptionUtils.add(result, 'X', this.getDependencyMetric()); OptionUtils.add(result, super.getOptions()); return OptionUtils.toArray(result); } /** * GetDensity - Get the neighbourhood density (number of neighbours for each node). */ public int getDensity() { return this.m_Density; } /** * SetDensity - Sets the neighbourhood density (number of neighbours for each node). */ public void setDensity(final int c) { this.m_Density = c; } public String densityTipText() { return "Determines the neighbourhood density (the number of neighbours for each node in the trellis)."; } /** * GetH - Get the trellis width. 
*/ public int getWidth() { return this.m_Width; } /** * SetH - Sets the trellis width. */ public void setWidth(final int h) { this.m_Width = h; } public String widthTipText() { return "Determines the width of the trellis (use 0 for chain; use -1 for a square trellis, i.e., width of sqrt(number of labels))."; } /** * GetDependency - Get the type of dependency to use in rearranging the trellis (None by default) */ public String getDependencyMetric() { return this.m_DependencyMetric; } /** * SetDependency - Sets the type of dependency to use in rearranging the trellis (None by default) */ public void setDependencyMetric(final String m) { this.m_DependencyMetric = m; } public String dependencyMetricTipText() { return "The dependency heuristic to use in rearranging the trellis (None by default)."; } public static void main(final String args[]) { ProblemTransformationMethod.evaluation(new CDT(), args); } /** * Description to display in the GUI. * * @return the description */ @Override public String globalInfo() { return "A Conditional Dependency Trellis. Like CDN, but with a trellis structure (like CT) rather than a fully connected network. " + "For more information see:\n" + this.getTechnicalInformation().toString(); } @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Yuhong Guo and Suicheng Gu"); result.setValue(Field.TITLE, "Multi-Label Classification Using Conditional Dependency Networks"); result.setValue(Field.BOOKTITLE, "IJCAI '11"); result.setValue(Field.YEAR, "2011"); result.add(new CT().getTechnicalInformation()); return result; } @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9117 $"); } }
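A configuration sketch for CDT (same placeholder-dataset assumptions as above). With the default "None" metric the trellis keeps its random layout; passing a metric name accepted by StatUtils.margDepMatrix triggers the reordering step ("Ibf" is assumed valid here, since CT uses it as its default).

// Hedged usage sketch for CDT; "train.arff" is a hypothetical path.
import meka.classifiers.multilabel.CDT;
import meka.core.MLUtils;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class CDTUsageSketch {
	public static void main(final String[] args) throws Exception {
		Instances D = new DataSource("train.arff").getDataSet(); // placeholder dataset
		MLUtils.prepareData(D); // class index from the @relation name
		CDT cdt = new CDT();
		cdt.setWidth(-1); // -1 => square trellis of width sqrt(L); 0 => width L
		cdt.setDensity(1); // neighbours per node
		cdt.setDependencyMetric("Ibf"); // assumed valid metric name; "None" keeps the random layout
		cdt.buildClassifier(D);
	}
}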
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/CT.java
package meka.classifiers.multilabel; import java.util.ArrayList; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.Vector; import meka.classifiers.multilabel.cc.CNode; import meka.classifiers.multilabel.cc.Trellis; import meka.core.OptionUtils; import meka.core.StatUtils; import weka.core.Instances; import weka.core.Option; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; /** * CT - Classifier Trellis. CC in a trellis structure (rather than a cascaded chain). You set the * width and type/connectivity/density of the trellis, and optionally change the dependency * heuristic which guides the placement of nodes (labels) within the trellis. * * @author Jesse Read * @version September 2015 */ public class CT extends MCC implements TechnicalInformationHandler { private static final long serialVersionUID = -5773951599734753129L; protected int m_Width = -1; protected int m_Density = 1; protected String m_DependencyMetric = "Ibf"; Trellis trel = null; private String info = ""; @Override public String toString() { return this.info; } @Override public String globalInfo() { return "CC in a trellis structure (rather than a cascaded chain). You set the width and type/connectivity of the trellis, and optionally change the payoff function which guides the placement of nodes (labels) within the trellis."; } @Override public void buildClassifier(final Instances D) throws Exception { int L = D.classIndex(); int d = D.numAttributes() - L; this.m_R = new Random(this.getSeed()); int width = this.m_Width; if (this.m_Width < 0) { // If no width specified for the trellis, use sqrt(L) width = (int) Math.sqrt(L); if (this.getDebug()) { System.out.println("Setting width to " + width); } } else if (this.m_Width == 0) { // 0-width is not possible, use it to indicate a width of L width = L; if (this.getDebug()) { System.out.println("Setting width to " + width); } } /* * Make the Trellis. Start with a random structure (unless -S 0 specified, see CC.java). 
*/ if (this.getDebug()) { System.out.println("Make Trellis"); } this.prepareChain(L); int indices[] = this.retrieveChain(); this.trel = new Trellis(indices, width, this.m_Density); long start = System.currentTimeMillis(); /* * If specified, try to reorder the nodes in the trellis (i.e., get a superior structure) */ if (this.m_Is > 0) { double I[][] = StatUtils.margDepMatrix(D, this.m_DependencyMetric); /* * Get dependency Matrix */ if (this.getDebug()) { System.out.println("Got " + this.m_DependencyMetric + "-type Matrix in " + ((System.currentTimeMillis() - start) / 1000.0) + "s"); } // ORDER THE TRELLIS ACCORDING TO THE DEPENDENCY MATRIX this.trel = orderTrellis(this.trel, I, this.m_R); } this.info = String.valueOf((System.currentTimeMillis() - start) / 1000.0); if (this.getDebug()) { System.out.println("\nTrellis built in: " + this.info + "s"); } /* * Build Trellis */ if (this.getDebug()) { System.out.println("Build Trellis"); } this.nodes = new CNode[L]; for (int jv : this.trel.indices) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } if (this.getDebug()) { System.out.print(" -> " + jv); // System.out.println("Build Node h_"+jv+"] : P(y_"+jv+" | x_[1:d], // y_"+Arrays.toString(trel.trellis[jv])+")"); } this.nodes[jv] = new CNode(jv, null, this.trel.trellis[jv]); this.nodes[jv].build(D, this.m_Classifier); } if (this.getDebug()) { System.out.println(); } // So we can use the MCC.java and CC.java framework this.confidences = new double[L]; this.m_Chain = this.trel.indices; } /** * OrderTrellis - order the trellis according to marginal label dependencies. * * @param trel * a randomly initialised trellis * @param I * a matrix of marginal pairwise dependencies * @param rand * a random number generator * @return the modified trellis (TODO: move to Trellis.java?) * @throws InterruptedException */ public static Trellis orderTrellis(Trellis trel, final double I[][], final Random rand) throws InterruptedException { int L = I.length; int Y[] = new int[L]; /* * Make list of indices */ ArrayList<Integer> list = new ArrayList<>(); for (int i : trel.indices) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } list.add(Integer.valueOf(i)); } /* * Take first index, and proceed */ Y[0] = list.remove(rand.nextInt(L)); // if (getDebug()) // System.out.print(" "+String.format("%4d", Y[0])); // @todo: update(I,j_0) to make faster for (int j = 1; j < L; j++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } // if (getDebug() && j % m_Width == 0) // System.out.println(); double max_w = -1.; int j_ = -1; for (int j_prop : list) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } double w = trel.weight(Y, j, j_prop, I); if (w >= max_w) { max_w = w; j_ = j_prop; } } list.remove(Integer.valueOf(j_)); // if (getDebug()) { // System.out.print(" "+String.format("%4d", j_)); // } Y[j] = j_; // @todo: update(I,j_), because it will be a parent now } // if (getDebug()) // System.out.println(); trel = new Trellis(Y, trel.WIDTH, trel.TYPE); return trel; } /** * GetDensity - Get the neighbourhood density (number of neighbours for each node). */ public int getDensity() { return this.m_Density; } /** * SetDensity - Sets the neighbourhood density (number of neighbours for each node). 
*/ public void setDensity(final int c) { this.m_Density = c; } public String densityTipText() { return "Determines the neighbourhood density (the number of neighbours for each node in the trellis). Default = 1, BR = 0."; } /** * GetH - Get the trellis width. */ public int getWidth() { return this.m_Width; } /** * SetH - Sets the trellis width. */ public void setWidth(final int h) { this.m_Width = h; } public String widthTipText() { return "Determines the width of the trellis (use 0 for chain; use -1 for a square trellis, i.e., width of sqrt(number of labels))."; } /** * GetDependency - Get the type of dependency to use in rearranging the trellis */ public String getDependencyMetric() { return this.m_DependencyMetric; } /** * SetDependency - Sets the type of dependency to use in rearranging the trellis */ public void setDependencyMetric(final String m) { this.m_DependencyMetric = m; } public String dependencyMetricTipText() { return "The dependency heuristic to use in rearranging the trellis (applicable if chain iterations > 0), default: Ibf (Mutual Information, fast binary version for multi-label data)"; } @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Jesse Read, Luca Martino, David Luengo, Pablo Olmos"); result.setValue(Field.TITLE, "Scalable multi-output label prediction: From classifier chains to classifier trellises"); result.setValue(Field.JOURNAL, "Pattern Recognition"); result.setValue(Field.URL, "http://www.sciencedirect.com/science/article/pii/S0031320315000084"); result.setValue(Field.YEAR, "2015"); return result; } @Override public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option("\t" + this.widthTipText(), "H", 1, "-H <value>")); result.addElement(new Option("\t" + this.densityTipText(), "L", 1, "-L <value>")); result.addElement(new Option("\t" + this.dependencyMetricTipText(), "X", 1, "-X <value>")); OptionUtils.add(result, super.listOptions()); return OptionUtils.toEnumeration(result); } @Override public void setOptions(final String[] options) throws Exception { this.setWidth(OptionUtils.parse(options, 'H', -1)); this.setDensity(OptionUtils.parse(options, 'L', 1)); this.setDependencyMetric(OptionUtils.parse(options, 'X', "Ibf")); super.setOptions(options); } @Override public String[] getOptions() { List<String> result = new ArrayList<>(); OptionUtils.add(result, 'H', this.getWidth()); OptionUtils.add(result, 'L', this.getDensity()); OptionUtils.add(result, 'X', this.getDependencyMetric()); OptionUtils.add(result, super.getOptions()); return OptionUtils.toArray(result); } public static void main(final String args[]) { ProblemTransformationMethod.evaluation(new CT(), args); } }
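A configuration sketch for CT (same placeholder-dataset assumptions as above). Note that the dependency-based reordering only runs when the chain-iterations field inherited from MCC (m_Is) is positive; this sketch sticks to the setters defined in this file plus the seed setter inherited from CC.

// Hedged usage sketch for CT; reordering additionally requires MCC's chain iterations to be > 0.
import meka.classifiers.multilabel.CT;
import meka.core.MLUtils;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class CTUsageSketch {
	public static void main(final String[] args) throws Exception {
		Instances D = new DataSource("train.arff").getDataSet(); // placeholder dataset
		MLUtils.prepareData(D); // class index from the @relation name
		CT ct = new CT();
		ct.setSeed(0); // seeds the initial random trellis
		ct.setWidth(0); // 0 => chain-like trellis; -1 => square trellis
		ct.setDensity(1); // neighbours per node
		ct.setDependencyMetric("Ibf"); // mutual information, fast binary version (the default)
		ct.buildClassifier(D);
	}
}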
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/DBPNN.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.util.Random; import Jama.Matrix; import meka.classifiers.multilabel.NN.AbstractDeepNeuralNet; import meka.core.MLUtils; import meka.core.MatrixUtils; import rbms.DBM; import rbms.RBM; import weka.core.Instance; import weka.core.Instances; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; /** * DBPNN.java - Deep Back-Propagation Neural Network. Use an RBM to pre-train the network, then plug * in BPNN. <br> * See: Geoffrey Hinton and Ruslan Salakhutdinov. <i>Reducing the Dimensionality of Data with Neural * Networks</i>. Science. Vol 313(5786), pages 504 - 507. 2006. <br> * * @see BPNN * @author Jesse Read * @version December 2012 */ public class DBPNN extends AbstractDeepNeuralNet implements TechnicalInformationHandler { private static final long serialVersionUID = 5007534249445210725L; protected RBM dbm = null; protected long rbm_time = 0; @Override public void buildClassifier(final Instances D) throws Exception { this.testCapabilities(D); // Extract variables int L = D.classIndex(); int d = D.numAttributes() - L; double X_[][] = MLUtils.getXfromD(D); double Y_[][] = MLUtils.getYfromD(D); // Build an RBM if (this.getDebug()) { System.out.println("Build RBM(s) ... "); } String ops[] = this.getOptions(); this.dbm = new DBM(ops); this.dbm.setE(this.m_E); ((DBM) this.dbm).setH(this.m_H, this.m_N); long before = System.currentTimeMillis(); this.dbm.train(X_, this.m_H); // batch train this.rbm_time = System.currentTimeMillis() - before; if (this.getDebug()) { Matrix tW[] = this.dbm.getWs(); System.out.println("X = \n" + MatrixUtils.toString(X_)); for (int l = 0; l < tW.length; l++) { System.out.println("W = \n" + MatrixUtils.toString(tW[l].getArray())); } System.out.println("Y = \n" + MatrixUtils.toString(Y_)); } /* * Trim W's: instead of (d+1 x h+1), they become (d+1, h) wwb ww wwb ww wwb -> ww wwb ww bbb (this * is because RBMs go both ways -- have biases both ways -- whereas BP only goes up) TODO the best * thing would be to keep different views of the same array ... 
*/ Matrix W[] = trimBiases(this.dbm.getWs()); // Back propagate with batch size of 1 to fine tune the DBM into a supervised DBN if (this.m_Classifier instanceof BPNN) { if (this.getDebug()) { System.out.println("You have chosen to use BPNN (good!)"); } } else { System.err.println("[WARNING] Was expecting BPNN as the base classifier (will set it now, with default parameters) ..."); this.m_Classifier = new BPNN(); } int i_Y = W.length - 1; // the final W W[i_Y] = RBM.makeW(W[i_Y].getRowDimension() - 1, W[i_Y].getColumnDimension() - 1, new Random(1)); // ((BPNN) this.m_Classifier).presetWeights(W, L); // this W will be modified ((BPNN) this.m_Classifier).train(X_, Y_); // could also have called buildClassifier(D) /* * for(int i = 0; i < 1000; i++) { double E = ((BPNN)m_Classifier).update(X_,Y_); //double Ypred[][] * = ((BPNN)m_Classifier).popY(X_); System.out.println("i="+i+", MSE="+E); } */ if (this.getDebug()) { Matrix tW[] = W; // System.out.println("X = \n"+M.toString(X_)); System.out.println("W = \n" + MatrixUtils.toString(tW[0].getArray())); System.out.println("W = \n" + MatrixUtils.toString(tW[1].getArray())); double Ypred[][] = ((BPNN) this.m_Classifier).popY(X_); System.out.println("Y = \n" + MatrixUtils.toString(MatrixUtils.threshold(Ypred, 0.5))); // System.out.println("Z = \n"+M.toString(M.threshold(Z,0.5))); } } @Override public double[] distributionForInstance(final Instance xy) throws Exception { return this.m_Classifier.distributionForInstance(xy); } protected static Matrix trimBiases(final Matrix A) { double M_[][] = A.getArray(); return new Matrix(MatrixUtils.removeBias(M_)); // return new Matrix(M.getArray(),M.getRowDimension(),M.getColumnDimension()-1); // ignore last // column } protected static Matrix[] trimBiases(final Matrix M[]) throws InterruptedException { for (int i = 0; i < M.length; i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } M[i] = trimBiases(M[i]); } return M; } /** * Description to display in the GUI. * * @return the description */ @Override public String globalInfo() { return "A Deep Back-Propagation Neural Network. " + "For more information see:\n" + this.getTechnicalInformation().toString(); } @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Geoffrey Hinton and Ruslan Salakhutdinov"); result.setValue(Field.TITLE, "Reducing the Dimensionality of Data with Neural Networks"); result.setValue(Field.JOURNAL, "Science"); result.setValue(Field.VOLUME, "313"); result.setValue(Field.NUMBER, "5786"); result.setValue(Field.PAGES, "504-507"); result.setValue(Field.YEAR, "2006"); return result; } public static void main(final String args[]) throws Exception { ProblemTransformationMethod.evaluation(new DBPNN(), args); } }
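A usage sketch for DBPNN (same placeholder-dataset assumptions as above). buildClassifier warns and substitutes a default BPNN when the base classifier is anything else, so setting it explicitly appears to be the intended usage; setClassifier is assumed to be inherited from the problem-transformation classifier hierarchy.

// Hedged usage sketch for DBPNN; pre-trains with an RBM/DBM, then fine-tunes with BPNN.
import meka.classifiers.multilabel.BPNN;
import meka.classifiers.multilabel.DBPNN;
import meka.core.MLUtils;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class DBPNNUsageSketch {
	public static void main(final String[] args) throws Exception {
		Instances D = new DataSource("train.arff").getDataSet(); // placeholder dataset
		MLUtils.prepareData(D); // class index from the @relation name
		DBPNN dbpnn = new DBPNN();
		dbpnn.setClassifier(new BPNN()); // the expected base classifier for the back-propagation phase
		dbpnn.buildClassifier(D);
		double[] y = dbpnn.distributionForInstance(D.instance(0));
		System.out.println(java.util.Arrays.toString(y));
	}
}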
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/Evaluation.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.io.File; import java.util.Arrays; import java.util.Enumeration; import java.util.Random; import meka.classifiers.MultiXClassifier; import meka.classifiers.multitarget.MultiTargetClassifier; import meka.core.MLEvalUtils; import meka.core.MLUtils; import meka.core.Result; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.SerializationHelper; import weka.core.Utils; import weka.core.converters.AbstractFileSaver; import weka.core.converters.ArffSaver; import weka.core.converters.ConverterUtils; import weka.core.converters.ConverterUtils.DataSource; /** * Evaluation.java - Evaluation functionality. * * @author Jesse Read * @version March 2014 */ public class Evaluation { /** * RunExperiment - Build and evaluate a model with command-line options. * * @param h * multi-label classifier * @param options * command line options */ public static void runExperiment(MultiLabelClassifier h, final String options[]) throws Exception { // Help if (Utils.getOptionPos('h', options) >= 0) { System.out.println("\nHelp requested"); Evaluation.printOptions(h.listOptions()); return; } h.setOptions(options); if (h.getDebug()) { System.out.println("Loading and preparing dataset ..."); } // Load Instances from a file Instances D_train = loadDataset(options); Instances D_full = D_train; // Try to extract and set a class index from the @relation name MLUtils.prepareData(D_train); // Override the number of classes with command-line option (optional) if (Utils.getOptionPos('C', options) >= 0) { int L = Integer.parseInt(Utils.getOption('C', options)); D_train.setClassIndex(L); } // If we still haven't found the -C option, we can't continue (we don't know how many labels there are) int L = D_train.classIndex(); if (L <= 0) { throw new Exception("[Error] Number of labels not specified.\n\tYou must set the number of labels with the -C option, either inside the @relation tag of the Instances file, or on the command line."); // apparently the dataset didn't contain the '-C' flag, check in the command line options ... } // Randomize (Instances) int seed = (Utils.getOptionPos('s', options) >= 0) ? Integer.parseInt(Utils.getOption('s', options)) : 0; if (Utils.getFlag('R', options)) { D_train.randomize(new Random(seed)); } boolean Threaded = false; if (Utils.getOptionPos("Thr", options) >= 0) { Threaded = Utils.getFlag("Thr", options); } // Verbosity Option String voption = "1"; if (Utils.getOptionPos("verbosity", options) >= 0) { voption = Utils.getOption("verbosity", options); } // Save for later? // String fname = null; // if (Utils.getOptionPos('f',options) >= 0) { // fname = Utils.getOption('f',options); // } // Dump for later? String dname = null; if (Utils.getOptionPos('d', options) >= 0) { dname = Utils.getOption('d', options); } // Load from file? 
String lname = null; Instances dataHeader = null; if (Utils.getOptionPos('l', options) >= 0) { lname = Utils.getOption('l', options); Object[] data = SerializationHelper.readAll(lname); h = (MultiLabelClassifier) data[0]; if (data.length > 1) { dataHeader = (Instances) data[1]; // Object o[] = SerializationHelper.readAll(lname); // h = (MultilabelClassifier)o[0]; } } try { Result r = null; // Threshold OPtion String top = "PCut1"; // default if (Utils.getOptionPos("threshold", options) >= 0) { top = Utils.getOption("threshold", options); } // output predictions to file? String predictions = Utils.getOption("predictions", options); // suppress evaluation? boolean doEval = !Utils.getFlag("no-eval", options); if (Utils.getOptionPos('x', options) >= 0) { // CROSS-FOLD-VALIDATION // TODO output predictions? if (!predictions.isEmpty()) { System.err.println("Predictions cannot be saved when using cross-validation!"); } int numFolds = MLUtils.getIntegerOption(Utils.getOption('x', options), 10); // default 10 // Check for remaining options Utils.checkForRemainingOptions(options); r = Evaluation.cvModel(h, D_train, numFolds, top, voption); System.out.println(r.toString()); } else { // TRAIN-TEST SPLIT Instances D_test = null; if (Utils.getOptionPos('T', options) >= 0) { // load separate test set try { D_test = loadDataset(options, 'T'); MLUtils.prepareData(D_test); } catch (Exception e) { throw new Exception("[Error] Failed to Load Test Instances from file.", e); } } else { // split training set into train and test sets // default split int N_T = (int) (D_train.numInstances() * 0.60); if (Utils.getOptionPos("split-percentage", options) >= 0) { // split by percentage double percentTrain = Double.parseDouble(Utils.getOption("split-percentage", options)); N_T = (int) Math.round((D_train.numInstances() * (percentTrain / 100.0))); } else if (Utils.getOptionPos("split-number", options) >= 0) { // split by number N_T = Integer.parseInt(Utils.getOption("split-number", options)); } int N_t = D_train.numInstances() - N_T; D_test = new Instances(D_train, N_T, N_t); D_train = new Instances(D_train, 0, N_T); } // Invert the split? if (Utils.getFlag('i', options)) { // boolean INVERT = Utils.getFlag('i',options); Instances temp = D_test; D_test = D_train; D_train = temp; } // Check for remaining options Utils.checkForRemainingOptions(options); if (h.getDebug()) { System.out.println(":- Dataset -: " + MLUtils.getDatasetName(D_train) + "\tL=" + L + "\tD(t:T)=(" + D_train.numInstances() + ":" + D_test.numInstances() + ")\tLC(t:T)=" + Utils.roundDouble(MLUtils.labelCardinality(D_train, L), 2) + ":" + Utils.roundDouble(MLUtils.labelCardinality(D_test, L), 2) + ")"); } if (lname != null) { // h is already built, and loaded from a file, test it! if (doEval) { r = testClassifier(h, D_test); String t = top; if (top.startsWith("PCut")) { // if PCut is specified we need the training data, // so that we can calibrate the threshold! t = MLEvalUtils.getThreshold(r.predictions, D_train, top); } r = evaluateModel(h, D_test, t, voption); } } else { // check if train and test set size are > 0 if (D_train.numInstances() > 0 && D_test.numInstances() > 0) { if (doEval) { if (Threaded) { r = evaluateModelM(h, D_train, D_test, top, voption); } else { r = evaluateModel(h, D_train, D_test, top, voption); } } else { h.buildClassifier(D_train); } } else { // otherwise just train on full set. Maybe better throw an exception. 
h.buildClassifier(D_full); } } // @todo, if D_train==null, assume h is already trained if (D_train.numInstances() > 0 && D_test.numInstances() > 0 && r != null) { System.out.println(r.toString()); } // predictions if (!predictions.isEmpty()) { Instances predicted = new Instances(D_test, 0); for (int i = 0; i < D_test.numInstances(); i++) { double pred[] = h.distributionForInstance(D_test.instance(i)); // Cut off any [no-longer-needed] probabilistic information from MT classifiers. if (h instanceof MultiTargetClassifier) { pred = Arrays.copyOf(pred, D_test.classIndex()); } Instance predInst = (Instance) D_test.instance(i).copy(); for (int j = 0; j < pred.length; j++) { predInst.setValue(j, Math.round(pred[j])); // ML have probabilities; MT have discrete label indices } predicted.add(predInst); } AbstractFileSaver saver = ConverterUtils.getSaverForFile(predictions); if (saver == null) { System.err.println("Failed to determine saver for '" + predictions + "', using " + ArffSaver.class.getName()); saver = new ArffSaver(); } saver.setFile(new File(predictions)); saver.setInstances(predicted); saver.writeBatch(); System.out.println("Predictions saved to: " + predictions); } } // Save model to file? if (dname != null) { dataHeader = new Instances(D_train, 0); SerializationHelper.writeAll(dname, new Object[] { h, dataHeader }); } } catch (Exception e) { e.printStackTrace(); Evaluation.printOptions(h.listOptions()); System.exit(1); } System.exit(0); } /** * IsMT - see if dataset D is multi-target (else only multi-label) * * @param D * data * @return true iff D is multi-target only (else false) */ public static boolean isMT(final Instances D) { int L = D.classIndex(); for (int j = 0; j < L; j++) { if (D.attribute(j).isNominal()) { // Classification if (D.attribute(j).numValues() > 2) { // Multi-class return true; } } else { // Regression? System.err.println("[Warning] Found a non-nominal class -- not sure how this happened?"); } } return false; } /** * EvaluateModel - Build model 'h' on 'D_train', test it on 'D_test', threshold it according to * 'top', using default verbosity option. * * @param h * a multi-dim. classifier * @param D_train * training data * @param D_test * test data * @param top * Threshold OPtion (pertains to multi-label data only) * @return Result raw prediction data with evaluation statistics included. */ public static Result evaluateModel(final MultiXClassifier h, final Instances D_train, final Instances D_test, final String top) throws Exception { return Evaluation.evaluateModel(h, D_train, D_test, top, "1"); } /** * EvaluateModel - Build model 'h' on 'D_train', test it on 'D_test', threshold it according to * 'top', verbosity 'vop'. * * @param h * a multi-dim. classifier * @param D_train * training data * @param D_test * test data * @param top * Threshold OPtion (pertains to multi-label data only) * @param vop * Verbosity OPtion (which measures do we want to calculate/output) * @return Result raw prediction data with evaluation statistics included. 
*/ public static Result evaluateModel(final MultiXClassifier h, final Instances D_train, final Instances D_test, final String top, final String vop) throws Exception { Result r = evaluateModel(h, D_train, D_test); if (h instanceof MultiTargetClassifier || isMT(D_test)) { r.setInfo("Type", "MT"); } else if (h instanceof MultiLabelClassifier) { r.setInfo("Type", "ML"); r.setInfo("Threshold", MLEvalUtils.getThreshold(r.predictions, D_train, top)); // <-- only relevant to ML (for now), but we'll put it in here in any case } r.setInfo("Verbosity", vop); r.output = Result.getStats(r, vop); return r; } /** * EvaluateModel - Assume 'h' is already built, test it on 'D_test', threshold it according to * 'top', verbosity 'vop'. * * @param h * a multi-dim. classifier * @param D_test * test data * @param tal * Threshold VALUES (not option) * @param vop * Verbosity OPtion (which measures do we want to calculate/output) * @return Result raw prediction data with evaluation statistics included. */ public static Result evaluateModel(final MultiXClassifier h, final Instances D_test, final String tal, final String vop) throws Exception { Result r = testClassifier(h, D_test); if (h instanceof MultiTargetClassifier || isMT(D_test)) { r.setInfo("Type", "MT"); } else if (h instanceof MultiLabelClassifier) { r.setInfo("Type", "ML"); } r.setInfo("Threshold", tal); r.setInfo("Verbosity", vop); r.output = Result.getStats(r, vop); return r; } /** * CVModel - Split D into train/test folds, and then train and evaluate on each one. * * @param h * a multi-output classifier * @param D * test data Instances * @param numFolds * number of folds of CV * @param top * Threshold OPtion (pertains to multi-label data only) * @return Result raw prediction data with evaluation statistics included. */ public static Result cvModel(final MultiLabelClassifier h, final Instances D, final int numFolds, final String top) throws Exception { return cvModel(h, D, numFolds, top, "1"); } /** * CVModel - Split D into train/test folds, and then train and evaluate on each one. * * @param h * a multi-output classifier * @param D * test data Instances * @param numFolds * number of folds of CV * @param top * Threshold OPtion (pertains to multi-label data only) * @param vop * Verbosity OPtion (which measures do we want to calculate/output) * @return Result raw prediction data with evaluation statistics included. */ public static Result cvModel(final MultiLabelClassifier h, final Instances D, final int numFolds, final String top, final String vop) throws Exception { Result r_[] = new Result[numFolds]; for (int i = 0; i < numFolds; i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } Instances D_train = D.trainCV(numFolds, i); Instances D_test = D.testCV(numFolds, i); if (h.getDebug()) { System.out.println(":- Fold [" + i + "/" + numFolds + "] -: " + MLUtils.getDatasetName(D) + "\tL=" + D.classIndex() + "\tD(t:T)=(" + D_train.numInstances() + ":" + D_test.numInstances() + ")\tLC(t:T)=" + Utils.roundDouble(MLUtils.labelCardinality(D_train, D.classIndex()), 2) + ":" + Utils.roundDouble(MLUtils.labelCardinality(D_test, D.classIndex()), 2) + ")"); } r_[i] = evaluateModel(h, D_train, D_test); // <-- should not run stats yet! 
} Result r = MLEvalUtils.combinePredictions(r_); if (h instanceof MultiTargetClassifier || isMT(D)) { r.setInfo("Type", "MT-CV"); } else if (h instanceof MultiLabelClassifier) { r.setInfo("Type", "ML-CV"); try { r.setInfo("Threshold", String.valueOf(Double.parseDouble(top))); } catch (Exception e) { System.err.println("[WARNING] Automatic threshold calibration not currently enabled for cross-fold validation, setting threshold = 0.5.\n"); r.setInfo("Threshold", String.valueOf(0.5)); } } r.setInfo("Verbosity", vop); r.output = Result.getStats(r, vop); // Need to reset this because of CV r.setValue("Number of training instances", D.numInstances()); r.setValue("Number of test instances", D.numInstances()); return r; } /** * EvaluateModel - Build model 'h' on 'D_train', test it on 'D_test'. Note that raw multi-label * predictions returned in Result may not have been thresholded yet. However, data statistics, * classifier info, and running times are incorporated into the Result here. * * @param h * a multi-dim. classifier * @param D_train * training data * @param D_test * test data * @return raw prediction data (no evaluation yet) */ public static Result evaluateModel(final MultiXClassifier h, final Instances D_train, final Instances D_test) throws Exception { long before = System.currentTimeMillis(); // Set test data as unlabelled data, if SemisupervisedClassifier if (h instanceof SemisupervisedClassifier) { ((SemisupervisedClassifier) h).introduceUnlabelledData(MLUtils.setLabelsMissing(new Instances(D_test))); } // Train h.buildClassifier(D_train); long after = System.currentTimeMillis(); // System.out.println(":- Classifier -: "+h.getClass().getName()+": // "+Arrays.toString(h.getOptions())); // Test long before_test = System.currentTimeMillis(); Result result = testClassifier(h, D_test); long after_test = System.currentTimeMillis(); result.setValue("Number of training instances", D_train.numInstances()); result.setValue("Number of test instances", D_test.numInstances()); result.setValue("Label cardinality (train set)", MLUtils.labelCardinality(D_train)); result.setValue("Label cardinality (test set)", MLUtils.labelCardinality(D_test)); result.setValue("Build Time", (after - before) / 1000.0); result.setValue("Test Time", (after_test - before_test) / 1000.0); result.setValue("Total Time", (after_test - before) / 1000.0); result.setInfo("Classifier", h.getClass().getName()); result.setInfo("Options", Arrays.toString(h.getOptions())); result.setInfo("Additional Info", h.toString()); result.setInfo("Dataset", MLUtils.getDatasetName(D_train)); result.setInfo("Number of labels (L)", String.valueOf(D_train.classIndex())); // result.setInfo("Maxfreq_set",MLUtils.mostCommonCombination(D_train,result.L)); String model = h.getModel(); if (model.length() > 0) { result.setModel("Model", h.getModel()); } return result; } /* * Allow threaded evaluation of the model: all instances are passed to the classifier at once and * then gathered into results; for short datasets the overhead might be significant. */ public static Result evaluateModelM(final MultiXClassifier h, final Instances D_train, final Instances D_test, final String top, final String vop) throws Exception { // Train long before = System.currentTimeMillis(); /* * if (h instanceof SemisupervisedClassifier) { // *NEW* for semi-supervised * ((SemisupervisedClassifier)h).setUnlabelledData(MLUtils.setLabelsMissing(new Instances(D_test))); * } */ h.buildClassifier(D_train); long after = System.currentTimeMillis(); // System.out.println(":- Classifier -: 
"+h.getClass().getName()+": // "+Arrays.toString(h.getOptions())); // Test long before_test = System.currentTimeMillis(); Result result = testClassifierM(h, D_test); long after_test = System.currentTimeMillis(); result.setValue("N_train", D_train.numInstances()); result.setValue("N_test", D_test.numInstances()); result.setValue("LCard_train", MLUtils.labelCardinality(D_train)); result.setValue("LCard_test", MLUtils.labelCardinality(D_test)); result.setValue("Build_time", (after - before) / 1000.0); result.setValue("Test_time", (after_test - before_test) / 1000.0); result.setValue("Total_time", (after_test - before) / 1000.0); result.setInfo("Classifier_name", h.getClass().getName()); result.setInfo("Classifier_ops", Arrays.toString(h.getOptions())); result.setInfo("Classifier_info", h.toString()); result.setInfo("Dataset_name", MLUtils.getDatasetName(D_train)); // result.setInfo("Maxfreq_set",MLUtils.mostCommonCombination(D_train,result.L)); if (h instanceof MultiTargetClassifier || isMT(D_test)) { result.setInfo("Type", "MT"); } else if (h instanceof MultiLabelClassifier) { result.setInfo("Type", "ML"); } result.setInfo("Threshold", MLEvalUtils.getThreshold(result.predictions, D_train, top)); // <-- only relevant to ML (for now), but we'll put it in here in any case result.setInfo("Verbosity", vop); result.output = Result.getStats(result, vop); return result; } /** * TestClassifier - test classifier h on D_test * * @param h * a multi-dim. classifier, ALREADY BUILT * @param D_test * test data * @return Result with raw prediction data ONLY */ public static Result testClassifier(final MultiXClassifier h, final Instances D_test) throws Exception { int L = D_test.classIndex(); Result result = new Result(D_test.numInstances(), L); if (h.getDebug()) { System.out.print(":- Evaluate "); } for (int i = 0, c = 0; i < D_test.numInstances(); i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } if (h.getDebug()) { int t = i * 50 / D_test.numInstances(); if (t > c) { System.out.print("#"); c = t; } } // No cheating allowed; clear all class information Instance x = (Instance) (D_test.instance(i)).copy(); for (int v = 0; v < D_test.classIndex(); v++) { x.setValue(v, 0.0); } // Get and store ranking double y[] = h.distributionForInstance(x); // Cut off any [no-longer-needed] probabalistic information from MT classifiers. if (h instanceof MultiTargetClassifier) { y = Arrays.copyOf(y, L); } // Store the result result.addResult(y, D_test.instance(i)); } if (h.getDebug()) { System.out.println(":-"); } return result; } /** * Test Classifier but threaded (Multiple) * * @param h * a multi-dim. 
classifier, ALREADY BUILT (threaded, implements MultiLabelClassifierThreaded) * @param D_test * test data * @return Result with raw prediction data ONLY */ public static Result testClassifierM(final MultiXClassifier h, final Instances D_test) throws Exception { int L = D_test.classIndex(); Result result = new Result(D_test.numInstances(), L); if (h.getDebug()) { System.out.print(":- Evaluate "); } if (h instanceof MultiLabelClassifierThreaded) { ((MultiLabelClassifierThreaded) h).setThreaded(true); double y[][] = ((MultiLabelClassifierThreaded) h).distributionForInstanceM(D_test); for (int i = 0; i < D_test.numInstances(); i++) { // Store the result result.addResult(y[i], D_test.instance(i)); } if (h.getDebug()) { System.out.println(":-"); } /* * if(h.getDebug()) { * * for(int i = 0; i < result.size(); i++) { * System.out.println("\t"+Arrays.toString(result.rowActual(i))+" vs "+Arrays.toString(result. * rowRanking(i))); } * * * } */ } return result; } /** * GetDataset - load a dataset, given command line options specifying an arff file, and set the * class index correctly to indicate the number of labels. * * @param options * command line options * @param T * set to 'T' if we want to load a test file * @return An Instances representing the dataset public static Instances getDataset(String * options[], char T) throws Exception { Instances D = loadDataset(options, T); * setClassesFromOptions(D,MLUtils.getDatasetOptions(D)); return D; } */ /** * GetDataset - load a dataset, given command line options specifying an arff file, and set the * class index correctly to indicate the number of labels. * * @param options * command line options * @return An Instances representing the dataset public static Instances getDataset(String * options[]) throws Exception { return getDataset(options,'t'); } */ /** * loadDataset - load a dataset, given command line option '-t' specifying an arff file. * * @param options * command line options, specifying dataset filename * @return the dataset */ public static Instances loadDataset(final String options[]) throws Exception { return loadDataset(options, 't'); } /** * loadDataset - load a dataset, given command line options specifying an arff file. * * @param options * command line options, specifying dataset filename * @param T * set to 'T' if we want to load a test file (default 't': load train or train-test file) * @return the dataset */ public static Instances loadDataset(final String options[], final char T) throws Exception { Instances D = null; String filename = Utils.getOption(T, options); // Check for filename if (filename == null || filename.isEmpty()) { throw new Exception("[Error] You did not specify a dataset!"); } // Check for existence of file File file = new File(filename); if (!file.exists()) { throw new Exception("[Error] File does not exist: " + filename); } if (file.isDirectory()) { throw new Exception("[Error] " + filename + " points to a directory!"); } try { DataSource source = new DataSource(filename); D = source.getDataSet(); } catch (Exception e) { e.printStackTrace(); throw new Exception("[Error] Failed to load Instances from file '" + filename + "'."); } return D; } /* * GetL - get number of labels (option 'C' from options 'options'). private static int getL(String * options[]) throws Exception { return (Utils.getOptionPos('C', options) >= 0) ? * Integer.parseInt(Utils.getOption('C',options)) : 0; } */ /* * SetClassesFromOptions - set the class index correctly in a dataset 'D', given command line * options 'options'. 
<br> NOTE: there is a similar function in Explorer.prepareData(D) but that * function can only take -C from the dataset options. <br> TODO: replace the call to * Explorer.prepareData(D) with this method here (use the name 'prepareData' -- it sounds better). * public static void setClassesFromOptions(Instances D, String options[]) throws Exception { try { * // get L int L = getL(options); // if negative, then invert first if ( L < 0) { L = -L; D = * F.mulan2meka(D,L); } // set L D.setClassIndex(L); } catch(Exception e) { e.printStackTrace(); * throw new Exception * ("[Error] Failed to Set Classes from options. You must supply the number of labels either in the @Relation Name of the dataset or on the command line using the option: -C <num. labels>" * ); } } */ public static void printOptions(final Enumeration e) { // Evaluation Options StringBuffer text = new StringBuffer(); text.append("\n\nEvaluation Options:\n\n"); text.append("-h\n"); text.append("\tOutput help information.\n"); text.append("-t <name of training file>\n"); text.append("\tSets training file.\n"); text.append("-T <name of test file>\n"); text.append("\tSets test file (will be used for making predictions).\n"); text.append("-predictions <name of output file for predictions>\n"); text.append("\tSets the file to store the predictions in (does not work with cross-validation).\n"); text.append("-x <number of folds>\n"); text.append("\tDo cross-validation with this many folds.\n"); text.append("-no-eval\n"); text.append("\tSkips evaluation, e.g., used when test set contains no class labels.\n"); text.append("-R\n"); text.append("\tRandomize the order of instances in the dataset.\n"); text.append("-split-percentage <percentage>\n"); text.append("\tSets the percentage for the train/test set split, e.g., 66.\n"); text.append("-split-number <number>\n"); text.append("\tSets the number of training examples, e.g., 800\n"); text.append("-i\n"); text.append("\tInvert the specified train/test split.\n"); text.append("-s <random number seed>\n"); text.append("\tSets random number seed (use with -R, for different CV or train/test splits).\n"); text.append("-threshold <threshold>\n"); text.append( "\tSets the type of thresholding; where\n\t\t'PCut1' automatically calibrates a threshold (the default);\n\t\t'PCutL' automatically calibrates one threshold for each label;\n\t\tany number, e.g. '0.5', specifies that threshold.\n"); text.append("-C <number of labels>\n"); text.append("\tSets the number of target variables (labels) to assume (indexed from the beginning).\n"); // text.append("-f <results_file>\n"); // text.append("\tSpecify a file to output results and evaluation statistics into.\n"); text.append("-d <classifier_file>\n"); text.append("\tSpecify a file to dump classifier into.\n"); text.append("-l <classifier_file>\n"); text.append("\tSpecify a file to load classifier from.\n"); text.append("-verbosity <verbosity level>\n"); text.append("\tSpecify more/less evaluation output\n"); // Multilabel Options text.append("\n\nClassifier Options:\n\n"); while (e.hasMoreElements()) { Option o = (Option) (e.nextElement()); text.append("-" + o.name() + '\n'); text.append("" + o.description() + '\n'); } System.out.println(text); } }
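A minimal sketch (not part of the library) of how the methods above compose for a single train/test run. The dataset path, the label count L = 6, and the choice of BR as classifier are assumptions; BR is assumed to satisfy the MultiXClassifier parameter type, as the library's problem-transformation methods do. The threshold/statistics lines mirror what evaluateModelM does internally.

import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.Evaluation;
import meka.core.MLEvalUtils;
import meka.core.Result;
import weka.core.Instances;

public class EvaluationSketch {
    public static void main(String[] args) throws Exception {
        // Load via the '-t' option handled by loadDataset above (hypothetical path)
        Instances D = Evaluation.loadDataset(new String[] { "-t", "data/Music.arff" });
        D.setClassIndex(6); // assumption: 6 labels, indexed from the beginning
        // Simple 67/33 train/test split
        int split = (int) (D.numInstances() * 0.67);
        Instances D_train = new Instances(D, 0, split);
        Instances D_test = new Instances(D, split, D.numInstances() - split);
        // Raw predictions, then thresholding and statistics as in evaluateModelM
        Result r = Evaluation.evaluateModel(new BR(), D_train, D_test);
        r.setInfo("Type", "ML");
        r.setInfo("Threshold", MLEvalUtils.getThreshold(r.predictions, D_train, "PCut1"));
        r.setInfo("Verbosity", "3");
        r.output = Result.getStats(r, "3");
        System.out.println(r);
    }
}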
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/FW.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.core.Attribute; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; /** * FW.java Four-class pairWise classification. Trains a multi-class base classifier for each pair of * labels -- (L*(L-1))/2 in total --, each with four possible class values: {00,01,10,11} * representing the possible combinations of relevant (1) /irrelevant (0) for the pair. Uses a * voting + threshold scheme at testing time where e.g., 01 from pair jk gives one vote to label k; * any label with votes above the threshold is considered relevant. * * @version October 2012 * @author Jesse Read (jesse@tsc.uc3m.es) */ public class FW extends ProblemTransformationMethod { private static final long serialVersionUID = -8259554419725274112L; Classifier h[][] = null; Attribute classAttribute = null; @Override public String globalInfo() { return "The Fourclass Pairwise (FW) method.\n" + "Trains a multi-class base classifier for each pair of labels -- (L*(L-1))/2 in total --, each with four possible class values: {00,01,10,11} representing the possible combinations of relevant (1) /irrelevant (0) for the pair. 
Uses a voting + threshold scheme at testing time where e.g., 01 from pair jk gives one vote to label k; any label with votes above the threshold is considered relevant."; } protected Instances convert(Instances D, final int j, final int k) throws InterruptedException { int L = D.classIndex(); D = new Instances(D); D.insertAttributeAt(this.classAttribute, 0); D.setClassIndex(0); for (int i = 0; i < D.numInstances(); i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } String c = (int) Math.round(D.instance(i).value(j + 1)) + "" + (int) Math.round(D.instance(i).value(k + 1)); D.instance(i).setClassValue(c); } for (int i = 0; i < L; i++) { D.deleteAttributeAt(1); } this.m_InstancesTemplate = new Instances(D, 0); return D; } @Override public void buildClassifier(final Instances D) throws Exception { this.testCapabilities(D); FastVector values = new FastVector(4); values.addElement("00"); values.addElement("10"); values.addElement("01"); values.addElement("11"); this.classAttribute = new Attribute("TheCLass", values); int L = D.classIndex(); this.h = new Classifier[L][L]; for (int j = 0; j < L; j++) { for (int k = j + 1; k < L; k++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } if (this.getDebug()) { System.out.print("."); } Instances D_pair = this.convert(D, j, k); this.h[j][k] = AbstractClassifier.forName(this.getClassifier().getClass().getName(), ((AbstractClassifier) this.getClassifier()).getOptions()); this.h[j][k].buildClassifier(D_pair); } if (this.getDebug()) { System.out.println(""); } } } @Override public double[] distributionForInstance(Instance x) throws Exception { int L = x.classIndex(); x = (Instance) x.copy(); x.setDataset(null); for (int i = 1; i < L; i++) { x.deleteAttributeAt(1); } x.setDataset(this.m_InstancesTemplate); double r[] = new double[L]; for (int j = 0; j < L; j++) { for (int k = j + 1; k < L; k++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } // double d[] = h[j][k].distributionForInstance(x); int c = (int) Math.round(this.h[j][k].classifyInstance(x)); if (c == 1) { r[j] += 1.0; } if (c == 2) { r[k] += 1.0; } if (c == 3) { r[j] += 1.0; r[k] += 1.0; } } } return r; } public static void main(final String args[]) { ProblemTransformationMethod.evaluation(new FW(), args); } }
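To make the vote decoding in FW's distributionForInstance concrete, here is a self-contained sketch with toy pairwise outcomes (the class name and values are hypothetical, not library code). Class indices follow the order in which buildClassifier adds the attribute values: 0="00", 1="10", 2="01", 3="11".

public class FWVoteSketch {
    public static void main(String[] args) {
        int L = 3;
        // Hypothetical predictions of the (L*(L-1))/2 pairwise models, indexed [j][k]
        int[][] c = new int[L][L];
        c[0][1] = 1; // pair (0,1) predicts "10": one vote for label 0
        c[0][2] = 3; // pair (0,2) predicts "11": one vote each for labels 0 and 2
        c[1][2] = 2; // pair (1,2) predicts "01": one vote for label 2
        double[] r = new double[L];
        for (int j = 0; j < L; j++) {
            for (int k = j + 1; k < L; k++) {
                if (c[j][k] == 1) { r[j] += 1.0; }
                if (c[j][k] == 2) { r[k] += 1.0; }
                if (c[j][k] == 3) { r[j] += 1.0; r[k] += 1.0; }
            }
        }
        System.out.println(java.util.Arrays.toString(r)); // [2.0, 0.0, 2.0]
    }
}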
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/HASEL.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.util.Arrays; import meka.core.SuperLabelUtils; import weka.classifiers.AbstractClassifier; import weka.core.Instances; import weka.core.RevisionUtils; /** * HASEL - Partitions labels into subsets based on the dataset-defined hierarchy. Note: assuming * that a <code>.</code> (fullstop/period) in the attribute names defines hierarchical branches, * e.g., <code>Europe.Spain</code>. * * @author Jesse Read * @version June 2014 */ public class HASEL extends RAkELd { /** for serialization. */ private static final long serialVersionUID = -6208388889440497988L; /** * Description to display in the GUI. * * @return the description */ @Override public String globalInfo() { return "Partitions labels into subsets based on the dataset-defined hierarchy (assuming that a '.' in the attribute names defines hierarchical branches, e.g., \"Europe.Spain\")."; } @Override public void buildClassifier(final Instances D) throws Exception { int L = D.classIndex(); int N = D.numInstances(); // Get partition from dataset hierarchy this.kMap = SuperLabelUtils.getPartitionFromDatasetHierarchy(D); this.m_M = this.kMap.length; this.m_Classifiers = AbstractClassifier.makeCopies(this.m_Classifier, this.m_M); this.m_InstancesTemplates = new Instances[this.m_M]; for (int i = 0; i < this.m_M; i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } if (this.getDebug()) { System.out.println("Building model " + (i + 1) + "/" + this.m_M + ": " + Arrays.toString(this.kMap[i])); } Instances D_i = SuperLabelUtils.makePartitionDataset(D, this.kMap[i]); this.m_Classifiers[i].buildClassifier(D_i); this.m_InstancesTemplates[i] = new Instances(D_i, 0); } } @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9117 $"); } public static void main(final String args[]) { ProblemTransformationMethod.evaluation(new HASEL(), args); } }
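The actual partition is computed by SuperLabelUtils.getPartitionFromDatasetHierarchy; the following toy sketch (hypothetical label names, plain Java) only illustrates the naming convention it relies on: labels sharing the prefix before the first '.' end up in the same subset.

import java.util.*;

public class HaselPartitionSketch {
    public static void main(String[] args) {
        String[] labels = { "Europe.Spain", "Europe.France", "Asia.Japan" };
        Map<String, List<Integer>> partition = new LinkedHashMap<>();
        for (int j = 0; j < labels.length; j++) {
            // Everything before the first '.' names the hierarchical branch
            String branch = labels[j].split("\\.")[0];
            partition.computeIfAbsent(branch, b -> new ArrayList<>()).add(j);
        }
        System.out.println(partition); // {Europe=[0, 1], Asia=[2]}
    }
}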
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/IncrementalMultiLabelClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * IncrementalMultiLabelClassifier.java * Copyright (C) 2015 University of Waikato, Hamilton, NZ */ package meka.classifiers.multilabel; import weka.classifiers.UpdateableClassifier; /** * Interface for incremental multi-label classifiers. * * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision$ */ public interface IncrementalMultiLabelClassifier extends MultiLabelClassifier, UpdateableClassifier { }
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/LC.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.util.HashMap; import java.util.Map; import meka.core.MultiLabelDrawable; import meka.core.PSUtils; import weka.core.Drawable; import weka.core.Instance; import weka.core.Instances; import weka.core.OptionHandler; import weka.core.RevisionUtils; /** * LC.java - The LC (Label Combination) aka LP (Label Powerset) Method. Treats each label * combination as a single class in a multi-class learning scheme. The set of possible values of * each class is the powerset of labels. This code was rewritten at some point. See also <i>LP</i> * from the <a href=http://mulan.sourceforge.net>MULAN</a> framework. * * @version June 2014 * @author Jesse Read */ public class LC extends ProblemTransformationMethod implements OptionHandler, MultiLabelDrawable { /** for serialization. */ private static final long serialVersionUID = -2726090581435923988L; /** * Description to display in the GUI. * * @return the description */ @Override public String globalInfo() { return "LC aka LP (Label Powerset) Method.\nTreats each label combination as a single class in a multi-class learning scheme. The set of possible values of each class is the powerset of labels.\n" + "See also LP from MULAN:\n" + "http://mulan.sourceforge.net"; } @Override public void buildClassifier(final Instances D) throws Exception { this.testCapabilities(D); int L = D.classIndex(); // Transform Instances if (this.getDebug()) { System.out.print("Transforming Instances ..."); } Instances D_ = PSUtils.LCTransformation(D, L); this.m_InstancesTemplate = new Instances(D_, 0); // Set Info ; Build Classifier this.info = "K = " + this.m_InstancesTemplate.attribute(0).numValues() + ", N = " + D_.numInstances(); if (this.getDebug()) { System.out.print("Building Classifier (" + this.info + "), ..."); } this.m_Classifier.buildClassifier(D_); if (this.getDebug()) { System.out.println("Done"); } } @Override public double[] distributionForInstance(final Instance x) throws Exception { int L = x.classIndex(); // if there is only one class (as, e.g., in some hierarchical methods) predict it if (L == 1) { return new double[] { 1.0 }; } Instance x_ = PSUtils.convertInstance(x, L, this.m_InstancesTemplate); // convertInstance(x,L); x_.setDataset(this.m_InstancesTemplate); // Get a classification double y[] = new double[x_.numClasses()]; y[(int) this.m_Classifier.classifyInstance(x_)] = 1.0; return PSUtils.convertDistribution(y, L, this.m_InstancesTemplate); } /** * Returns the type of graph representing the object. 
* * @return the type of graph representing the object (label index as key) */ @Override public Map<Integer, Integer> graphType() { Map<Integer, Integer> result; result = new HashMap<>(); if (this.getClassifier() != null) { if (this.getClassifier() instanceof Drawable) { result.put(0, ((Drawable) this.getClassifier()).graphType()); } } return result; } /** * Returns a string that describes a graph representing the object. The string should be in XMLBIF * ver. 0.3 format if the graph is a BayesNet, otherwise it should be in dotty format. * * @return the graph described by a string (label index as key) * @throws Exception * if the graph can't be computed */ @Override public Map<Integer, String> graph() throws Exception { Map<Integer, String> result; result = new HashMap<>(); if (this.getClassifier() != null) { if (this.getClassifier() instanceof Drawable) { result.put(0, ((Drawable) this.getClassifier()).graph()); } } return result; } private String info = ""; @Override public String toString() { return this.info; } @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9117 $"); } public static void main(final String args[]) { ProblemTransformationMethod.evaluation(new LC(), args); } }
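The transformation PSUtils.LCTransformation performs is easiest to see on toy data: every distinct label combination observed in training becomes one class value of a single multi-class problem. A minimal sketch with hand-made arrays (not PSUtils itself):

import java.util.*;

public class LCSketch {
    public static void main(String[] args) {
        // Toy label matrix: 4 instances, L = 3 labels
        int[][] Y = { { 1, 0, 1 }, { 0, 1, 0 }, { 1, 0, 1 }, { 1, 1, 0 } };
        // Each distinct row becomes one class value of the transformed problem
        Set<String> classes = new LinkedHashSet<>();
        for (int[] y : Y) {
            classes.add(Arrays.toString(y));
        }
        // 3 distinct combinations -> a 3-class single-label problem (K = 3)
        System.out.println(classes); // [[1, 0, 1], [0, 1, 0], [1, 1, 0]]
    }
}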
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/LabelTransformationClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import weka.classifiers.Classifier; import weka.classifiers.SingleClassifierEnhancer; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Remove; /** * Abstract label transformation classifier; all classes that transform the labels should inherit * from this classifier. The general algorithm is a transformation of the labels into a new set of * labels given a specific transformation method. A new multi-label classifier is then trained on * the new set of latent labels. The prediction works the other way around: the multi-label * classifier predicts the set of latent labels, and a specific transformation method transforms the * predicted latent labels back into the original label dimension. * * Implementing classes need to implement three methods: first, the transformation method used for * the training process; second, a transformation method to create a new instance as input for the * prediction of the multi-label classifier; and third, a method transforming the predicted labels * back into the original dimension. * * @author Joerg Wicker (jw@joerg-wicker.org) */ public abstract class LabelTransformationClassifier extends SingleClassifierEnhancer implements MultiLabelClassifier { /** for serialization. */ private static final long serialVersionUID = 1L; /** * The method to transform the labels into another set of latent labels; typically a compression * method is used, e.g., Boolean matrix decomposition in the case of MLC-BMaD, or matrix * multiplication based on SVD for PLST. * * @param D * the instances to transform into new instances with transformed labels. The Instances * consist of features and original labels. * @return The resulting instances. Instances consist of features and transformed labels. */ public abstract Instances transformLabels(Instances D) throws Exception; /** * Transforms the instance in the prediction process before it is given to the internal multi-label or * multi-target classifier. The instance is passed having the original set of labels; these must be * replaced with the transformed labels (attributes) so that the internal classifier can predict * them. * * @param x * The instance to transform. Consists of features and labels. * @return The transformed instance. Consists of features and transformed labels. */ public abstract Instance transformInstance(Instance x) throws Exception; /** * Transforms the predictions of the internal classifier back to the original labels. * * @param y * The predictions that should be transformed back. The array consists only of the * predictions as they are returned from the internal classifier. * @return The transformed predictions. 
*/ public abstract double[] transformPredictionsBack(double[] y); /** * Default constructor using BR. */ protected LabelTransformationClassifier() { this.m_Classifier = this.getDefaultClassifier(); } protected Classifier getDefaultClassifier() { return new BR(); } @Override protected String defaultClassifierString() { return this.getDefaultClassifier().getClass().getName(); } @Override public void setClassifier(final Classifier newClassifier) { if (newClassifier instanceof MultiLabelClassifier) { super.setClassifier(newClassifier); } else { System.err.println("Base classifier must implement " + MultiLabelClassifier.class.getName() + ", provided: " + newClassifier.getClass().getName()); } } /** * Returns a new set of instances either only with the labels (labels = true) or only the features * (labels = false) * * @param inst * The input instances. * @param labels * Return labels (true) or features (false) */ protected Instances extractPart(final Instances inst, final boolean labels) throws Exception { // TODO Maybe this already exists somewhere in Meka? Remove remove = new Remove(); remove.setAttributeIndices("first-" + (inst.classIndex())); remove.setInvertSelection(labels); remove.setInputFormat(inst); return Filter.useFilter(inst, remove); } @Override public void buildClassifier(final Instances D) throws Exception { this.testCapabilities(D); int L = D.classIndex(); if (this.getDebug()) { System.out.print("transforming labels with size: " + L + " baseModel: " + this.m_Classifier.getClass().getName() + " "); } Instances transformed_D = this.transformLabels(D); this.m_Classifier.buildClassifier(transformed_D); } @Override public double[] distributionForInstance(final Instance x) throws Exception { Instance x_transformed = this.transformInstance(x); double[] y_transformed = this.m_Classifier.distributionForInstance(x_transformed); double[] y = this.transformPredictionsBack(y_transformed); return y; } @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9117 $"); } /** * TestCapabilities. Make sure the training data is suitable. * * @param D * the data */ public void testCapabilities(final Instances D) throws Exception { // get the classifier's capabilities, enable all class attributes and do the usual test Capabilities cap = this.getCapabilities(); cap.enableAllClasses(); // getCapabilities().testWithFail(D); // get the capabilities again, test class attributes individually int L = D.classIndex(); for (int j = 0; j < L; j++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } Attribute c = D.attribute(j); cap.testWithFail(c, true); } } }
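For orientation, a deliberately trivial subclass sketch: the identity transformation, which passes labels through unchanged. It only illustrates which of the three abstract methods does what; real subclasses such as MLCBMaD or Maniac compress the label space here. The class name is hypothetical, and getModel is included on the assumption that the MultiLabelClassifier interface requires it (as MLCBMaD's override below suggests).

import weka.core.Instance;
import weka.core.Instances;

public class IdentityTransformation extends LabelTransformationClassifier {
    private static final long serialVersionUID = 1L;

    @Override
    public Instances transformLabels(Instances D) throws Exception {
        return new Instances(D); // no compression: latent labels == original labels
    }

    @Override
    public Instance transformInstance(Instance x) throws Exception {
        return x; // the internal classifier sees the instance unchanged
    }

    @Override
    public double[] transformPredictionsBack(double[] y) {
        return y; // latent predictions are already in the original label space
    }

    @Override
    public String getModel() {
        return ""; // no model description, mirroring MLCBMaD below
    }
}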
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/MCC.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.util.ArrayList; import java.util.Arrays; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.Vector; import meka.core.A; import meka.core.CCUtils; import meka.core.OptionUtils; import meka.core.Result; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; /** * MCC.java - CC with Monte Carlo optimisation. * * Note inference is now a bit slower than reported in the paper, <br> * Jesse Read, Luca Martino, David Luengo. <i>Efficient Monte Carlo Optimization for * Multi-dimensional Classifier Chains</i>. http://arxiv.org/abs/1211.2190. 2012 <br> * There we used a faster implementation, full of ugly hacks, but it got broken when I updated * CC.java.<br> * This version extends CC, and thus is a bit cleaner, but for some reason inference is quite a bit slower * than expected with high m_Iy. * * TODO Option for hold-out set, instead of training and testing on training data (internally). * * @see meka.classifiers.multilabel.CC * @author Jesse Read * @version March 2015 */ public class MCC extends CC implements TechnicalInformationHandler, MultiTargetCapable { private static final long serialVersionUID = 5085402586815030939L; protected int m_Is = 0; protected int m_Iy = 10; protected String m_Payoff = "Exact match"; /** * Payoff - Return a default score of h evaluated on D. * * @param h * a classifier * @param D * a dataset */ public double payoff(final CC h, final Instances D) throws Exception { Result r = Evaluation.testClassifier(h, D); // assume multi-label for now r.setInfo("Type", "ML"); r.setInfo("Threshold", "0.5"); r.setInfo("Verbosity", "7"); r.output = Result.getStats(r, "7"); return (Double) r.getMeasurement(this.m_Payoff); } @Override public void buildClassifier(final Instances D) throws Exception { this.testCapabilities(D); // Variables int L = D.classIndex(); int N = D.numInstances(); int d = D.numAttributes() - L; this.m_R = new Random(this.m_S); this.prepareChain(L); int s[] = this.retrieveChain(); if (this.getDebug()) { System.out.println("s_[0] = " + Arrays.toString(s)); } // If we want to optimize the chain space ... if (this.m_Is > 0) { // Make CC CC h = CCUtils.buildCC(s, D, this.m_Classifier); if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } if (this.getDebug()) { System.out.println("Optimising s ... 
(" + this.m_Is + " iterations):"); } double w = this.payoff(h, new Instances(D)); if (this.getDebug()) { System.out.println("h_{t=" + 0 + "} := " + Arrays.toString(s)); // +"; w = "+w); } for (int t = 0; t < this.m_Is; t++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } // propose a chain s' by swapping two elements in s int s_[] = Arrays.copyOf(A.swap(s, this.m_R), s.length); // build h' CC h_ = CCUtils.buildCC(s_, D, this.m_Classifier); // rate h' double w_ = this.payoff(h_, new Instances(D)); // accept h' over h ? if (w_ > w) { w = w_; s = s_; h = h_; if (this.getDebug()) { System.out.println("h_{t=" + (t + 1) + "} := " + Arrays.toString(s)); // +"; w = "+w); // if (getDebug()) System.out.print("& "+Utils.doubleToString(likelihood(h_,new // Instances(D),1),8,2)); // if (getDebug()) System.out.print("& "+Utils.doubleToString(likelihood(h_,new // Instances(D),2),8,2)); // if (getDebug()) System.out.println("& "+Utils.doubleToString(likelihood(h_,new // Instances(D),5),8,2)); } } } } if (this.getDebug()) { System.out.println("---"); } this.prepareChain(s); super.buildClassifier(D); } @Override public double[] distributionForInstance(final Instance x) throws Exception { // T = 0 double y[] = super.distributionForInstance(x); // T > 0 if (this.m_Iy > 0) { // double yT[] = CCUtils.RandomSearchaa(this,x,m_Iy,m_R,y0); double w = A.product(this.probabilityForInstance(x, y)); // p(y|x) Instance t_[] = this.getTransformTemplates(x); // System.out.println("----"); // System.out.println("p0("+Arrays.toString(y)+") = "+Arrays.toString(h.getConfidences())+", w="+w); for (int t = 0; t < this.m_Iy; t++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } double y_[] = this.sampleForInstanceFast(t_, this.m_R); // propose y' by sampling i.i.d. // double y_[] = this.sampleForInstance(x,m_R); // propose y' by sampling i.i.d. // double p_[] = h.getConfidences(); // double w_ = A.product(this.getConfidences()); // rate y' as w' --- TODO allow for command-line option // System.out.println("p("+Arrays.toString(y_)+") = "+Arrays.toString(p_)+", w="+w_); if (w_ > w) { // accept ? 
if (this.getDebug()) { System.out.println("y' = " + Arrays.toString(y_) + ", :" + w_); } w = w_; // y = y_; y = Arrays.copyOf(y_, y_.length); // System.out.println("* ACCEPT *"); } } } return y; } @Override public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option("\t" + this.chainIterationsTipText() + "\n\tdefault: 0", "Is", 1, "-Is <value>")); result.addElement(new Option("\t" + this.inferenceIterationsTipText() + "\n\tdefault: 10", "Iy", 1, "-Iy <value>")); result.addElement(new Option("\t" + this.payoffTipText() + "\n\tdefault: Exact match", "P", 1, "-P <value>")); OptionUtils.add(result, super.listOptions()); return OptionUtils.toEnumeration(result); } @Override public void setOptions(final String[] options) throws Exception { this.setChainIterations(OptionUtils.parse(options, "Is", 0)); this.setInferenceIterations(OptionUtils.parse(options, "Iy", 10)); this.setPayoff(OptionUtils.parse(options, 'P', "Exact match")); super.setOptions(options); } @Override public String[] getOptions() { List<String> result = new ArrayList<>(); OptionUtils.add(result, "Is", this.getChainIterations()); OptionUtils.add(result, "Iy", this.getInferenceIterations()); OptionUtils.add(result, 'P', this.getPayoff()); OptionUtils.add(result, super.getOptions()); return OptionUtils.toArray(result); } /** Set the inference iterations */ public void setInferenceIterations(final int iy) { this.m_Iy = iy; } /** Get the inference iterations */ public int getInferenceIterations() { return this.m_Iy; } public String inferenceIterationsTipText() { return "The number of iterations to search the output space at test time."; } /** Set the iterations of s (chain order) */ public void setChainIterations(final int is) { this.m_Is = is; } /** Get the iterations of s (chain order) */ public int getChainIterations() { return this.m_Is; } public String chainIterationsTipText() { return "The number of iterations to search the chain space at train time."; } /** Set the payoff function */ public void setPayoff(final String p) { this.m_Payoff = p; } /** Get the payoff function */ public String getPayoff() { return this.m_Payoff; } public String payoffTipText() { return "Sets the payoff function. Any of those listed in regular evaluation output will do (e.g., 'Exact match')."; } @Override public String globalInfo() { return "Classifier Chains with Monte Carlo optimization. " + "For more information see:\n" + this.getTechnicalInformation().toString(); } @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Jesse Read and Luca Martino and David Luengo"); result.setValue(Field.TITLE, "Efficient Monte Carlo Optimization for Multi-label Classifier Chains"); result.setValue(Field.BOOKTITLE, "ICASSP'13: International Conference on Acoustics, Speech, and Signal Processing"); result.setValue(Field.YEAR, "2013"); additional = new TechnicalInformation(Type.ARTICLE); additional.setValue(Field.AUTHOR, "Jesse Read and Luca Martino and David Luengo"); additional.setValue(Field.TITLE, "Efficient Monte Carlo Optimization for Multi-dimensional Classifier Chains"); additional.setValue(Field.JOURNAL, "Elsevier Pattern Recognition"); additional.setValue(Field.YEAR, "2013"); result.add(additional); return result; } public static void main(final String args[]) { ProblemTransformationMethod.evaluation(new MCC(), args); } }
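A hypothetical command-line style invocation wiring the options defined above: -Is searches chain orders at train time, -Iy draws Monte Carlo samples at test time, and -P picks the payoff measure. The dataset path and the -W base classifier are assumptions, not part of the source.

public class MCCSketch {
    public static void main(String[] args) {
        ProblemTransformationMethod.evaluation(new MCC(), new String[] {
            "-t", "data/Music.arff",               // hypothetical dataset path
            "-Is", "50",                           // 50 proposed chain orders at train time
            "-Iy", "100",                          // 100 Monte Carlo samples at test time
            "-P", "Exact match",                   // payoff used to rate candidate chains
            "-W", "weka.classifiers.functions.SMO" // hypothetical base classifier
        });
    }
}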
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/MLCBMaD.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.util.ArrayList; import java.util.Enumeration; import java.util.List; import java.util.Vector; import org.kramerlab.bmad.algorithms.BooleanMatrixDecomposition; import org.kramerlab.bmad.general.Tuple; import org.kramerlab.bmad.matrix.BooleanMatrix; import meka.core.OptionUtils; import weka.core.Instance; import weka.core.Instances; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; /** * MLC-BMaD - Multi-Label Classification using Boolean Matrix Decomposition. Transforms the labels * using a Boolean matrix decomposition, the first resulting matrix is used as latent labels and a * classifier is trained to predict them. The second matrix is used in a multiplication to * decompress the predicted latent labels. <br> * See: J&ouml;rg Wicker, Bernhard Pfahringer, Stefan Kramer. <i>Multi-label Classification Using * Boolean Matrix Decomposition</i>. Proceedings of the 27th Annual ACM Symposium on Applied * Computing, pp. 179–186, ACM, 2012. * * @author Joerg Wicker (jw@joerg-wicker.org) */ public class MLCBMaD extends LabelTransformationClassifier implements TechnicalInformationHandler { protected static final long serialVersionUID = 585507197229071545L; /** * The upper matrix. Decomposition is done such that Y=Y'*M, this is M. */ protected Instances uppermatrix = null; /** * The compressed matrix. Decomposition is done such that Y=Y'*M, this is Y'. */ protected Instances compressedMatrix = null; /** * The size of the compressed matrix, i.e., the number of columns of Y'. */ protected int size = this.getDefaultSize(); /** * The threshold t of the decomposition process; see the paper for details. Sets the minimum * frequency to be considered a frequent co-occurrence. Between 0 (all are frequent) and 1 (must be in * all rows to be frequent). */ protected double threshold = this.getDefaultThreshold(); /** * Default threshold = 0.5, has to be in at least half of the rows. * * @return the default threshold. */ protected double getDefaultThreshold() { return 0.5; } /** * Default size = 20, seems to be a good choice for most data sets. * * @return the default size */ protected int getDefaultSize() { return 20; } /** * Returns the size of the compressed labels. * * @return The size of the compressed labels, i.e., the number of columns. */ public int getSize() { return this.size; } /** * Sets the size of the compressed labels. * * @param size * The size of the compressed labels, i.e., the number of columns. */ public void setSize(final int size) { this.size = size; } /** * The tooltip for the size. */ public String sizeTipText() { return "Size of the compressed matrix. Should be \n" + "less than the number of labels and more than 1."; } /** * Getter for the threshold for Boolean matrix decomposition. 
* * @return the threshold for the Boolean matrix decomposition. */ public double getThreshold() { return this.threshold; } /** * Sets the threshold for the Boolean matrix decomposition. * * @param threshold * the threshold for the Boolean matrix decomposition. */ public void setThreshold(final double threshold) { this.threshold = threshold; } /** * Tooltip for the threshold. * * @return Description of the threshold for Boolean matrix decomposition. */ public String thresholdTipText() { return "Threshold for the matrix decomposition, what is considered frequent." + "\n Between 0 and 1."; } /** * Returns the global information of the classifier. * * @return Global information of the classifier */ public String globalInfo() { return "MLC-BMaD - Multi-Label Classification using Boolean Matrix Decomposition. Transforms " + "the labels using a Boolean matrix decomposition, the first resulting matrix is " + "used as latent labels and a classifier is trained to predict them. The second matrix is " + "used in a multiplication to decompress the predicted latent labels.\n" + "For more information see:\n" + this.getTechnicalInformation(); } /** * Returns an enumeration of the options. * * @return Enumeration of the options. */ @Override public Enumeration listOptions() { Vector newVector = new Vector(); OptionUtils.addOption(newVector, this.sizeTipText(), "" + this.getDefaultSize(), "size"); OptionUtils.addOption(newVector, this.thresholdTipText(), "" + this.getDefaultThreshold(), "threshold"); OptionUtils.add(newVector, super.listOptions()); return OptionUtils.toEnumeration(newVector); } /** * Returns an array with the options of the classifier. * * @return Array of options. */ @Override public String[] getOptions() { List<String> result = new ArrayList<>(); OptionUtils.add(result, "size", this.getSize()); OptionUtils.add(result, "threshold", this.getThreshold()); OptionUtils.add(result, super.getOptions()); return OptionUtils.toArray(result); } /** * Sets the options to the given values in the array. * * @param options * The options to be set. 
*/ @Override public void setOptions(final String[] options) throws Exception { this.setSize(OptionUtils.parse(options, "size", this.getDefaultSize())); this.setThreshold(OptionUtils.parse(options, "threshold", this.getDefaultThreshold())); super.setOptions(options); } @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "J\"org Wicker, Bernhard Pfahringer, Stefan Kramer"); result.setValue(Field.TITLE, "Multi-Label Classification using Boolean Matrix Decomposition"); result.setValue(Field.BOOKTITLE, "Proceedings of the 27th Annual ACM Symposium on Applied Computing"); result.setValue(Field.YEAR, "2012"); result.setValue(Field.PAGES, "179-186"); return result; } @Override public Instance transformInstance(final Instance x) throws Exception { Instances tmpInst = new Instances(x.dataset()); tmpInst.delete(); tmpInst.add(x); Instances features = this.extractPart(tmpInst, false); Instances pseudoLabels = new Instances(this.compressedMatrix); Instance tmpin = pseudoLabels.instance(0); pseudoLabels.delete(); pseudoLabels.add(tmpin); for (int i = 0; i < pseudoLabels.classIndex(); i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } pseudoLabels.instance(0).setMissing(i); } Instances newDataSet = Instances.mergeInstances(pseudoLabels, features); newDataSet.setClassIndex(this.size); return newDataSet.instance(0); } @Override public Instances transformLabels(final Instances D) throws Exception { Instances features = this.extractPart(D, false); Instances labels = this.extractPart(D, true); BooleanMatrixDecomposition bmd = BooleanMatrixDecomposition.BEST_CONFIGURED(this.threshold); Tuple<Instances, Instances> res = bmd.decompose(labels, this.size); this.compressedMatrix = res._1; this.uppermatrix = res._2; Instances result = Instances.mergeInstances(this.compressedMatrix, features); result.setClassIndex(this.getSize()); return result; } @Override public double[] transformPredictionsBack(final double[] y) { byte[] yByteArray = new byte[y.length]; for (int i = 0; i < y.length; i++) { yByteArray[i] = y[i] >= 0.5 ? BooleanMatrix.TRUE : BooleanMatrix.FALSE; } BooleanMatrix yMatrix = new BooleanMatrix(new byte[][] { yByteArray }); BooleanMatrix reconstruction = yMatrix.booleanProduct(new BooleanMatrix(this.uppermatrix)); double[] result = new double[reconstruction.getWidth()]; for (int i = 0; i < reconstruction.getWidth(); i++) { result[i] = reconstruction.apply(0, i) == BooleanMatrix.TRUE ? 1.0 : 0.0; } return result; } @Override public String getModel() { return ""; } @Override public String toString() { return this.getModel(); } /** * Main method for testing. * * @param args * - Arguments passed from the command line **/ public static void main(final String[] args) throws Exception { AbstractMultiLabelClassifier.evaluation(new MLCBMaD(), args); } }
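The decompression step in transformPredictionsBack above is an ordinary Boolean matrix product, Y = Y' * M. A toy illustration with hand-picked matrices (plain arrays standing in for the BooleanMatrix class):

public class BmadSketch {
    public static void main(String[] args) {
        int[] yLatent = { 1, 0 };                 // 2 predicted latent labels (Y')
        int[][] M = { { 1, 1, 0 }, { 0, 1, 1 } }; // upper matrix, latent x original (2 x 3)
        int L = M[0].length;
        double[] y = new double[L];
        for (int i = 0; i < L; i++) {
            for (int k = 0; k < yLatent.length; k++) {
                if (yLatent[k] == 1 && M[k][i] == 1) {
                    y[i] = 1.0; // Boolean product: OR over AND, not an arithmetic sum
                }
            }
        }
        System.out.println(java.util.Arrays.toString(y)); // [1.0, 1.0, 0.0]
    }
}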
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/MULAN.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.io.BufferedWriter; import java.io.File; import java.io.FileWriter; import java.util.ArrayList; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.Vector; import meka.core.F; import meka.core.MLUtils; import meka.core.OptionUtils; import mulan.classifier.MultiLabelLearner; import mulan.classifier.lazy.IBLR_ML; import mulan.classifier.lazy.MLkNN; import mulan.classifier.meta.HOMER; import mulan.classifier.meta.HierarchyBuilder; import mulan.classifier.meta.RAkEL; import mulan.classifier.neural.BPMLL; import mulan.classifier.transformation.BinaryRelevance; import mulan.classifier.transformation.CalibratedLabelRanking; import mulan.classifier.transformation.ClassifierChain; import mulan.classifier.transformation.LabelPowerset; import mulan.data.MultiLabelInstances; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; /** * MULAN.java - A wrapper for MULAN classifiers <a href=http://mulan.sourceforge.net>MULAN</a>. <br> * The classifiers are instantiated with suitable parameters. * * @version June 2014 * @author Jesse Read (jmr30@cs.waikato.ac.nz) */ public class MULAN extends ProblemTransformationMethod { /** for serialization. */ private static final long serialVersionUID = 1720289364996202350L; protected MultiLabelLearner m_MULAN = null; // HOMER.Random.3.LabelPowerset private String MethodSelection = "{BR, LP, CLR, RAkELn, MLkNN, IBLR_ML, BPMLL, HOMER.type.numPartitions.pt}\n" + "\twhere n=1 is with (m=10,k=L/2), n=2 is with (m=L*2,k=3); and\n\twhere type \\in {BalancedClustering,Clustering,Random}, pt \\in {BinaryRelevance,LabelPowerset,ClassifierChain, numPartitions \\in [1,2,3,4,...]}."; protected String m_MethodString = "RAkEL1"; /** * Description to display in the GUI. * * @return the description */ @Override public String globalInfo() { return "A wrapper for MULAN classifiers.\n" + "http://mulan.sourceforge.net"; } /** * Set a prescribed MULAN classifier configuration. * * @param m * key; one of: BR, LP, CLR, RAkEL1, RAkEL2, MLkNN, IBLR_ML, BPMLL (you can add more in the * MULAN.java code) */ public void setMethod(final String m) { this.m_MethodString = m; } public String getMethod() { return this.m_MethodString; } public String methodTipText() { return "Any of " + this.MethodSelection + ". 
If you wish to add more, you will have to add code to the buildClassifier(Instances) function in MULAN.java"; } @Override public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option("\tMethod Name\n\tdefault: RAkEL1", "S", 1, "-S <value>")); OptionUtils.add(result, super.listOptions()); return OptionUtils.toEnumeration(result); } @Override public void setOptions(final String[] options) throws Exception { this.setMethod(OptionUtils.parse(options, 'S', "RAkEL1")); super.setOptions(options); } @Override public String[] getOptions() { List<String> result = new ArrayList<>(); OptionUtils.add(result, 'S', this.getMethod()); OptionUtils.add(result, super.getOptions()); return OptionUtils.toArray(result); } @Override public void buildClassifier(final Instances instances) throws Exception { this.testCapabilities(instances); long before = System.currentTimeMillis(); if (this.getDebug()) { System.err.print(" moving target attributes to the beginning ... "); } Random r = instances.getRandomNumberGenerator(0); String name = "temp_" + MLUtils.getDatasetName(instances) + "_" + r.nextLong() + ".arff"; System.err.println("Using temporary file: " + name); int L = instances.classIndex(); // rename attributes, because MULAN doesn't deal well with hyphens etc. for (int i = L; i < instances.numAttributes(); i++) { instances.renameAttribute(i, "a_" + i); } BufferedWriter writer = new BufferedWriter(new FileWriter(name)); this.m_InstancesTemplate = F.meka2mulan(new Instances(instances), L); writer.write(this.m_InstancesTemplate.toString()); writer.flush(); writer.close(); MultiLabelInstances train = new MultiLabelInstances(name, L); try { new File(name).delete(); } catch (Exception e) { System.err.println("[Error] Failed to delete temporary file: " + name + ". You may want to delete it manually."); } if (this.getDebug()) { System.out.println(" done "); } long after = System.currentTimeMillis(); System.err.println("[Note] Discount " + ((after - before) / 1000.0) + " seconds from this build time"); this.m_InstancesTemplate = new Instances(train.getDataSet(), 0); System.out.println("CLASSIFIER " + this.m_Classifier); // m_InstancesTemplate.delete(); if (this.m_MethodString.equals("BR")) { this.m_MULAN = new BinaryRelevance(this.m_Classifier); } else if (this.m_MethodString.equals("LP")) { this.m_MULAN = new LabelPowerset(this.m_Classifier); } else if (this.m_MethodString.equals("CLR")) { this.m_MULAN = new CalibratedLabelRanking(this.m_Classifier); } else if (this.m_MethodString.equals("RAkEL1")) { this.m_MULAN = new RAkEL(new LabelPowerset(this.m_Classifier), 10, L / 2); System.out.println("m=10,k=" + (L / 2)); } else if (this.m_MethodString.equals("RAkEL2")) { this.m_MULAN = new RAkEL(new LabelPowerset(this.m_Classifier), 2 * L, 3); System.out.println("m=" + (L * 2) + ",k=3"); } else if (this.m_MethodString.equals("MLkNN")) { this.m_MULAN = new MLkNN(10, 1.0); } else if (this.m_MethodString.equals("IBLR_ML")) { this.m_MULAN = new IBLR_ML(10); } else if (this.m_MethodString.equals("BPMLL")) { // BPMLL is run with the number of hidden units equal to 20% of the input units. 
this.m_MULAN = new BPMLL(); ((BPMLL) this.m_MULAN).setLearningRate(0.01); ((BPMLL) this.m_MULAN).setHiddenLayers(new int[] { 30 }); ((BPMLL) this.m_MULAN).setTrainingEpochs(100); } else if (this.m_MethodString.startsWith("HOMER")) { // Class m = Class.forName("HierarchyBuilder.Method.Random"); // Class w = Class.forName("mulan.classifier.LabelPowerset"); // Constructor c = new h.getConstructor(new Class[]{MultiLabelLearner.class, Integer.TYPE, // HierarchyBuilder.Method.class}); // Object obj = h.newInstance(); String ops[] = this.m_MethodString.split("\\."); // number of clusters int n = 3; try { n = Integer.parseInt(ops[2]); } catch (Exception e) { System.err.println("[Warning] Could not parse number of clusters, using default: " + n); } // learner // @TODO use reflection here MultiLabelLearner mll = new LabelPowerset(this.m_Classifier); if (ops[3].equalsIgnoreCase("BinaryRelevance")) { mll = new BinaryRelevance(this.m_Classifier); } else if (ops[3].equalsIgnoreCase("ClassifierChain")) { mll = new ClassifierChain(this.m_Classifier); } else if (ops[3].equalsIgnoreCase("LabelPowerset")) { // already set } else { System.err.println("[Warning] Did not recognise classifier type String, using default: LabelPowerset"); } if (this.getDebug()) { System.out.println("HOMER(" + mll + "," + n + "," + ops[1] + ")"); } this.m_MULAN = new HOMER(mll, n, HierarchyBuilder.Method.valueOf(ops[1])); } else { throw new Exception("Could not find MULAN Classifier by that name: " + this.m_MethodString); } this.m_MULAN.setDebug(this.getDebug()); this.m_MULAN.build(train); } @Override public double[] distributionForInstance(final Instance instance) throws Exception { int L = instance.classIndex(); Instance x = F.meka2mulan((Instance) instance.copy(), L); x.setDataset(this.m_InstancesTemplate); double y[] = this.m_MULAN.makePrediction(x).getConfidences(); return y; } @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9117 $"); } public static void main(final String args[]) { ProblemTransformationMethod.evaluation(new MULAN(), args); } }
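A hypothetical invocation of the wrapper: -S selects one of the prescribed configurations parsed in buildClassifier above (here HOMER with 3 random clusters and LabelPowerset leaves). The dataset path and the -W base classifier are assumptions, not part of the source.

public class MulanSketch {
    public static void main(String[] args) {
        ProblemTransformationMethod.evaluation(new MULAN(), new String[] {
            "-t", "data/Enron.arff",                 // hypothetical dataset path
            "-S", "HOMER.Random.3.LabelPowerset",    // type.numPartitions.pt, as documented above
            "-W", "weka.classifiers.bayes.NaiveBayes" // hypothetical base classifier
        });
    }
}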
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/MajorityLabelset.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.util.HashMap; import meka.classifiers.multitarget.MultiTargetClassifier; import meka.core.MLUtils; import weka.core.Instance; import weka.core.Instances; /** * MajorityLabelset.java - The simplest multi-label classifier. * <p> * Predicts the most common labelset from the training data for <i>all</i> test instances. * </p> * * @author Jesse Read * @version September 2015 */ public class MajorityLabelset extends AbstractMultiLabelClassifier implements MultiTargetClassifier { /** for serialization. */ private static final long serialVersionUID = -5932291001079843869L; protected double prediction[] = null; protected HashMap<String, Double> classFreqs = new HashMap<>(); protected double maxValue = 0.0; /** * Description to display in the GUI. * * @return the description */ @Override public String globalInfo() { return "Majority Labelset Classifier: Always predict the combination of labels which occurs most frequently in the training set."; } protected void updateCount(final Instance x, final int L) throws InterruptedException { String y = MLUtils.toBitString(x, L); if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } if (this.classFreqs.containsKey(y)) { double freq = this.classFreqs.get(y) + x.weight(); this.classFreqs.put(y, freq); if (this.maxValue < freq) { this.maxValue = freq; this.prediction = MLUtils.fromBitString(y); } } else { this.classFreqs.put(y, x.weight()); } } @Override public void buildClassifier(final Instances D) throws Exception { this.testCapabilities(D); int L = D.classIndex(); this.prediction = new double[L]; for (int i = 0; i < D.numInstances(); i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } this.updateCount(D.instance(i), L); } } @Override public double[] distributionForInstance(final Instance test) throws Exception { return this.prediction; } public static void main(final String args[]) { AbstractMultiLabelClassifier.evaluation(new MajorityLabelset(), args); } }
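The weight-aware counting in updateCount, reduced to a toy sketch with plain bit strings standing in for MLUtils.toBitString (values here are made up for illustration):

import java.util.HashMap;

public class MajoritySketch {
    public static void main(String[] args) {
        String[] labelsets = { "101", "010", "101", "110" };
        double[] weights = { 1.0, 1.0, 1.0, 2.0 };
        HashMap<String, Double> freqs = new HashMap<>();
        String best = null;
        double max = 0.0;
        for (int i = 0; i < labelsets.length; i++) {
            // Accumulate the instance weight for this labelset
            double f = freqs.merge(labelsets[i], weights[i], Double::sum);
            if (f > max) { max = f; best = labelsets[i]; }
        }
        // "101" and "110" both reach weight 2.0; "101" got there first and is kept,
        // mirroring the strict '<' comparison in updateCount above
        System.out.println(best); // 101
    }
}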
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/Maniac.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.util.ArrayList; import java.util.Enumeration; import java.util.List; import java.util.Vector; import org.kramerlab.autoencoder.math.matrix.Mat; import org.kramerlab.autoencoder.neuralnet.autoencoder.Autoencoder; import meka.classifiers.multitarget.CR; import meka.core.OptionUtils; import meka.core.Result; import weka.classifiers.Classifier; import weka.classifiers.functions.LinearRegression; import weka.core.Instance; import weka.core.Instances; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.filters.Filter; import weka.filters.unsupervised.instance.SparseToNonSparse; /** * Maniac - Multi-lAbel classificatioN using AutoenCoders. Transforms the labels using layers of * autoencoders. <br> * See: J&ouml;rg Wicker, Andrey Tyukin, Stefan Kramer. <i>A Nonlinear Label Compression and * Transformation Method for Multi-Label Classification using Autoencoders</i>. The 20th Pacific * Asia Conference on Knowledge Discovery and Data Mining (PAKDD), 2016. * * @author Joerg Wicker (jw@joerg-wicker.org) */ public class Maniac extends LabelTransformationClassifier implements TechnicalInformationHandler { protected static final long serialVersionUID = 585507197229071545L; /** * The autoencoder that is trained, and used to compress and decompress the labels. */ private Autoencoder ae; /** * Template of the compressed labels, used in the prediction. */ private Instances compressedTemplateInst; /** * Flag to tell if the number of autoencoders should be optimized. Multiple layers are tested and * the best setting is chosen. The maximum number of autoencoders can be set via the * numberAutoencoders variable. */ protected boolean optimizeAE = this.getDefaultOptimizeAE(); /** * The compression factor, i.e., the compression from one layer to the next. 0.85 means, for example, * that a layer with 100 labels is followed by a layer with 85. */ protected double compression = this.getDefaultCompression(); /** * Sets the autoencoder (for using a trained one, e.g. done in optimization). * * @param ae * The autoencoder */ protected void setAE(final Autoencoder ae) { this.ae = ae; } /** * Returns the autoencoder of the class (used for compression of labels). * * @return the autoencoder that is used for compression. */ private Autoencoder getAE() { return this.ae; } /** * Number of autoencoders to train, i.e. number of hidden layers + 1. Note that this can be also * used as the number of autoencoders to use in the optimization search, autoencoders will be added * until this number is reached and then the best configuration in terms of number of layers is * selected. 
*/ protected int numberAutoencoders = this.getDefaultNumberAutoencoders(); /** * Returns the default number of autoencoders, set to 4, which seems to be a good choice for most * problems. */ protected int getDefaultNumberAutoencoders() { return 4; } /** * Get the <code>numberAutoencoders</code> value. * * @return an <code>int</code> value */ public final int getNumberAutoencoders() { return this.numberAutoencoders; } /** * Set the <code>numberAutoencoders</code> value. * * @param numberAutoencoders * The new NumberAutoencoders value. */ public final void setNumberAutoencoders(final int numberAutoencoders) { this.numberAutoencoders = numberAutoencoders; } /** * Gives the tooltip for numberAutoencoders. * * @return the tooltip for numberAutoencoders. */ public String numberAutoencodersToolTip() { return "Number of autoencoders, i.e. number of hidden layers " + "+1. Note that this can also be used as the number of " + "autoencoders to use in the optimization search, " + "autoencoders will be added until this number is reached " + "and then the best configuration in terms of number of layers is selected."; } /** * Gives the tiptext for numberAutoencoders. * * @return the tiptext for numberAutoencoders. */ public String numberAutoencodersTipText() { return this.numberAutoencodersToolTip(); } /** * Get the <code>Compression</code> value. * * @return a <code>double</code> value */ public final double getCompression() { return this.compression; } /** * Set the <code>Compression</code> value. * * @param compression * The new Compression value. */ public final void setCompression(final double compression) { this.compression = compression; } /** * Returns the default compression; 0.85 seems to be a good value for most settings. * * @return The default compression. */ protected double getDefaultCompression() { return 0.85; } /** * Gives the tooltip for compression. * * @return the tooltip for compression. */ public String compressionToolTip() { return "Compression factor of the autoencoders, each level " + "of autoencoders will compress the labels to factor times " + "previous layer size."; } /** * Gives the tiptext for compression. * * @return the tiptext for compression. */ public String compressionTipText() { return this.compressionToolTip(); } /** * Gives the tiptext for optimizeAE. * * @return the tiptext for optimizeAE. */ public String optimizeAETipText() { return this.optimizeAEToolTip(); } /** * Get the <code>OptimizeAE</code> value. * * @return a <code>boolean</code> value */ public final boolean isOptimizeAE() { return this.optimizeAE; } /** * Set the <code>OptimizeAE</code> value. * * @param optimizeAE * The new OptimizeAE value. */ public final void setOptimizeAE(final boolean optimizeAE) { this.optimizeAE = optimizeAE; } /** * The default setting for optimizing the autoencoders. Set to false, as this is an expensive * operation. * * @return The default flag for optimizing the autoencoders. */ protected boolean getDefaultOptimizeAE() { return false; } /** * Gives the tooltip for OptimizeAE. * * @return the tooltip for OptimizeAE. */ public String optimizeAEToolTip() { return "Optimize the number of layers of autoencoders. If set to true " + "the number of layers will internally be optimized using a validation " + "set."; } /** * Returns the global information of the classifier. * * @return Global information of the classifier */ public String globalInfo() { return "Maniac - Multi-lAbel classificatioN using AutoenCoders. " + "Transforms the labels using layers of autoencoders."
+ "For more information see:\n" + this.getTechnicalInformation(); } /** * Returns an enumeration of the options. * * @return Enumeration of the options. */ @Override public Enumeration listOptions() { Vector newVector = new Vector(); OptionUtils.addOption(newVector, this.compressionTipText(), "" + this.getDefaultCompression(), "compression"); OptionUtils.addOption(newVector, this.numberAutoencodersTipText(), "" + this.getDefaultNumberAutoencoders(), "numberAutoencoders"); OptionUtils.addOption(newVector, this.optimizeAETipText(), "" + this.getDefaultOptimizeAE(), "optimizeAE"); OptionUtils.add(newVector, super.listOptions()); return OptionUtils.toEnumeration(newVector); } /** * Change default classifier to CR with Linear Regression as base as this classifier uses numeric * values in the compressed labels. */ @Override protected Classifier getDefaultClassifier() { CR cr = new CR(); LinearRegression lr = new LinearRegression(); cr.setClassifier(lr); return cr; } /** * Returns an array with the options of the classifier. * * @return Array of options. */ @Override public String[] getOptions() { List<String> result = new ArrayList<>(); OptionUtils.add(result, "compression", this.getCompression()); OptionUtils.add(result, "optimizeAE", this.isOptimizeAE()); OptionUtils.add(result, "numberAutoencoders", this.getNumberAutoencoders()); OptionUtils.add(result, super.getOptions()); return OptionUtils.toArray(result); } /** * Sets the options to the given values in the array. * * @param options * The options to be set. */ @Override public void setOptions(final String[] options) throws Exception { this.setCompression(OptionUtils.parse(options, "compression", this.getDefaultCompression())); this.setNumberAutoencoders(OptionUtils.parse(options, "numberAutoencoders", this.getDefaultNumberAutoencoders())); this.setOptimizeAE(OptionUtils.parse(options, "optimizeAE", this.getDefaultOptimizeAE())); super.setOptions(options); } @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "J\"org Wicker, Andrey Tyukin, Stefan Kramer"); result.setValue(Field.TITLE, "A Nonlinear Label Compression and Transformation Method for Multi-Label Classification using Autoencoders"); result.setValue(Field.BOOKTITLE, "The 20th Pacific Asia Conference on Knowledge Discovery and Data Mining (PAKDD)"); result.setValue(Field.YEAR, "2016"); result.setValue(Field.PAGES, "328-340"); return result; } @Override public Instance transformInstance(final Instance x) throws Exception { Instances tmpInst = new Instances(x.dataset()); tmpInst.delete(); tmpInst.add(x); Instances features = this.extractPart(tmpInst, false); Instances pseudoLabels = new Instances(this.compressedTemplateInst); Instance tmpin = pseudoLabels.instance(0); pseudoLabels.delete(); pseudoLabels.add(tmpin); for (int i = 0; i < pseudoLabels.classIndex(); i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } pseudoLabels.instance(0).setMissing(i); } Instances newDataSet = Instances.mergeInstances(pseudoLabels, features); newDataSet.setClassIndex(pseudoLabels.numAttributes()); return newDataSet.instance(0); } @Override public Instances transformLabels(final Instances D) throws Exception { // crazy scala-specific stuff that is necessary to access // "static" methods from java org.kramerlab.autoencoder.package$ autoencoderStatics = org.kramerlab.autoencoder.package$.MODULE$; 
org.kramerlab.autoencoder.wekacompatibility.package$ wekaStatics = org.kramerlab.autoencoder.wekacompatibility.package$.MODULE$; org.kramerlab.autoencoder.experiments.package$ experimentsStatics = org.kramerlab.autoencoder.experiments.package$.MODULE$; int topiter = -1; // the optimization is a bit special, since we learn a stream // of autoencoders, no need to start from scratch, we just add layers if (this.isOptimizeAE()) { Instances train = D.trainCV(3, 1); Instances test = D.testCV(3, 1); Instances labels = this.extractPart(train, true); // first convert the arff into non sparse form SparseToNonSparse spfilter = new SparseToNonSparse(); spfilter.setInputFormat(labels); Instances aeData = Filter.useFilter(labels, spfilter); // now convert it into a format suitable for the autoencoder Mat data = wekaStatics.instancesToMat(aeData); Iterable<Autoencoder> autoencoders = autoencoderStatics.deepAutoencoderStream_java(autoencoderStatics.Sigmoid(), // type of neurons. // Sigmoid is ok this.getNumberAutoencoders(), // number of autoencoders = (max hidden layers + 1) / // 2 this.getCompression(), // compression from k-th layer to (k+1)-th layer data, // training data true, // true = L2 Error, false = CrossEntropy autoencoderStatics.HintonsMiraculousStrategy(), true, autoencoderStatics.NoObservers()); // test each autoencoder, select the best classifier double bestAccuracy = Double.NEGATIVE_INFINITY; int iteratorcount = 0; topiter = 0; for (Autoencoder a : autoencoders) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } iteratorcount++; Maniac candidate = new Maniac(); candidate.setOptimizeAE(false); candidate.setNumberAutoencoders(this.getNumberAutoencoders()); candidate.setCompression(this.getCompression()); candidate.setClassifier(this.getClassifier()); candidate.setAE(a); Result res = Evaluation.evaluateModel(candidate, train, test); double curac = (Double) res.getValue("Accuracy"); if (bestAccuracy < curac) { bestAccuracy = curac; topiter = iteratorcount; } } } Instances features = this.extractPart(D, false); Instances labels = this.extractPart(D, true); // first convert the arff into non sparse form SparseToNonSparse spfilter = new SparseToNonSparse(); spfilter.setInputFormat(labels); Instances aeData = Filter.useFilter(labels, spfilter); // now convert it into a format suitable for the autoencoder Mat data = wekaStatics.instancesToMat(aeData); if (this.getAE() == null) { Iterable<Autoencoder> autoencoders = autoencoderStatics.deepAutoencoderStream_java(autoencoderStatics.Sigmoid(), // type of neurons. 
// Sigmoid is ok this.getNumberAutoencoders(), // number of autoencoders = (max hidden layers + 1) / // 2 this.getCompression(), // compression from k-th layer to (k+1)-th layer data, // training data true, // true = L2 Error, false = CrossEntropy autoencoderStatics.HintonsMiraculousStrategy(), true, autoencoderStatics.NoObservers()); int itercount = 0; for (Autoencoder a : autoencoders) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } itercount++; if (topiter > 0 && itercount == topiter || itercount == this.getNumberAutoencoders()) { this.setAE(a); break; } } } Mat compressed = this.getAE().compress(data); Instances compressedLabels = wekaStatics.matToInstances(compressed); // remember the labels to use for the prediction step, this.compressedTemplateInst = new Instances(compressedLabels); Instances result = Instances.mergeInstances(compressedLabels, features); result.setClassIndex(compressedLabels.numAttributes()); return result; } @Override public double[] transformPredictionsBack(final double[] y) { Mat matrix = new Mat(1, y.length / 2); for (int i = 0; i < y.length / 2; i++) { matrix.update(0, i, y[y.length / 2 + i]); } Mat reconstruction = this.getAE().decompress(matrix); double[] result = new double[reconstruction.toArray()[0].length]; for (int i = 0; i < result.length; i++) { result[i] = reconstruction.apply(0, i); } return result; } @Override public String getModel() { return ""; } @Override public String toString() { return this.getModel(); } /** * Main method for testing. * * @param args * - Arguments passed from the command line **/ public static void main(final String[] args) throws Exception { AbstractMultiLabelClassifier.evaluation(new Maniac(), args); } }
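A minimal usage sketch for the Maniac classifier above. Everything dataset-related is an assumption: the path data/music.arff is hypothetical and stands for any MEKA-formatted ARFF whose @relation name carries the -C option, which MLUtils.prepareData uses to set the class index.

import meka.classifiers.multilabel.Maniac;
import meka.core.MLUtils;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class ManiacExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical MEKA ARFF; label attributes come first and -C is set in the relation name.
        Instances D = DataSource.read("data/music.arff");
        MLUtils.prepareData(D);

        Maniac h = new Maniac();
        h.setCompression(0.85); // each autoencoder layer keeps 85% of the previous layer's size
        h.setNumberAutoencoders(4); // at most 4 stacked autoencoders
        h.setOptimizeAE(false); // skip the expensive layer-count optimization
        h.buildClassifier(D);

        System.out.println(java.util.Arrays.toString(h.distributionForInstance(D.instance(0))));
    }
}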
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/MultiLabelClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * MultiLabelClassifier.java * Copyright (C) 2015 University of Waikato, Hamilton, NZ */ package meka.classifiers.multilabel; import meka.classifiers.MultiXClassifier; /** * Interface for multi-label classifiers. * * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision$ */ public interface MultiLabelClassifier extends MultiXClassifier { }
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/MultiLabelClassifierThreaded.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * MultiLabelClassifierThreaded.java * Copyright (C) 2015 University of Waikato, Hamilton, NZ */ package meka.classifiers.multilabel; import weka.core.Instances; /** * Interface for multi-label classifiers that support threaded (batch) prediction. * * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision$ */ public interface MultiLabelClassifierThreaded extends MultiLabelClassifier { public boolean isThreaded(); public void setThreaded(boolean setv); public double[][] distributionForInstanceM(Instances i) throws Exception; }
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/MultiTargetCapable.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; /** * MultiTargetCapable.java - A multi-label Classifier that can also handle generic multi-target data. * * @author Jesse Read (jesse@tsc.uc3m.es) * @version January 2012 */ public interface MultiTargetCapable { /* * Everything is the same as MultilabelClassifier except for the Evaluation * ... but in the future I would like to add getProbabilities() or something here. */ }
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/PCC.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.util.Arrays; import meka.core.A; import weka.core.Instance; import weka.core.Instances; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; /** * PCC.java - (Bayes Optimal) Probabilistic Classifier Chains. Exactly like CC at build time, but * explores all possible paths during inference at test time (hence, 'Bayes optimal'). <br> * This version is multi-target capable. <br> * See: Dembczynski et al, <i>Bayes Optimal Multi-label Classification via Probabilistic Classifier * Chains</i>, ICML 2010. * * @author Jesse Read (jesse@tsc.uc3m.es) * @version November 2012 */ public class PCC extends CC implements TechnicalInformationHandler { private static final long serialVersionUID = -7669951968300150007L; // MT Capable /** * Push - increment y[0] until it reaches K[0], then reset it and carry over to y[1], etc. Basically a counter. * * @return True if finished */ private static boolean push(final double y[], final int K[], int j) { if (j >= y.length) { return true; } else if (y[j] < K[j] - 1) { y[j]++; return false; } else { y[j] = 0.0; return push(y, K, ++j); } } /** * GetKs - return [K_1,K_2,...,K_L] where each Y_j \in {1,...,K_j}. In the multi-label case, K[j] = * 2 for all j = 1,...,L. * * @param D * a dataset * @return an array of the number of values that each label can take * @throws InterruptedException */ private static int[] getKs(final Instances D) throws InterruptedException { int L = D.classIndex(); int K[] = new int[L]; for (int k = 0; k < L; k++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } K[k] = D.attribute(k).numValues(); } return K; } /** * Return multi-label probabilities. Where p(y_j = y[j]) = confidence[j], then return [p(y_j = * 1),...,p(y_L = 1)]. */ private static double[] convertConfidenceToProbability(final double y[], final double confidences[]) { double p[] = new double[confidences.length]; for (int j = 0; j < confidences.length; j++) { p[j] = confidences[j] * y[j] + (1. - confidences[j]) * Math.abs(y[j] - 1.); } return p; } @Override public double[] distributionForInstance(final Instance xy) throws Exception { int L = xy.classIndex(); double y[] = new double[L]; double conf[] = new double[L]; double w = 0.0; /* * e.g. K = [3,3,5] we push y_[] from [0,0,0] to [2,2,4] over all necessary iterations.
*/ int K[] = getKs(xy.dataset()); if (this.getDebug()) { System.out.println("K[] = " + Arrays.toString(K)); } double y_[] = new double[L]; for (int i = 0; i < 1000000; i++) { // limit to 1m if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } double conf_[] = super.probabilityForInstance(xy, y_); double w_ = A.product(conf_); // System.out.println(""+i+" "+Arrays.toString(y_)+" "+w_+" // "+Arrays.toString(conf_)+"/"+Arrays.toString(convertConfidenceToProbability(y_,conf_))); if (w_ > w) { if (this.getDebug()) { System.out.println("y' = " + Arrays.toString(y_) + ", :" + w_); } y = Arrays.copyOf(y_, y_.length); w = w_; conf = conf_; } if (push(y_, K, 0)) { // Done ! if (this.getDebug()) { System.out.println("Tried all " + (i + 1) + " combinations."); } break; } } // If it's multi-label (binary only), return the probabilistic output (else just the values). return (A.max(K) > 2) ? y : convertConfidenceToProbability(conf, y); // return p_y; //y; } @Override public String globalInfo() { return "Probabilistic Classifier Chains. " + "For more information see:\n" + this.getTechnicalInformation().toString(); } @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Krzysztof Dembczynski and Weiwei Cheng and Eyke Hullermeier"); result.setValue(Field.TITLE, "Bayes Optimal Multi-label Classification via Probabilistic Classifier Chains"); result.setValue(Field.BOOKTITLE, "ICML '10: 27th International Conference on Machine Learning"); result.setValue(Field.YEAR, "2010"); return result; } public static void main(final String args[]) { ProblemTransformationMethod.evaluation(new PCC(), args); } }
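The push helper above is a mixed-radix counter: it advances y through every combination of label values, which is exactly how distributionForInstance enumerates all paths. A self-contained sketch of the same counting scheme (class and method names here are illustrative, not part of MEKA):

import java.util.Arrays;

public class PushCounterDemo {
    // Same counting scheme as PCC.push: increment y[j] in base K[j], carrying over on overflow.
    static boolean push(double[] y, int[] K, int j) {
        if (j >= y.length) {
            return true; // every combination has been visited
        } else if (y[j] < K[j] - 1) {
            y[j]++;
            return false;
        } else {
            y[j] = 0.0;
            return push(y, K, j + 1);
        }
    }

    public static void main(String[] args) {
        int[] K = { 2, 3 }; // label 0 is binary, label 1 takes three values
        double[] y = new double[K.length];
        do {
            System.out.println(Arrays.toString(y)); // visits all 2 * 3 = 6 combinations
        } while (!push(y, K, 0));
    }
}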
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/PLST.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.util.ArrayList; import java.util.Enumeration; import java.util.List; import java.util.Vector; import meka.classifiers.multitarget.CR; import meka.core.MatrixUtils; import meka.core.OptionUtils; import weka.classifiers.Classifier; import weka.classifiers.functions.LinearRegression; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.matrix.Matrix; import weka.core.matrix.SingularValueDecomposition; /** * PLST - Principal Label Space Transformation. Uses SVD to generate a matrix that transforms the * label space. This implementation is adapted from the MatLab implementation provided by the * authors at <a href="https://github.com/hsuantien/mlc_lsdr">Github</a> <br> * See: Farbound Tai and Hsuan-Tien Lin. Multilabel classification with principal label space * transformation. Neural Computation, 24(9):2508--2542, September 2012. * * @author Joerg Wicker (wicker@uni-mainz.de) */ public class PLST extends LabelTransformationClassifier implements TechnicalInformationHandler { private static final long serialVersionUID = 3761303322465321039L; /* * The shift matrix, used in the training and prediction */ protected Matrix m_Shift; /* * Pattern Instances, needed to transform an Instance object for the prediction step */ protected Instances m_PatternInstances; /* * The transformation matrix which is generated using SVD. */ protected Matrix m_v = null; /* * The size of the compressed / transformed matrix. */ protected int m_Size = this.getDefaultSize(); /** * Returns the global information of the classifier. * * @return Global information of the classifier */ public String globalInfo() { return "PLST - Principal Label Space Transformation. Uses SVD to generate a matrix " + "that transforms the label space. This implementation is adapted from the " + "MatLab implementation provided by the authors.\n\n" + "https://github.com/hsuantien/mlc_lsdr\n\n" + "For more information see:\n " + this.getTechnicalInformation(); } /** * Change default classifier to CR with Linear Regression as base as this classifier uses numeric * values in the compressed labels. */ @Override protected Classifier getDefaultClassifier() { CR cr = new CR(); LinearRegression lr = new LinearRegression(); cr.setClassifier(lr); return cr; } /** * The default size, set to 3. * * @return the default size. */ protected int getDefaultSize() { return 3; } /** * Returns the size of the compressed labels. * * @return The size of the compressed labels, i.e., the number of columns. */ public int getSize() { return this.m_Size; } /** * Sets the size of the compressed labels.
* * @param size * The size of the compressed labels, i.e., the number of columns. */ public void setSize(final int size) { this.m_Size = size; } /** * The tooltip for the size. * * @return the tooltip. */ public String sizeTipText() { return "Size of the compressed matrix. Should be \n" + "less than the number of labels and more than 1."; } /** * Returns an instance of a TechnicalInformation object, containing detailed information about the * technical background of this class, e.g., paper reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Farbound Tai and Hsuan-Tien Lin"); result.setValue(Field.TITLE, "Multilabel classification with principal label space transformation"); result.setValue(Field.BOOKTITLE, "Neural Computation"); result.setValue(Field.YEAR, "2012"); result.setValue(Field.PAGES, "2508-2542"); result.setValue(Field.VOLUME, "24"); result.setValue(Field.NUMBER, "9"); return result; } /** * Returns an enumeration of the options. * * @return Enumeration of the options. */ @Override public Enumeration listOptions() { Vector newVector = new Vector(); OptionUtils.addOption(newVector, this.sizeTipText(), "" + this.getDefaultSize(), "size"); OptionUtils.add(newVector, super.listOptions()); return OptionUtils.toEnumeration(newVector); } /** * Returns an array with the options of the classifier. * * @return Array of options. */ @Override public String[] getOptions() { List<String> result = new ArrayList<>(); OptionUtils.add(result, "size", this.getSize()); OptionUtils.add(result, super.getOptions()); return OptionUtils.toArray(result); } /** * Sets the options to the given values in the array. * * @param options * The options to be set. */ @Override public void setOptions(final String[] options) throws Exception { this.setSize(OptionUtils.parse(options, "size", this.getDefaultSize())); super.setOptions(options); } /** * The method to transform the labels into another set of latent labels, typically a compression * method is used, e.g., Boolean matrix decomposition in the case of MLC-BMaD, or matrix * multiplication based on SVD for PLST. * * @param D * the instances to transform into new instances with transformed labels. The Instances * consist of features and original labels. * @return The resulting instances. Instances consist of features and transformed labels. 
*/ @Override public Instances transformLabels(final Instances D) throws Exception { Instances features = this.extractPart(D, false); Instances labels = this.extractPart(D, true); Matrix labelMatrix = MatrixUtils.instancesToMatrix(labels); // first, lets do the preprocessing as in the original implementation double[] averages = new double[labels.numAttributes()]; for (int i = 0; i < labels.numAttributes(); i++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } double[] column = labels.attributeToDoubleArray(i); double sum = 0.0; for (int j = 0; j < column.length; j++) { if (column[j] == 1.0) { sum += 1.0; } else { sum += -1; // The algorithm needs 1/-1 coding, so let's // change the matrix here labelMatrix.set(j, i, -1.0); } } averages[i] = sum / column.length; } double[][] shiftMatrix = new double[1][labels.numAttributes()]; shiftMatrix[0] = averages; // remember shift for prediction this.m_Shift = new Matrix(shiftMatrix); double[][] shiftTrainMatrix = new double[labels.numInstances()][labels.numAttributes()]; for (int i = 0; i < labels.numInstances(); i++) { shiftTrainMatrix[i] = averages; } Matrix trainShift = new Matrix(shiftTrainMatrix); SingularValueDecomposition svd = new SingularValueDecomposition(labelMatrix.minus(trainShift)); // The paper uses U here, but the implementation by the authors uses V, so // we used V here too. this.m_v = svd.getV(); // remove columns so only size are left double[][] newArr = new double[this.m_v.getRowDimension()][this.getSize()]; for (int i = 0; i < newArr.length; i++) { for (int j = 0; j < newArr[i].length; j++) { newArr[i][j] = this.m_v.getArray()[i][j]; } } this.m_v = new Matrix(newArr); // now the multiplication (last step of the algorithm) Matrix compressed = MatrixUtils.instancesToMatrix(labels).times(this.m_v); // and transform it to Instances ArrayList<Attribute> attinfos = new ArrayList<>(); for (int i = 0; i < compressed.getColumnDimension(); i++) { Attribute att = new Attribute("att" + i); attinfos.add(att); } // create pattern instances (also used in prediction) note: this is a regression // problem now, labels are not binary this.m_PatternInstances = new Instances("compressedlabels", attinfos, compressed.getRowDimension()); // fill result Instances Instances result = Instances.mergeInstances(MatrixUtils.matrixToInstances(compressed, this.m_PatternInstances), features); result.setClassIndex(this.getSize()); return result; } /** * Transforms the predictions of the internal classifier back to the original labels. * * @param y * The predictions that should be transformed back. The array consists only of the * predictions as they are returned from the internal classifier. * @return The transformed predictions. */ @Override public double[] transformPredictionsBack(final double[] y) { // y consists of predictions and maxindex, we need only predictions double[] predictions = new double[y.length / 2]; for (int i = 0; i < predictions.length; i++) { predictions[i] = y[predictions.length + i]; } double[][] dataArray = new double[1][predictions.length]; dataArray[0] = predictions; Matrix yMat = new Matrix(dataArray); Matrix multiplied = yMat.times(this.m_v.transpose()).plus(this.m_Shift); double[] res = new double[multiplied.getColumnDimension()]; // change back from -1/1 coding to 0/1 for (int i = 0; i < res.length; i++) { res[i] = multiplied.getArray()[0][i] < 0.0 ? 
0.0 : 1.0; } return res; } /** * Transforms the instance in the prediction process before being given to the internal multi-label or * multi-target classifier. The instance is passed having the original set of labels; these must be * replaced with the transformed labels (attributes) so that the internal classifier can predict * them. * * @param x * The instance to transform. Consists of features and labels. * @return The transformed instance. Consists of features and transformed labels. */ @Override public Instance transformInstance(final Instance x) throws Exception { Instances tmpInst = new Instances(x.dataset()); tmpInst.delete(); tmpInst.add(x); Instances features = this.extractPart(tmpInst, false); Instances labels = new Instances(this.m_PatternInstances); labels.add(new DenseInstance(labels.numAttributes())); Instances result = Instances.mergeInstances(labels, features); result.setClassIndex(labels.numAttributes()); return result.instance(0); } /** * Returns a string representation of the model. * * @return the model */ @Override public String getModel() { return ""; } @Override public String toString() { return this.getModel(); } /** * Main method for testing. * * @param args * - Arguments passed from the command line **/ public static void main(final String[] args) throws Exception { AbstractMultiLabelClassifier.evaluation(new PLST(), args); } }
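A self-contained sketch of the SVD round-trip that transformLabels and transformPredictionsBack perform, using the same weka.core.matrix classes; the toy 1/-1 label matrix is an assumption, and the mean shift that PLST additionally applies is omitted for brevity.

import weka.core.matrix.Matrix;
import weka.core.matrix.SingularValueDecomposition;

public class PlstRoundTripDemo {
    public static void main(String[] args) {
        // Toy 4x3 label matrix in the 1/-1 coding that PLST uses internally (an assumption).
        double[][] labels = { { 1, -1, 1 }, { 1, 1, -1 }, { -1, 1, 1 }, { 1, -1, -1 } };
        Matrix Y = new Matrix(labels);
        int size = 2; // the 'size' parameter: number of compressed columns

        // Keep only the first 'size' columns of V, as the truncation loop above does.
        SingularValueDecomposition svd = new SingularValueDecomposition(Y);
        Matrix V = svd.getV().getMatrix(0, Y.getColumnDimension() - 1, 0, size - 1);

        Matrix compressed = Y.times(V); // the transformed (regression) targets
        Matrix reconstructed = compressed.times(V.transpose()); // decoding step

        for (int i = 0; i < reconstructed.getRowDimension(); i++) {
            StringBuilder row = new StringBuilder();
            for (int j = 0; j < reconstructed.getColumnDimension(); j++) {
                row.append(String.format("%7.3f", reconstructed.get(i, j)));
            }
            System.out.println(row);
        }
    }
}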
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/PMCC.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import java.util.ArrayList; import java.util.Arrays; import java.util.Enumeration; import java.util.HashMap; import java.util.List; import java.util.Random; import java.util.Vector; import meka.core.A; import meka.core.MLUtils; import meka.core.OptionUtils; import weka.classifiers.AbstractClassifier; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.Utils; /** * PMCC.java - Like MCC but creates a population of M chains at training time (from Is * <i>candidate</i> chains, using Monte Carlo sampling), and uses this population for inference at * test time; If you are looking for a 'more typical' majority-vote ensemble method, use something * like EnsembleML or BaggingML with MCC. * * <p> * <b>NOTE:</b> this implementation was faster, because the chain was only rebuilt from the first * node which was different -- this is no longer the case (due to updates to way classifier chains * works, using the CNode class). * </p> * * @see meka.classifiers.multilabel.MCC * @author Jesse Read * @version Sep 2014 */ public class PMCC extends MCC { private static final long serialVersionUID = 1999206808758133267L; protected int m_M = 10; protected int m_O = 0; protected double m_Beta = 0.03; protected CC h[] = null; protected double[] w = null; public PMCC() { // a new default super.setChainIterations(50); } /** * MatchedUpTo - returns the index i of the first character which differs between two strings. TODO * this is a generic function, could go somewhere else into utils. */ private static int matchedUpto(final String s1, final String s2, final String DELIM) { String s_1[] = s1.split(DELIM); String s_2[] = s2.split(DELIM); int i = 0; while (i < s_1.length && i < s_2.length && s_1[i].equals(s_2[i])) { i++; } return i; } /** * GetClosest - returns the 'CC' in 'map' which is built on the sequence most matched to 'sequence'. */ protected static CC getClosest(final HashMap<String, CC> map, final String sequence) { int score = -1; String best = sequence; for (String key : map.keySet()) { int score_ = matchedUpto(key, sequence, ","); if (score_ > score) { score = score_; best = key; } } return map.get(best); } /** * RebuildCC - rebuild a classifier chain 'h_old' to have a new sequence 's_new'. */ protected CC rebuildCC(final CC h_old, final int s_new[], final Instances D) throws Exception { // make a deep copy CC h = (CC) AbstractClassifier.makeCopy(h_old); // rebuild this chain h.rebuildClassifier(s_new, new Instances(D)); return h; } /** * BuildCC - Build a CC of chain-order 's' on dataset 'D'. 
*/ protected CC buildCC(final int s[], final Instances D) throws Exception { // a new classifier chain CC h = new CC(); // build this chain h.prepareChain(s); h.setClassifier(this.m_Classifier); h.buildClassifier(new Instances(D)); return h; } /** * pi - proposal distribution; swap elements in s, depending on iteration t (temperature). <br> * TODO - make faster! * * @param s * a chain sequence * @param r * a random number generator * @param t * the current iteration * @return s' ~ p(s'|s) */ public static int[] pi(final int s[], final Random r, final int t, final double beta) { int L = s.length; // select some entry j double p[] = new double[s.length]; for (int j = 0; j < L; j++) { p[j] = Math.pow((1. / L), beta * t / (1 + j)); } Utils.normalize(p); int j = A.samplePMF(p, r); // blank out the j-th entry, and renormalize, now select k p[j] = 0.0; Utils.normalize(p); int k = A.samplePMF(p, r); // swap j and k return A.swap(s, j, k); } @Override public void buildClassifier(final Instances D) throws Exception { this.m_R = new Random(this.m_S); // Variables int L = D.classIndex(); int N = D.numInstances(); int d = D.numAttributes() - L; this.h = new CC[this.m_M]; this.w = new double[this.m_M]; // int s[][] = new int[m_M][L]; // for interest's sake if (this.m_Is >= this.m_M) { // HashMap<String,CC> id2cc = new HashMap<String,CC>(); // Make CC int s[] = MLUtils.gen_indices(L); MLUtils.randomize(s, this.m_R); this.h[0] = this.buildCC(Arrays.copyOf(s, s.length), D); // @todo move into setChain(..) this.w[0] = this.payoff(this.h[0], D); // id2cc.put(Arrays.toString(s),h[0]); // save a copy // s[0] = s_; if (this.getDebug()) { System.out.println("s[0] = " + Arrays.toString(s)); } for (int t = 0; t < this.m_Is; t++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } // propose a chain s' ~ pi(s'|s) int s_[] = (this.m_O > 0) ? pi(Arrays.copyOf(s, s.length), this.m_R, t, this.m_Beta) : // default cond. 
option - with temperature A.swap(Arrays.copyOf(s, s.length), this.m_R); // special simple option - swap two elements // build h' with sequence s' // CC h_ = rebuildCC(getClosest(id2cc,Arrays.toString(s_)),s_,D); CC h_ = this.buildCC(Arrays.copyOf(s_, s_.length), D); // id2cc.put(Arrays.toString(s_), h_); // rate h' (by its performance on the training data) double w_ = this.payoff(h_, D); // accept h' weighted more than the weakest h in the population int min = Utils.sort(this.w)[0]; // (min index) if (w_ > this.w[min]) { this.w[min] = w_; this.h[min] = h_; if (this.getDebug()) { System.out.println(" accepted h_ with score " + w_ + " > " + this.w[min]); } s = s_; } else if (this.getDebug()) { System.out.println(" DENIED h_ with score " + w_ + " !> score " + this.w[min]); } } if (this.getDebug()) { System.out.println("---"); } // normalise weights Utils.normalize(this.w); } else { throw new Exception("[Error] Number of chains evaluated (Is) should be at least as great as the population selected (M), and always greater than 0."); } } @Override public double[] distributionForInstance(final Instance x) throws Exception { // Start with a good guess int max = Utils.maxIndex(this.w); double y[] = this.h[max].distributionForInstance(x); double wm = A.product(this.h[max].probabilityForInstance(x, y)); for (int t = 0; t < this.m_Iy; t++) { if (Thread.currentThread().isInterrupted()) { throw new InterruptedException("Thread has been interrupted."); } // m ~ p(m|w) int m = A.samplePMF(this.w, this.m_R); // y ~ p(y|x,m) double y_[] = this.h[m].sampleForInstance(x, this.m_R); // <-- TODO: can do this faster, see #MCC // w = prod_j p(y[j]|x,m) double w_ = A.product(this.h[m].getConfidences()); // accept ? if (w_ > wm) { wm = w_; y = y_; } } return y; } @Override public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option("\tThe population size (of chains) -- should be smaller than the total number of chains evaluated (Is) \n\tdefault: 10", "M", 1, "-M <value>")); result.addElement( new Option("\tUse temperature: cool the chain down over time (from the beginning of the chain) -- can be faster\n\tdefault: 0 (no temperature)", "O", 1, "-O <value>")); result.addElement(new Option("\tIf using O = 1 for temperature, this sets the Beta constant \n\tdefault: 0.03", "B", 1, "-B <value>")); OptionUtils.add(result, super.listOptions()); return OptionUtils.toEnumeration(result); } @Override public void setOptions(final String[] options) throws Exception { this.setM(OptionUtils.parse(options, 'M', 10)); this.setO(OptionUtils.parse(options, 'O', 0)); this.setBeta(OptionUtils.parse(options, 'B', 0.03)); super.setOptions(options); } @Override public String[] getOptions() { List<String> result = new ArrayList<>(); OptionUtils.add(result, 'M', this.getM()); OptionUtils.add(result, 'O', this.getO()); OptionUtils.add(result, 'B', this.getBeta()); OptionUtils.add(result, super.getOptions()); return OptionUtils.toArray(result); } /** Set the temperature factor */ public void setBeta(final double t) { this.m_Beta = t; } /** Get the temperature factor */ public double getBeta() { return this.m_Beta; } public String betaTipText() { return "Sets the temperature factor."; } /** Set the temperature switch */ public void setO(final int t) { this.m_O = t; } /** Get the temperature switch */ public int getO() { return this.m_O; } public String oTipText() { return "Sets the temperature switch."; } /** Set the population size */ public void setM(final int M) { this.m_M = M; } /** Get the population size 
*/ public int getM() { return this.m_M; } public String mTipText() { return "Sets the population size."; } @Override public String globalInfo() { return "PMCC - Like MCC but selects the top M chains at training time, and uses all them at test time (using Monte Carlo sampling -- this is not a typical majority-vote ensemble method). For more information see:\n" + this.getTechnicalInformation().toString(); } public static void main(final String args[]) { ProblemTransformationMethod.evaluation(new PMCC(), args); } }
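Because pi above is public and static, the proposal distribution can be exercised in isolation. A small sketch (chain length, seed, and beta are arbitrary choices here) showing that proposals become biased away from early chain positions as t grows:

import java.util.Arrays;
import java.util.Random;
import meka.classifiers.multilabel.PMCC;
import meka.core.MLUtils;

public class PiProposalDemo {
    public static void main(String[] args) {
        Random r = new Random(0);
        int L = 6; // arbitrary chain length
        int[] s = MLUtils.gen_indices(L); // the identity order [0,1,...,5]

        // At t = 0 the swap positions are chosen uniformly; as t grows, early
        // positions become unlikely to move, 'freezing' the start of the chain.
        for (int t : new int[]{ 0, 10, 100 }) {
            int[] proposal = PMCC.pi(Arrays.copyOf(s, s.length), r, t, 0.03);
            System.out.println("t=" + t + " -> " + Arrays.toString(proposal));
        }
    }
}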
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/PS.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import meka.core.MLUtils; import meka.core.OptionUtils; import meka.core.PSUtils; import weka.core.*; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.util.*; /** * PS.java - The Pruned Sets Method. * Removes examples with P-infrequent labelsets from the training data, then subsamples these labelsets N times to produce N new examples with P-frequent labelsets. Then train a standard LC classifier. The idea is to reduce the number of unique class values that would otherwise need to be learned by LC. Best used in an Ensemble (e.g., EnsembleML).<br> * This class was rewritten and extended to be faster, use sparse LabelSets, and follow better OOP. There may be minor variation in results (probably on account of internal randomness, different set orderings, etc) but this should not be statistically significant. * <br> * See: Jesse Read, Bernhard Pfahringer, Geoff Holmes. <i>Multi-label Classification using Ensembles of Pruned Sets</i>. Proc. of IEEE International Conference on Data Mining (ICDM 2008), Pisa, Italy, 2008 * @see LC * @version April 2014 * @author Jesse Read */ public class PS extends LC implements Randomizable, TechnicalInformationHandler { /** for serialization. */ private static final long serialVersionUID = 8943667795912487237L; protected int m_P = 0; protected int m_N = 0; protected int m_S = 0; /** * Description to display in the GUI. * * @return the description */ @Override public String globalInfo() { return "The Pruned Sets method (PS).\n" + "Removes examples with P-infrequent labelsets from the training data, then subsamples these labelsets N times to produce N new examples with P-frequent labelsets. Then train a standard LC classifier. The idea is to reduce the number of unique class values that would otherwise need to be learned by LC. Best used in an Ensemble (e.g., EnsembleML).\n" + "For more information see:\n" + getTechnicalInformation().toString(); } @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Jesse Read, Bernhard Pfahringer, Geoff Holmes"); result.setValue(Field.TITLE, "Multi-label Classification Using Ensembles of Pruned Sets"); result.setValue(Field.BOOKTITLE, "ICDM'08: International Conference on Data Mining (ICDM 2008). Pisa, Italy."); result.setValue(Field.YEAR, "2008"); return result; } private int parseValue(String s) { int i = s.indexOf('-'); Random m_R = new Random(m_S); if(i > 0 && i < s.length()) { int lo = Integer.parseInt(s.substring(0,i)); int hi = Integer.parseInt(s.substring(i+1,s.length())); return lo + m_R.nextInt(hi-lo+1); } else return Integer.parseInt(s); } /** * GetP - Get the pruning value P.
*/ public int getP() { return m_P; } /** * SetP - Sets the pruning value P, defining an infrequent labelset as one which occurs less than P times in the data (P = 0 defaults to LC). */ public void setP(int p) { m_P = p; } public String pTipText() { return "The pruning value P, defining an infrequent labelset as one which occurs less than P times in the data (P = 0 defaults to LC)."; } /** * GetN - Get the subsampling value N. */ public int getN() { return m_N; } /** * SetN - Sets the subsampling value N, the (maximum) number of frequent labelsets to subsample from the infrequent labelsets. */ public void setN(int n) { m_N = n; } public String nTipText() { return "The subsampling value N, the (maximum) number of frequent labelsets to subsample from the infrequent labelsets."; } /** * SetSeed - Use random P and N values (in this case P and N arguments determine a <i>range</i> of values to select from randomly, e.g., -P 1-5 selects P randomly in {1,2,3,4,5}. */ @Override public void setSeed(int s) { // set random P / N values here (used by, e.g., EnsembleML) m_S = s; if (getDebug()) { System.out.println("P = "+m_P); System.out.println("N = "+m_N); } } @Override public int getSeed() { return m_S; } public String seedTipText() { return "The seed value for randomizing the data."; } @Override public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option("\tSets the pruning value, defining an infrequent labelset as one which occurs <= P times in the data (P = 0 defaults to LC).\n\tdefault: "+m_P+"\t(LC)", "P", 1, "-P <value>")); result.addElement(new Option("\tSets the (maximum) number of frequent labelsets to subsample from the infrequent labelsets.\n\tdefault: "+m_N+"\t(none)\n\tn\tN = n\n\t-n\tN = n, or 0 if LCard(D) >= 2\n\tn-m\tN = random(n,m)", "N", 1, "-N <value>")); result.addElement(new Option("\tThe seed value for randomization\n\tdefault: 0", "S", 1, "-S <value>")); OptionUtils.add(result, super.listOptions()); return OptionUtils.toEnumeration(result); } @Override public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('P', options); if (tmpStr.length() != 0) setP(parseValue(tmpStr)); else setP(parseValue("0")); tmpStr = Utils.getOption('N', options); if (tmpStr.length() != 0) setN(parseValue(tmpStr)); else setN(parseValue("0")); setSeed(OptionUtils.parse(options, 'S', 0)); super.setOptions(options); } @Override public String [] getOptions() { List<String> result = new ArrayList<>(); OptionUtils.add(result, 'P', getP()); OptionUtils.add(result, 'N', getN()); OptionUtils.add(result, 'S', getSeed()); OptionUtils.add(result, super.getOptions()); return OptionUtils.toArray(result); } @Override public void buildClassifier(Instances D) throws Exception { testCapabilities(D); int L = D.classIndex(); // Check N if (m_N < 0) { double lc = MLUtils.labelCardinality(D,L); if (lc > 2.0) m_N = 0; else m_N = Math.abs(m_N); System.err.println("N set to "+m_N); } // Transform Instances D_ = PSUtils.PSTransformation(D,L,m_P,m_N); m_InstancesTemplate = new Instances(D_,0); // Info if(getDebug()) System.out.println("("+m_InstancesTemplate.attribute(0).numValues()+" classes, "+D_.numInstances()+" ins. )"); // Build m_Classifier.buildClassifier(D_); } @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9117 $"); } public static void main(String args[]) { ProblemTransformationMethod.evaluation(new PS(), args); } }
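A minimal training sketch for PS using only the setters shown above; the dataset path is a placeholder for any MEKA-formatted ARFF.

import meka.classifiers.multilabel.PS;
import meka.core.MLUtils;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class PSExample {
    public static void main(String[] args) throws Exception {
        Instances D = DataSource.read("data/yeast.arff"); // placeholder MEKA ARFF
        MLUtils.prepareData(D);

        PS h = new PS();
        h.setP(3); // prune labelsets occurring fewer than 3 times
        h.setN(1); // subsample each pruned example into at most 1 frequent labelset
        h.setClassifier(new J48());
        h.buildClassifier(D);

        System.out.println(java.util.Arrays.toString(h.distributionForInstance(D.instance(0))));
    }
}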
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/PSt.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import weka.core.Instance; import meka.core.PSUtils; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; /** * PSt.java - Pruned Sets with a threshold so as to be able to predict sets not seen in the training set. * <br> * See: Jesse Read. <i>A Pruned Problem Transformation Method for Multi-label Classification</i>. In Proc. of the NZ Computer Science Research Student Conference. Christchurch, New Zealand (2008). * @see PS * @author Jesse Read (jmr30@cs.waikato.ac.nz) */ public class PSt extends PS implements TechnicalInformationHandler { /** for serialization. */ private static final long serialVersionUID = -792705184263116856L; /** * Description to display in the GUI. * * @return the description */ @Override public String globalInfo() { return "Pruned Sets with a threshold so as to be able to predict sets not seen in the training set. " + "For more information see:\n" + getTechnicalInformation().toString(); } @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Jesse Read"); result.setValue(Field.TITLE, "A Pruned Problem Transformation Method for Multi-label Classification"); result.setValue(Field.BOOKTITLE, "NZ Computer Science Research Student Conference. Christchurch, New Zealand"); result.setValue(Field.YEAR, "2008"); return result; } @Override public double[] distributionForInstance(Instance x) throws Exception { int L = x.classIndex(); // if there is only one class (as for e.g. in some hier. mtds) predict it if(L == 1) return new double[]{1.0}; Instance x_ = PSUtils.convertInstance(x,L,m_InstancesTemplate); //convertInstance(x,L); //x_.setDataset(m_InstancesTemplate); // Get a classification return PSUtils.recombination_t(m_Classifier.distributionForInstance(x_),L,m_InstancesTemplate); } @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9117 $"); } public static void main(String args[]) { ProblemTransformationMethod.evaluation(new PSt(), args); } }
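Since PSt changes only the inference step, it is a drop-in replacement for PS; a hedged sketch mirroring the PS example above (same placeholder dataset):

import meka.classifiers.multilabel.PSt;
import meka.core.MLUtils;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class PStExample {
    public static void main(String[] args) throws Exception {
        Instances D = DataSource.read("data/yeast.arff"); // placeholder MEKA ARFF
        MLUtils.prepareData(D);

        PSt h = new PSt(); // trained exactly like PS; only distributionForInstance differs
        h.setP(3);
        h.setN(1);
        h.setClassifier(new J48());
        h.buildClassifier(D);

        // The thresholded recombination can produce labelsets never seen during training.
        System.out.println(java.util.Arrays.toString(h.distributionForInstance(D.instance(0))));
    }
}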
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/ProblemTransformationMethod.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package meka.classifiers.multilabel; import meka.classifiers.incremental.IncrementalEvaluation; import weka.classifiers.SingleClassifierEnhancer; import weka.classifiers.UpdateableClassifier; import weka.classifiers.trees.J48; import weka.core.*; import weka.core.Capabilities.Capability; /** * MultilabelClassifier.java - A Multilabel Classifier. * @author Jesse Read * @version Jan 2015 */ public abstract class ProblemTransformationMethod extends SingleClassifierEnhancer implements MultiLabelClassifier { /** for serialization. */ private static final long serialVersionUID = 1713843369766127169L; /** A Template for Problem Transformations */ protected Instances m_InstancesTemplate; /** * Description to display in the GUI. * * @return the description */ public String globalInfo() { // highly recommended to overwrite this method! return "A multi-label classifier"; } public ProblemTransformationMethod() { // default classifier for GUI this.m_Classifier = new J48(); } @Override protected String defaultClassifierString() { // default classifier for CLI return "weka.classifiers.trees.J48"; } @Override public String toString() { return ""; } /** * Returns a string representation of the model. * * @return the model */ public String getModel() { return ""; } public Instances getTemplate() { return m_InstancesTemplate; } /** * TestCapabilities. * Make sure the training data is suitable. * @param D the data */ public void testCapabilities(Instances D) throws Exception { // get the classifier's capabilities, enable all class attributes and do the usual test Capabilities cap = getCapabilities(); cap.enableAllClasses(); //getCapabilities().testWithFail(D); // get the capabilities again, test class attributes individually int L = D.classIndex(); for(int j = 0; j < L; j++) { Attribute c = D.attribute(j); cap.testWithFail(c,true); } } @Override public Capabilities getCapabilities() { Capabilities result; result = super.getCapabilities(); //result.enable(Capability.NUMERIC_CLASS); result.disable(Capability.NUMERIC_CLASS); result.disable(Capability.DATE_CLASS); result.disable(Capability.STRING_CLASS); result.disable(Capability.RELATIONAL_CLASS); return result; } @Override public abstract void buildClassifier(Instances trainingSet) throws Exception; @Override public abstract double[] distributionForInstance(Instance i) throws Exception; @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9117 $"); } /** * Creates a given number of deep copies of the given multi-label classifier using serialization. * * @param model the classifier to copy * @param num the number of classifier copies to create. * @return an array of classifiers. 
* @exception Exception if an error occurs */ public static MultiLabelClassifier[] makeCopies(MultiLabelClassifier model, int num) throws Exception { if (model == null) { throw new Exception("No model classifier set"); } MultiLabelClassifier classifiers[] = new MultiLabelClassifier[num]; SerializedObject so = new SerializedObject(model); for(int i = 0; i < classifiers.length; i++) { classifiers[i] = (MultiLabelClassifier) so.getObject(); } return classifiers; } /** * Called by classifier's main() method upon initialisation from the command line. * TODO: In the future, use runClassifier(h,args) directly, and deprecate this function. * @param h A classifier * @param args Command-line options. */ public static void evaluation(MultiLabelClassifier h, String args[]) { runClassifier(h,args); } /** * Called by classifier's main() method upon initialisation from the command line. * @param h A classifier * @param args Command-line options. */ public static void runClassifier(MultiLabelClassifier h, String args[]) { if (h instanceof UpdateableClassifier) { try { IncrementalEvaluation.runExperiment(h,args); } catch(Exception e) { System.err.println("\n"+e); //e.printStackTrace(); IncrementalEvaluation.printOptions(h.listOptions()); } } else { try { Evaluation.runExperiment(h,args); } catch(Exception e) { System.err.println("\n"+e); //e.printStackTrace(); Evaluation.printOptions(h.listOptions()); } } } }
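The makeCopies helper above deep-copies a configured classifier via serialization. A small sketch of how an ensemble wrapper might use it (the choice of PS as the base model is illustrative):

import meka.classifiers.multilabel.MultiLabelClassifier;
import meka.classifiers.multilabel.PS;
import meka.classifiers.multilabel.ProblemTransformationMethod;

public class MakeCopiesDemo {
    public static void main(String[] args) throws Exception {
        PS base = new PS();
        base.setP(3); // configuration is carried over to every copy

        // Ten independent deep copies, e.g. as members of a bagging ensemble.
        MultiLabelClassifier[] ensemble = ProblemTransformationMethod.makeCopies(base, 10);
        System.out.println(ensemble.length + " copies of " + ensemble[0].getClass().getName());
    }
}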
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/RAkEL.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package meka.classifiers.multilabel;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.List;
import java.util.Random;
import java.util.Vector;

import meka.core.OptionUtils;
import meka.core.SuperLabelUtils;
import weka.classifiers.AbstractClassifier;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;

/**
 * RAkEL - RAndom k-labEL subsets: Draws M subsets of size k from the set of labels.
 *
 * This method draws M subsets of size k from the set of labels and trains PS upon each one, then
 * combines label votes from these PS classifiers to get a label-vector prediction. The original
 * RAkEL by <i>Tsoumakas et al.</i> was a meta method, typically taking LC (aka label powerset) base
 * classifiers. This implementation is based on (extends) PS, making it potentially very fast (due
 * to the pruning mechanism offered by PS).
 *
 * See also <i>RAkEL</i> from the <a href=http://mulan.sourceforge.net>MULAN</a> framework.
 *
 * @see PS
 * @author Jesse Read
 * @version September 2015
 */
public class RAkEL extends RAkELd {

    /** for serialization. */
    private static final long serialVersionUID = -6208337124440497991L;

    /**
     * Description to display in the GUI.
     *
     * @return the description
     */
    @Override
    public String globalInfo() {
        return "Draws M subsets of size k from the set of labels, and trains PS upon each one, then combines label votes from the PS classifiers to get a label-vector prediction.";
    }

    @Override
    public void buildClassifier(final Instances D) throws Exception {
        this.testCapabilities(D);
        int L = D.classIndex();
        Random random = new Random(this.m_S);

        if (this.getDebug()) {
            System.out.println("Building " + this.m_M + " models of " + this.m_K + " random subsets:");
        }

        this.m_InstancesTemplates = new Instances[this.m_M];
        this.kMap = new int[this.m_M][this.m_K];
        this.m_Classifiers = AbstractClassifier.makeCopies(this.m_Classifier, this.m_M);
        for (int i = 0; i < this.m_M; i++) {
            if (Thread.currentThread().isInterrupted()) {
                throw new InterruptedException("Thread has been interrupted.");
            }
            this.kMap[i] = SuperLabelUtils.get_k_subset(L, this.m_K, random);
            if (this.getDebug()) {
                System.out.println("\tmodel " + (i + 1) + "/" + this.m_M + ": " + Arrays.toString(this.kMap[i]) + ", P=" + this.m_P + ", N=" + this.m_N);
            }
            Instances D_i = SuperLabelUtils.makePartitionDataset(D, this.kMap[i], this.m_P, this.m_N);
            this.m_Classifiers[i].buildClassifier(D_i);
            this.m_InstancesTemplates[i] = new Instances(D_i, 0);
        }
    }

    @Override
    public String kTipText() {
        return "The number of labels k in each subset (must be 0 < k < L for L labels)";
    }

    /**
     * Gets the M parameter (the number of subsets).
     */
    public int getM() {
        return this.m_M;
    }

    /**
     * Sets the M parameter (the number of subsets).
     */
    public void setM(final int M) {
        this.m_M = M;
    }

    public String mTipText() {
        return "The number of subsets to draw (which together form an ensemble)";
    }

    @Override
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 9117 $");
    }

    @Override
    public TechnicalInformation getTechnicalInformation() {
        TechnicalInformation result;
        TechnicalInformation additional;

        result = new TechnicalInformation(Type.ARTICLE);
        result.setValue(Field.AUTHOR, "Grigorios Tsoumakas and Ioannis Katakis and Ioannis Vlahavas");
        result.setValue(Field.TITLE, "Random k-Labelsets for Multi-Label Classification");
        result.setValue(Field.JOURNAL, "IEEE Transactions on Knowledge and Data Engineering");
        result.setValue(Field.VOLUME, "99");
        result.setValue(Field.NUMBER, "1");
        result.setValue(Field.YEAR, "2010");

        additional = new TechnicalInformation(Type.INPROCEEDINGS);
        additional.setValue(Field.AUTHOR, "Jesse Read, Antti Puurula, Albert Bifet");
        additional.setValue(Field.TITLE, "Multi-label Classification with Meta-labels");
        additional.setValue(Field.BOOKTITLE, "International Conference on Data Mining");
        additional.setValue(Field.YEAR, "2014");
        result.add(additional);

        return result;
    }

    @Override
    public Enumeration listOptions() {
        Vector result = new Vector();
        result.addElement(new Option("\tSets M (default 10): the number of subsets", "M", 1, "-M <num>"));
        OptionUtils.add(result, super.listOptions());
        return OptionUtils.toEnumeration(result);
    }

    @Override
    public void setOptions(final String[] options) throws Exception {
        this.setM(OptionUtils.parse(options, 'M', 10));
        super.setOptions(options);
    }

    @Override
    public String[] getOptions() {
        List<String> result = new ArrayList<>();
        OptionUtils.add(result, 'M', this.getM());
        OptionUtils.add(result, super.getOptions());
        return OptionUtils.toArray(result);
    }

    public static void main(final String args[]) {
        ProblemTransformationMethod.evaluation(new RAkEL(), args);
    }
}
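The ensemble mechanics above are easiest to see stripped of the WEKA plumbing. Below is a minimal, self-contained sketch of the RAkEL voting scheme -- draw M random k-subsets of the L labels and accumulate per-label votes -- where the shuffle-and-take-k loop and the random "member predictions" are illustrative stand-ins for SuperLabelUtils.get_k_subset and the per-subset PS classifications, not MEKA's actual code:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;

public class RAkELVotingSketch {

    public static void main(String[] args) {
        int L = 6, k = 3, M = 10;
        Random random = new Random(1);
        double[] votes = new double[L];

        for (int i = 0; i < M; i++) {
            // Stand-in for SuperLabelUtils.get_k_subset(L, k, random):
            // shuffle 0..L-1 and keep the first k indices.
            List<Integer> pool = new ArrayList<>();
            for (int j = 0; j < L; j++) {
                pool.add(j);
            }
            Collections.shuffle(pool, random);
            int[] subset = new int[k];
            for (int j = 0; j < k; j++) {
                subset[j] = pool.get(j);
            }
            // Stand-in for the PS member's meta-classification: pretend the
            // member marked a random sub-selection of its subset as relevant.
            for (int j : subset) {
                if (random.nextBoolean()) {
                    votes[j] += 1.0;
                }
            }
        }
        // The vote vector is later thresholded into a 0/1 label vector.
        System.out.println("votes = " + Arrays.toString(votes));
    }
}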
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/RAkELd.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package meka.classifiers.multilabel;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Vector;

import meka.core.A;
import meka.core.MLUtils;
import meka.core.OptionUtils;
import meka.core.PSUtils;
import meka.core.SuperLabelUtils;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.core.Drawable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;

/**
 * RAkELd - RAndom partition of labELs; like RAkEL but labelsets are disjoint / non-overlapping.
 *
 * As in <code>RAkEL</code>, <code>k</code> still indicates the size of partitions; however, note
 * that anything more than L/2 (for L labels) just causes the classifier to default to
 * <code>PS</code>, because it is not possible to form more than one labelset of size L/2 from L
 * labels.
 *
 * For example, for 6 labels, a possible partition with k=3 is the labelsets <code>[1,3,4]</code>
 * and <code>[0,2,5]</code> (indices 0,...,5).
 *
 * Note that the number of partitions (in this case, 2) is determined automatically (unlike in
 * <code>RAkEL</code>, where this, the <code>M</code> parameter, is left open).
 *
 * @see RAkEL
 * @author Jesse Read
 * @version September 2015
 */
public class RAkELd extends PS implements TechnicalInformationHandler {

    /** for serialization. */
    private static final long serialVersionUID = -6208388889440497990L;

    protected Classifier m_Classifiers[] = null;
    protected Instances m_InstancesTemplates[] = null;
    int m_K = 3;
    int m_M = 10;
    protected int kMap[][] = null;

    /**
     * Description to display in the GUI.
     *
     * @return the description
     */
    @Override
    public String globalInfo() {
        return "Takes RAndom partition of labELs; like RAkEL but labelsets are disjoint / non-overlapping subsets.";
    }

    @Override
    public void buildClassifier(final Instances D) throws Exception {
        int L = D.classIndex();
        int N = D.numInstances();
        Random random = new Random(this.m_S);

        // Note: a slightly roundabout way of doing it:
        // number of disjoint partitions (double division, so that ceil takes effect)
        int num = (int) Math.ceil(L / (double) this.m_K);
        this.kMap = SuperLabelUtils.generatePartition(A.make_sequence(L), num, random, true);
        this.m_M = this.kMap.length;
        this.m_Classifiers = AbstractClassifier.makeCopies(this.m_Classifier, this.m_M);
        this.m_InstancesTemplates = new Instances[this.m_M];

        if (this.getDebug()) {
            System.out.println("Building " + this.m_M + " models of " + this.m_K + " partitions:");
        }

        for (int i = 0; i < this.m_M; i++) {
            if (Thread.currentThread().isInterrupted()) {
                throw new InterruptedException("Thread has been interrupted.");
            }
            if (this.getDebug()) {
                System.out.println("\tpartitioning model " + (i + 1) + "/" + this.m_M + ": " + Arrays.toString(this.kMap[i]) + ", P=" + this.m_P + ", N=" + this.m_N);
            }
            Instances D_i = SuperLabelUtils.makePartitionDataset(D, this.kMap[i], this.m_P, this.m_N);
            if (this.getDebug()) {
                System.out.println("\tbuilding model " + (i + 1) + "/" + this.m_M + ": " + Arrays.toString(this.kMap[i]));
            }
            this.m_Classifiers[i].buildClassifier(D_i);
            this.m_InstancesTemplates[i] = new Instances(D_i, 0);
        }
    }

    @Override
    public double[] distributionForInstance(final Instance x) throws Exception {

        int L = x.classIndex();

        // If there is only one label, predict it
        // if(L == 1) return new double[]{1.0};

        double y[] = new double[L];
        // int c[] = new int[L]; // to scale it between 0 and 1

        for (int m = 0; m < this.m_M; m++) {
            if (Thread.currentThread().isInterrupted()) {
                throw new InterruptedException("Thread has been interrupted.");
            }

            // Transform instance
            Instance x_m = PSUtils.convertInstance(x, L, this.m_InstancesTemplates[m]);
            x_m.setDataset(this.m_InstancesTemplates[m]);

            // Get a meta classification
            int i_m = (int) this.m_Classifiers[m].classifyInstance(x_m); // e.g., 2
            int k_indices[] = this.mapBack(this.m_InstancesTemplates[m], i_m); // e.g., [3,8]

            // Vote with classification
            for (int i : k_indices) {
                int index = this.kMap[m][i];
                y[index] += 1.;
            }
        }

        return y;
    }

    /**
     * mapBack: returns the original indices (encoded in the class attribute).
     */
    private int[] mapBack(final Instances template, final int i) {
        try {
            return MLUtils.toIntArray(template.classAttribute().value(i));
        } catch (Exception e) {
            return new int[] {};
        }
    }

    /**
     * Returns a string that describes a graph representing the object. The string should be in XMLBIF
     * ver. 0.3 format if the graph is a BayesNet, otherwise it should be in dotty format.
     *
     * @return the graph described by a string (label index as key)
     * @throws Exception
     *             if the graph can't be computed
     */
    @Override
    public Map<Integer, String> graph() throws Exception {
        Map<Integer, String> result;

        result = new HashMap<>();

        for (int i = 0; i < this.m_Classifiers.length; i++) {
            if (this.m_Classifiers[i] != null) {
                if (this.m_Classifiers[i] instanceof Drawable) {
                    result.put(i, ((Drawable) this.m_Classifiers[i]).graph());
                }
            }
        }

        return result;
    }

    /**
     * Returns the type of graph representing the object.
     *
     * @return the type of graph representing the object (label index as key)
     */
    @Override
    public Map<Integer, Integer> graphType() {
        Map<Integer, Integer> result;
        int i;

        result = new HashMap<>();

        if (this.m_Classifiers != null) {
            for (i = 0; i < this.m_Classifiers.length; i++) {
                if (this.m_Classifiers[i] instanceof Drawable) {
                    result.put(i, ((Drawable) this.m_Classifiers[i]).graphType());
                }
            }
        }

        return result;
    }

    @Override
    public String toString() {
        if (this.kMap == null) {
            return "No model built yet";
        }
        StringBuilder s = new StringBuilder("{");
        for (int k = 0; k < this.m_M; k++) {
            s.append(Arrays.toString(this.kMap[k]));
        }
        return s.append("}").toString();
    }

    /**
     * Gets the k parameter (the size of each partition).
     */
    public int getK() {
        return this.m_K;
    }

    /**
     * Sets the k parameter (the size of each partition).
     */
    public void setK(final int k) {
        this.m_K = k;
    }

    public String kTipText() {
        return "The number of labels in each partition -- should be 1 <= k < (L/2) where L is the total number of labels.";
    }

    @Override
    public Enumeration listOptions() {
        Vector result = new Vector();
        result.addElement(new Option("\t" + this.kTipText(), "k", 1, "-k <num>"));
        OptionUtils.add(result, super.listOptions());
        return OptionUtils.toEnumeration(result);
    }

    @Override
    public void setOptions(final String[] options) throws Exception {
        this.setK(OptionUtils.parse(options, 'k', 3));
        super.setOptions(options);
    }

    @Override
    public String[] getOptions() {
        List<String> result = new ArrayList<>();
        OptionUtils.add(result, 'k', this.getK());
        OptionUtils.add(result, super.getOptions());
        return OptionUtils.toArray(result);
    }

    @Override
    public TechnicalInformation getTechnicalInformation() {
        TechnicalInformation result;
        TechnicalInformation additional;

        result = new TechnicalInformation(Type.ARTICLE);
        result.setValue(Field.AUTHOR, "Grigorios Tsoumakas, Ioannis Katakis, Ioannis Vlahavas");
        result.setValue(Field.TITLE, "Random k-Labelsets for Multi-Label Classification");
        result.setValue(Field.JOURNAL, "IEEE Transactions on Knowledge and Data Engineering");
        result.setValue(Field.YEAR, "2011");
        result.setValue(Field.VOLUME, "23");
        result.setValue(Field.NUMBER, "7");
        result.setValue(Field.PAGES, "1079--1089");

        additional = new TechnicalInformation(Type.INPROCEEDINGS);
        additional.setValue(Field.AUTHOR, "Jesse Read, Antti Puurula, Albert Bifet");
        additional.setValue(Field.TITLE, "Multi-label Classification with Meta-labels");
        additional.setValue(Field.BOOKTITLE, "ICDM'14: International Conference on Data Mining (ICDM 2014). Shenzhen, China.");
        additional.setValue(Field.PAGES, "941--946");
        additional.setValue(Field.YEAR, "2014");
        result.add(additional);

        return result;
    }

    @Override
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 9117 $");
    }

    public static void main(final String args[]) {
        ProblemTransformationMethod.evaluation(new RAkELd(), args);
    }
}
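The partitioning step is the only real difference from RAkEL. The following is a self-contained sketch of the idea behind that step (an illustrative stand-in for SuperLabelUtils.generatePartition, not that method's implementation): shuffle the L label indices once, then cut the shuffled sequence into ceil(L/k) consecutive chunks, so every label lands in exactly one labelset and the labelsets are disjoint by construction:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;

public class RAkELdPartitionSketch {

    public static void main(String[] args) {
        int L = 6, k = 3;
        Random random = new Random(1);

        // Shuffle the label indices 0..L-1 once.
        List<Integer> labels = new ArrayList<>();
        for (int j = 0; j < L; j++) {
            labels.add(j);
        }
        Collections.shuffle(labels, random);

        // ceil(L/k) disjoint chunks; note the double division, so that ceil takes effect.
        int num = (int) Math.ceil(L / (double) k);
        for (int i = 0; i < num; i++) {
            int from = i * k;
            int to = Math.min(from + k, L);
            int[] part = new int[to - from];
            for (int j = from; j < to; j++) {
                part[j - from] = labels.get(j);
            }
            System.out.println("partition " + i + " = " + Arrays.toString(part));
        }
    }
}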
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/RT.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package meka.classifiers.multilabel;

import weka.core.Attribute;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;

/**
 * RT.java - The 'Ranking + Threshold' classifier. Duplicates each multi-labelled example, and
 * assigns one of the labels (only) to each copy; then trains a regular multi-class base classifier.
 * At test time, a <i>threshold</i> separates relevant from irrelevant labels using the posterior
 * for each class value (i.e., label).
 *
 * @author Jesse Read (jmr30@cs.waikato.ac.nz)
 * @version 2010
 */
public class RT extends ProblemTransformationMethod {

    /** for serialization. */
    private static final long serialVersionUID = 7348139531854838421L;

    /**
     * Description to display in the GUI.
     *
     * @return the description
     */
    @Override
    public String globalInfo() {
        return "Duplicates each multi-labelled example, and assigns one of the labels (only) to each copy; then trains a regular multi-class base classifier.\n" + "At test time, a threshold separates relevant from irrelevant labels using the posterior for each class value (i.e., label).";
    }

    @Override
    public void buildClassifier(final Instances D) throws Exception {
        this.testCapabilities(D);

        int L = D.classIndex();

        // Create header
        Instances D_ = new Instances(D, 0, 0);

        // Delete the old class attributes
        for (int j = 0; j < L; j++) {
            D_.deleteAttributeAt(0);
        }

        // Make the new class attribute
        FastVector classes = new FastVector(L);
        for (int j = 0; j < L; j++) {
            classes.addElement("C" + j);
        }

        // Add the new class attribute
        D_.insertAttributeAt(new Attribute("ClassY", classes), 0);
        D_.setClassIndex(0);

        // Loop through D again
        for (int i = 0; i < D.numInstances(); i++) {
            for (int j = 0; j < L; j++) {
                if (Thread.currentThread().isInterrupted()) {
                    throw new InterruptedException("Thread has been interrupted.");
                }
                if ((int) D.instance(i).value(j) > 0) {
                    // make a copy here ...
                    Instance x_ = (Instance) D.instance(i).copy();
                    x_.setDataset(null);
                    // make it multi-class, and set the appropriate class value ...
                    for (int k = 1; k < L; k++) {
                        x_.deleteAttributeAt(1);
                    }
                    x_.setDataset(D_);
                    x_.setClassValue(j); // (*) this just points to the right index
                    D_.add(x_);
                }
            }
        }

        // Save the template
        this.m_InstancesTemplate = new Instances(D_, 0);

        // Build
        if (this.getDebug()) {
            System.out.println("Building classifier " + this.m_Classifier.getClass().getName() + " on " + D_.numInstances() + " instances (originally " + D.numInstances() + ")");
        }
        this.m_Classifier.buildClassifier(D_);
    }

    /**
     * ConvertInstance - Convert an Instance to multi-class format by deleting all but one of the label
     * attributes.
     *
     * @param x
     *            incoming Instance
     * @return the converted Instance
     */
    public Instance convertInstance(final Instance x) {
        int L = x.classIndex();

        // Copy the original instance
        Instance x_ = (Instance) x.copy();
        x_.setDataset(null);

        // Delete all class attributes
        for (int i = 0; i < L; i++) {
            x_.deleteAttributeAt(0);
        }

        // Add one of those class attributes at the beginning
        x_.insertAttributeAt(0);

        // Hopefully setting the dataset will configure that attribute properly
        x_.setDataset(this.m_InstancesTemplate);

        return x_;
    }

    @Override
    public double[] distributionForInstance(final Instance x) throws Exception {
        return this.m_Classifier.distributionForInstance(this.convertInstance(x));
    }

    @Override
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 9117 $");
    }

    public static void main(final String args[]) {
        ProblemTransformationMethod.evaluation(new RT(), args);
    }
}
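The 'Threshold' half of Ranking + Threshold reduces to one comparison per label. A minimal sketch, with the posterior values and the 0.5 cut-off chosen purely for illustration:

import java.util.Arrays;

public class RTThresholdSketch {

    // Turn a posterior over L class values (one per label) into a 0/1 label vector.
    static int[] threshold(double[] posterior, double t) {
        int[] y = new int[posterior.length];
        for (int j = 0; j < posterior.length; j++) {
            y[j] = posterior[j] >= t ? 1 : 0;
        }
        return y;
    }

    public static void main(String[] args) {
        double[] posterior = {0.7, 0.1, 0.55, 0.05}; // as from distributionForInstance(x)
        System.out.println(Arrays.toString(threshold(posterior, 0.5))); // [1, 0, 1, 0]
    }
}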
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/SemisupervisedClassifier.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package meka.classifiers.multilabel;

import weka.core.Instances;

/**
 * SemisupervisedClassifier.java - An Interface for Multilabel Semisupervised Classifiers.
 * This is an interface for multi-label semisupervised classification, i.e., training on a partially labelled dataset. <br>
 * For classifiers implementing this interface, the method introduceUnlabelledData(unlabeledInstances) will be called prior to buildClassifier(trainingInstances). <br>
 * As of writing this comment, the unlabelled data comes only from the test data -- there is not yet any option for setting a separate set of unlabelled data (although this is planned for future versions).
 *
 * @author Jesse Read
 * @version September 2015
 */
public interface SemisupervisedClassifier extends MultiLabelClassifier {

    /**
     * Set Unlabelled Data - provide unlabelled data prior to calling buildClassifier(Instances).
     * @param unlabeledInstances Instances for which the true class labels are not available for each instance.
     */
    void introduceUnlabelledData(Instances unlabeledInstances);
}
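A minimal sketch of the calling contract the javadoc above describes, written against the interface itself (the Instances arguments are assumed to be loaded elsewhere):

import meka.classifiers.multilabel.SemisupervisedClassifier;
import weka.core.Instances;

public class SSLCallOrderSketch {

    // The contract: unlabelled data is handed over before the (partially)
    // labelled training data is used for building.
    static void buildSemisupervised(SemisupervisedClassifier h, Instances train, Instances unlabelled) throws Exception {
        h.introduceUnlabelledData(unlabelled); // called prior to buildClassifier(...)
        h.buildClassifier(train);
    }
}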
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/NN/AbstractDeepNeuralNet.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package meka.classifiers.multilabel.NN;

import meka.core.OptionUtils;
import weka.core.Option;

import java.util.*;

/**
 * AbstractDeepNeuralNet.java - Extends AbstractNeuralNet with depth options.
 * @author Jesse Read (jesse@tsc.uc3m.es)
 * @version December 2012
 */
public abstract class AbstractDeepNeuralNet extends AbstractNeuralNet {

    private static final long serialVersionUID = 5416731163612885485L;

    protected int m_N = 2;

    public int getN() {
        return m_N;
    }

    public void setN(int n) {
        m_N = n;
    }

    public String nTipText() {
        return "The number of RBMs.";
    }

    @Override
    public Enumeration listOptions() {
        Vector result = new Vector();
        result.addElement(new Option("\tSets the number of RBMs\n\tdefault: 2", "N", 1, "-N <value>"));
        OptionUtils.add(result, super.listOptions());
        return OptionUtils.toEnumeration(result);
    }

    @Override
    public void setOptions(String[] options) throws Exception {
        setN(OptionUtils.parse(options, 'N', 2));
        super.setOptions(options);
    }

    @Override
    public String [] getOptions() {
        List<String> result = new ArrayList<>();
        OptionUtils.add(result, 'N', getN());
        OptionUtils.add(result, super.getOptions());
        return OptionUtils.toArray(result);
    }
}
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/NN/AbstractNeuralNet.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package meka.classifiers.multilabel.NN;

import meka.classifiers.multilabel.ProblemTransformationMethod;
import meka.core.OptionUtils;
import weka.core.Option;
import weka.core.Randomizable;

import java.util.*;

/**
 * AbstractNeuralNet.java - Provides common options, constants, and other functions for NNs.
 * @author Jesse Read
 * @version December 2012
 */
public abstract class AbstractNeuralNet extends ProblemTransformationMethod implements Randomizable {

    private static final long serialVersionUID = 5534606285449062819L;

    /** number of hidden units */
    protected int m_H = 10;
    /** number of epochs */
    protected int m_E = 1000;
    /** learning rate */
    protected double m_R = 0.1;
    /** momentum */
    protected double m_M = 0.1;
    /** random seed */
    protected int m_Seed = 0;

    public void setH(int h) {
        m_H = h;
    }

    public int getH() {
        return m_H;
    }

    public String hTipText() {
        return "Number of hidden units.";
    }

    public void setE(int n) {
        m_E = n;
    }

    public int getE() {
        return m_E;
    }

    public String eTipText() {
        return "Number of epochs.";
    }

    public void setLearningRate(double r) {
        m_R = r;
    }

    public double getLearningRate() {
        return m_R;
    }

    public String learningRateTipText() {
        return "Learning Rate.";
    }

    public void setMomentum(double m) {
        m_M = m;
    }

    public double getMomentum() {
        return m_M;
    }

    public String momentumTipText() {
        return "Momentum.";
    }

    @Override
    public int getSeed() {
        return m_Seed;
    }

    @Override
    public void setSeed(int s) {
        m_Seed = s;
    }

    public String seedTipText() {
        return "The seed value for randomizing the data.";
    }

    @Override
    public String toString() {
        return "h="+getH()+", E="+getE();
    }

    @Override
    public Enumeration listOptions() {
        Vector result = new Vector();
        result.addElement(new Option("\tSets the number of hidden units\n\tdefault: 10", "H", 1, "-H <value>"));
        result.addElement(new Option("\tSets the maximum number of epochs\n\tdefault: 1000\t(auto-cut-out)", "E", 1, "-E <value>"));
        result.addElement(new Option("\tSets the learning rate (typically somewhere between 'very small' and 0.1)\n\tdefault: 0.1", "r", 1, "-r <value>"));
        result.addElement(new Option("\tSets the momentum (typically somewhere between 0.1 and 0.9)\n\tdefault: 0.1", "m", 1, "-m <value>"));
        OptionUtils.add(result, super.listOptions());
        return OptionUtils.toEnumeration(result);
    }

    @Override
    public void setOptions(String[] options) throws Exception {
        setH(OptionUtils.parse(options, 'H', 10));
        setE(OptionUtils.parse(options, 'E', 1000));
        setLearningRate(OptionUtils.parse(options, 'r', 0.1));
        setMomentum(OptionUtils.parse(options, 'm', 0.1));
        super.setOptions(options);
    }

    @Override
    public String [] getOptions() {
        List<String> result = new ArrayList<>();
        OptionUtils.add(result, 'H', getH());
        OptionUtils.add(result, 'E', getE());
        OptionUtils.add(result, 'r', getLearningRate());
        OptionUtils.add(result, 'm', getMomentum());
        OptionUtils.add(result, super.getOptions());
        return OptionUtils.toArray(result);
    }
}
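For orientation, the learning rate (-r) and momentum (-m) configured above enter a weight update in the standard backpropagation fashion. A generic sketch of one such step with made-up numbers -- this illustrates the role of the two options, not MEKA's internal training loop:

public class MomentumUpdateSketch {

    public static void main(String[] args) {
        double r = 0.1;         // learning rate (option -r)
        double m = 0.1;         // momentum (option -m)
        double w = 0.5;         // some weight
        double prevDelta = 0.0; // last step's update
        double grad = -0.2;     // dE/dw from backprop (made-up value)

        for (int epoch = 0; epoch < 3; epoch++) { // -E bounds the number of such epochs
            double delta = -r * grad + m * prevDelta;
            w += delta;
            prevDelta = delta;
            System.out.printf("epoch %d: w = %.4f%n", epoch, w);
        }
    }
}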
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/cc/CNode.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package meka.classifiers.multilabel.cc;

import meka.core.A;
import meka.core.F;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.functions.SMO;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Utils;

import java.io.FileReader;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Random;

/**
 * CNode.java - A Classifier Node class (for CC-like classifiers).
 * @author Jesse Read
 * @version June 2013
 */
public class CNode implements Serializable {

    private int j = -1;
    private int d = -1;
    private int inX[] = null;
    private int paY[] = null;
    private Instances T = null;
    private Instance t_ = null;
    private Classifier h = null;
    private int map[] = null;

    /**
     * CNode - A Node 'j', taking inputs from all parents inX and paY.
     * @param j    the label index of this node
     * @param inX  attribute indices going into this node
     * @param paY  label indices going into this node
     */
    public CNode(int j, int inX[], int paY[]) {
        this.j = j;
        this.inX = inX;
        this.paY = paY;
    }

    /** getParentsY - get the parents (indices) of this node */
    public int[] getParentsY() {
        return paY;
    }

    /**
     * Transform - transform dataset D for this node.
     * this.j defines the current node index, e.g., 3
     * this.paY[] defines parents, e.g., [1,4]
     * we should remove the rest, e.g., [0,2,5,...,L-1]
     * @return the dataset D with all label variables removed EXCEPT the current node and its parents
     */
    public Instances transform(Instances D) throws Exception {
        int L = D.classIndex();
        d = D.numAttributes() - L;
        int keep[] = A.append(this.paY,j); // keep all parents and self!
        Arrays.sort(keep);
        int remv[] = A.invert(keep,L); // i.e., remove the rest < L
        Arrays.sort(remv);
        map = new int[L];
        for(int j = 0; j < L; j++) {
            map[j] = Arrays.binarySearch(keep,j);
        }
        Instances D_ = F.remove(new Instances(D),remv, false);
        D_.setClassIndex(map[this.j]);
        return D_;
    }

    /**
     * Build - Create transformation for this node, and train classifier of type H upon it.
     * The dataset should have class as index 'j', and remove all indices less than L *not* in paY.
     */
    public void build(Instances D, Classifier H) throws Exception {
        // transform data
        T = transform(D);
        // build SLC 'h'
        h = AbstractClassifier.makeCopy(H);
        h.buildClassifier(T);
        // save templates
        //t_ = new SparseInstance(T.numAttributes());
        //t_.setDataset(T);
        //t_.setClassMissing(); // [?,x,x,x]
        T.clear();
    }

    /**
     * The distribution for this node, given input x.
     * @return p( y_j = k | x , y_pred ) for k in {0,1}
     */
    public double[] distribution(Instance x, double ypred[]) throws Exception {
        Instance x_ = transform(x,ypred);
        return h.distributionForInstance(x_);
    }

    /** Same as #distribution(Instance, double[]), but the Instance is pre-transformed with ypred inside. */
    public double[] distributionT(Instance x_) throws Exception {
        return h.distributionForInstance(x_);
    }

    /**
     * Sample the distribution given by #distribution(Instance, double[]).
     * @return y_j ~ p( y_j | x , y_pred )
     */
    public double sample(Instance x, double ypred[], Random r) throws Exception {
        double p[] = distribution(x, ypred);
        return A.samplePMF(p,r);
    }

    /**
     * Transform - turn [y1,y2,y3,x1,x2] into [y1,y2,x1,x2].
     * @return transformed Instance
     */
    public Instance transform(Instance x, double ypred[]) throws Exception {
        x = (Instance)x.copy();
        int L = x.classIndex();
        int L_c = (paY.length + 1);
        x.setDataset(null);
        for(int j = 0; j < (L - L_c); j++) {
            x.deleteAttributeAt(0);
        }
        for(int pa : paY) {
            //System.out.println("x_["+map[pa]+"] <- "+ypred[pa]);
            x.setValue(map[pa],ypred[pa]);
        }
        x.setDataset(T);
        x.setClassMissing();
        return x;
    }

    public void updateTransform(Instance t_, double ypred[]) throws Exception {
        for(int pa : this.paY) {
            t_.setValue(this.map[pa],ypred[pa]);
        }
    }

    /*
     * TODO I thought this would be faster, but apparently not
    public Instance transform(Instance x, double ypred[]) throws Exception {
        int L = x.classIndex();
        int L_ = paY.length + 1;
        t_ = MLUtils.copyValues(t_,x,L,L_);
        for(int pa : paY) {
            t_.setValue(map[pa],ypred[pa]);
        }
        //x_.setDataset(T);
        t_.setClassMissing();
        //System.out.println("x_ = "+MLUtils.toBitString(x_,L_));
        return t_;
    }
    */

    /**
     * Return the argmax on #distribution(Instance, double[]).
     * @return argmax_{k in 0,1,...} p( y_j = k | x , y_pred )
     */
    public double classify(Instance x, double ypred[]) throws Exception {
        Instance x_ = transform(x,ypred);
        return Utils.maxIndex(h.distributionForInstance(x_));
    }

    /**
     * Transform.
     * @param D     original Instances
     * @param c     to be the class Attribute
     * @param pa_c  the parent indices of c
     * @return      new Instances T
     */
    public static Instances transform(Instances D, int c, int pa_c[]) throws Exception {
        int L = D.classIndex();
        int keep[] = A.append(pa_c,c); // keep all parents and self!
        Arrays.sort(keep);
        int remv[] = A.invert(keep,L); // i.e., remove the rest < L
        Arrays.sort(remv);
        Instances T = F.remove(new Instances(D),remv, false);
        int map[] = new int[L];
        for(int j = 0; j < L; j++) {
            map[j] = Arrays.binarySearch(keep,j);
        }
        T.setClassIndex(map[c]);
        return T;
    }

    /**
     * Returns the underlying classifier.
     *
     * @return the classifier
     */
    public Classifier getClassifier() {
        return h;
    }

    /**
     * Main - run some tests.
     */
    public static void main(String args[]) throws Exception {
        Instances D = new Instances(new FileReader(args[0]));
        Instance x = D.lastInstance();
        D.remove(D.numInstances()-1);
        int L = Integer.parseInt(args[1]);
        D.setClassIndex(L);
        double y[] = new double[L];
        Random r = new Random();
        int s[] = new int[]{1,0,2};
        int PA_J[][] = new int[][]{
            {},{},{0,1},
        };
        //MLUtils.randomize(s,r);
        // MUST GO IN TREE ORDER !!
        for(int j : s) {
            int pa_j[] = PA_J[j];
            System.out.println("PARENTS = "+Arrays.toString(pa_j));
            //MLUtils.randomize(pa_j,r);
            System.out.println("**** TRAINING ***");
            CNode n = new CNode(j,null,pa_j);
            n.build(D,new SMO());
            /*
             */
            //Instances D_ = n.transform(D);
            //n.T = D_;
            System.out.println("============== D_"+j+" / class = "+n.T.classIndex()+" =");
            System.out.println(""+n.T);
            System.out.println("**** TESTING ****");
            /*
            Instance x_ = MLUtils.setTemplate(x,(Instance)D_.firstInstance().copy(),D_);
            for(int pa : pa_j) {
                //System.out.println(""+map[pa]);
                x_.setValue(n.map[pa],y[pa]);
            }
            //x_.setDataset(T);
            x_.setClassMissing();
            */
            //n.T = D_;
            Instance x_ = n.transform(x,y);
            System.out.println(""+x_);
            y[j] = 1;
        }
    }
}
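The index bookkeeping in transform(Instances) is the subtle part: after all label columns except the node and its parents are removed, map[] records where each original label index ends up. A self-contained sketch of just that computation, with plain arrays standing in for A.append/A.invert:

import java.util.Arrays;

public class CNodeMapSketch {

    public static void main(String[] args) {
        int L = 6;
        int j = 3;          // this node's label index
        // keep = parents {1,4} plus self, sorted (what A.append + sort produces)
        int[] keep = {1, 3, 4};

        // map[l] = position of label l among the kept columns,
        // or a negative value if the column was removed.
        int[] map = new int[L];
        for (int l = 0; l < L; l++) {
            map[l] = Arrays.binarySearch(keep, l);
        }
        System.out.println("map = " + Arrays.toString(map)); // [-1, 0, -2, 1, 2, -4]

        // map[j] becomes the new class index; map[1] and map[4] are where the
        // parents' predictions get written by transform(Instance, double[]).
        System.out.println("class index after transform = " + map[j]);
    }
}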
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/cc/Trellis.java
package meka.classifiers.multilabel.cc;

import meka.core.A;
import meka.core.StatUtils;
import weka.core.Instances;

import java.util.Arrays;
import java.io.Serializable;

/**
 * CTrellis. Classifier Trellis Structure.
 * @author Jesse Read
 * @version Feb 2014
 */
public class Trellis implements Serializable {

    /*
     * Make the Trellis.
     * @TODO use this format:
     *    1 2 3 4 5 6   (with 2)
     * 1      w w
     * 2        w
     * 3          w w
     * 4            w
     * 5              w w
     * 6
     * where w > 0 for connection
     * CURRENTLY:
     * 1:
     * 2:1
     * 3:1 2
     * 4:2 3
     */
    public int trellis[][];
    public int indices[];
    public int WIDTH;
    public int TYPE;
    public int L = -1;

    public Trellis(int indices[], int WIDTH, int connectivity) {
        this.indices = indices;
        this.L = indices.length;
        this.WIDTH = WIDTH;
        this.TYPE = connectivity;
        this.make();
    }

    public Trellis(int indices[], int trellis[][], int WIDTH, int connectivity) {
        this.indices = indices;
        this.trellis = trellis;
        this.WIDTH = WIDTH;
        this.TYPE = connectivity;
        this.L = indices.length;
    }

    public String toString() {
        /*
        StringBuilder sb = new StringBuilder();
        for(int jv : this.indices) {
            sb.append("P(y_"+jv+" | y_"+Arrays.toString(this.trellis[jv])+")\n");
        }
        return sb.toString();
        */
        StringBuilder sb = new StringBuilder();
        int counter = 0;
        for(int jv : this.indices) {
            counter++;
            String num = String.format("%3d", jv);
            sb.append(num);
            if (counter % WIDTH == 0)
                sb.append("\n");
        }
        sb.append("\n");
        return sb.toString();
    }

    // swap indices j and k (and recompute the parents of the affected nodes)
    public void swap(int j, int k) {
        // swap indices
        this.indices = A.swap(this.indices,j,k);
        // fix parents
        int jv = this.indices[j];
        int kv = this.indices[k];
        this.trellis[jv] = getParents(j, indices, WIDTH, TYPE);
        this.trellis[kv] = getParents(k, indices, WIDTH, TYPE);
    }

    // try and put these two nodes together in the trellis
    public void putTogether(int j, int k) {
        int jv = this.indices[j];
        int kv = this.indices[k];
        if ((jv + 1) % WIDTH != 0) {
            // if not at the edge
            swap(j + 1,k);
        }
        else if ((kv + 1) % WIDTH != 0) {
            // if not at the edge
            swap(j, k + 1);
        }
        else {
            System.out.println("DO SOMETHING ELSE");
        }
    }

    // return neighbourhood of j in trellis
    private int[] ne(int j) {
        return new int[]{};
    }

    public double weight(Instances D) {
        double sum = 0.0;
        for(int jv : this.indices) {
            for(int pa : this.trellis[jv]) {
                sum += StatUtils.I(D,pa,jv);
            }
        }
        return sum;
    }

    public double weightNeighbourhood(int j) {
        double w = 0.0;
        for(int n : ne(j)) {
            //w += StatUtils.I(D,n,j);
        }
        return w;
    }

    /**
     * What would the 'score' be, putting j_ at position j, in indices, with I matrix.
     */
    public double weight(int indices[], int j, int j_, double I[][]) {
        int pa_j[] = getParents(j,indices,WIDTH,TYPE);
        double weight = 0.0;
        for (int pj : pa_j) {
            weight += I[pj][j_];
        }
        return weight;
    }

    private final int COL_INDEX(int j) {
        return j % this.WIDTH;
    }

    private final int ROW_INDEX(int j) {
        return j / this.WIDTH;
    }

    /**
     * Get the neighbouring variables of a given index.
     * For example,
     *   4 1
     *   2 5
     *   3 0
     * getNeighbours(3) = [2,1,0]     // if TYPE = 1
     * getNeighbours(3) = [4,2,1,0,3] // if TYPE = 2
     * getNeighbours(3) = []          // if TYPE = 0
     * @param j an index
     * @return neighbouring variables of index k
     * NOTE: takes an index, returns variables -- A BIT STRANGE -- should return indices also
     */
    public int[] getNeighbours(int j) {
        //int jv = this.indices[j]; // variable
        int ne_j[] = new int[]{};
        if (this.TYPE > 0) {
            // immediate neighbours
            if (j >= 1 && j % this.WIDTH > 0)                       // add prev
                ne_j = A.append(ne_j,indices[j-1]);
            if (j >= this.WIDTH)                                    // add above
                ne_j = A.append(ne_j,indices[j-this.WIDTH]);
            if (j < (indices.length-1) && (j+1) % this.WIDTH > 0)   // add next
                ne_j = A.append(ne_j,indices[j+1]);
            if (j < (indices.length-this.WIDTH))                    // add below
                ne_j = A.append(ne_j,indices[j+this.WIDTH]);
        }
        if (this.TYPE > 1) {
            // diagonal
            if (j >= 1 && j >= WIDTH && j % this.WIDTH > 0)            // add up left
                ne_j = A.append(ne_j,indices[j-1-this.WIDTH]);
            if (COL_INDEX(j) < (WIDTH-1) && j >= WIDTH)                // add up right
                ne_j = A.append(ne_j,indices[j-WIDTH+1]);
            if (j < (L-this.WIDTH) && COL_INDEX(j) > 0)                // add below left
                ne_j = A.append(ne_j,indices[j+this.WIDTH-1]);
            if (COL_INDEX(j) < (this.WIDTH-1) && L - j > this.WIDTH)   // add below right
                ne_j = A.append(ne_j,indices[j+this.WIDTH+1]);
        }
        return ne_j;
    }

    /**
     * Get the parent variables of a given index.
     * For example,
     *   4 1
     *   2 5
     *   3 0
     * getParents(3) = [2,1] // if CONNECTIVITY = 1
     * @param j an index
     * @return parent variables of index k
     * NOTE: takes an index, returns variables -- A BIT STRANGE -- should return indices also
     */
    private int[] getParents(int j, int indices[], int WIDTH, int CONNECTIVITY) {
        //int jv = indices[j];
        int pa_j[] = new int[]{};
        if (CONNECTIVITY > 0) {
            if (j >= 1 && j % WIDTH > 0)                // add prev
                pa_j = A.append(pa_j,indices[j-1]);
            if (j >= WIDTH)                             // add above
                pa_j = A.append(pa_j,indices[j-WIDTH]);
        }
        if (CONNECTIVITY > 1) {
            if (j >= 1 && j >= WIDTH && j % WIDTH > 0)  // add diag
                pa_j = A.append(pa_j,indices[j-WIDTH-1]);
        }
        if (CONNECTIVITY > 2) {
            if (j % WIDTH >= 2)                         // add 2nd prev
                pa_j = A.append(pa_j,indices[j-2]);
            if (j >= (WIDTH*2))                         // add 2nd above
                pa_j = A.append(pa_j,indices[j-(WIDTH*2)]);
        }
        return pa_j;
    }

    public void make() {
        trellis = new int[this.L][];
        for(int j = 0; j < L; j++) {
            int jv = indices[j];
            trellis[jv] = getParents(j, indices, WIDTH,TYPE);
        }
    }
}
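The CONNECTIVITY=1 parent rule used throughout is simply "previous cell in the same row, plus the cell above". A standalone sketch of that rule over the width-2 grid from the javadoc above (this re-implements only the rule itself, inline, without A.append):

import java.util.Arrays;

public class TrellisParentsSketch {

    // CONNECTIVITY=1 parents: previous cell in the same row, and the cell above.
    static int[] parents(int j, int[] indices, int width) {
        int count = 0;
        int[] pa = new int[2];
        if (j >= 1 && j % width > 0) { // not in the first column: add prev
            pa[count++] = indices[j - 1];
        }
        if (j >= width) {              // not in the first row: add above
            pa[count++] = indices[j - width];
        }
        return Arrays.copyOf(pa, count);
    }

    public static void main(String[] args) {
        int[] indices = {4, 1, 2, 5, 3, 0}; // the grid from the getParents javadoc:
        int width = 2;                      //   4 1
                                            //   2 5
                                            //   3 0
        for (int j = 0; j < indices.length; j++) {
            // e.g., j=3 (variable 5) prints parents [2, 1], matching the javadoc.
            System.out.println("parents of y_" + indices[j] + " = " + Arrays.toString(parents(j, indices, width)));
        }
    }
}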
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/incremental/BRUpdateable.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package meka.classifiers.multilabel.incremental;

import meka.classifiers.incremental.IncrementalEvaluation;
import meka.classifiers.multilabel.BR;
import meka.classifiers.multilabel.IncrementalMultiLabelClassifier;
import meka.core.MLUtils;
import weka.classifiers.UpdateableClassifier;
import weka.classifiers.trees.HoeffdingTree;
import weka.core.Instance;

/**
 * BRUpdateable.java - Updateable BR.
 * Must be run with an UpdateableClassifier base classifier.
 * @see BR
 * @author Jesse Read
 * @version September, 2011
 */
public class BRUpdateable extends BR implements IncrementalMultiLabelClassifier {

    /** for serialization. */
    private static final long serialVersionUID = 6705611077773512052L;

    @Override
    public String globalInfo() {
        return "Updateable BR\nMust be run with an Updateable base classifier.";
    }

    public BRUpdateable() {
        // default classifier for GUI
        this.m_Classifier = new HoeffdingTree();
    }

    @Override
    protected String defaultClassifierString() {
        // default classifier for CLI
        return "weka.classifiers.trees.HoeffdingTree";
    }

    @Override
    public void updateClassifier(Instance x) throws Exception {
        int L = x.classIndex();

        if(getDebug()) System.out.print("-: Updating "+L+" models");

        for(int j = 0; j < L; j++) {
            Instance x_j = (Instance)x.copy();
            x_j.setDataset(null);
            x_j = MLUtils.keepAttributesAt(x_j,new int[]{j},L);
            x_j.setDataset(m_InstancesTemplates[j]);
            ((UpdateableClassifier)m_MultiClassifiers[j]).updateClassifier(x_j);
        }

        if(getDebug()) System.out.println(":- ");
    }

    public static void main(String args[]) {
        IncrementalEvaluation.runExperiment(new BRUpdateable(),args);
    }
}
0
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel
java-sources/ai/libs/thirdparty/interruptible-meka/0.1.2/meka/classifiers/multilabel/incremental/CCUpdateable.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package meka.classifiers.multilabel.incremental;

import meka.classifiers.incremental.IncrementalEvaluation;
import meka.classifiers.multilabel.CC;
import meka.classifiers.multilabel.IncrementalMultiLabelClassifier;
import meka.core.MLUtils;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.UpdateableClassifier;
import weka.classifiers.trees.HoeffdingTree;
import weka.core.Instance;
import weka.core.Instances;

import java.util.Arrays;
import java.util.Random;

/**
 * CCUpdateable.java - Updateable version of CC.
 *
 * A CC method which can be updated incrementally (assuming an incremental base classifier).
 * @see CC
 * @author Jesse Read
 * @version September, 2011
 */
public class CCUpdateable extends CC implements IncrementalMultiLabelClassifier {

    /** for serialization. */
    private static final long serialVersionUID = 2856976982562474367L;

    public CCUpdateable() {
        // default classifier for GUI
        this.m_Classifier = new HoeffdingTree();
    }

    @Override
    protected String defaultClassifierString() {
        // default classifier for CLI
        return "weka.classifiers.trees.HoeffdingTree";
    }

    @Override
    public String globalInfo() {
        return "Updateable CC\nMust be run with an Updateable base classifier.";
    }

    protected ULink root = null;

    protected class ULink {

        private ULink next = null;
        private AbstractClassifier classifier = null;
        public Instances _template = null;
        private int index = -1;
        private int value = -1;
        private int excld[]; // to contain the indices to delete
        private int j = 0;

        public ULink(int chain[], int j, Instances train) throws Exception {

            this.j = j;
            this.index = chain[j];

            // sort out excludes [4|5,1,0,2,3]
            this.excld = Arrays.copyOfRange(chain,j+1,chain.length);
            // sort out excludes [0,1,2,3,5]
            Arrays.sort(this.excld);

            this.classifier = (AbstractClassifier)AbstractClassifier.forName(getClassifier().getClass().getName(),((AbstractClassifier)getClassifier()).getOptions());

            Instances new_train = new Instances(train);

            // delete all except one (leaving a binary problem)
            if(getDebug()) System.out.print(" "+this.index);
            new_train.setClassIndex(-1);
            // delete all the attributes (and track where our index ends up)
            this.value = chain[j];
            int c_index = value;
            for(int i = excld.length-1; i >= 0; i--) {
                new_train.deleteAttributeAt(excld[i]);
                if (excld[i] < this.index)
                    c_index--;
            }
            new_train.setClassIndex(c_index);

            _template = new Instances(new_train,0);

            this.classifier.buildClassifier(new_train);
            new_train = null;

            if(j+1 < chain.length)
                next = new ULink(chain, ++j, train);
        }

        protected void update(Instance x) throws Exception {
            Instance x_ = (Instance)x.copy();
            x_.setDataset(null);

            // delete all except one (leaving a binary problem)
            // delete all the attributes (and track where our index ends up)
            int c_index = this.value;
            for(int i = excld.length-1; i >= 0; i--) {
                x_.deleteAttributeAt(excld[i]);
                if (excld[i] < this.index)
                    c_index--;
            }
            x_.setDataset(this._template);

            ((UpdateableClassifier)this.classifier).updateClassifier(x_);

            if (next != null)
                next.update(x);
        }

        protected void classify(Instance test) throws Exception {
            // copy
            Instance copy = (Instance)test.copy();
            copy.setDataset(null);

            // delete attributes we don't need
            for(int i = excld.length-1; i >= 0; i--) {
                copy.deleteAttributeAt(this.excld[i]);
            }

            // set template
            copy.setDataset(this._template);

            // set class
            test.setValue(this.index,(int)(this.classifier.classifyInstance(copy)));

            // carry on
            if (next!=null)
                next.classify(test);
        }

        @Override
        public String toString() {
            return (next == null) ? String.valueOf(this.index) : String.valueOf(this.index)+">"+next.toString();
        }
    }

    @Override
    public void buildClassifier(Instances D) throws Exception {
        testCapabilities(D);

        int L = D.classIndex();
        int indices[] = retrieveChain();
        if (indices == null) {
            indices = MLUtils.gen_indices(L);
            MLUtils.randomize(indices,new Random(m_S));
        }
        if(getDebug()) System.out.print(":- Chain (");
        root = new ULink(indices,0,D);
        if (getDebug()) System.out.println(" ) -:");
    }

    @Override
    public void updateClassifier(Instance x) throws Exception {
        if (root!=null)
            root.update(x);
        else
            throw new Exception("Tried to update the chain, but the chain has not been built yet");
    }

    @Override
    public double[] distributionForInstance(Instance x) throws Exception {
        int L = x.classIndex();
        root.classify(x);
        return MLUtils.toDoubleArray(x,L);
    }

    public static void main(String args[]) {
        IncrementalEvaluation.runExperiment(new CCUpdateable(),args);
    }
}
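A minimal sketch of how such an updateable chain would be driven -- build once on an initial window, then update per streamed instance -- using only methods that appear above; the dataset path and the number of labels are placeholders:

import java.io.FileReader;

import meka.classifiers.multilabel.incremental.CCUpdateable;
import weka.core.Instances;

public class CCUpdateableStreamSketch {

    public static void main(String[] args) throws Exception {
        // Placeholder path; in MEKA, the class index encodes the number of labels L.
        Instances D = new Instances(new FileReader("some-dataset.arff"));
        int L = 3; // illustrative; normally parsed from the dataset/options
        D.setClassIndex(L);

        CCUpdateable h = new CCUpdateable();
        int warmup = Math.min(100, D.numInstances());

        // Build the chain once on an initial window ...
        h.buildClassifier(new Instances(D, 0, warmup));

        // ... then feed the remaining instances one at a time.
        for (int i = warmup; i < D.numInstances(); i++) {
            h.updateClassifier(D.instance(i));
        }
    }
}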