index (int64) | repo_id (string) | file_path (string) | content (string)
0
java-sources/ai/libs/mlplan-ext-metalearning/0.2.7/org/openml/webapplication/fantail
java-sources/ai/libs/mlplan-ext-metalearning/0.2.7/org/openml/webapplication/fantail/dc/package-info.java
/** * Provides means of computing meta features for a data set. * * @author Helena Graf * */ package org.openml.webapplication.fantail.dc;
0
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/mlplan/multiclasswithreduction/NestedDichotomyUtil.java
package ai.libs.mlplan.multiclasswithreduction; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Random; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.basic.sets.SetUtil; import ai.libs.jaicore.ml.weka.WekaUtil; import ai.libs.jaicore.ml.weka.classification.learner.reduction.splitter.RPNDSplitter; import ai.libs.jaicore.ml.weka.classification.pipeline.MLPipeline; import weka.attributeSelection.InfoGainAttributeEval; import weka.attributeSelection.Ranker; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.core.Instance; import weka.core.Instances; public class NestedDichotomyUtil { private NestedDichotomyUtil() { /* Intentionally left blank. Only prevent instantiation. */ } private static final Logger logger = LoggerFactory.getLogger(NestedDichotomyUtil.class); public static ClassSplit<String> createGeneralRPNDBasedSplit(final Collection<String> classes, final Random rand, final String classifierName, final Instances data) throws InterruptedException { if (classes.size() < 2) { throw new IllegalArgumentException("Cannot compute split for less than two classes!"); } try { RPNDSplitter splitter = new RPNDSplitter(rand, new MLPipeline(new Ranker(), new InfoGainAttributeEval(), AbstractClassifier.forName(classifierName, null))); Collection<Collection<String>> splitAsCollection = null; splitAsCollection = splitter.split(data); Iterator<Collection<String>> it = splitAsCollection.iterator(); return new ClassSplit<>(classes, it.next(), it.next()); } catch (InterruptedException e) { throw e; } catch (Exception e) { logger.error("Unexpected exception occurred while creating an RPND split", e); return null; } } public static ClassSplit<String> createGeneralRPNDBasedSplit(final Collection<String> classes, final Collection<String> s1, final Collection<String> s2, final Random rand, final String classifierName, final Instances data) throws InterruptedException { try { RPNDSplitter splitter = new RPNDSplitter(rand, AbstractClassifier.forName(classifierName, new String[] {})); Collection<Collection<String>> splitAsCollection = null; splitAsCollection = splitter.split(classes, s1, s2, data); Iterator<Collection<String>> it = splitAsCollection.iterator(); return new ClassSplit<>(classes, it.next(), it.next()); } catch (InterruptedException e) { throw e; } catch (Exception e) { logger.error("Unexpected exception occurred while creating an RPND split", e); } return null; } public static ClassSplit<String> createUnaryRPNDBasedSplit(final Collection<String> classes, final Random rand, final String classifierName, final Instances data) { /* 2. if we have a leaf node, abort */ if (classes.size() == 1) { return new ClassSplit<>(classes, null, null); } /* 3a. otherwise select randomly two classes */ List<String> copy = new ArrayList<>(classes); Collections.shuffle(copy, rand); String c1 = copy.get(0); String c2 = copy.get(1); Collection<String> s1 = new HashSet<>(); s1.add(c1); Collection<String> s2 = new HashSet<>(); s2.add(c2); /* 3b. and 3c. train binary classifiers for c1 vs c2 */ Instances reducedData = WekaUtil.mergeClassesOfInstances(data, s1, s2); Classifier c = null; try { c = AbstractClassifier.forName(classifierName, new String[] {}); } catch (Exception e1) { logger.error("Could not get object of classifier with name {}", classifierName, e1); return null; } try { c.buildClassifier(reducedData); } catch (Exception e) { logger.error("Could not train classifier", e); } /* 3d. insort the remaining classes */ List<String> remainingClasses = new ArrayList<>(SetUtil.difference(SetUtil.difference(classes, s1), s2)); int o1 = 0; int o2 = 0; for (int i = 0; i < remainingClasses.size(); i++) { String className = remainingClasses.get(i); Instances testData = WekaUtil.getInstancesOfClass(data, className); for (Instance inst : testData) { try { double prediction = c.classifyInstance(WekaUtil.getRefactoredInstance(inst)); if (prediction == 0) { o1++; } else { o2++; } } catch (Exception e) { logger.error("Could not get prediction for some instance to assign it to a meta-class", e); } } } if (o1 > o2) { s1.addAll(remainingClasses); } else { s2.addAll(remainingClasses); } return new ClassSplit<>(classes, s1, s2); } }
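A minimal usage sketch for the file above (not part of the original sources): it loads an ARFF dataset, collects the class label names, and requests a unary RPND split. The file name, seed, and the choice of weka.classifiers.trees.J48 are hypothetical placeholders.

import java.io.BufferedReader;
import java.io.FileReader;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import ai.libs.mlplan.multiclasswithreduction.ClassSplit;
import ai.libs.mlplan.multiclasswithreduction.NestedDichotomyUtil;
import weka.core.Instances;

public class NestedDichotomySplitDemo {
    public static void main(final String[] args) throws Exception {
        Instances data = new Instances(new BufferedReader(new FileReader("dataset.arff"))); // hypothetical ARFF file
        data.setClassIndex(data.numAttributes() - 1);
        List<String> classes = new ArrayList<>();
        for (int i = 0; i < data.classAttribute().numValues(); i++) {
            classes.add(data.classAttribute().value(i)); // collect the names of all class labels
        }
        ClassSplit<String> split = NestedDichotomyUtil.createUnaryRPNDBasedSplit(classes, new Random(42), "weka.classifiers.trees.J48", data);
    }
}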
0
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction/Util.java
package ai.libs.reduction; import java.io.BufferedReader; import java.io.FileReader; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.ml.weka.WekaUtil; import ai.libs.jaicore.ml.weka.classification.learner.reduction.MCTreeNodeReD; import ai.libs.jaicore.ml.weka.classification.learner.reduction.splitter.RPNDSplitter; import ai.libs.reduction.ensemble.simple.EnsembleOfSimpleOneStepReductionsExperiment; import ai.libs.reduction.single.ReductionExperiment; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.Evaluation; import weka.classifiers.meta.Vote; import weka.core.Instances; public class Util { private static final Logger logger = LoggerFactory.getLogger(Util.class); private static final String LABEL_TRAIN_TIME = "trainTime"; private Util() { /* Left blank to prevent instantiation of this class. */ } public static List<Map<String, Object>> conductSingleOneStepReductionExperiment(final ReductionExperiment experiment) throws Exception { /* load data */ Instances data = new Instances(new BufferedReader(new FileReader(experiment.getDataset()))); data.setClassIndex(data.numAttributes() - 1); /* prepare basis for experiments */ int seed = experiment.getSeed(); Classifier classifierForRPNDSplit = AbstractClassifier.forName(experiment.getNameOfInnerClassifier(), null); Classifier leftClassifier = AbstractClassifier.forName(experiment.getNameOfLeftClassifier(), null); Classifier innerClassifier = AbstractClassifier.forName(experiment.getNameOfInnerClassifier(), null); Classifier rightClassifier = AbstractClassifier.forName(experiment.getNameOfRightClassifier(), null); RPNDSplitter splitter = new RPNDSplitter(new Random(seed), classifierForRPNDSplit); /* conduct experiments */ List<Map<String, Object>> results = new ArrayList<>(); for (int k = 0; k < 10; k++) { List<Collection<String>> classSplit; try { classSplit = new ArrayList<>(splitter.split(data)); } catch (InterruptedException e) { throw e; } catch (Exception e) { throw new RuntimeException("Could not create RPND split.", e); } MCTreeNodeReD classifier = new MCTreeNodeReD(innerClassifier, classSplit.get(0), leftClassifier, classSplit.get(1), rightClassifier); long start = System.currentTimeMillis(); Map<String, Object> result = new HashMap<>(); List<Instances> dataSplit = WekaUtil.getStratifiedSplit(data, (seed + k), .7); classifier.buildClassifier(dataSplit.get(0)); long time = System.currentTimeMillis() - start; Evaluation eval = new Evaluation(dataSplit.get(0)); eval.evaluateModel(classifier, dataSplit.get(1)); double loss = (100 - eval.pctCorrect()) / 100f; logger.info("Conducted experiment {} with split {}/{}. Loss: {}. Time: {}ms.", k, classSplit.get(0), classSplit.get(1), loss, time); result.put("errorRate", loss); result.put(LABEL_TRAIN_TIME, time); results.add(result); } return results; } public static List<Map<String, Object>> conductEnsembleOfOneStepReductionsExperiment(final EnsembleOfSimpleOneStepReductionsExperiment experiment) throws Exception { /* load data */ Instances data = new Instances(new BufferedReader(new FileReader(experiment.getDataset()))); data.setClassIndex(data.numAttributes() - 1); /* prepare basis for experiments */ int seed = experiment.getSeed(); String classifier = experiment.getNameOfClassifier(); RPNDSplitter splitter = new RPNDSplitter(new Random(seed), AbstractClassifier.forName(classifier, null)); /* conduct experiments */ List<Map<String, Object>> results = new ArrayList<>(); for (int k = 0; k < 10; k++) { Vote ensemble = new Vote(); ensemble.setOptions(new String[] { "-R", "MAJ" }); long start = System.currentTimeMillis(); List<Instances> dataSplit = WekaUtil.getStratifiedSplit(data, (seed + k), .7); for (int i = 0; i < experiment.getNumberOfStumps(); i++) { List<Collection<String>> classSplit; classSplit = new ArrayList<>(splitter.split(data)); MCTreeNodeReD tree = new MCTreeNodeReD(classifier, classSplit.get(0), classifier, classSplit.get(1), classifier); tree.buildClassifier(dataSplit.get(0)); ensemble.addPreBuiltClassifier(tree); } Map<String, Object> result = new HashMap<>(); result.put(LABEL_TRAIN_TIME, System.currentTimeMillis() - start); /* now evaluate the ensemble */ ensemble.buildClassifier(data); Evaluation eval = new Evaluation(dataSplit.get(0)); eval.evaluateModel(ensemble, dataSplit.get(1)); double loss = (100 - eval.pctCorrect()) / 100f; logger.info("Conducted experiment {}. Loss: {}. Time: {}ms.", k, loss, result.get(LABEL_TRAIN_TIME)); result.put("errorRate", loss); results.add(result); } return results; } }
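A hedged sketch of driving Util.conductSingleOneStepReductionExperiment directly (not part of the original sources). The dataset path and classifier names are placeholders; the result keys "errorRate" and "trainTime" and the five-argument ReductionExperiment constructor are taken from the code in this dump.

import java.util.List;
import java.util.Map;
import ai.libs.reduction.Util;
import ai.libs.reduction.single.ReductionExperiment;

public class ReductionExperimentDemo {
    public static void main(final String[] args) throws Exception {
        ReductionExperiment exp = new ReductionExperiment(0, "datasets/iris.arff", "weka.classifiers.trees.J48", "weka.classifiers.functions.SMO", "weka.classifiers.trees.J48"); // hypothetical values
        List<Map<String, Object>> results = Util.conductSingleOneStepReductionExperiment(exp);
        for (Map<String, Object> r : results) { // one entry per repetition of the 10-fold loop above
            System.out.println("error rate: " + r.get("errorRate") + ", train time: " + r.get("trainTime") + "ms");
        }
    }
}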
0
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction/ensemble
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction/ensemble/simple/MySQLEnsembleOfSimpleOneStepReductionsExperimentRunner.java
package ai.libs.reduction.ensemble.simple; import java.io.File; import java.sql.SQLException; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import org.aeonbits.owner.ConfigFactory; import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; import org.api4.java.datastructure.kvstore.IKVStore; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.db.IDatabaseAdapter; import ai.libs.jaicore.db.IDatabaseConfig; import ai.libs.jaicore.db.sql.DatabaseAdapterFactory; import ai.libs.reduction.Util; public class MySQLEnsembleOfSimpleOneStepReductionsExperimentRunner { private static final String KEY_ERROR_RATE = "errorRate"; private static final String TABLE_NAME = "homogeneousensemblesofreductionstumps"; private final IDatabaseAdapter adapter; private final Collection<MySQLEnsembleOfSimpleOneStepReductionsExperiment> knownExperiments = new HashSet<>(); private final Logger logger = LoggerFactory.getLogger(MySQLEnsembleOfSimpleOneStepReductionsExperimentRunner.class); public MySQLEnsembleOfSimpleOneStepReductionsExperimentRunner(final String host, final String user, final String password, final String database) throws SQLException { IDatabaseConfig config = ConfigFactory.create(IDatabaseConfig.class); config.setProperty(IDatabaseConfig.DB_HOST, host); config.setProperty(IDatabaseConfig.DB_USER, user); config.setProperty(IDatabaseConfig.DB_PASS, password); config.setProperty(IDatabaseConfig.DB_NAME, database); this.adapter = DatabaseAdapterFactory.get(config); this.knownExperiments.addAll(this.getConductedExperiments()); } public Collection<MySQLEnsembleOfSimpleOneStepReductionsExperiment> getConductedExperiments() throws SQLException { Collection<MySQLEnsembleOfSimpleOneStepReductionsExperiment> experiments = new HashSet<>(); List<IKVStore> rs = this.adapter.getRowsOfTable(TABLE_NAME); for (IKVStore store : rs) { experiments.add(new MySQLEnsembleOfSimpleOneStepReductionsExperiment(store.getAsInt("evaluation_id"), new EnsembleOfSimpleOneStepReductionsExperiment(store.getAsInt("seed"), store.getAsString("dataset"), store.getAsString("classifier"), store.getAsInt("size"), store.getAsDouble(KEY_ERROR_RATE), store.getAsString("exception")))); } return experiments; } public MySQLEnsembleOfSimpleOneStepReductionsExperiment createAndGetExperimentIfNotConducted(final int seed, final File dataFile, final String nameOfClassifier, final int size) throws SQLException { /* first check whether exactly the same experiment (with the same seed) has been conducted previously */ EnsembleOfSimpleOneStepReductionsExperiment exp = new EnsembleOfSimpleOneStepReductionsExperiment(seed, dataFile.getAbsolutePath(), nameOfClassifier, size); Optional<MySQLEnsembleOfSimpleOneStepReductionsExperiment> existingExperiment = this.knownExperiments.stream().filter(e -> e.getExperiment().equals(exp)).findAny(); if (existingExperiment.isPresent()) { return null; } /* otherwise, check if the same classifier combination has been tried before */ if (this.canInfeasibilityBeDerived(this.knownExperiments, exp)) { return null; } Map<String, Object> map = new HashMap<>(); map.put("seed", String.valueOf(seed)); map.put("dataset", dataFile.getAbsolutePath()); map.put("classifier", nameOfClassifier); map.put("size", size); int[] id = this.adapter.insert(TABLE_NAME, map); return new MySQLEnsembleOfSimpleOneStepReductionsExperiment(id[0], exp); } private void updateExperiment(final MySQLEnsembleOfSimpleOneStepReductionsExperiment exp, final Map<String, ? extends Object> values) throws SQLException { Map<String, String> where = new HashMap<>(); where.put("evaluation_id", String.valueOf(exp.getId())); this.adapter.update(TABLE_NAME, values, where); } public void conductExperiment(final MySQLEnsembleOfSimpleOneStepReductionsExperiment exp) throws Exception { List<Map<String, Object>> mccvResults = Util.conductEnsembleOfOneStepReductionsExperiment(exp.getExperiment()); DescriptiveStatistics errorRate = new DescriptiveStatistics(); DescriptiveStatistics runtime = new DescriptiveStatistics(); for (Map<String, Object> result : mccvResults) { errorRate.addValue((double) result.get(KEY_ERROR_RATE)); runtime.addValue((long) result.get("trainTime")); } /* prepare values for experiment update */ Map<String, Object> values = new HashMap<>(); values.put(KEY_ERROR_RATE, errorRate.getMean()); this.updateExperiment(exp, values); } public void markExperimentAsUnsolvable(final MySQLEnsembleOfSimpleOneStepReductionsExperiment exp) throws SQLException { Map<String, String> values = new HashMap<>(); for (String key : new String[] { KEY_ERROR_RATE }) { values.put(key, "-1"); } this.updateExperiment(exp, values); } public void associateExperimentWithException(final MySQLEnsembleOfSimpleOneStepReductionsExperiment exp, final Throwable e) throws SQLException { Map<String, String> values = new HashMap<>(); for (String key : new String[] { KEY_ERROR_RATE }) { values.put(key, "-1"); } values.put("exception", e.getClass().getName() + "\n" + e.getMessage()); this.updateExperiment(exp, values); } private boolean canInfeasibilityBeDerived(final Collection<MySQLEnsembleOfSimpleOneStepReductionsExperiment> experimentsWithResults, final EnsembleOfSimpleOneStepReductionsExperiment experimentInQuestion) { for (MySQLEnsembleOfSimpleOneStepReductionsExperiment knownExperiment : experimentsWithResults) { if (!knownExperiment.getExperiment().getDataset().equals(experimentInQuestion.getDataset())) { continue; } EnsembleOfSimpleOneStepReductionsExperiment re = knownExperiment.getExperiment(); if (re.getException() != null && re.getNameOfClassifier().equals(experimentInQuestion.getNameOfClassifier())) { this.logger.debug("Skipping because {} is known to be problematic as classifier on {} due to {}", experimentInQuestion.getNameOfClassifier(), re.getDataset(), re.getException()); return true; } } return false; } }
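A hedged end-to-end sketch of the runner above (not part of the original sources); the database credentials, dataset file, classifier name, and ensemble size are placeholders.

import java.io.File;
import ai.libs.reduction.ensemble.simple.MySQLEnsembleOfSimpleOneStepReductionsExperiment;
import ai.libs.reduction.ensemble.simple.MySQLEnsembleOfSimpleOneStepReductionsExperimentRunner;

public class EnsembleRunnerDemo {
    public static void main(final String[] args) throws Exception {
        MySQLEnsembleOfSimpleOneStepReductionsExperimentRunner runner = new MySQLEnsembleOfSimpleOneStepReductionsExperimentRunner("localhost", "user", "password", "experiments");
        MySQLEnsembleOfSimpleOneStepReductionsExperiment exp = runner.createAndGetExperimentIfNotConducted(0, new File("datasets/iris.arff"), "weka.classifiers.trees.J48", 10);
        if (exp != null) { // null means the experiment was already conducted or is known to be infeasible
            try {
                runner.conductExperiment(exp);
            } catch (Exception e) {
                runner.associateExperimentWithException(exp, e); // record the failure in the experiments table
            }
        }
    }
}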
0
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction/single/ABestOfKReductionStumpExperimentRunnerWrapper.java
package ai.libs.reduction.single; import java.sql.SQLException; import java.util.HashMap; import java.util.Map; import java.util.Random; import ai.libs.jaicore.db.IDatabaseAdapter; import ai.libs.jaicore.ml.weka.classification.learner.reduction.splitter.RandomSplitter; public abstract class ABestOfKReductionStumpExperimentRunnerWrapper { private final IDatabaseAdapter adapter; private final String tableName; private final int k; private final int mccvrepeats; protected ABestOfKReductionStumpExperimentRunnerWrapper(final IDatabaseAdapter adapter, final String tableName, final int k, final int mccvrepeats) { this.adapter = adapter; this.tableName = tableName; this.k = k; this.mccvrepeats = mccvrepeats; } public void markExperimentAsUnsolvable(final MySQLReductionExperiment exp) throws SQLException { Map<String, String> values = new HashMap<>(); values.put("errorRate", "-1"); this.updateExperiment(exp, values); } public void associateExperimentWithException(final MySQLReductionExperiment exp, final Throwable e) throws SQLException { Map<String, String> values = new HashMap<>(); values.put("errorRate", "-1"); values.put("exception", e.getClass().getName() + "\n" + e.getMessage()); this.updateExperiment(exp, values); } public void conductExperiment(final MySQLReductionExperiment exp) throws Exception { ExperimentRunner<RandomSplitter> runner = new ExperimentRunner<>(this.k, this.mccvrepeats, seed -> new RandomSplitter(new Random(seed))); Map<String, Object> results = runner.conductSingleOneStepReductionExperiment(exp.getExperiment()); this.updateExperiment(exp, results); } protected void updateExperiment(final MySQLReductionExperiment exp, final Map<String, ? extends Object> values) throws SQLException { Map<String, String> where = new HashMap<>(); where.put("evaluation_id", String.valueOf(exp.getId())); this.adapter.update(this.tableName, values, where); } public int getK() { return this.k; } public int getMCCVRepeats() { return this.mccvrepeats; } public IDatabaseAdapter getAdapter() { return this.adapter; } }
0
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction/single/ExperimentRunner.java
package ai.libs.reduction.single; import java.io.BufferedReader; import java.io.FileReader; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; import org.api4.java.ai.ml.classification.IClassifierEvaluator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.experiments.exceptions.ExperimentEvaluationFailedException; import ai.libs.jaicore.ml.classification.loss.dataset.EClassificationPerformanceMeasure; import ai.libs.jaicore.ml.core.evaluation.evaluator.FixedSplitClassifierEvaluator; import ai.libs.jaicore.ml.core.evaluation.evaluator.MonteCarloCrossValidationEvaluator; import ai.libs.jaicore.ml.weka.WekaUtil; import ai.libs.jaicore.ml.weka.classification.learner.WekaClassifier; import ai.libs.jaicore.ml.weka.classification.learner.reduction.MCTreeNodeReD; import ai.libs.jaicore.ml.weka.classification.learner.reduction.splitter.ISplitter; import ai.libs.jaicore.ml.weka.classification.learner.reduction.splitter.ISplitterFactory; import ai.libs.jaicore.ml.weka.dataset.IWekaInstances; import ai.libs.jaicore.ml.weka.dataset.WekaInstances; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.core.Instances; public class ExperimentRunner<T extends ISplitter> { private final int k; private final int mccvRepeats; private final ISplitterFactory<T> splitterFactory; private final Logger logger = LoggerFactory.getLogger(ExperimentRunner.class); public ExperimentRunner(final int k, final int mccvRepeats, final ISplitterFactory<T> splitterFactory) { super(); this.k = k; this.mccvRepeats = mccvRepeats; this.splitterFactory = splitterFactory; } public Map<String, Object> conductSingleOneStepReductionExperiment(final ReductionExperiment experiment) throws Exception { /* load data */ Instances instances = new Instances(new BufferedReader(new FileReader(experiment.getDataset()))); instances.setClassIndex(instances.numAttributes() - 1); IWekaInstances data = new WekaInstances(instances); /* prepare basis for experiments */ int seed = experiment.getSeed(); Classifier leftClassifier = AbstractClassifier.forName(experiment.getNameOfLeftClassifier(), null); Classifier innerClassifier = AbstractClassifier.forName(experiment.getNameOfInnerClassifier(), null); Classifier rightClassifier = AbstractClassifier.forName(experiment.getNameOfRightClassifier(), null); List<IWekaInstances> outerSplit = WekaUtil.getStratifiedSplit(data, experiment.getSeed(), .7); IClassifierEvaluator mccv = new MonteCarloCrossValidationEvaluator(new WekaInstances(data), this.mccvRepeats, .7, new Random(seed)); ISplitter splitter = this.splitterFactory.getSplitter(seed); /* compute best of k splits */ MCTreeNodeReD bestFoundClassifier = null; double bestFoundScore = Double.MAX_VALUE; for (int i = 0; i < this.k; i++) { List<Collection<String>> classSplit; try { classSplit = new ArrayList<>(splitter.split(outerSplit.get(0).getInstances())); } catch (InterruptedException e) { throw e; } catch (Exception e) { throw new ExperimentEvaluationFailedException("Could not create a split.", e); } MCTreeNodeReD classifier = new MCTreeNodeReD(innerClassifier, classSplit.get(0), leftClassifier, classSplit.get(1), rightClassifier); double loss = mccv.evaluate(new WekaClassifier(classifier)); this.logger.info("\t\t\tComputed loss {}", loss); if (loss < bestFoundScore) { bestFoundScore = loss; bestFoundClassifier = classifier; } } /* train classifier on all data */ double loss = new FixedSplitClassifierEvaluator(outerSplit.get(0), outerSplit.get(1), EClassificationPerformanceMeasure.ERRORRATE).evaluate(new WekaClassifier(bestFoundClassifier)); Map<String, Object> result = new HashMap<>(); this.logger.info("\t\t\tBest previously observed loss was {}. The retrained classifier achieves {} on the full data.", bestFoundScore, loss); result.put("errorRate", loss); return result; } }
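This generic runner is exactly what ABestOfKReductionStumpExperimentRunnerWrapper.conductExperiment (earlier in this dump) instantiates. A hedged standalone sketch with a RandomSplitter factory; k, the MCCV repeat count, and the experiment values are placeholders.

import java.util.Map;
import java.util.Random;
import ai.libs.jaicore.ml.weka.classification.learner.reduction.splitter.RandomSplitter;
import ai.libs.reduction.single.ExperimentRunner;
import ai.libs.reduction.single.ReductionExperiment;

public class BestOfKDemo {
    public static void main(final String[] args) throws Exception {
        // the seed -> RandomSplitter lambda mirrors the factory used in ABestOfKReductionStumpExperimentRunnerWrapper
        ExperimentRunner<RandomSplitter> runner = new ExperimentRunner<>(10, 5, seed -> new RandomSplitter(new Random(seed)));
        ReductionExperiment exp = new ReductionExperiment(0, "datasets/iris.arff", "weka.classifiers.trees.J48", "weka.classifiers.functions.SMO", "weka.classifiers.trees.J48");
        Map<String, Object> result = runner.conductSingleOneStepReductionExperiment(exp);
        System.out.println("error rate of retrained best-of-10 stump: " + result.get("errorRate"));
    }
}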
0
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction/single
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction/single/confusion/ConfusionBasedAlgorithm.java
package ai.libs.reduction.single.confusion; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.basic.sets.SetUtil; import ai.libs.jaicore.ml.weka.WekaUtil; import ai.libs.jaicore.ml.weka.classification.learner.reduction.MCTreeNodeReD; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.Evaluation; import weka.core.Instances; public class ConfusionBasedAlgorithm extends AConfusionBasedAlgorithm { private Logger logger = LoggerFactory.getLogger(ConfusionBasedAlgorithm.class); public MCTreeNodeReD buildClassifier(final Instances data, final Collection<String> pClassifierNames) throws Exception { if (this.logger.isInfoEnabled()) { this.logger.info("START: {}", data.relationName()); } int seed = 0; Map<String, double[][]> confusionMatrices = new HashMap<>(); int numClasses = data.numClasses(); this.logger.info("Computing confusion matrices ..."); for (int i = 0; i < 10; i++) { List<Instances> split = WekaUtil.getStratifiedSplit(data, seed, .7f); /* compute confusion matrices for each classifier */ for (String classifier : pClassifierNames) { try { Classifier c = AbstractClassifier.forName(classifier, null); c.buildClassifier(split.get(0)); Evaluation eval = new Evaluation(split.get(0)); eval.evaluateModel(c, split.get(1)); if (!confusionMatrices.containsKey(classifier)) { confusionMatrices.put(classifier, new double[numClasses][numClasses]); } double[][] currentCM = confusionMatrices.get(classifier); double[][] addedCM = eval.confusionMatrix(); for (int j = 0; j < numClasses; j++) { for (int k = 0; k < numClasses; k++) { currentCM[j][k] += addedCM[j][k]; } } } catch (Exception e) { this.logger.error("Unexpected exception has been thrown", e); } } } this.logger.info("done"); /* compute zero-conflict sets for each classifier */ Map<String, Collection<Collection<Integer>>> zeroConflictSets = new HashMap<>(); for (Entry<String, double[][]> entry : confusionMatrices.entrySet()) { zeroConflictSets.put(entry.getKey(), this.getZeroConflictSets(entry.getValue())); } /* greedily identify the best left and right pair (that make least mistakes) */ Collection<List<String>> classifierPairs = SetUtil.cartesianProduct(confusionMatrices.keySet(), 2); String bestLeft = null; String bestRight = null; String bestInner = null; Collection<Integer> bestLeftClasses = null; Collection<Integer> bestRightClasses = null; for (List<String> classifierPair : classifierPairs) { String c1 = classifierPair.get(0); String c2 = classifierPair.get(1); Collection<Collection<Integer>> z1 = zeroConflictSets.get(c1); Collection<Collection<Integer>> z2 = zeroConflictSets.get(c2); /* create candidate split */ int sizeOfBestCombo = 0; for (Collection<Integer> zeroSet1 : z1) { for (Collection<Integer> zeroSet2 : z2) { Collection<Integer> coveredClassesOfThisPair = SetUtil.union(zeroSet1, zeroSet2); if (coveredClassesOfThisPair.size() > sizeOfBestCombo) { bestLeft = c1; bestRight = c2; sizeOfBestCombo = coveredClassesOfThisPair.size(); bestLeftClasses = zeroSet1; bestRightClasses = zeroSet2; } } } } /* greedily complete the best candidates */ double[][] cm1 = confusionMatrices.get(bestLeft); double[][] cm2 = confusionMatrices.get(bestRight); for (int cId = 0; cId < numClasses; cId++) { if (!bestLeftClasses.contains(cId) && !bestRightClasses.contains(cId)) { /* compute effect of adding this class to the respective clusters */ Collection<Integer> newBestZ1 = new ArrayList<>(bestLeftClasses); newBestZ1.add(cId); int p1 = this.getPenaltyOfCluster(newBestZ1, cm1); Collection<Integer> newBestZ2 = new ArrayList<>(bestRightClasses); newBestZ2.add(cId); int p2 = this.getPenaltyOfCluster(newBestZ2, cm2); if (p1 < p2) { bestLeftClasses = newBestZ1; } else { bestRightClasses = newBestZ2; } } } int p1 = this.getPenaltyOfCluster(bestLeftClasses, cm1); int p2 = this.getPenaltyOfCluster(bestRightClasses, cm2); /* create the split problem */ Map<String, String> classMap = new HashMap<>(); for (int i1 : bestLeftClasses) { classMap.put(data.classAttribute().value(i1), "l"); } for (int i2 : bestRightClasses) { classMap.put(data.classAttribute().value(i2), "r"); } Instances newData = WekaUtil.getRefactoredInstances(data, classMap); List<Instances> binaryInnerSplit = WekaUtil.getStratifiedSplit(newData, seed, .7f); /* now identify the classifier that can best separate these two clusters */ int leastSeenMistakes = Integer.MAX_VALUE; for (String classifier : pClassifierNames) { try { Classifier c = AbstractClassifier.forName(classifier, null); c.buildClassifier(binaryInnerSplit.get(0)); Evaluation eval = new Evaluation(newData); eval.evaluateModel(c, binaryInnerSplit.get(1)); int mistakes = (int) eval.incorrect(); int overallMistakes = p1 + p2 + mistakes; if (overallMistakes < leastSeenMistakes) { leastSeenMistakes = overallMistakes; this.logger.info("New best system: {}/{}/{} with {}", bestLeft, bestRight, classifier, leastSeenMistakes); bestInner = classifier; } } catch (Exception e) { this.logger.error("Exception has been thrown unexpectedly.", e); } } if (bestInner == null) { throw new IllegalStateException("No best inner has been chosen!"); } /* now create MCTreeNode with choices */ MCTreeNodeReD tree = new MCTreeNodeReD(bestInner, bestLeftClasses.stream().map(i -> data.classAttribute().value(i)).collect(Collectors.toList()), bestLeft, bestRightClasses.stream().map(i -> data.classAttribute().value(i)).collect(Collectors.toList()), bestRight); tree.buildClassifier(data); return tree; } private Collection<Collection<Integer>> getZeroConflictSets(final double[][] confusionMatrix) { Collection<Integer> blackList = new ArrayList<>(); Collection<Collection<Integer>> partitions = new ArrayList<>(); int leastConflictingClass = -1; do { leastConflictingClass = this.getLeastConflictingClass(confusionMatrix, blackList); if (leastConflictingClass >= 0) { Collection<Integer> cluster = new ArrayList<>(); cluster.add(leastConflictingClass); do { Collection<Integer> newCluster = this.incrementCluster(cluster, confusionMatrix, blackList); if (newCluster.size() == cluster.size()) { break; } cluster = newCluster; if (cluster.contains(-1)) { throw new IllegalStateException("Computed illegal cluster: " + cluster); } } while (this.getPenaltyOfCluster(cluster, confusionMatrix) == 0 && cluster.size() < confusionMatrix.length); blackList.addAll(cluster); partitions.add(cluster); } } while (leastConflictingClass >= 0 && blackList.size() < confusionMatrix.length); return partitions; } }
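A hedged usage sketch for the confusion-based stump construction above (not part of the original sources). The dataset and candidate portfolio are placeholders, and the sketch assumes MCTreeNodeReD follows WEKA's Classifier contract, consistent with its use in Evaluation.evaluateModel elsewhere in this dump.

import java.io.BufferedReader;
import java.io.FileReader;
import java.util.Arrays;
import java.util.List;
import ai.libs.jaicore.ml.weka.classification.learner.reduction.MCTreeNodeReD;
import ai.libs.reduction.single.confusion.ConfusionBasedAlgorithm;
import weka.core.Instances;

public class ConfusionBasedDemo {
    public static void main(final String[] args) throws Exception {
        Instances data = new Instances(new BufferedReader(new FileReader("dataset.arff"))); // hypothetical ARFF file
        data.setClassIndex(data.numAttributes() - 1);
        List<String> portfolio = Arrays.asList("weka.classifiers.trees.J48", "weka.classifiers.functions.SMO", "weka.classifiers.bayes.NaiveBayes"); // hypothetical portfolio
        MCTreeNodeReD stump = new ConfusionBasedAlgorithm().buildClassifier(data, portfolio);
        System.out.println(stump.classifyInstance(data.firstInstance()));
    }
}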
0
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction/single
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction/single/confusion/ConfusionBasedGreedyOptimizingAlgorithm.java
package ai.libs.reduction.single.confusion; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.basic.sets.SetUtil; import ai.libs.jaicore.ml.weka.WekaUtil; import ai.libs.jaicore.ml.weka.classification.learner.reduction.MCTreeNodeReD; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.Evaluation; import weka.core.Instances; public class ConfusionBasedGreedyOptimizingAlgorithm extends AConfusionBasedAlgorithm { private static Logger logger = LoggerFactory.getLogger(ConfusionBasedGreedyOptimizingAlgorithm.class); public MCTreeNodeReD buildClassifier(final Instances data, final Collection<String> pClassifierNames) throws Exception { if (logger.isInfoEnabled()) { logger.info("START: {}", data.relationName()); } int seed = 0; List<Instances> split = WekaUtil.getStratifiedSplit(data, seed, .7f); int numClasses = data.numClasses(); /* compute confusion matrices for each classifier */ logger.info("Computing confusion matrices ..."); Map<String, double[][]> confusionMatrices = new HashMap<>(); for (String classifier : pClassifierNames) { logger.info("\t{} ...", classifier); try { Classifier c = AbstractClassifier.forName(classifier, null); c.buildClassifier(split.get(0)); Evaluation eval = new Evaluation(split.get(0)); eval.evaluateModel(c, split.get(1)); confusionMatrices.put(classifier, eval.confusionMatrix()); } catch (Exception e) { logger.error("Could not train classifier", e); } } logger.info("done"); /* compute zero-conflict sets for each classifier */ Map<String, Collection<Collection<Integer>>> zeroConflictSets = new HashMap<>(); for (Entry<String, double[][]> entry : confusionMatrices.entrySet()) { zeroConflictSets.put(entry.getKey(), this.getZeroConflictSets(entry.getValue())); } /* greedily identify triplets */ Collection<List<String>> classifierPairs = SetUtil.cartesianProduct(confusionMatrices.keySet(), 2); int leastSeenMistakes = Integer.MAX_VALUE; String bestLeft = null; String bestRight = null; String bestInner = null; Collection<Integer> bestLeftClasses = null; Collection<Integer> bestRightClasses = null; int numPair = 0; for (List<String> classifierPair : classifierPairs) { numPair++; String c1 = classifierPair.get(0); String c2 = classifierPair.get(1); logger.info("\tConsidering {}/{} ({}/{})", c1, c2, numPair, classifierPairs.size()); double[][] cm1 = confusionMatrices.get(c1); double[][] cm2 = confusionMatrices.get(c2); Collection<Collection<Integer>> z1 = zeroConflictSets.get(c1); Collection<Collection<Integer>> z2 = zeroConflictSets.get(c2); /* create candidate split */ int sizeOfBestCombo = 0; Collection<Integer> bestZ1 = null; Collection<Integer> bestZ2 = null; for (Collection<Integer> zeroSet1 : z1) { for (Collection<Integer> zeroSet2 : z2) { Collection<Integer> coveredClassesOfThisPair = SetUtil.union(zeroSet1, zeroSet2); if (coveredClassesOfThisPair.size() > sizeOfBestCombo) { sizeOfBestCombo = coveredClassesOfThisPair.size(); bestZ1 = zeroSet1; bestZ2 = zeroSet2; } } } /* greedily complete these candidates */ for (int cId = 0; cId < numClasses; cId++) { if (!bestZ1.contains(cId) && !bestZ2.contains(cId)) { /* compute effect of adding this class to the respective clusters */ Collection<Integer> newBestZ1 = new ArrayList<>(bestZ1); newBestZ1.add(cId); int p1 = this.getPenaltyOfCluster(newBestZ1, cm1); Collection<Integer> newBestZ2 = new ArrayList<>(bestZ2); newBestZ2.add(cId); int p2 = this.getPenaltyOfCluster(newBestZ2, cm2); if (p1 < p2) { bestZ1 = newBestZ1; } else { bestZ2 = newBestZ2; } } } int p1 = this.getPenaltyOfCluster(bestZ1, cm1); int p2 = this.getPenaltyOfCluster(bestZ2, cm2); /* create the split problem */ Map<String, String> classMap = new HashMap<>(); for (int i1 : bestZ1) { classMap.put(data.classAttribute().value(i1), "l"); } for (int i2 : bestZ2) { classMap.put(data.classAttribute().value(i2), "r"); } Instances newData = WekaUtil.getRefactoredInstances(data, classMap); List<Instances> binaryInnerSplit = WekaUtil.getStratifiedSplit(newData, seed, .7f); /* now identify the classifier that can best separate these two clusters */ for (String classifier : pClassifierNames) { try { logger.info("\t\tConsidering {}/{}/{}", c1, c2, classifier); Classifier c = AbstractClassifier.forName(classifier, null); c.buildClassifier(binaryInnerSplit.get(0)); Evaluation eval = new Evaluation(newData); eval.evaluateModel(c, binaryInnerSplit.get(1)); int mistakes = (int) eval.incorrect(); int overallMistakes = p1 + p2 + mistakes; if (overallMistakes < leastSeenMistakes) { leastSeenMistakes = overallMistakes; logger.info("New best system: {}/{}/{} with {}", c1, c2, classifier, leastSeenMistakes); bestLeftClasses = bestZ1; bestRightClasses = bestZ2; bestLeft = c1; bestRight = c2; bestInner = classifier; } } catch (Exception e) { logger.error("Encountered error", e); } } } if (bestLeftClasses == null) { throw new IllegalStateException("Best left classes must not be null"); } /* now create MCTreeNode with choices */ MCTreeNodeReD tree = new MCTreeNodeReD(bestInner, bestLeftClasses.stream().map(i -> data.classAttribute().value(i)).collect(Collectors.toList()), bestLeft, bestRightClasses.stream().map(i -> data.classAttribute().value(i)).collect(Collectors.toList()), bestRight); tree.buildClassifier(data); return tree; } private Collection<Collection<Integer>> getZeroConflictSets(final double[][] confusionMatrix) { Collection<Integer> blackList = new ArrayList<>(); Collection<Collection<Integer>> partitions = new ArrayList<>(); int leastConflictingClass = -1; do { leastConflictingClass = this.getLeastConflictingClass(confusionMatrix, blackList); if (leastConflictingClass >= 0) { Collection<Integer> cluster = new ArrayList<>(); cluster.add(leastConflictingClass); do { cluster = this.incrementCluster(cluster, confusionMatrix, blackList); if (cluster.contains(-1)) { throw new IllegalStateException("Computed illegal cluster: " + cluster); } } while (this.getPenaltyOfCluster(cluster, confusionMatrix) == 0); blackList.addAll(cluster); partitions.add(cluster); } } while (leastConflictingClass >= 0); return partitions; } }
0
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction/single/heterogeneous
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction/single/heterogeneous/bestofkrandom/BestOfKHeterogeneousReductionStumpExperimentRunnerWrapper.java
package ai.libs.reduction.single.heterogeneous.bestofkrandom; import java.io.File; import java.sql.SQLException; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import org.api4.java.datastructure.kvstore.IKVStore; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.db.IDatabaseConfig; import ai.libs.jaicore.db.sql.DatabaseAdapterFactory; import ai.libs.reduction.single.ABestOfKReductionStumpExperimentRunnerWrapper; import ai.libs.reduction.single.BestOfKAtRandomExperiment; import ai.libs.reduction.single.MySQLReductionExperiment; public class BestOfKHeterogeneousReductionStumpExperimentRunnerWrapper extends ABestOfKReductionStumpExperimentRunnerWrapper { private static final Logger LOGGER = LoggerFactory.getLogger(BestOfKHeterogeneousReductionStumpExperimentRunnerWrapper.class); private static final String TABLE_NAME = "reductionstumps_heterogeneous_random_bestofk"; private final Collection<MySQLReductionExperiment> knownExperiments = new HashSet<>(); public BestOfKHeterogeneousReductionStumpExperimentRunnerWrapper(final IDatabaseConfig config, final int k, final int mccvRepeats) { super(DatabaseAdapterFactory.get(config), TABLE_NAME, k, mccvRepeats); try { this.knownExperiments.addAll(this.getConductedExperiments()); } catch (SQLException e) { LOGGER.error("Could not get the already conducted experiments from the database.", e); } } public Collection<MySQLReductionExperiment> getConductedExperiments() throws SQLException { Collection<MySQLReductionExperiment> experiments = new HashSet<>(); List<IKVStore> rslist = this.getAdapter().getRowsOfTable(TABLE_NAME); for (IKVStore rs : rslist) { experiments.add(new MySQLReductionExperiment(rs.getAsInt("evaluation_id"), new BestOfKAtRandomExperiment(rs.getAsInt("seed"), rs.getAsString("dataset"), rs.getAsString("left_classifier"), rs.getAsString("inner_classifier"), rs.getAsString("right_classifier"), rs.getAsInt("k"), rs.getAsInt("mccvrepeats")))); } return experiments; } public MySQLReductionExperiment createAndGetExperimentIfNotConducted(final int seed, final File dataFile, final String nameOfLeftClassifier, final String nameOfInnerClassifier, final String nameOfRightClassifier) { /* first check whether exactly the same experiment (with the same seed) has been conducted previously */ BestOfKAtRandomExperiment exp = new BestOfKAtRandomExperiment(seed, dataFile.getAbsolutePath(), nameOfLeftClassifier, nameOfInnerClassifier, nameOfRightClassifier, this.getK(), this.getMCCVRepeats()); Optional<MySQLReductionExperiment> existingExperiment = this.knownExperiments.stream().filter(e -> e.getExperiment().equals(exp)).findAny(); if (existingExperiment.isPresent()) { return null; } Map<String, Object> map = new HashMap<>(); map.put("seed", seed); map.put("dataset", dataFile.getAbsolutePath()); map.put("left_classifier", nameOfLeftClassifier); map.put("inner_classifier", nameOfInnerClassifier); map.put("right_classifier", nameOfRightClassifier); map.put("k", this.getK()); map.put("mccvrepeats", this.getMCCVRepeats()); try { int id = this.getAdapter().insert(TABLE_NAME, map)[0]; return new MySQLReductionExperiment(id, exp); } catch (SQLException e) { LOGGER.error("Could not create experiment entry", e); return null; } } }
0
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction/single/heterogeneous
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction/single/heterogeneous/simplerpnd/SimpleRPNDHeterogeneousReductionStumpExperimentRunner.java
package ai.libs.reduction.single.heterogeneous.simplerpnd; import java.io.File; import java.sql.SQLException; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; import org.api4.java.datastructure.kvstore.IKVStore; import ai.libs.jaicore.db.IDatabaseAdapter; import ai.libs.jaicore.db.IDatabaseConfig; import ai.libs.jaicore.db.sql.DatabaseAdapterFactory; import ai.libs.reduction.Util; import ai.libs.reduction.single.MySQLReductionExperiment; import ai.libs.reduction.single.ReductionExperiment; public class SimpleRPNDHeterogeneousReductionStumpExperimentRunner { private static final String ERROR_RATE_MIN = "error_rate_min"; private static final String ERROR_RATE_MAX = "error_rate_max"; private static final String ERROR_RATE_MEAN = "error_rate_mean"; private static final String ERROR_RATE_STD = "error_rate_std"; private static final String RUNTIME_MIN = "runtime_min"; private static final String RUNTIME_MAX = "runtime_max"; private static final String RUNTIME_MEAN = "runtime_mean"; private static final String RUNTIME_STD = "runtime_std"; private static final String TABLE_NAME = "reductionstumps"; private final IDatabaseAdapter adapter; private final Collection<MySQLReductionExperiment> knownExperiments = new HashSet<>(); public SimpleRPNDHeterogeneousReductionStumpExperimentRunner(final IDatabaseConfig config) throws SQLException { this.adapter = DatabaseAdapterFactory.get(config); this.knownExperiments.addAll(this.getConductedExperiments()); } public Collection<MySQLReductionExperiment> getConductedExperiments() throws SQLException { Collection<MySQLReductionExperiment> experiments = new HashSet<>(); List<IKVStore> rslist = this.adapter.getRowsOfTable(TABLE_NAME); for (IKVStore rs : rslist) { experiments.add(new MySQLReductionExperiment(rs.getAsInt("evaluation_id"), new ReductionExperiment(rs.getAsInt("seed"), rs.getAsString("dataset"), rs.getAsString("left_classifier"), rs.getAsString("inner_classifier"), rs.getAsString("right_classifier"), rs.getAsString("exception_left"), rs.getAsString("exception_inner"), rs.getAsString("exception_right")))); } return experiments; } public MySQLReductionExperiment createAndGetExperimentIfNotConducted(final int seed, final File dataFile, final String nameOfLeftClassifier, final String nameOfInnerClassifier, final String nameOfRightClassifier) throws SQLException { /* first check whether exactly the same experiment (with the same seed) has been conducted previously */ ReductionExperiment exp = new ReductionExperiment(seed, dataFile.getAbsolutePath(), nameOfLeftClassifier, nameOfInnerClassifier, nameOfRightClassifier); Optional<MySQLReductionExperiment> existingExperiment = this.knownExperiments.stream().filter(e -> e.getExperiment().equals(exp)).findAny(); if (existingExperiment.isPresent()) { return null; } Map<String, String> map = new HashMap<>(); map.put("seed", String.valueOf(seed)); map.put("dataset", dataFile.getAbsolutePath()); map.put("rpnd_classifier", nameOfInnerClassifier); map.put("left_classifier", nameOfLeftClassifier); map.put("inner_classifier", nameOfInnerClassifier); map.put("right_classifier", nameOfRightClassifier); int id = this.adapter.insert(TABLE_NAME, map)[0]; return new MySQLReductionExperiment(id, exp); } private void updateExperiment(final MySQLReductionExperiment exp, final Map<String, ? extends Object> values) throws SQLException { Map<String, String> where = new HashMap<>(); where.put("evaluation_id", String.valueOf(exp.getId())); this.adapter.update(TABLE_NAME, values, where); } public void conductExperiment(final MySQLReductionExperiment exp) throws Exception { List<Map<String, Object>> mccvResults = Util.conductSingleOneStepReductionExperiment(exp.getExperiment()); DescriptiveStatistics errorRate = new DescriptiveStatistics(); DescriptiveStatistics runtime = new DescriptiveStatistics(); for (Map<String, Object> result : mccvResults) { errorRate.addValue((double) result.get("errorRate")); runtime.addValue((long) result.get("trainTime")); } /* prepare values for experiment update */ Map<String, Object> values = new HashMap<>(); values.put(ERROR_RATE_MIN, errorRate.getMin()); values.put(ERROR_RATE_MAX, errorRate.getMax()); values.put(ERROR_RATE_MEAN, errorRate.getMean()); values.put(ERROR_RATE_STD, errorRate.getStandardDeviation()); values.put(RUNTIME_MIN, runtime.getMin()); values.put(RUNTIME_MAX, runtime.getMax()); values.put(RUNTIME_MEAN, runtime.getMean()); values.put(RUNTIME_STD, runtime.getStandardDeviation()); this.updateExperiment(exp, values); } public void markExperimentAsUnsolvable(final MySQLReductionExperiment exp) throws SQLException { Map<String, String> values = new HashMap<>(); for (String key : new String[] { ERROR_RATE_MIN, ERROR_RATE_MAX, ERROR_RATE_MEAN, ERROR_RATE_STD, RUNTIME_MIN, RUNTIME_MAX, RUNTIME_MEAN, RUNTIME_STD }) { values.put(key, "-1"); } this.updateExperiment(exp, values); } public void associateExperimentWithException(final MySQLReductionExperiment exp, final String classifier, final Throwable e) throws SQLException { Map<String, String> values = new HashMap<>(); for (String key : new String[] { ERROR_RATE_MIN, ERROR_RATE_MAX, ERROR_RATE_MEAN, ERROR_RATE_STD, RUNTIME_MIN, RUNTIME_MAX, RUNTIME_MEAN, RUNTIME_STD }) { values.put(key, "-1"); } values.put("exception_" + classifier, e.getClass().getName() + "\n" + e.getMessage()); this.updateExperiment(exp, values); } }
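A hedged sketch of the create-then-conduct flow for the runner above (not part of the original sources). It assumes the IDatabaseConfig connection properties are supplied externally (e.g., via an owner properties file); the dataset and classifier names are placeholders.

import java.io.File;
import org.aeonbits.owner.ConfigFactory;
import ai.libs.jaicore.db.IDatabaseConfig;
import ai.libs.reduction.single.MySQLReductionExperiment;
import ai.libs.reduction.single.heterogeneous.simplerpnd.SimpleRPNDHeterogeneousReductionStumpExperimentRunner;

public class RPNDStumpRunnerDemo {
    public static void main(final String[] args) throws Exception {
        IDatabaseConfig config = ConfigFactory.create(IDatabaseConfig.class); // assumes DB host/user/password/name are provided externally
        SimpleRPNDHeterogeneousReductionStumpExperimentRunner runner = new SimpleRPNDHeterogeneousReductionStumpExperimentRunner(config);
        MySQLReductionExperiment exp = runner.createAndGetExperimentIfNotConducted(0, new File("datasets/iris.arff"), "weka.classifiers.trees.J48", "weka.classifiers.functions.SMO", "weka.classifiers.trees.J48");
        if (exp != null) { // null means this exact experiment already exists in the table
            runner.conductExperiment(exp); // writes error-rate and runtime statistics back to the row
        }
    }
}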
0
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction/single/homogeneous
java-sources/ai/libs/mlplan-ext-reduction/0.2.7/ai/libs/reduction/single/homogeneous/bestofkatrandom/BestOfKHomogeneousReductionStumpExperimentRunnerWrapper.java
package ai.libs.reduction.single.homogeneous.bestofkatrandom; import java.io.File; import java.sql.SQLException; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import org.api4.java.datastructure.kvstore.IKVStore; import ai.libs.jaicore.db.IDatabaseConfig; import ai.libs.jaicore.db.sql.DatabaseAdapterFactory; import ai.libs.reduction.single.ABestOfKReductionStumpExperimentRunnerWrapper; import ai.libs.reduction.single.BestOfKAtRandomExperiment; import ai.libs.reduction.single.MySQLReductionExperiment; public class BestOfKHomogeneousReductionStumpExperimentRunnerWrapper extends ABestOfKReductionStumpExperimentRunnerWrapper { private static final String KEY_CLASSIFIER = "classifier"; private static final String TABLE_NAME = "reductionstumps_homogeneous_random_bestofk"; private final Collection<MySQLReductionExperiment> knownExperiments = new HashSet<>(); public BestOfKHomogeneousReductionStumpExperimentRunnerWrapper(final IDatabaseConfig config, final int k, final int mccvRepeats) throws SQLException { super(DatabaseAdapterFactory.get(config), TABLE_NAME, k, mccvRepeats); this.knownExperiments.addAll(this.getConductedExperiments()); } public Collection<MySQLReductionExperiment> getConductedExperiments() throws SQLException { Collection<MySQLReductionExperiment> experiments = new HashSet<>(); List<IKVStore> rslist = this.getAdapter().getRowsOfTable(TABLE_NAME); for (IKVStore rs : rslist) { experiments.add(new MySQLReductionExperiment(rs.getAsInt("evaluation_id"), new BestOfKAtRandomExperiment(rs.getAsInt("seed"), rs.getAsString("dataset"), rs.getAsString(KEY_CLASSIFIER), rs.getAsString(KEY_CLASSIFIER), rs.getAsString(KEY_CLASSIFIER), rs.getAsInt("k"), rs.getAsInt("mccvrepeats")))); } return experiments; } public MySQLReductionExperiment createAndGetExperimentIfNotConducted(final int seed, final File dataFile, final String nameOfClassifier) throws SQLException { /* first check whether exactly the same experiment (with the same seed) has been conducted previously */ BestOfKAtRandomExperiment exp = new BestOfKAtRandomExperiment(seed, dataFile.getAbsolutePath(), nameOfClassifier, nameOfClassifier, nameOfClassifier, this.getK(), this.getMCCVRepeats()); Optional<MySQLReductionExperiment> existingExperiment = this.knownExperiments.stream().filter(e -> e.getExperiment().equals(exp)).findAny(); if (existingExperiment.isPresent()) { return null; } Map<String, Object> map = new HashMap<>(); map.put("seed", seed); map.put("dataset", dataFile.getAbsolutePath()); map.put(KEY_CLASSIFIER, nameOfClassifier); map.put("k", this.getK()); map.put("mccvrepeats", this.getMCCVRepeats()); int id = this.getAdapter().insert(TABLE_NAME, map)[0]; return new MySQLReductionExperiment(id, exp); } }
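A hedged sketch for the homogeneous wrapper above (not part of the original sources); k, the MCCV repeat count, and the experiment values are placeholders. conductExperiment is inherited from ABestOfKReductionStumpExperimentRunnerWrapper shown earlier in this dump.

import java.io.File;
import org.aeonbits.owner.ConfigFactory;
import ai.libs.jaicore.db.IDatabaseConfig;
import ai.libs.reduction.single.MySQLReductionExperiment;
import ai.libs.reduction.single.homogeneous.bestofkatrandom.BestOfKHomogeneousReductionStumpExperimentRunnerWrapper;

public class HomogeneousBestOfKDemo {
    public static void main(final String[] args) throws Exception {
        IDatabaseConfig config = ConfigFactory.create(IDatabaseConfig.class); // DB properties assumed to be provided externally
        BestOfKHomogeneousReductionStumpExperimentRunnerWrapper wrapper = new BestOfKHomogeneousReductionStumpExperimentRunnerWrapper(config, 10, 5);
        MySQLReductionExperiment exp = wrapper.createAndGetExperimentIfNotConducted(0, new File("datasets/iris.arff"), "weka.classifiers.trees.J48");
        if (exp != null) {
            wrapper.conductExperiment(exp); // inherited: runs ExperimentRunner with a RandomSplitter and stores the result
        }
    }
}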
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/IMLPlanCLIConfig.java
package ai.libs.mlplan.cli; import org.aeonbits.owner.Config; public interface IMLPlanCLIConfig extends Config { @Key("mlplancli.config.timeunit.def") @DefaultValue("SECONDS") public String getDefaultTimeUnit(); }
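A short sketch of how this owner-based config interface is materialized; it mirrors the ConfigFactory.create call that MLPlanCLI itself performs in the next record (snippet, not a full class).

import java.util.concurrent.TimeUnit;
import org.aeonbits.owner.ConfigFactory;
import ai.libs.mlplan.cli.IMLPlanCLIConfig;

IMLPlanCLIConfig config = ConfigFactory.create(IMLPlanCLIConfig.class);
TimeUnit unit = TimeUnit.valueOf(config.getDefaultTimeUnit()); // "SECONDS" unless mlplancli.config.timeunit.def is overridden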
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/MLPlanCLI.java
package ai.libs.mlplan.cli; import java.io.BufferedWriter; import java.io.File; import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.io.PrintWriter; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import org.aeonbits.owner.ConfigCache; import org.aeonbits.owner.ConfigFactory; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.DefaultParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance; import org.api4.java.ai.ml.core.evaluation.execution.ILearnerRunReport; import org.api4.java.ai.ml.core.learner.ISupervisedLearner; import org.api4.java.algorithm.Timeout; import org.openml.apiconnector.io.OpenmlConnector; import org.openml.apiconnector.xml.DataFeature; import org.openml.apiconnector.xml.DataFeature.Feature; import org.openml.apiconnector.xml.DataSetDescription; import org.openml.apiconnector.xml.Task; import org.openml.apiconnector.xml.Task.Input; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import ai.libs.hasco.gui.civiewplugin.TFDNodeAsCIViewInfoGenerator; import ai.libs.jaicore.basic.FileUtil; import ai.libs.jaicore.basic.ResourceUtil; import ai.libs.jaicore.components.api.IComponentInstance; import ai.libs.jaicore.graphvisualizer.plugin.graphview.GraphViewPlugin; import ai.libs.jaicore.graphvisualizer.plugin.nodeinfo.NodeInfoGUIPlugin; import ai.libs.jaicore.graphvisualizer.window.AlgorithmVisualizationWindow; import ai.libs.jaicore.ml.core.dataset.serialization.ArffDatasetAdapter; import ai.libs.jaicore.ml.core.evaluation.evaluator.SupervisedLearnerExecutor; import ai.libs.jaicore.ml.core.filter.SplitterUtil; import ai.libs.jaicore.ml.scikitwrapper.IScikitLearnWrapperConfig; import ai.libs.jaicore.ml.weka.dataset.WekaInstances; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNodeInfoGenerator; import ai.libs.jaicore.search.gui.plugins.rolloutboxplots.SearchRolloutBoxplotPlugin; import ai.libs.jaicore.search.gui.plugins.rollouthistograms.SearchRolloutHistogramPlugin; import ai.libs.jaicore.search.model.travesaltree.JaicoreNodeInfoGenerator; import ai.libs.mlplan.cli.module.IMLPlanCLIModule; import ai.libs.mlplan.cli.module.regression.MLPlan4ScikitLearnRegressionCLIModule; import ai.libs.mlplan.cli.module.regression.MLPlan4WEKARegressionCLIModule; import ai.libs.mlplan.cli.module.slc.MLPlan4ScikitLearnClassificationCLIModule; import ai.libs.mlplan.cli.module.slc.MLPlan4WekaClassificationCLIModule; import ai.libs.mlplan.cli.report.OpenMLAutoMLBenchmarkReport; import ai.libs.mlplan.cli.report.StatisticsListener; import ai.libs.mlplan.cli.report.StatisticsReport; import ai.libs.mlplan.core.AMLPlanBuilder; import ai.libs.mlplan.core.MLPlan; import ai.libs.python.IPythonConfig; import weka.core.Instance; import weka.core.Instances; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Remove; public class MLPlanCLI { // CLI variables private static Logger logger = 
LoggerFactory.getLogger(MLPlanCLI.class); private static final String CLI_SYNTAX = "java -jar <mlplan.jar>"; private static final String K_SHORT_OPT = "shortOpt"; private static final String K_DEFAULT = "default"; private static final String K_DESCRIPTION = "description"; private static final String K_LONG_OPT = "longOpt"; private static final String K_HAS_ARG = "hasArg"; private static final String K_NUM_ARGS = "numArgs"; private static final IMLPlanCLIConfig CONFIG = ConfigFactory.create(IMLPlanCLIConfig.class); private static final TimeUnit DEF_TIME_UNIT = TimeUnit.valueOf(CONFIG.getDefaultTimeUnit()); private static final int DEF_NUM_RANDOM_COMPLETIONS = 3; public static final String O_HELP = "h"; // print help public static final String O_MODULE = "m"; // select module public static final String O_FIT_DATASET = "f"; // provide fit dataset public static final String O_PREDICT_DATASET = "p"; // provide predict dataset public static final String O_LOSS = "l"; // specify loss function to use public static final String O_SEED = "s"; public static final String O_SSC = "ssc"; public static final String O_NUM_CPUS = "ncpus"; public static final String O_TIMEOUT = "t"; public static final String O_VISUALIZATION = "v"; public static final String O_CANDIDATE_TIMEOUT = "tc"; public static final String O_NODE_EVAL_TIMEOUT = "tn"; public static final String O_POS_CLASS_INDEX = "pci"; public static final String O_POS_CLASS_NAME = "pcn"; public static final String O_OPENML_TASK = "openMLTask"; // id of an openML taks as an alternative to fit and predict datasets public static final String O_OUT_OPENML_BENCHMARK = "ooab"; public static final String O_OUT_STATS = "os"; public static final String O_OUT_MODEL = "om"; public static final String O_TMP = "tmp"; public static final String O_PYTHON_CMD = "pythonCmd"; /** OPTIONAL PARAMETERS' DEFAULT VALUES */ // Communication options standard values private static final List<IMLPlanCLIModule> MODULES_TO_REGISTER = Arrays.asList(new MLPlan4WekaClassificationCLIModule(), new MLPlan4ScikitLearnClassificationCLIModule(), new MLPlan4WEKARegressionCLIModule(), new MLPlan4ScikitLearnRegressionCLIModule()); private static Map<String, IMLPlanCLIModule> moduleRegistry = null; private static Map<String, String> defaults = new HashMap<>(); private static String version; private static Double testPerformance; private static IComponentInstance incumbent; private static Map<String, IMLPlanCLIModule> getModuleRegistry() { if (moduleRegistry != null) { return moduleRegistry; } moduleRegistry = new HashMap<>(); for (IMLPlanCLIModule module : MODULES_TO_REGISTER) { for (String setting : module.getSettingOptionValues()) { moduleRegistry.put(setting, module); } } return moduleRegistry; } private static boolean isFlag(final JsonNode n, final String fieldName) { return n.has(fieldName) && n.get(fieldName).asBoolean(); } private MLPlanCLI() { // Intentionally left blank } private static Options generateOptions() throws IOException { ObjectMapper mapper = new ObjectMapper(); JsonNode root = mapper.readTree(ResourceUtil.readResourceFileToString("config.mlplan-cli.json")); final Options options = new Options(); for (JsonNode option : root.get("options")) { if (!option.has(K_SHORT_OPT)) { throw new IllegalArgumentException("Error in the cli configuration file. 
" + mapper.writeValueAsString(option) + " has no shortOpt field."); } options.addOption(Option.builder(option.get(K_SHORT_OPT).asText()) // set the long name of the option .longOpt(option.get(K_LONG_OPT).asText()) // set a flag whether this option is required .required(isFlag(option, "required")) // set a flag whether the option has an argument .hasArg(isFlag(option, K_HAS_ARG)) // set a flag whether the argument is optional .optionalArg(isFlag(option, "argOptional")) // set the number of args .numberOfArgs(option.has(K_NUM_ARGS) ? option.get(K_NUM_ARGS).asInt() : (isFlag(option, K_HAS_ARG) ? 1 : 0)) // set the description .desc(getDescription(option)).build()); if (option.has(K_DEFAULT)) { defaults.put(option.get(K_SHORT_OPT).asText(), option.get(K_DEFAULT).asText()); } } version = root.get("version").asText(); return options; } private static String getDescription(final JsonNode option) { StringBuilder sb = new StringBuilder(); sb.append(option.get(K_DESCRIPTION).asText()); if (option.has(K_DEFAULT)) { sb.append("(Default: ").append(option.get(K_DEFAULT).asText()).append(")"); } if (option.get(K_SHORT_OPT).asText().equals(O_LOSS)) { sb.append("\n"); for (Entry<String, IMLPlanCLIModule> entry : getModuleRegistry().entrySet()) { sb.append(entry.getKey()).append(": ").append(entry.getValue().getPerformanceMeasures().stream().collect(Collectors.joining(", "))).append("\n"); } } if (option.get(K_SHORT_OPT).asText().equals(O_MODULE)) { sb.append("\n").append(getModuleRegistry().keySet().stream().collect(Collectors.joining(", "))); } return sb.toString(); } private static CommandLine generateCommandLine(final Options options, final String[] commandLineArguments) { final CommandLineParser cmdLineParser = new DefaultParser(); CommandLine commandLine = null; try { commandLine = cmdLineParser.parse(options, commandLineArguments); } catch (ParseException parseException) { logger.error("ERROR: Unable to parse command-line arguments {} due to exception.", Arrays.toString(commandLineArguments), parseException); } return commandLine; } private static void printUsage(final Options options) { final HelpFormatter formatter = new HelpFormatter(); final PrintWriter pw = new PrintWriter(System.out); formatter.printUsage(pw, 400, CLI_SYNTAX, options); pw.println("use -h or --help for more detailed information about possible options."); pw.flush(); } private static void printHelp(final Options options) { final HelpFormatter formatter = new HelpFormatter(); formatter.printHelp(600, CLI_SYNTAX, "ML-Plan CLI " + version + "\n================================\n", options, "===============================\nVisit us at: https://mlplan.org"); } public static String getDefault(final String key) { return defaults.get(key); } private static List<ILabeledDataset<ILabeledInstance>> loadOpenMLTaskAsTrainTestSplit(final int taskID, final int fold) throws Exception { logger.info("Load train test split of task {} and fold {}", taskID, fold); OpenmlConnector con = new OpenmlConnector(); Task omlTask = con.taskGet(taskID); File foldAssignmentFile = con.taskSplitsGet(omlTask); Instances splitDescription = new Instances(new FileReader(foldAssignmentFile)); splitDescription.setClassIndex(splitDescription.numAttributes() - 1); List<Integer> fitFold = new ArrayList<>(); List<Integer> predictFold = new ArrayList<>(); for (Instance i : splitDescription) { if (((int) i.classValue()) == fold) { int instanceIndex = (int) i.value(1); switch (splitDescription.attribute(0).value((int) i.value(0))) { case "TRAIN": fitFold.add(instanceIndex); 
break; case "TEST": predictFold.add(instanceIndex); break; default: /* ignore this case */ break; } } } ILabeledDataset<?> dataset = null; for (Input input : omlTask.getInputs()) { if (input.getName().equals("source_data")) { DataSetDescription dsd = input.getData_set().getDataSetDescription(con); DataFeature feature = con.dataFeatures(dsd.getId()); List<String> removeAttributes = new ArrayList<>(); for (Entry<String, Feature> featureEntry : feature.getFeaturesAsMap().entrySet()) { if (featureEntry.getValue().getIs_row_identifier() || featureEntry.getValue().getIs_ignore()) { removeAttributes.add(featureEntry.getKey()); } } Instances wekaData = new Instances(new FileReader(con.datasetGet(con.dataGet(input.getData_set().getData_set_id())))); String rangeList = removeAttributes.stream().map(x -> (wekaData.attribute(x).index() + 1) + "").collect(Collectors.joining(",")); Remove remove = new Remove(); remove.setAttributeIndices(rangeList); remove.setInputFormat(wekaData); Instances cleanWekaData = Filter.useFilter(wekaData, remove); Integer classIndex = null; String targetName = input.getData_set().getTarget_feature(); for (int i = 0; i < cleanWekaData.numAttributes(); i++) { if (cleanWekaData.attribute(i).name().equals(targetName)) { classIndex = i; } } if (classIndex == null) { logger.error("Could not find target attribute with name {}. Assuming last column to be the target instead.", targetName); classIndex = cleanWekaData.numAttributes() - 1; } cleanWekaData.setClassIndex(classIndex); dataset = new WekaInstances(cleanWekaData); } } return SplitterUtil.getRealizationOfSplitSpecification(dataset, Arrays.asList(fitFold, predictFold)); } private static void runMLPlan(final CommandLine cl) throws Exception { // check whether a dataset is provided for fitting. if (!cl.hasOption(O_FIT_DATASET) && !cl.hasOption(O_OPENML_TASK)) { System.err.println("Either need a training dataset provided via " + O_FIT_DATASET + " or a task and fold of an OpenML task provided via " + O_OPENML_TASK); System.exit(1); } else if (cl.hasOption(O_FIT_DATASET) && cl.hasOption(O_OPENML_TASK)) { System.err.println("Cannot use both: local dataset and openml task. 
Only one option either " + O_FIT_DATASET + " or " + O_OPENML_TASK + " may be given."); System.exit(1); } if (cl.hasOption(O_TMP)) { ConfigCache.getOrCreate(IScikitLearnWrapperConfig.class).setProperty(IScikitLearnWrapperConfig.K_TEMP_FOLDER, cl.getOptionValue(O_TMP)); } if (cl.hasOption(O_PYTHON_CMD)) { ConfigCache.getOrCreate(IScikitLearnWrapperConfig.class).setProperty(IPythonConfig.KEY_PYTHON, cl.getOptionValue(O_PYTHON_CMD)); ConfigCache.getOrCreate(IPythonConfig.class).setProperty(IPythonConfig.KEY_PYTHON, cl.getOptionValue(O_PYTHON_CMD)); } // Load CLI modules and identify the module responsible for the requested ml-plan configuration Map<String, IMLPlanCLIModule> moduleRegistry = getModuleRegistry(); String moduleName = cl.getOptionValue(O_MODULE, getDefault(O_MODULE)); if (!moduleRegistry.containsKey(moduleName)) { System.err.println("There is no module registered for handling the requested mode " + moduleName); System.exit(1); } IMLPlanCLIModule module = moduleRegistry.get(moduleName); // load training data ILabeledDataset fitDataset; ILabeledDataset predictDataset = null; if (cl.hasOption(O_OPENML_TASK)) { String[] taskSpec = cl.getOptionValues(O_OPENML_TASK); int taskID = Integer.parseInt(taskSpec[0]); int fold = Integer.parseInt(taskSpec[1]); List<ILabeledDataset<ILabeledInstance>> split = loadOpenMLTaskAsTrainTestSplit(taskID, fold); fitDataset = split.get(0); predictDataset = split.get(1); } else { if (cl.getOptionValue(O_MODULE, getDefault(O_MODULE)).equals(MLPlan4ScikitLearnRegressionCLIModule.M_RUL)) { fitDataset = new ArffDatasetAdapter().readDataset(new File(cl.getOptionValue(O_FIT_DATASET))); } else { Instances wekaData = new Instances(new FileReader(new File(cl.getOptionValue(O_FIT_DATASET)))); wekaData.setClassIndex(wekaData.numAttributes() - 1); fitDataset = new WekaInstances(wekaData); } } // retrieve builder from module AMLPlanBuilder builder = module.getMLPlanBuilderForSetting(cl, fitDataset); // set common configs builder.withNumCpus(Integer.parseInt(cl.getOptionValue(O_NUM_CPUS, getDefault(O_NUM_CPUS)))); builder.withSeed(Long.parseLong(cl.getOptionValue(O_SEED, getDefault(O_SEED)))); // set timeouts builder.withTimeOut(new Timeout(Integer.parseInt(cl.getOptionValue(O_TIMEOUT, getDefault(O_TIMEOUT))), DEF_TIME_UNIT)); if (cl.hasOption(O_CANDIDATE_TIMEOUT)) { builder.withCandidateEvaluationTimeOut(new Timeout(Integer.parseInt(cl.getOptionValue(O_CANDIDATE_TIMEOUT)), DEF_TIME_UNIT)); } else { // scale the default candidate timeout with the overall timeout Timeout candidateTimeout; if (builder.getTimeOut().seconds() <= 60 * 15) { candidateTimeout = new Timeout(30, DEF_TIME_UNIT); } else if (builder.getTimeOut().seconds() <= 2 * 60 * 60) { candidateTimeout = new Timeout(300, DEF_TIME_UNIT); } else if (builder.getTimeOut().seconds() < 60 * 60 * 12) { candidateTimeout = new Timeout(600, DEF_TIME_UNIT); } else { candidateTimeout = new Timeout(1200, DEF_TIME_UNIT); } builder.withCandidateEvaluationTimeOut(candidateTimeout); } if (cl.hasOption(O_NODE_EVAL_TIMEOUT)) { builder.withNodeEvaluationTimeOut(new Timeout(Integer.parseInt(cl.getOptionValue(O_NODE_EVAL_TIMEOUT, getDefault(O_NODE_EVAL_TIMEOUT))), DEF_TIME_UNIT)); } else { builder.withNodeEvaluationTimeOut(new Timeout(builder.getNodeEvaluationTimeOut().seconds() * DEF_NUM_RANDOM_COMPLETIONS, DEF_TIME_UNIT)); } // finally provide the training data builder.withDataset(fitDataset); // build mlplan object MLPlan mlplan = builder.build();
mlplan.setLoggerName("mlplan"); StatisticsListener statsListener = new StatisticsListener(); mlplan.registerListener(statsListener); if (cl.hasOption(O_VISUALIZATION)) { AlgorithmVisualizationWindow window = new AlgorithmVisualizationWindow(mlplan); window.withMainPlugin(new GraphViewPlugin()); window.withPlugin(new NodeInfoGUIPlugin("Node Info", new JaicoreNodeInfoGenerator<>(new TFDNodeInfoGenerator())), new NodeInfoGUIPlugin("CI View", new TFDNodeAsCIViewInfoGenerator(builder.getComponents())), new SearchRolloutHistogramPlugin(), new SearchRolloutBoxplotPlugin()); } // call ml-plan to obtain the optimal supervised learner logger.info("Running ML-Plan ..."); ISupervisedLearner optimizedLearner = mlplan.call(); incumbent = mlplan.getComponentInstanceOfSelectedClassifier(); logger.info("ML-Plan finished. JSON description of selected solution: {}", incumbent); if (predictDataset != null || cl.hasOption(O_PREDICT_DATASET)) { if (cl.hasOption(O_PREDICT_DATASET)) { File predictDatasetFile = new File(cl.getOptionValue(O_PREDICT_DATASET)); logger.info("Load test data file: {}", predictDatasetFile.getAbsolutePath()); predictDataset = new ArffDatasetAdapter().readDataset(predictDatasetFile); } ILearnerRunReport runReport = new SupervisedLearnerExecutor().execute(optimizedLearner, predictDataset); logger.info("Run report of the module: {}", module.getRunReportAsString(mlplan.getSelectedClassifier(), runReport)); testPerformance = builder.getMetricForSearchPhase().loss(runReport.getPredictionDiffList()); if (cl.hasOption(O_OUT_OPENML_BENCHMARK)) { String outputFile = cl.getOptionValue(O_OUT_OPENML_BENCHMARK, getDefault(O_OUT_OPENML_BENCHMARK)); logger.info("Generating report conforming the OpenML AutoML Benchmark format which is then written to {}.", outputFile); writeFile(outputFile, new OpenMLAutoMLBenchmarkReport(runReport).toString()); } if (cl.hasOption(O_OUT_STATS)) { String outputFile = cl.getOptionValue(O_OUT_STATS, getDefault(O_OUT_STATS)); logger.info("Generating statistics report in json and writing it to file {}.", outputFile); writeFile(outputFile, new StatisticsReport(statsListener, mlplan.getComponentInstanceOfSelectedClassifier(), runReport).toString()); } } if (cl.hasOption(O_OUT_MODEL)) { String outputFile = cl.getOptionValue(O_OUT_MODEL, getDefault(O_OUT_MODEL)); logger.info("Serializing trained model of selected classifier {} to output file {}.", optimizedLearner, outputFile); FileUtil.serializeObject(optimizedLearner, outputFile); logger.info("Serialization completed."); } } public static IComponentInstance incumbent() { return incumbent; } public static Double testPerformance() { return testPerformance; } private static void writeFile(final String fileName, final String value) { File file = new File(fileName); if (file.getParentFile() != null) { file.getParentFile().mkdirs(); } try (BufferedWriter bw = new BufferedWriter(new FileWriter(file))) { bw.write(value); } catch (IOException e) { logger.error("Could not write value to file {}: {}", fileName, value); } } public static void main(final String[] args) throws Exception { String logLevel; if (logger.isTraceEnabled()) { logLevel = "TRACE"; } else if (logger.isDebugEnabled()) { logLevel = "DEBUG"; } else if (logger.isInfoEnabled()) { logLevel = "INFO"; } else if (logger.isWarnEnabled()) { logLevel = "WARN"; } else if (logger.isErrorEnabled()) { logLevel = "ERROR"; } else { logLevel = "UNKNOWN"; } logger.info("Logger works properly. 
Log-level is {}.", logLevel); final Options options = generateOptions(); if (args.length == 0) { printUsage(options); } else { CommandLine commandLine = generateCommandLine(options, args); if (commandLine != null) { if (commandLine.hasOption(O_HELP)) { printHelp(options); } else { runMLPlan(commandLine); } } } } }
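A minimal, hypothetical sketch of driving this CLI programmatically; the flags correspond to the O_* constants defined above ("m" for the module, "f" for the fit dataset, "t" for the timeout, "s" for the seed), while the dataset path and all concrete values are illustrative assumptions:
public class MLPlanCLIDemo {
	public static void main(final String[] args) throws Exception {
		// equivalent to: java -jar mlplan.jar -m weka -f train.arff -t 300 -s 42
		// (the timeout unit is taken from the configured default time unit)
		MLPlanCLI.main(new String[] { "-m", "weka", "-f", "train.arff", "-t", "300", "-s", "42" });
	}
}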
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/package-info.java
package ai.libs.mlplan.cli;
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module/AMLPlanCLIModule.java
package ai.libs.mlplan.cli.module; import java.util.List; public abstract class AMLPlanCLIModule implements IMLPlanCLIModule { private List<String> subModules; private String defaultModule; private List<String> performanceMeasures; private String defaultPerformanceMeasure; protected AMLPlanCLIModule(final List<String> subModules, final String defaultModule, final List<String> performanceMeasures, final String defaultPerformanceMeasure) { if (!(subModules.contains(defaultModule) && performanceMeasures.contains(defaultPerformanceMeasure))) { throw new IllegalArgumentException("The default value needs to be contained in the list of available options"); } this.subModules = subModules; this.defaultModule = defaultModule; this.performanceMeasures = performanceMeasures; this.defaultPerformanceMeasure = defaultPerformanceMeasure; } @Override public String getDefaultSettingOptionValue() { return this.defaultModule; } @Override public List<String> getSettingOptionValues() { return this.subModules; } @Override public String getDefaultPerformanceMeasure() { return this.defaultPerformanceMeasure; } @Override public List<String> getPerformanceMeasures() { return this.performanceMeasures; } }
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module/IMLPlanCLIModule.java
package ai.libs.mlplan.cli.module; import java.io.IOException; import java.util.List; import org.apache.commons.cli.CommandLine; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import org.api4.java.ai.ml.core.evaluation.execution.ILearnerRunReport; import org.api4.java.ai.ml.core.learner.ISupervisedLearner; import ai.libs.mlplan.cli.MLPlanCLI; import ai.libs.mlplan.core.AMLPlanBuilder; public interface IMLPlanCLIModule { public AMLPlanBuilder getMLPlanBuilderForSetting(CommandLine cl, ILabeledDataset fitDataset) throws IOException; public String getRunReportAsString(ISupervisedLearner learner, ILearnerRunReport runReport); public List<String> getSettingOptionValues(); public String getDefaultSettingOptionValue(); public List<String> getPerformanceMeasures(); public String getDefaultPerformanceMeasure(); default String getPerformanceMeasure(final CommandLine cl) { return cl.getOptionValue(MLPlanCLI.O_LOSS, this.getDefaultPerformanceMeasure()); } }
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module/UnsupportedModuleConfigurationException.java
package ai.libs.mlplan.cli.module; public class UnsupportedModuleConfigurationException extends IllegalArgumentException { /** * Auto-generated serial version UID for serialization. */ private static final long serialVersionUID = 6930502085568409139L; public UnsupportedModuleConfigurationException() { super(); } public UnsupportedModuleConfigurationException(final String s) { super(s); } public UnsupportedModuleConfigurationException(final Throwable cause) { super(cause); } public UnsupportedModuleConfigurationException(final String message, final Throwable cause) { super(message, cause); } }
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module/mlc/MLPlan4MekaMultiLabelCLIModule.java
package ai.libs.mlplan.cli.module.mlc; import java.io.IOException; import java.util.Arrays; import org.apache.commons.cli.CommandLine; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import org.api4.java.ai.ml.core.evaluation.execution.ILearnerRunReport; import org.api4.java.ai.ml.core.learner.ISupervisedLearner; import ai.libs.jaicore.ml.classification.multilabel.evaluation.loss.ExactMatch; import ai.libs.jaicore.ml.classification.multilabel.evaluation.loss.F1MacroAverageL; import ai.libs.jaicore.ml.classification.multilabel.evaluation.loss.F1MicroAverage; import ai.libs.jaicore.ml.classification.multilabel.evaluation.loss.Hamming; import ai.libs.jaicore.ml.classification.multilabel.evaluation.loss.InstanceWiseF1; import ai.libs.jaicore.ml.classification.multilabel.evaluation.loss.JaccardScore; import ai.libs.mlplan.cli.module.AMLPlanCLIModule; import ai.libs.mlplan.cli.module.IMLPlanCLIModule; import ai.libs.mlplan.cli.module.UnsupportedModuleConfigurationException; import ai.libs.mlplan.meka.ML2PlanMekaBuilder; // Multi-Label measures: EXACT_MATCH, INSTANCE_F1, LABEL_F1, MICRO_F1, HAMMING, JACCARD, RANK public class MLPlan4MekaMultiLabelCLIModule extends AMLPlanCLIModule implements IMLPlanCLIModule { public static final String M_MEKA = "meka"; public static final String L_HAMMING = "HAMMING"; public static final String L_JACCARD = "JACCARD"; public static final String L_RANK = "RANK"; public static final String L_EXACT_MATCH = "EXACT_MATCH"; public static final String L_INSTANCE_F1 = "INSTANCE_F1"; public static final String L_LABEL_F1 = "LABEL_F1"; public static final String L_MICRO_F1 = "MICRO_F1"; public MLPlan4MekaMultiLabelCLIModule() { super(Arrays.asList(M_MEKA), M_MEKA, Arrays.asList(L_EXACT_MATCH, L_INSTANCE_F1, L_LABEL_F1, L_MICRO_F1, L_HAMMING, L_JACCARD), L_INSTANCE_F1); } @Override public ML2PlanMekaBuilder getMLPlanBuilderForSetting(final CommandLine cl, final ILabeledDataset fitDataset) throws IOException { ML2PlanMekaBuilder builder = new ML2PlanMekaBuilder(); // switch on the requested performance measure (not on the module option) to configure the loss switch (this.getPerformanceMeasure(cl)) { case L_INSTANCE_F1: builder.withPerformanceMeasure(new InstanceWiseF1()); break; case L_LABEL_F1: builder.withPerformanceMeasure(new F1MacroAverageL()); break; case L_MICRO_F1: builder.withPerformanceMeasure(new F1MicroAverage()); break; case L_EXACT_MATCH: builder.withPerformanceMeasure(new ExactMatch()); break; case L_HAMMING: builder.withPerformanceMeasure(new Hamming()); break; case L_JACCARD: builder.withPerformanceMeasure(new JaccardScore()); break; default: throw new UnsupportedModuleConfigurationException("Performance measure is not available for ML2-Plan"); } return builder; } @Override public String getRunReportAsString(final ISupervisedLearner learner, final ILearnerRunReport runReport) { return ""; } }
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module/regression/AMLPlan4RegressionCLIModule.java
package ai.libs.mlplan.cli.module.regression; import java.util.Arrays; import java.util.List; import org.apache.commons.cli.CommandLine; import ai.libs.jaicore.ml.regression.loss.ERegressionPerformanceMeasure; import ai.libs.jaicore.ml.regression.loss.ERulPerformanceMeasure; import ai.libs.mlplan.cli.module.AMLPlanCLIModule; import ai.libs.mlplan.cli.module.UnsupportedModuleConfigurationException; import ai.libs.mlplan.core.AMLPlanBuilder; public abstract class AMLPlan4RegressionCLIModule extends AMLPlanCLIModule { public static final String L_RMSE = "ROOT_MEAN_SQUARED_ERROR"; public static final String L_MSE = "MEAN_SQUARED_ERROR"; public static final String L_MAE = "MEAN_ABSOLUTE_ERROR"; public static final String L_RMSLE = "ROOT_MEAN_SQUARED_LOGARITHM_ERROR"; public static final String L_R2 = "R2"; public static final String L_AL = "ASYMMETRIC_LOSS"; public static final String L_MAPE = "MEAN_ABSOLUTE_PERCENTAGE_ERROR"; public AMLPlan4RegressionCLIModule(final List<String> subModules, final String defaultModule) { this(subModules, defaultModule, L_RMSE); } public AMLPlan4RegressionCLIModule(final List<String> subModules, final String defaultModule, final String defaultMeasure) { super(subModules, defaultModule, Arrays.asList(L_AL, L_MAE, L_MAPE, L_MSE, L_RMSE, L_R2, L_RMSLE), defaultMeasure); } protected void configureLoss(final CommandLine cl, final AMLPlanBuilder builder) { switch (this.getPerformanceMeasure(cl)) { case L_MAE: builder.withPerformanceMeasure(ERegressionPerformanceMeasure.MAE); break; case L_AL: builder.withPerformanceMeasure(ERulPerformanceMeasure.ASYMMETRIC_LOSS); break; case L_MAPE: builder.withPerformanceMeasure(ERulPerformanceMeasure.MEAN_ABSOLUTE_PERCENTAGE_ERROR); break; case L_RMSLE: builder.withPerformanceMeasure(ERegressionPerformanceMeasure.RMSLE); break; case L_MSE: builder.withPerformanceMeasure(ERegressionPerformanceMeasure.MSE); break; case L_RMSE: builder.withPerformanceMeasure(ERegressionPerformanceMeasure.RMSE); break; case L_R2: builder.withPerformanceMeasure(ERegressionPerformanceMeasure.R2); break; default: throw new UnsupportedModuleConfigurationException("Chosen performance measure is not available for ML-Plan regression"); } } }
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module/regression/MLPlan4ScikitLearnRegressionCLIModule.java
package ai.libs.mlplan.cli.module.regression; import java.io.IOException; import java.util.Arrays; import org.apache.commons.cli.CommandLine; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import org.api4.java.ai.ml.core.evaluation.execution.ILearnerRunReport; import org.api4.java.ai.ml.core.learner.ISupervisedLearner; import ai.libs.mlplan.cli.MLPlanCLI; import ai.libs.mlplan.cli.module.IMLPlanCLIModule; import ai.libs.mlplan.cli.module.UnsupportedModuleConfigurationException; import ai.libs.mlplan.sklearn.builder.MLPlanScikitLearnBuilder; public class MLPlan4ScikitLearnRegressionCLIModule extends AMLPlan4RegressionCLIModule implements IMLPlanCLIModule { public static final String M_RUL = "sklearn-rul"; public static final String M_REGRESSION = "sklearn-regression"; public MLPlan4ScikitLearnRegressionCLIModule() { super(Arrays.asList(M_REGRESSION, M_RUL), M_REGRESSION, AMLPlan4RegressionCLIModule.L_RMSE); } @Override public MLPlanScikitLearnBuilder getMLPlanBuilderForSetting(final CommandLine cl, final ILabeledDataset fitDataset) throws IOException { MLPlanScikitLearnBuilder builder = null; switch (cl.getOptionValue(MLPlanCLI.O_MODULE)) { case M_REGRESSION: builder = MLPlanScikitLearnBuilder.forRegression(); break; case M_RUL: builder = MLPlanScikitLearnBuilder.forRUL(); break; default: throw new UnsupportedModuleConfigurationException("The chosen sub-module is not available in module " + this.getClass().getName()); } this.configureLoss(cl, builder); return builder; } @Override public String getRunReportAsString(final ISupervisedLearner learner, final ILearnerRunReport runReport) { return ""; } }
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module/regression/MLPlan4WEKARegressionCLIModule.java
package ai.libs.mlplan.cli.module.regression; import java.io.IOException; import java.util.Arrays; import org.apache.commons.cli.CommandLine; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import org.api4.java.ai.ml.core.evaluation.execution.ILearnerRunReport; import org.api4.java.ai.ml.core.learner.ISupervisedLearner; import ai.libs.mlplan.cli.MLPlanCLI; import ai.libs.mlplan.cli.module.UnsupportedModuleConfigurationException; import ai.libs.mlplan.weka.MLPlanWekaBuilder; public class MLPlan4WEKARegressionCLIModule extends AMLPlan4RegressionCLIModule { private static final String M_WEKA = "weka-regression"; public MLPlan4WEKARegressionCLIModule() { super(Arrays.asList(M_WEKA), M_WEKA); } @Override public MLPlanWekaBuilder getMLPlanBuilderForSetting(final CommandLine cl, final ILabeledDataset fitDataset) throws IOException { MLPlanWekaBuilder builder = null; switch (cl.getOptionValue(MLPlanCLI.O_MODULE)) { case M_WEKA: builder = MLPlanWekaBuilder.forRegression(); break; default: throw new UnsupportedModuleConfigurationException("Chosen sub-module not available in module " + this.getClass().getName()); } this.configureLoss(cl, builder); return builder; } @Override public String getRunReportAsString(final ISupervisedLearner learner, final ILearnerRunReport runReport) { return ""; } }
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module/slc/AMLPlan4ClassificationCLIModule.java
package ai.libs.mlplan.cli.module.slc; import static ai.libs.mlplan.cli.MLPlanCLI.getDefault; import java.util.Arrays; import java.util.List; import org.apache.commons.cli.CommandLine; import org.api4.java.ai.ml.classification.singlelabel.evaluation.ISingleLabelClassification; import org.api4.java.ai.ml.core.dataset.schema.attribute.ICategoricalAttribute; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import org.api4.java.ai.ml.core.evaluation.execution.ILearnerRunReport; import org.api4.java.ai.ml.core.learner.ISupervisedLearner; import ai.libs.jaicore.ml.classification.loss.dataset.AreaUnderROCCurve; import ai.libs.jaicore.ml.classification.loss.dataset.AveragedInstanceLoss; import ai.libs.jaicore.ml.classification.loss.dataset.EClassificationPerformanceMeasure; import ai.libs.jaicore.ml.classification.loss.dataset.ErrorRate; import ai.libs.jaicore.ml.classification.loss.dataset.F1Measure; import ai.libs.jaicore.ml.classification.loss.dataset.Precision; import ai.libs.jaicore.ml.classification.loss.dataset.Recall; import ai.libs.jaicore.ml.classification.loss.instance.LogLoss; import ai.libs.mlplan.cli.MLPlanCLI; import ai.libs.mlplan.cli.module.AMLPlanCLIModule; import ai.libs.mlplan.cli.module.IMLPlanCLIModule; import ai.libs.mlplan.cli.module.UnsupportedModuleConfigurationException; import ai.libs.mlplan.core.AMLPlanBuilder; public abstract class AMLPlan4ClassificationCLIModule extends AMLPlanCLIModule implements IMLPlanCLIModule { // binary only private static final String L_AUC = "AUC"; private static final String L_F1 = "F1"; private static final String L_PRECISION = "PRECISION"; private static final String L_RECALL = "RECALL"; // binary + multinomial private static final String L_ERRORRATE = "ERRORRATE"; private static final String L_LOGLOSS = "LOGLOSS"; private static final List<String> BINARY_ONLY_MEASURES = Arrays.asList(L_AUC, L_F1, L_PRECISION, L_RECALL); protected AMLPlan4ClassificationCLIModule(final List<String> subModules, final String defaultModule) { super(subModules, defaultModule, Arrays.asList(L_AUC, L_F1, L_PRECISION, L_RECALL, L_ERRORRATE, L_LOGLOSS), L_ERRORRATE); } protected void configureLoss(final CommandLine cl, final ICategoricalAttribute labelAtt, final AMLPlanBuilder builder) { int positiveClassIndex = Integer.parseInt(cl.getOptionValue(MLPlanCLI.O_POS_CLASS_INDEX, getDefault(MLPlanCLI.O_POS_CLASS_INDEX))); if (cl.hasOption(MLPlanCLI.O_POS_CLASS_NAME)) { positiveClassIndex = labelAtt.getLabels().indexOf(cl.getOptionValue(MLPlanCLI.O_POS_CLASS_NAME)); if (positiveClassIndex < 0) { throw new UnsupportedModuleConfigurationException("The provided name of the positive class is not contained in the list of class labels"); } } String performanceMeasure = cl.getOptionValue(MLPlanCLI.O_LOSS, L_ERRORRATE); if (BINARY_ONLY_MEASURES.contains(performanceMeasure) && labelAtt.getLabels().size() > 2) { throw new UnsupportedModuleConfigurationException("Cannot use binary performance measure for non-binary classification dataset."); } switch (performanceMeasure) { case L_ERRORRATE: builder.withPerformanceMeasure(EClassificationPerformanceMeasure.ERRORRATE); break; case L_LOGLOSS: builder.withPerformanceMeasure(new AveragedInstanceLoss(new LogLoss())); break; case L_AUC: builder.withPerformanceMeasure(new AreaUnderROCCurve(positiveClassIndex)); break; case L_F1: builder.withPerformanceMeasure(new F1Measure(positiveClassIndex)); break; case L_PRECISION: builder.withPerformanceMeasure(new Precision(positiveClassIndex)); break; case L_RECALL: 
builder.withPerformanceMeasure(new Recall(positiveClassIndex)); break; default: throw new UnsupportedModuleConfigurationException("Unsupported measure " + performanceMeasure); } } public ICategoricalAttribute getLabelAttribute(final ILabeledDataset fitDataset) { if (!(fitDataset.getLabelAttribute() instanceof ICategoricalAttribute)) { throw new UnsupportedModuleConfigurationException("ML-Plan for classification requires a categorical target attribute."); } return (ICategoricalAttribute) fitDataset.getLabelAttribute(); } @Override public String getRunReportAsString(final ISupervisedLearner learner, final ILearnerRunReport runReport) { StringBuilder sb = new StringBuilder(); sb.append(learner).append("\n"); sb.append("Error-Rate: ").append(new ErrorRate().loss(runReport.getPredictionDiffList().getCastedView(Integer.class, ISingleLabelClassification.class))); return sb.toString(); } }
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module/slc/MLPlan4ScikitLearnClassificationCLIModule.java
package ai.libs.mlplan.cli.module.slc; import java.io.IOException; import java.util.Arrays; import org.apache.commons.cli.CommandLine; import org.api4.java.ai.ml.core.dataset.schema.attribute.ICategoricalAttribute; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import ai.libs.mlplan.cli.MLPlanCLI; import ai.libs.mlplan.cli.module.IMLPlanCLIModule; import ai.libs.mlplan.cli.module.UnsupportedModuleConfigurationException; import ai.libs.mlplan.sklearn.builder.MLPlanScikitLearnBuilder; public class MLPlan4ScikitLearnClassificationCLIModule extends AMLPlan4ClassificationCLIModule implements IMLPlanCLIModule { private static final String M_SKLEARN = "sklearn"; private static final String M_ULSKLEARN = "sklearn-unlimited"; public MLPlan4ScikitLearnClassificationCLIModule() { super(Arrays.asList(M_SKLEARN, M_ULSKLEARN), M_SKLEARN); } @Override public MLPlanScikitLearnBuilder getMLPlanBuilderForSetting(final CommandLine cl, final ILabeledDataset fitDataset) throws IOException { ICategoricalAttribute labelAtt = this.getLabelAttribute(fitDataset); // get the respective builder MLPlanScikitLearnBuilder builder; switch (cl.getOptionValue(MLPlanCLI.O_MODULE, this.getDefaultSettingOptionValue())) { case M_SKLEARN: builder = MLPlanScikitLearnBuilder.forClassification(); break; case M_ULSKLEARN: builder = MLPlanScikitLearnBuilder.forClassificationWithUnlimitedLength(); break; default: throw new UnsupportedModuleConfigurationException("Unknown module configured for scikit-learn classification module."); } // configure classification loss this.configureLoss(cl, labelAtt, builder); return builder; } }
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/module/slc/MLPlan4WekaClassificationCLIModule.java
package ai.libs.mlplan.cli.module.slc; import java.io.IOException; import java.util.Arrays; import org.apache.commons.cli.CommandLine; import org.api4.java.ai.ml.core.dataset.schema.attribute.ICategoricalAttribute; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import ai.libs.mlplan.cli.MLPlanCLI; import ai.libs.mlplan.cli.module.UnsupportedModuleConfigurationException; import ai.libs.mlplan.weka.MLPlanWekaBuilder; public class MLPlan4WekaClassificationCLIModule extends AMLPlan4ClassificationCLIModule { private static final String M_WEKA = "weka"; private static final String M_WEKA_TINY = "weka-tiny"; public MLPlan4WekaClassificationCLIModule() { super(Arrays.asList(M_WEKA, M_WEKA_TINY), M_WEKA); } @Override public MLPlanWekaBuilder getMLPlanBuilderForSetting(final CommandLine cl, final ILabeledDataset fitDataset) throws IOException { // try to get label attribute (also checks whether the dataset is really a classification dataset). ICategoricalAttribute labelAtt = this.getLabelAttribute(fitDataset); MLPlanWekaBuilder builder; switch (cl.getOptionValue(MLPlanCLI.O_MODULE, this.getDefaultSettingOptionValue())) { case M_WEKA: builder = MLPlanWekaBuilder.forClassification(); break; case M_WEKA_TINY: builder = MLPlanWekaBuilder.forClassificationWithTinySearchSpace(); break; default: throw new UnsupportedModuleConfigurationException("The selected module is not available via this CLI module."); } // configure the loss function this.configureLoss(cl, labelAtt, builder); return builder; } }
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/report/OpenMLAutoMLBenchmarkReport.java
package ai.libs.mlplan.cli.report; import java.util.List; import java.util.stream.Collectors; import java.util.stream.IntStream; import org.api4.java.ai.ml.classification.singlelabel.evaluation.ISingleLabelClassification; import org.api4.java.ai.ml.core.dataset.schema.attribute.ICategoricalAttribute; import org.api4.java.ai.ml.core.evaluation.IPredictionAndGroundTruthTable; import org.api4.java.ai.ml.core.evaluation.execution.ILearnerRunReport; import org.api4.java.ai.ml.regression.evaluation.IRegressionPrediction; public class OpenMLAutoMLBenchmarkReport { private final ILearnerRunReport runReport; public OpenMLAutoMLBenchmarkReport(final ILearnerRunReport runReport) { this.runReport = runReport; } @Override public String toString() { StringBuilder sb = new StringBuilder(); if (this.runReport.getTestSet().getLabelAttribute() instanceof ICategoricalAttribute) { // classification data List<String> labels = ((ICategoricalAttribute) this.runReport.getTestSet().getLabelAttribute()).getLabels(); // write headers of csv format. sb.append(labels.stream().collect(Collectors.joining(","))).append(",").append("predictions").append(",").append("truth").append("\n"); IPredictionAndGroundTruthTable<Integer, ISingleLabelClassification> castedReport = this.runReport.getPredictionDiffList().getCastedView(Integer.class, ISingleLabelClassification.class); // add all the prediction rows. for (int i = 0; i < castedReport.size(); i++) { ISingleLabelClassification pred = castedReport.getPrediction(i); sb.append(IntStream.range(0, labels.size()).mapToObj(x -> pred.getProbabilityOfLabel(x) + "").collect(Collectors.joining(","))).append(",").append(labels.get(pred.getPrediction())).append(",") .append(labels.get((int) this.runReport.getTestSet().get(i).getLabel())).append("\n"); } } else { // regression data sb.append("predictions").append(",").append("truth").append("\n"); IPredictionAndGroundTruthTable<Double, IRegressionPrediction> castedReport = this.runReport.getPredictionDiffList().getCastedView(Double.class, IRegressionPrediction.class); for (int i = 0; i < castedReport.size(); i++) { sb.append(castedReport.getPrediction(i).getDoublePrediction()).append(",").append(castedReport.getGroundTruth(i)).append("\n"); } } return sb.toString(); } }
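A usage sketch: the report is emitted via its toString(), as the CLI above does for the O_OUT_OPENML_BENCHMARK option; the file name and the two-label header below are illustrative assumptions (one probability column per label, then the predicted and true label):
// Example CSV shape for a binary task with labels "neg","pos" (values made up):
// neg,pos,predictions,truth
// 0.92,0.08,neg,neg
// 0.31,0.69,pos,neg
String csv = new OpenMLAutoMLBenchmarkReport(runReport).toString(); // runReport: an ILearnerRunReport from a test run
java.nio.file.Files.writeString(java.nio.file.Path.of("openml_report.csv"), csv);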
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/report/StatisticsListener.java
package ai.libs.mlplan.cli.report; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import com.google.common.eventbus.Subscribe; import ai.libs.jaicore.basic.StatisticsUtil; import ai.libs.mlplan.core.events.ClassifierFoundEvent; public class StatisticsListener { private AtomicInteger modelsEvaluatedCounter; private Map<String, List<Double>> rootLearnerStatistics; private Lock lock = new ReentrantLock(); public StatisticsListener() { this.modelsEvaluatedCounter = new AtomicInteger(0); this.rootLearnerStatistics = new HashMap<>(); } @Subscribe public void rcvClassifierFoundEvent(final ClassifierFoundEvent e) { this.modelsEvaluatedCounter.incrementAndGet(); this.lock.lock(); try { this.rootLearnerStatistics.computeIfAbsent(e.getComponentDescription().getComponent().getName(), t -> new ArrayList<>()).add(e.getScore()); } finally { this.lock.unlock(); } } public int getNumModelsEvaluated() { return this.modelsEvaluatedCounter.get(); } public Map<String, Map<String, Double>> getRootLearnerStatistics() { Map<String, Map<String, Double>> result = new HashMap<>(); for (Entry<String, List<Double>> entry : this.rootLearnerStatistics.entrySet()) { Map<String, Double> stats = new HashMap<>(); stats.put("n", (double) entry.getValue().size()); stats.put("max_score", StatisticsUtil.max(entry.getValue())); stats.put("min_score", StatisticsUtil.min(entry.getValue())); stats.put("mean_score", StatisticsUtil.mean(entry.getValue())); stats.put("median_score", StatisticsUtil.median(entry.getValue())); stats.put("score_variance", StatisticsUtil.variance(entry.getValue())); stats.put("score_standarddeviation", StatisticsUtil.standardDeviation(entry.getValue())); result.put(entry.getKey(), stats); } return result; } }
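A minimal sketch of how this listener is wired up, mirroring the CLI code above; mlplan is assumed to be a built MLPlan instance:
StatisticsListener stats = new StatisticsListener();
mlplan.registerListener(stats); // receives one ClassifierFoundEvent per evaluated candidate
mlplan.call(); // run the optimization
int n = stats.getNumModelsEvaluated(); // total number of evaluated models
java.util.Map<String, java.util.Map<String, Double>> perLearner = stats.getRootLearnerStatistics(); // score statistics per root learner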
0
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli
java-sources/ai/libs/mlplan-full/0.2.7/ai/libs/mlplan/cli/report/StatisticsReport.java
package ai.libs.mlplan.cli.report; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; import java.util.stream.IntStream; import org.api4.java.ai.ml.classification.singlelabel.evaluation.ISingleLabelClassification; import org.api4.java.ai.ml.core.dataset.schema.attribute.ICategoricalAttribute; import org.api4.java.ai.ml.core.evaluation.IPredictionAndGroundTruthTable; import org.api4.java.ai.ml.core.evaluation.execution.ILearnerRunReport; import org.api4.java.ai.ml.regression.evaluation.IRegressionPrediction; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.SerializationFeature; import ai.libs.jaicore.components.api.IComponentInstance; import ai.libs.jaicore.components.model.ComponentInstanceUtil; import ai.libs.jaicore.ml.classification.loss.dataset.AreaUnderPrecisionRecallCurve; import ai.libs.jaicore.ml.classification.loss.dataset.AveragedInstanceLoss; import ai.libs.jaicore.ml.classification.loss.dataset.EClassificationPerformanceMeasure; import ai.libs.jaicore.ml.classification.loss.dataset.F1Measure; import ai.libs.jaicore.ml.classification.loss.instance.LogLoss; import ai.libs.jaicore.ml.regression.loss.dataset.MeanAbsoluteError; import ai.libs.jaicore.ml.regression.loss.dataset.MeanAbsolutePercentageError; import ai.libs.jaicore.ml.regression.loss.dataset.R2; import ai.libs.jaicore.ml.regression.loss.dataset.RootMeanSquaredError; public class StatisticsReport { private static final ObjectMapper mapper = new ObjectMapper(); private final IComponentInstance selectedSolution; private final StatisticsListener statsListener; private final ILearnerRunReport runReport; public StatisticsReport(final StatisticsListener statsListener, final IComponentInstance selectedSolution, final ILearnerRunReport runReport) { mapper.enable(SerializationFeature.INDENT_OUTPUT); this.statsListener = statsListener; this.selectedSolution = selectedSolution; this.runReport = runReport; } @Override public String toString() { Map<String, Object> root = new HashMap<>(); root.put("selected_solution", ComponentInstanceUtil.getComponentInstanceString(this.selectedSolution)); root.put("num_evaluations", this.statsListener.getNumModelsEvaluated()); root.put("model_evaluation_stats", this.statsListener.getRootLearnerStatistics()); root.put("final_candidate_predict_time_ms", this.runReport.getTestEndTime() - this.runReport.getTestStartTime()); if (this.runReport.getTestSet().getLabelAttribute() instanceof ICategoricalAttribute) { // classification data List<String> labels = ((ICategoricalAttribute) this.runReport.getTestSet().getLabelAttribute()).getLabels(); root.put("probabilities_labels", labels); // write headers of csv format. IPredictionAndGroundTruthTable<Integer, ISingleLabelClassification> castedReport = this.runReport.getPredictionDiffList().getCastedView(Integer.class, ISingleLabelClassification.class); List<double[]> probabilities = new ArrayList<>(castedReport.size()); List<Integer> predictions = new ArrayList<>(castedReport.size()); // add all the prediction rows. 
for (int i = 0; i < castedReport.size(); i++) { ISingleLabelClassification pred = castedReport.getPrediction(i); probabilities.add(IntStream.range(0, labels.size()).mapToDouble(pred::getProbabilityOfLabel).toArray()); predictions.add(pred.getIntPrediction()); } root.put("predictions", predictions.stream().map(labels::get).collect(Collectors.toList())); root.put("probabilities", probabilities); root.put("truth", castedReport.getGroundTruthAsList().stream().map(labels::get).collect(Collectors.toList())); root.put("m_error_rate", EClassificationPerformanceMeasure.ERRORRATE.loss(castedReport)); if (labels.size() == 2) { root.put("m_auc_0", new AreaUnderPrecisionRecallCurve(0).score(castedReport)); root.put("m_auc_1", new AreaUnderPrecisionRecallCurve(1).score(castedReport)); root.put("m_f1_0", new F1Measure(0).score(castedReport)); root.put("m_f1_1", new F1Measure(1).score(castedReport)); } else { root.put("m_logloss", new AveragedInstanceLoss(new LogLoss()).loss(castedReport)); } } else { // regression data IPredictionAndGroundTruthTable<Double, IRegressionPrediction> castedReport = this.runReport.getPredictionDiffList().getCastedView(Double.class, IRegressionPrediction.class); root.put("predictions", castedReport.getPredictionsAsList().stream().map(IRegressionPrediction::getDoublePrediction).collect(Collectors.toList())); root.put("truth", castedReport.getGroundTruthAsList()); root.put("m_rmse", new RootMeanSquaredError().loss(castedReport)); root.put("m_mae", new MeanAbsoluteError().loss(castedReport)); root.put("m_mape", new MeanAbsolutePercentageError().loss(castedReport)); root.put("m_r2", new R2().loss(castedReport)); } try { return mapper.writerWithDefaultPrettyPrinter().writeValueAsString(root); } catch (JsonProcessingException e) { throw new IllegalArgumentException(e); } } }
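For orientation, a sketch of the JSON shape this report serializes to; the keys are taken from the code above, while all values are fabricated for illustration:
// {
//   "selected_solution" : "...",                 // component-instance string of the chosen pipeline
//   "num_evaluations" : 128,
//   "model_evaluation_stats" : { "<root learner>" : { "n" : 17.0, "mean_score" : 0.12, ... } },
//   "final_candidate_predict_time_ms" : 834,
//   "predictions" : [ ... ], "truth" : [ ... ],  // plus "probabilities"/"probabilities_labels" for classification
//   "m_error_rate" : 0.11                        // plus AUC/F1 for binary tasks, or log-loss otherwise
// }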
0
java-sources/ai/libs/mlplan-gui/0.2.7/ai/libs/mlplan/gui
java-sources/ai/libs/mlplan-gui/0.2.7/ai/libs/mlplan/gui/outofsampleplots/OutOfSampleErrorPlotPlugin.java
package ai.libs.mlplan.gui.outofsampleplots; import java.util.Arrays; import java.util.Collection; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import ai.libs.jaicore.graphvisualizer.events.recorder.property.AlgorithmEventPropertyComputer; import ai.libs.jaicore.graphvisualizer.plugin.ASimpleMVCPlugin; public class OutOfSampleErrorPlotPlugin extends ASimpleMVCPlugin<OutOfSampleErrorPlotPluginModel, OutOfSampleErrorPlotPluginView, OutOfSampleErrorPlotPluginController> { private final ILabeledDataset<?> trainData; private final ILabeledDataset<?> testData; public OutOfSampleErrorPlotPlugin(final String title, final ILabeledDataset<?> trainData, final ILabeledDataset<?> testData) { super(title); this.trainData = trainData; this.testData = testData; this.getController().setTrain(trainData); this.getController().setTest(testData); } public OutOfSampleErrorPlotPlugin(final ILabeledDataset<?> trainData, final ILabeledDataset<?> testData) { this("Out-of-Sample Error Timeline", trainData, testData); } public ILabeledDataset<?> getTrainData() { return this.trainData; } public ILabeledDataset<?> getTestData() { return this.testData; } @Override public Collection<AlgorithmEventPropertyComputer> getPropertyComputers() { return Arrays.asList(); // no property computer required } }
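A minimal wiring sketch, following the AlgorithmVisualizationWindow pattern used in the CLI above; mlplan, trainData and testData are assumed to exist in scope:
AlgorithmVisualizationWindow window = new AlgorithmVisualizationWindow(mlplan);
window.withPlugin(new OutOfSampleErrorPlotPlugin(trainData, testData)); // plots believed (internal) vs. out-of-sample error over time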
0
java-sources/ai/libs/mlplan-gui/0.2.7/ai/libs/mlplan/gui
java-sources/ai/libs/mlplan-gui/0.2.7/ai/libs/mlplan/gui/outofsampleplots/OutOfSampleErrorPlotPluginController.java
package ai.libs.mlplan.gui.outofsampleplots; import java.util.ArrayList; import java.util.List; import org.apache.commons.lang3.exception.ExceptionUtils; import org.api4.java.ai.ml.classification.IClassifier; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import org.api4.java.algorithm.events.serializable.IPropertyProcessedAlgorithmEvent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.fasterxml.jackson.databind.ObjectMapper; import ai.libs.jaicore.basic.reconstruction.ReconstructionPlan; import ai.libs.jaicore.graphvisualizer.events.gui.GUIEvent; import ai.libs.jaicore.graphvisualizer.plugin.ASimpleMVCPluginController; import ai.libs.jaicore.graphvisualizer.plugin.controlbar.ResetEvent; import ai.libs.jaicore.graphvisualizer.plugin.solutionperformanceplotter.ScoredSolutionCandidateInfo; import ai.libs.jaicore.graphvisualizer.plugin.solutionperformanceplotter.ScoredSolutionCandidateInfoAlgorithmEventPropertyComputer; import ai.libs.jaicore.graphvisualizer.plugin.timeslider.GoToTimeStepEvent; import ai.libs.jaicore.ml.classification.loss.dataset.EClassificationPerformanceMeasure; import ai.libs.jaicore.ml.core.evaluation.MLEvaluationUtil; import ai.libs.mlplan.core.events.ClassifierFoundEvent; public class OutOfSampleErrorPlotPluginController extends ASimpleMVCPluginController<OutOfSampleErrorPlotPluginModel, OutOfSampleErrorPlotPluginView> { private ILabeledDataset<?> train, test; private Logger logger = LoggerFactory.getLogger(OutOfSampleErrorPlotPlugin.class); public OutOfSampleErrorPlotPluginController(final OutOfSampleErrorPlotPluginModel model, final OutOfSampleErrorPlotPluginView view) { super(model, view); } public ILabeledDataset<?> getTrain() { return this.train; } public void setTrain(final ILabeledDataset<?> train) { this.train = train; } public ILabeledDataset<?> getTest() { return this.test; } public void setTest(final ILabeledDataset<?> test) { this.test = test; } @Override public void handleGUIEvent(final GUIEvent guiEvent) { if (guiEvent instanceof ResetEvent || guiEvent instanceof GoToTimeStepEvent) { this.getModel().clear(); } } @Override public void handleAlgorithmEventInternally(final IPropertyProcessedAlgorithmEvent algorithmEvent) { if (algorithmEvent.correspondsToEventOfClass(ClassifierFoundEvent.class)) { this.logger.debug("Received classifier found event {}", algorithmEvent); Object rawScoredSolutionCandidateInfo = algorithmEvent.getProperty(ScoredSolutionCandidateInfoAlgorithmEventPropertyComputer.SCORED_SOLUTION_CANDIDATE_INFO_PROPERTY_NAME); if (rawScoredSolutionCandidateInfo != null) { ScoredSolutionCandidateInfo scoredSolutionCandidateInfo = (ScoredSolutionCandidateInfo) rawScoredSolutionCandidateInfo; try { IClassifier classifier = this.deserializeClassifier(scoredSolutionCandidateInfo.getSolutionCandidateRepresentation()); this.logger.debug("Building classifier"); classifier.fit(this.train); List<Double> performances = new ArrayList<>(); performances.add(this.parseScoreToDouble(scoredSolutionCandidateInfo.getScore())); performances.add(MLEvaluationUtil.getLossForTrainedClassifier(classifier, this.test, EClassificationPerformanceMeasure.ERRORRATE)); this.logger.debug("Adding solution to model and updating view."); this.getModel().addEntry(algorithmEvent.getTimestampOfEvent(), classifier, performances); this.logger.debug("Added solution to model."); } catch (NumberFormatException exception) { this.logger.warn("Received processed SolutionCandidateFoundEvent, but the score {} cannot be parsed to a double.", 
scoredSolutionCandidateInfo.getScore()); return; } catch (Exception e) { this.logger.error("Could not train classifier: {}", ExceptionUtils.getMessage(e)); if (e instanceof InterruptedException) { Thread.currentThread().interrupt(); } } } } } private IClassifier deserializeClassifier(final String serializedClassifier) throws Exception { ReconstructionPlan plan = new ObjectMapper().readValue(serializedClassifier, ReconstructionPlan.class); return (IClassifier) plan.reconstructObject(); } private double parseScoreToDouble(final String score) throws NumberFormatException { return Double.parseDouble(score); } }
0
java-sources/ai/libs/mlplan-gui/0.2.7/ai/libs/mlplan/gui
java-sources/ai/libs/mlplan-gui/0.2.7/ai/libs/mlplan/gui/outofsampleplots/OutOfSampleErrorPlotPluginModel.java
package ai.libs.mlplan.gui.outofsampleplots; import java.util.ArrayList; import java.util.List; import org.api4.java.ai.ml.classification.IClassifier; import ai.libs.jaicore.graphvisualizer.plugin.ASimpleMVCPluginModel; /** * * @author fmohr * */ public class OutOfSampleErrorPlotPluginModel extends ASimpleMVCPluginModel<OutOfSampleErrorPlotPluginView, OutOfSampleErrorPlotPluginController> { private final List<Integer> timestamps = new ArrayList<>(); private final List<IClassifier> classifiers = new ArrayList<>(); private final List<List<Double>> performances = new ArrayList<>(); private long timestampOfFirstEvent = -1; public final void addEntry(final long timestamp, final IClassifier classifier, final List<Double> performances) { int offset = 0; if (this.timestampOfFirstEvent == -1) { this.timestampOfFirstEvent = timestamp; } else { offset = (int) (timestamp - this.timestampOfFirstEvent); } this.timestamps.add(offset); this.classifiers.add(classifier); this.performances.add(performances); this.getView().update(); } public long getTimestampOfFirstEvent() { return this.timestampOfFirstEvent; } @Override public void clear() { this.timestamps.clear(); this.classifiers.clear(); this.performances.clear(); this.timestampOfFirstEvent = -1; this.getView().clear(); } public List<Integer> getTimestamps() { return this.timestamps; } public List<IClassifier> getClassifiers() { return this.classifiers; } public List<List<Double>> getPerformances() { return this.performances; } public void setTimestampOfFirstEvent(final long timestampOfFirstEvent) { this.timestampOfFirstEvent = timestampOfFirstEvent; } }
0
java-sources/ai/libs/mlplan-gui/0.2.7/ai/libs/mlplan/gui
java-sources/ai/libs/mlplan-gui/0.2.7/ai/libs/mlplan/gui/outofsampleplots/OutOfSampleErrorPlotPluginView.java
package ai.libs.mlplan.gui.outofsampleplots; import java.util.ArrayList; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.graphvisualizer.plugin.ASimpleMVCPluginView; import javafx.application.Platform; import javafx.scene.chart.LineChart; import javafx.scene.chart.NumberAxis; import javafx.scene.chart.XYChart.Data; import javafx.scene.chart.XYChart.Series; /** * * @author fmohr * */ public class OutOfSampleErrorPlotPluginView extends ASimpleMVCPluginView<OutOfSampleErrorPlotPluginModel, OutOfSampleErrorPlotPluginController, LineChart<Number, Number>> { private Logger logger = LoggerFactory.getLogger(OutOfSampleErrorPlotPluginView.class); private final Series<Number, Number> believedErrorSeries; private final Series<Number, Number> outOfSampleErrorSeries; private int nextIndexToDisplay = 0; public OutOfSampleErrorPlotPluginView(final OutOfSampleErrorPlotPluginModel model) { super(model, new LineChart<>(new NumberAxis(), new NumberAxis())); this.getNode().getXAxis().setLabel("elapsed time (s)"); this.believedErrorSeries = new Series<>(); this.believedErrorSeries.setName("Believed (internal) Error"); this.outOfSampleErrorSeries = new Series<>(); this.outOfSampleErrorSeries.setName("Out-of-Sample Error"); this.getNode().getData().add(this.believedErrorSeries); this.getNode().getData().add(this.outOfSampleErrorSeries); } @Override public void update() { /* compute data to add */ List<Integer> observedTimestamps = this.getModel().getTimestamps(); List<List<Double>> performances = this.getModel().getPerformances(); List<Data<Number, Number>> believedErrors = new ArrayList<>(); List<Data<Number, Number>> outOfSampleErrors = new ArrayList<>(); for (; this.nextIndexToDisplay < observedTimestamps.size(); this.nextIndexToDisplay++) { int timestamp = observedTimestamps.get(this.nextIndexToDisplay) / 1000; // timestamps are millisecond offsets; convert to seconds to match the axis label believedErrors.add(new Data<>(timestamp, performances.get(this.nextIndexToDisplay).get(0))); outOfSampleErrors.add(new Data<>(timestamp, performances.get(this.nextIndexToDisplay).get(1))); } this.logger.info("Adding {} values to chart.", believedErrors.size()); Platform.runLater(() -> { this.believedErrorSeries.getData().addAll(believedErrors); this.outOfSampleErrorSeries.getData().addAll(outOfSampleErrors); }); } @Override public void clear() { this.nextIndexToDisplay = 0; this.believedErrorSeries.getData().clear(); this.outOfSampleErrorSeries.getData().clear(); } public int getNextIndexToDisplay() { return this.nextIndexToDisplay; } }
0
java-sources/ai/libs/mlplan-gui/0.2.7/ai/libs/mlplan/gui
java-sources/ai/libs/mlplan-gui/0.2.7/ai/libs/mlplan/gui/outofsampleplots/WekaClassifierSolutionCandidateRepresenter.java
package ai.libs.mlplan.gui.outofsampleplots; import ai.libs.jaicore.graphvisualizer.plugin.solutionperformanceplotter.SolutionCandidateRepresenter; public class WekaClassifierSolutionCandidateRepresenter implements SolutionCandidateRepresenter { @Override public String getStringRepresentationOfSolutionCandidate(final Object solutionCandidate) { return solutionCandidate.toString(); } }
0
java-sources/ai/libs/mlplan-meka/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-meka/0.2.7/ai/libs/mlplan/meka/EMLPlanMekaProblemType.java
package ai.libs.mlplan.meka; import org.api4.java.ai.ml.core.dataset.splitter.IFoldSizeConfigurableRandomDatasetSplitter; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import org.api4.java.ai.ml.core.evaluation.supervised.loss.IDeterministicPredictionPerformanceMeasure; import ai.libs.jaicore.ml.classification.multilabel.evaluation.loss.InstanceWiseF1; import ai.libs.jaicore.ml.classification.multilabel.learner.IMekaClassifier; import ai.libs.jaicore.ml.core.dataset.splitter.RandomHoldoutSplitter; import ai.libs.mlplan.core.ILearnerFactory; import ai.libs.mlplan.core.IProblemType; import ai.libs.mlplan.core.PipelineValidityCheckingNodeEvaluator; public enum EMLPlanMekaProblemType implements IProblemType<IMekaClassifier> { CLASSIFICATION_MULTILABEL("automl/searchmodels/meka/mlplan-meka.json", "conf/searchmodels/mlplan-meka.json", "mlplan/meka-preferenceList.txt", "conf/mlpan-meka-preferenceList.txt", "MLClassifier", "MLClassifier", new MekaPipelineFactory(), new InstanceWiseF1(), new InstanceWiseF1(), new RandomHoldoutSplitter<>(0, .7)); private final String searchSpaceConfigFileFromResource; private final String systemSearchSpaceConfigFromFileSystem; private final String preferedComponentsListFromResource; private final String preferedComponentsListFromFileSystem; private final String requestedHascoInterface; private final String requestedBasicProblemInterface; private final ILearnerFactory<IMekaClassifier> learnerFactory; private final IDeterministicPredictionPerformanceMeasure<?, ?> performanceMetricForSearchPhase; private final IDeterministicPredictionPerformanceMeasure<?, ?> performanceMetricForSelectionPhase; private final IFoldSizeConfigurableRandomDatasetSplitter<ILabeledDataset<?>> searchSelectionDatasetSplitter; private EMLPlanMekaProblemType(final String searchSpaceConfigFileFromResource, final String systemSearchSpaceConfigFromFileSystem, final String preferedComponentsListFromResource, final String preferedComponentsListFromFileSystem, final String requestedHascoInterface, final String requestedBasicProblemInterface, final ILearnerFactory<IMekaClassifier> learnerFactory, final IDeterministicPredictionPerformanceMeasure<?, ?> performanceMetricForSearchPhase, final IDeterministicPredictionPerformanceMeasure<?, ?> performanceMetricForSelectionPhase, final IFoldSizeConfigurableRandomDatasetSplitter<ILabeledDataset<?>> searchSelectionDatasetSplitter) { this.searchSpaceConfigFileFromResource = searchSpaceConfigFileFromResource; this.systemSearchSpaceConfigFromFileSystem = systemSearchSpaceConfigFromFileSystem; this.preferedComponentsListFromResource = preferedComponentsListFromResource; this.preferedComponentsListFromFileSystem = preferedComponentsListFromFileSystem; this.requestedHascoInterface = requestedHascoInterface; this.requestedBasicProblemInterface = requestedBasicProblemInterface; this.learnerFactory = learnerFactory; this.performanceMetricForSearchPhase = performanceMetricForSearchPhase; this.performanceMetricForSelectionPhase = performanceMetricForSelectionPhase; this.searchSelectionDatasetSplitter = searchSelectionDatasetSplitter; } @Override public String getSearchSpaceConfigFileFromResource() { return this.searchSpaceConfigFileFromResource; } @Override public String getSearchSpaceConfigFromFileSystem() { return this.systemSearchSpaceConfigFromFileSystem; } @Override public String getPreferredComponentListFromResource() { return this.preferedComponentsListFromResource; } @Override public String getPreferredComponentListFromFileSystem() { return 
this.preferedComponentsListFromFileSystem; } @Override public String getRequestedInterface() { return this.requestedHascoInterface; } @Override public String getLastHASCOMethodPriorToParameterRefinementOfBareLearner() { return this.getPreferredComponentName(this.requestedHascoInterface); } @Override public String getLastHASCOMethodPriorToParameterRefinementOfPipeline() { return this.getPreferredComponentName(this.requestedBasicProblemInterface); } private String getPreferredComponentName(final String requestedInterface) { return "resolve" + requestedInterface + "With"; } @Override public IDeterministicPredictionPerformanceMeasure<?, ?> getPerformanceMetricForSearchPhase() { return this.performanceMetricForSearchPhase; } @Override public IDeterministicPredictionPerformanceMeasure<?, ?> getPerformanceMetricForSelectionPhase() { return this.performanceMetricForSelectionPhase; } @Override public String getName() { return this.getClass().getSimpleName() + "." + this.toString(); } @Override public IFoldSizeConfigurableRandomDatasetSplitter<ILabeledDataset<?>> getSearchSelectionDatasetSplitter() { return this.searchSelectionDatasetSplitter; } @Override public ILearnerFactory<IMekaClassifier> getLearnerFactory() { return this.learnerFactory; } @Override public PipelineValidityCheckingNodeEvaluator getValidityCheckingNodeEvaluator() { return null; // we do not have such a checker for meka pipelines } }
0
java-sources/ai/libs/mlplan-meka/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-meka/0.2.7/ai/libs/mlplan/meka/IMekaPipelineFactory.java
package ai.libs.mlplan.meka; import ai.libs.jaicore.ml.classification.multilabel.learner.IMekaClassifier; import ai.libs.mlplan.core.ILearnerFactory; public interface IMekaPipelineFactory extends ILearnerFactory<IMekaClassifier> { }
0
java-sources/ai/libs/mlplan-meka/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-meka/0.2.7/ai/libs/mlplan/meka/ML2Plan4Meka.java
package ai.libs.mlplan.meka; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import ai.libs.jaicore.ml.classification.multilabel.learner.IMekaClassifier; import ai.libs.mlplan.core.MLPlan; public class ML2Plan4Meka extends MLPlan<IMekaClassifier> { public ML2Plan4Meka(final ML2PlanMekaBuilder builder, final ILabeledDataset<?> data) { super(builder, data); } }
0
java-sources/ai/libs/mlplan-meka/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-meka/0.2.7/ai/libs/mlplan/meka/ML2PlanMekaBuilder.java
package ai.libs.mlplan.meka; import java.io.IOException; import java.util.Arrays; import java.util.List; import org.api4.java.ai.ml.classification.multilabel.evaluation.loss.IMultiLabelClassificationPredictionPerformanceMeasure; import ai.libs.jaicore.ml.classification.multilabel.evaluation.loss.AutoMEKAGGPFitnessMeasureLoss; import ai.libs.jaicore.ml.classification.multilabel.learner.IMekaClassifier; import ai.libs.jaicore.ml.core.evaluation.evaluator.factory.ISupervisedLearnerEvaluatorFactory; import ai.libs.jaicore.ml.core.evaluation.evaluator.factory.MonteCarloCrossValidationEvaluatorFactory; import ai.libs.mlplan.core.AMLPlanBuilder; import ai.libs.mlplan.core.IProblemType; public class ML2PlanMekaBuilder extends AMLPlanBuilder<IMekaClassifier, ML2PlanMekaBuilder> { public ML2PlanMekaBuilder() throws IOException { this(EMLPlanMekaProblemType.CLASSIFICATION_MULTILABEL); } public ML2PlanMekaBuilder(final IProblemType<IMekaClassifier> problemType) throws IOException { super(problemType); } /** * Configures ML-Plan to use the same fitness measure as AutoMEKA_GGP and GA-Auto-MLC. * @return The builder object. */ public ML2PlanMekaBuilder withAutoMEKADefaultConfiguration() { this.withPerformanceMeasure(new AutoMEKAGGPFitnessMeasureLoss()); return this; } /** * Sets the performance measure to evaluate a candidate solution's generalization performance. Caution: The measure is only applied if the evaluator factories for the search and selection phase are Monte Carlo cross-validation factories. * @param measure The performance measure to be used. * @return The builder object. */ public ML2PlanMekaBuilder withPerformanceMeasure(final IMultiLabelClassificationPredictionPerformanceMeasure measure) { List<ISupervisedLearnerEvaluatorFactory> phaseList = Arrays.asList(this.getSearchEvaluatorFactory(), this.getSelectionEvaluatorFactory()); for (ISupervisedLearnerEvaluatorFactory factory : phaseList) { if (factory instanceof MonteCarloCrossValidationEvaluatorFactory) { ((MonteCarloCrossValidationEvaluatorFactory) factory).withMeasure(measure); } } return this; } @Override public ML2PlanMekaBuilder getSelf() { return this; } }
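A minimal usage sketch, assuming data is an ILabeledDataset<?> backed by a MEKA-style multi-label dataset; the fluent withDataset call is provided by AMLPlanBuilder, and the return type of call() is assumed from the MLPlan<IMekaClassifier> generic (exception handling omitted):
ML2PlanMekaBuilder builder = new ML2PlanMekaBuilder().withAutoMEKADefaultConfiguration(); // optimize the AutoMEKA_GGP fitness measure
builder.withDataset(data);
ML2Plan4Meka ml2plan = new ML2Plan4Meka(builder, data);
IMekaClassifier best = ml2plan.call(); // runs search and selection, returns the chosen multi-label classifier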
0
java-sources/ai/libs/mlplan-meka/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-meka/0.2.7/ai/libs/mlplan/meka/MekaPipelineFactory.java
package ai.libs.mlplan.meka; import java.util.LinkedList; import java.util.List; import java.util.Map.Entry; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.basic.sets.SetUtil; import ai.libs.jaicore.components.api.IComponentInstance; import ai.libs.jaicore.components.api.IParameter; import ai.libs.jaicore.components.exceptions.ComponentInstantiationFailedException; import ai.libs.jaicore.components.model.NumericParameterDomain; import ai.libs.jaicore.ml.classification.multilabel.learner.IMekaClassifier; import ai.libs.jaicore.ml.classification.multilabel.learner.MekaClassifier; import meka.classifiers.multilabel.MultiLabelClassifier; import weka.classifiers.Classifier; import weka.classifiers.MultipleClassifiersCombiner; import weka.classifiers.SingleClassifierEnhancer; import weka.classifiers.functions.SMO; import weka.classifiers.functions.SMOreg; import weka.classifiers.functions.supportVector.Kernel; import weka.core.OptionHandler; /** * A pipeline factory that converts a given ComponentInstance that consists of * components that correspond to MEKA algorithms into a MultiLabelClassifier. * */ public class MekaPipelineFactory implements IMekaPipelineFactory { private static final String PARAMETER_NAME_WITH_DASH_WARNING = "Required interface of component {} has dash or underscore in interface id {}"; /* logging */ private static final Logger logger = LoggerFactory.getLogger(MekaPipelineFactory.class); @Override public IMekaClassifier getComponentInstantiation(final IComponentInstance ci) throws ComponentInstantiationFailedException { MultiLabelClassifier instance = null; try { instance = (MultiLabelClassifier) this.getClassifier(ci); return new MekaClassifier(instance); } catch (Exception e) { throw new ComponentInstantiationFailedException(e, "Could not instantiate " + ci.getComponent().getName()); } } private Classifier getClassifier(final IComponentInstance ci) throws Exception { Classifier c = (Classifier) Class.forName(ci.getComponent().getName()).getDeclaredConstructor().newInstance(); List<String> optionsList = getOptionsForParameterValues(ci); if (c instanceof OptionHandler) { ((OptionHandler) c).setOptions(optionsList.toArray(new String[0])); } for (Entry<String, List<IComponentInstance>> reqI : ci.getSatisfactionOfRequiredInterfaces().entrySet()) { if (reqI.getKey().startsWith("-") || reqI.getKey().startsWith("_")) { logger.warn(PARAMETER_NAME_WITH_DASH_WARNING, ci.getComponent(), reqI.getKey()); } IComponentInstance subCI = reqI.getValue().iterator().next(); if (reqI.getKey().equals("K") && (ci.getComponent().getName().endsWith("SMO") || ci.getComponent().getName().endsWith("SMOreg"))) { if (logger.isDebugEnabled()) { logger.debug("Set kernel for SMO to be {}", subCI.getComponent().getName()); } Kernel k = (Kernel) Class.forName(subCI.getComponent().getName()).getDeclaredConstructor().newInstance(); k.setOptions(getOptionsForParameterValues(subCI).toArray(new String[0])); if (c instanceof SMO) { ((SMO) c).setKernel(k); } else if (c instanceof SMOreg) { ((SMOreg) c).setKernel(k); } } else if (reqI.getKey().equals("B") && (c instanceof MultipleClassifiersCombiner)) { Classifier[] classifiers = this.getListOfBaseLearners(subCI).toArray(new Classifier[0]); ((MultipleClassifiersCombiner) c).setClassifiers(classifiers); } else if (reqI.getKey().equals("W") && (c instanceof SingleClassifierEnhancer)) { if (logger.isTraceEnabled()) { logger.trace("Set {} as a base classifier for {}", subCI.getComponent().getName(), ci.getComponent().getName()); }
((SingleClassifierEnhancer) c).setClassifier(this.getClassifier(subCI)); } } return c; } private List<Classifier> getListOfBaseLearners(final IComponentInstance ci) throws Exception { List<Classifier> baseLearnerList = new LinkedList<>(); if (ci.getComponent().getName().equals("MultipleBaseLearnerListElement")) { baseLearnerList.add(this.getClassifier(ci.getSatisfactionOfRequiredInterface("classifier").iterator().next())); } else if (ci.getComponent().getName().equals("MultipleBaseLearnerListChain")) { baseLearnerList.add(this.getClassifier(ci.getSatisfactionOfRequiredInterface("classifier").iterator().next())); baseLearnerList.addAll(this.getListOfBaseLearners(ci.getSatisfactionOfRequiredInterface("chain").iterator().next())); } return baseLearnerList; } public static List<String> getOptionsForParameterValues(final IComponentInstance ci) { List<String> optionsList = new LinkedList<>(); for (Entry<String, String> parameterValue : ci.getParameterValues().entrySet()) { IParameter param = ci.getComponent().getParameter(parameterValue.getKey()); if (param.isDefaultValue(parameterValue.getValue()) || parameterValue.getKey().toLowerCase().contains("activator") || parameterValue.getValue().equals("false")) { continue; } if (parameterValue.getValue().equals("true")) { optionsList.add("-" + parameterValue.getKey()); } else { optionsList.add("-" + parameterValue.getKey()); if (ci.getComponent().getParameter(parameterValue.getKey()).isNumeric()) { NumericParameterDomain numDom = (NumericParameterDomain) ci.getComponent().getParameter(parameterValue.getKey()).getDefaultDomain(); if (numDom.isInteger()) { optionsList.add(((int) Double.parseDouble(parameterValue.getValue())) + ""); } else { optionsList.add(parameterValue.getValue()); } } else { optionsList.add(parameterValue.getValue()); } } } return optionsList; } private List<String> getOptionsRecursively(final IComponentInstance ci) { List<String> optionsList = getOptionsForParameterValues(ci); for (Entry<String, List<IComponentInstance>> reqI : ci.getSatisfactionOfRequiredInterfaces().entrySet()) { if (reqI.getKey().startsWith("-") || reqI.getKey().startsWith("_")) { logger.warn(PARAMETER_NAME_WITH_DASH_WARNING, ci.getComponent(), reqI.getKey()); } optionsList.add("-" + reqI.getKey()); IComponentInstance subComponentInstance = reqI.getValue().iterator().next(); if (reqI.getKey().equals("B") || reqI.getKey().equals("K")) { List<String> valueList = new LinkedList<>(); valueList.add(subComponentInstance.getComponent().getName()); valueList.addAll(this.getOptionsRecursively(subComponentInstance)); optionsList.add(SetUtil.implode(valueList, " ")); } else { optionsList.add(subComponentInstance.getComponent().getName()); if (!subComponentInstance.getParameterValues().isEmpty() || !subComponentInstance.getSatisfactionOfRequiredInterfaces().isEmpty()) { optionsList.add("--"); optionsList.addAll(this.getOptionsRecursively(subComponentInstance)); } } } return optionsList; } }
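A brief, hypothetical sketch of the factory contract above; the ground IComponentInstance is assumed to come from an ML2-Plan search-space repository (not shown here), and the sketch class is an assumption.

// Hypothetical sketch of the MekaPipelineFactory contract (assumes a ground IComponentInstance `ci`).
import ai.libs.jaicore.components.api.IComponentInstance;
import ai.libs.jaicore.components.exceptions.ComponentInstantiationFailedException;
import ai.libs.jaicore.ml.classification.multilabel.learner.IMekaClassifier;
import ai.libs.mlplan.meka.MekaPipelineFactory;

public class MekaFactorySketch {
    public static IMekaClassifier instantiate(final IComponentInstance ci) throws ComponentInstantiationFailedException {
        // Translate parameter values into WEKA/MEKA option strings, e.g. ["-I", "10"].
        // (getOptionsForParameterValues skips default values and boolean "false" flags.)
        System.out.println(MekaPipelineFactory.getOptionsForParameterValues(ci));
        return new MekaPipelineFactory().getComponentInstantiation(ci);
    }
}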
0
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan/sklearn/AScikitLearnLearnerFactory.java
package ai.libs.mlplan.sklearn; import java.io.IOException; import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Map.Entry; import java.util.Set; import java.util.stream.Collectors; import org.aeonbits.owner.ConfigFactory; import org.api4.java.algorithm.Timeout; import org.api4.java.common.control.ILoggingCustomizable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.components.api.IComponentInstance; import ai.libs.jaicore.components.api.IParameter; import ai.libs.jaicore.components.exceptions.ComponentInstantiationFailedException; import ai.libs.jaicore.components.model.CategoricalParameterDomain; import ai.libs.jaicore.components.model.NumericParameterDomain; import ai.libs.jaicore.ml.scikitwrapper.IScikitLearnWrapper; import ai.libs.mlplan.core.ILearnerFactory; import ai.libs.python.IPythonConfig; /** * The AScikitLearnLearnerFactory takes a ground component instance and parses it into a <code>ScikitLearnWrapper</code> as defined in the project jaicore-ml. This factory may be used in the context of HASCO, especially for ML-Plan. * * @author wever */ public abstract class AScikitLearnLearnerFactory implements ILearnerFactory<IScikitLearnWrapper>, ILoggingCustomizable { public static final String N_PREPROCESSOR = "preprocessor"; private static final CategoricalParameterDomain BOOL_DOMAIN = new CategoricalParameterDomain(Arrays.asList("True", "False")); private static final List<String> EXCEPTIONS = Arrays.asList("None", "np.inf", "f_regression"); private Logger logger = LoggerFactory.getLogger(AScikitLearnLearnerFactory.class); private String loggerName; private IPythonConfig pythonConfig = ConfigFactory.create(IPythonConfig.class); private long seed; private Timeout timeout; protected AScikitLearnLearnerFactory() { super(); } public abstract IScikitLearnWrapper getScikitLearnWrapper(String constructionString, String imports) throws IOException, InterruptedException; @Override public IScikitLearnWrapper getComponentInstantiation(final IComponentInstance groundComponent) throws ComponentInstantiationFailedException, InterruptedException { this.logger.debug("Parse ground component instance {} to ScikitLearnWrapper object.", groundComponent); StringBuilder constructInstruction = new StringBuilder(); Set<String> importSet = new HashSet<>(); constructInstruction.append(this.extractSKLearnConstructInstruction(groundComponent, importSet)); StringBuilder imports = new StringBuilder(); importSet.stream().sorted().collect(Collectors.toList()).forEach(imports::append); String constructionString = constructInstruction.toString(); this.logger.info("Created construction string: {}", constructionString); try { IScikitLearnWrapper wrapper = this.getScikitLearnWrapper(constructionString, imports.toString()); if (this.pythonConfig != null) { wrapper.setPythonConfig(this.pythonConfig); } wrapper.setSeed(this.seed); wrapper.setTimeout(this.timeout); return wrapper; } catch (IOException e) { this.logger.error("Could not create sklearn wrapper for construction {} and imports {}.", constructInstruction, imports); return null; } } public abstract String getPipelineBuildString(final IComponentInstance groundComponent, final Set<String> importSet); public String extractSKLearnConstructInstruction(final IComponentInstance groundComponent, final Set<String> importSet) { StringBuilder sb = new StringBuilder(); if (groundComponent.getComponent().getName().startsWith("mlplan.util.model.make_forward")) {
sb.append(this.extractSKLearnConstructInstruction(groundComponent.getSatisfactionOfRequiredInterface("source").iterator().next(), importSet)); sb.append(","); sb.append(this.extractSKLearnConstructInstruction(groundComponent.getSatisfactionOfRequiredInterface("base").iterator().next(), importSet)); return sb.toString(); } String[] packagePathSplit = groundComponent.getComponent().getName().split("\\."); StringBuilder fromSB = new StringBuilder(); fromSB.append(packagePathSplit[0]); for (int i = 1; i < packagePathSplit.length - 1; i++) { fromSB.append("." + packagePathSplit[i]); } String className = packagePathSplit[packagePathSplit.length - 1]; if (packagePathSplit.length > 1) { importSet.add("from " + fromSB.toString() + " import " + className + "\n"); } if (groundComponent.getComponent().getName().startsWith("sklearn.feature_selection.f_classif")) { sb.append("f_classif(features, targets)"); return sb.toString(); } sb.append(className); sb.append("("); if (groundComponent.getComponent().getName().contains("make_pipeline")) { sb.append(this.getPipelineBuildString(groundComponent, importSet)); } else if (groundComponent.getComponent().getName().contains("make_union")) { sb.append(this.extractSKLearnConstructInstruction(groundComponent.getSatisfactionOfRequiredInterface("p1").iterator().next(), importSet)); sb.append(","); sb.append(this.extractSKLearnConstructInstruction(groundComponent.getSatisfactionOfRequiredInterface("p2").iterator().next(), importSet)); } else { boolean first = true; for (Entry<String, String> parameterValue : groundComponent.getParameterValues().entrySet().stream().sorted((o1, o2) -> o1.getKey().compareTo(o2.getKey())).collect(Collectors.toList())) { if (first) { first = false; } else { sb.append(","); } IParameter param = groundComponent.getComponent().getParameter(parameterValue.getKey()); sb.append(parameterValue.getKey() + "="); if (param.isNumeric()) { if (((NumericParameterDomain) param.getDefaultDomain()).isInteger()) { sb.append((int) Double.parseDouble(parameterValue.getValue())); } else { sb.append(Double.parseDouble(parameterValue.getValue())); } } else if (param.isCategorical()) { if (BOOL_DOMAIN.subsumes(param.getDefaultDomain()) || EXCEPTIONS.contains(parameterValue.getValue())) { sb.append(parameterValue.getValue()); } else { // if the categorical parameter contains numeric values, try to parse it as int or as double, and use the value itself if neither works try { sb.append(Integer.parseInt(parameterValue.getValue())); } catch (NumberFormatException e) { try { sb.append(Double.parseDouble(parameterValue.getValue())); } catch (NumberFormatException e1) { sb.append("\"" + parameterValue.getValue() + "\""); } } } } else { throw new UnsupportedOperationException("The given parameter type is unknown for parameter " + param); } } for (Entry<String, List<IComponentInstance>> satReqI : groundComponent.getSatisfactionOfRequiredInterfaces().entrySet().stream().sorted((o1, o2) -> o1.getKey().compareTo(o2.getKey())).collect(Collectors.toList())) { if (first) { first = false; } else { sb.append(","); } sb.append(satReqI.getKey() + "="); sb.append(this.extractSKLearnConstructInstruction(satReqI.getValue().iterator().next(), importSet)); } } sb.append(")"); return sb.toString(); } @Override public String getLoggerName() { return this.loggerName; } @Override public void setLoggerName(final String name) { this.loggerName = name; this.logger.debug("Switching logger name to {}", name); this.logger = LoggerFactory.getLogger(name); this.logger.debug("Switched 
SKLearnClassifierFactory logger to {}", name); } public void setPythonConfig(final IPythonConfig pythonConfig) { this.pythonConfig = pythonConfig; } public void setSeed(final long seed) { this.seed = seed; } public void setTimeout(final Timeout timeout) { this.timeout = timeout; } }
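To illustrate the construction-string mechanics above, a hedged sketch using the concrete ScikitLearnClassifierFactory defined further below in this dump; the sketch class and seed value are assumptions, and the ground component instance is taken as a parameter rather than constructed here.

// Hypothetical sketch: turning a ground component instance into a scikit-learn wrapper.
import ai.libs.jaicore.components.api.IComponentInstance;
import ai.libs.jaicore.ml.scikitwrapper.IScikitLearnWrapper;
import ai.libs.mlplan.sklearn.ScikitLearnClassifierFactory;

public class SkLearnFactorySketch {
    public static IScikitLearnWrapper instantiate(final IComponentInstance ci) throws Exception {
        ScikitLearnClassifierFactory factory = new ScikitLearnClassifierFactory();
        factory.setSeed(42); // assumed seed; propagated to the python process
        // factory.setTimeout(...) could additionally bound each candidate evaluation.
        // Builds a construction string such as "make_pipeline(StandardScaler(),SVC(C=1.0))"
        // together with the matching "from ... import ..." lines.
        return factory.getComponentInstantiation(ci);
    }
}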
0
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan/sklearn/ATwoStepPipelineScikitLearnFactory.java
package ai.libs.mlplan.sklearn; import java.util.Set; import ai.libs.jaicore.components.api.IComponentInstance; public abstract class ATwoStepPipelineScikitLearnFactory extends AScikitLearnLearnerFactory { private final String learnerFieldName; protected ATwoStepPipelineScikitLearnFactory(final String learnerFieldName) { super(); this.learnerFieldName = learnerFieldName; } @Override public String getPipelineBuildString(final IComponentInstance groundComponent, final Set<String> importSet) { StringBuilder sb = new StringBuilder(); sb.append(this.extractSKLearnConstructInstruction(groundComponent.getSatisfactionOfRequiredInterface(N_PREPROCESSOR).iterator().next(), importSet)); sb.append(","); sb.append(this.extractSKLearnConstructInstruction(groundComponent.getSatisfactionOfRequiredInterface(this.learnerFieldName).iterator().next(), importSet)); return sb.toString(); } }
0
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan/sklearn/EMLPlanScikitLearnProblemType.java
package ai.libs.mlplan.sklearn; import java.util.Random; import org.api4.java.ai.ml.core.dataset.splitter.IFoldSizeConfigurableRandomDatasetSplitter; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import org.api4.java.ai.ml.core.evaluation.supervised.loss.IDeterministicPredictionPerformanceMeasure; import ai.libs.jaicore.ml.classification.loss.dataset.EClassificationPerformanceMeasure; import ai.libs.jaicore.ml.core.EScikitLearnProblemType; import ai.libs.jaicore.ml.core.dataset.splitter.RandomHoldoutSplitter; import ai.libs.jaicore.ml.core.filter.FilterBasedDatasetSplitter; import ai.libs.jaicore.ml.core.filter.sampling.inmemory.factories.LabelBasedStratifiedSamplingFactory; import ai.libs.jaicore.ml.regression.loss.ERegressionPerformanceMeasure; import ai.libs.jaicore.ml.regression.loss.ERulPerformanceMeasure; import ai.libs.jaicore.ml.scikitwrapper.IScikitLearnWrapper; import ai.libs.mlplan.core.ILearnerFactory; import ai.libs.mlplan.core.IProblemType; import ai.libs.mlplan.core.PipelineValidityCheckingNodeEvaluator; public enum EMLPlanScikitLearnProblemType implements IProblemType<IScikitLearnWrapper> { CLASSIFICATION_MULTICLASS(EScikitLearnProblemType.CLASSIFICATION, "automl/searchmodels/sklearn/sklearn-classification.json", "conf/mlplan-sklearn.json", "automl/searchmodels/sklearn/sklearn-preferenceList.txt", "conf/sklearn-preferenceList.txt", "AbstractClassifier", "BasicClassifier", EClassificationPerformanceMeasure.ERRORRATE, EClassificationPerformanceMeasure.ERRORRATE, new ScikitLearnClassifierFactory(), new FilterBasedDatasetSplitter<>(new LabelBasedStratifiedSamplingFactory<>()), new ScikitLearnPipelineValidityCheckingNodeEvaluator()), // CLASSIFICATION_MULTICLASS_UNLIMITED_LENGTH_PIPELINES(EScikitLearnProblemType.CLASSIFICATION, "automl/searchmodels/sklearn/sklearn-classification-ul.json", EMLPlanScikitLearnProblemType.CLASSIFICATION_MULTICLASS.getSearchSpaceConfigFromFileSystem(), EMLPlanScikitLearnProblemType.CLASSIFICATION_MULTICLASS.getPreferredComponentListFromResource(), EMLPlanScikitLearnProblemType.CLASSIFICATION_MULTICLASS.getPreferredComponentListFromFileSystem(), EMLPlanScikitLearnProblemType.CLASSIFICATION_MULTICLASS.getRequestedInterface(), EMLPlanScikitLearnProblemType.CLASSIFICATION_MULTICLASS.getRequestedBasicProblemInterface(), EClassificationPerformanceMeasure.ERRORRATE, EClassificationPerformanceMeasure.ERRORRATE, CLASSIFICATION_MULTICLASS.getLearnerFactory(), CLASSIFICATION_MULTICLASS.getSearchSelectionDatasetSplitter(), CLASSIFICATION_MULTICLASS.getValidityCheckingNodeEvaluator()), // REGRESSION(EScikitLearnProblemType.REGRESSION, "automl/searchmodels/sklearn/sklearn-regression.json", EMLPlanScikitLearnProblemType.CLASSIFICATION_MULTICLASS.getSearchSpaceConfigFromFileSystem(), EMLPlanScikitLearnProblemType.CLASSIFICATION_MULTICLASS.getPreferredComponentListFromResource(), EMLPlanScikitLearnProblemType.CLASSIFICATION_MULTICLASS.getPreferredComponentListFromFileSystem(), "AbstractRegressor", "BasicRegressor", ERegressionPerformanceMeasure.RMSE, ERegressionPerformanceMeasure.RMSE, new ScikitLearnRegressorFactory(), new RandomHoldoutSplitter<>(new Random(0), 0.7), null), // RUL(EScikitLearnProblemType.TIME_SERIES_REGRESSION, "automl/searchmodels/sklearn/sklearn-rul.json", "conf/sklearn-rul.json", null, "conf/sklearn-preferenceList.txt", "MLPipeline", "BasicRegressor", ERulPerformanceMeasure.ASYMMETRIC_LOSS, ERulPerformanceMeasure.ASYMMETRIC_LOSS, new ScikitLearnTimeSeriesRegressionFactory(), 
EMLPlanScikitLearnProblemType.REGRESSION.getSearchSelectionDatasetSplitter(), null); private final EScikitLearnProblemType problemType; private final String searchSpaceConfigFileFromResource; private final String systemSearchSpaceConfigFromFileSystem; private final String preferedComponentsListFromResource; private final String preferedComponentsListFromFileSystem; private final String requestedHascoInterface; private final String requestedBasicProblemInterface; private final IDeterministicPredictionPerformanceMeasure<?, ?> performanceMetricForSearchPhase; private final IDeterministicPredictionPerformanceMeasure<?, ?> performanceMetricForSelectionPhase; private final ILearnerFactory<IScikitLearnWrapper> learnerFactory; private final IFoldSizeConfigurableRandomDatasetSplitter<ILabeledDataset<?>> searchSelectionDatasetSplitter; private PipelineValidityCheckingNodeEvaluator validityCheckingNoteEvaluator; private EMLPlanScikitLearnProblemType(final EScikitLearnProblemType problemType, final String searchSpaceConfigFileFromResource, final String systemSearchSpaceConfigFromFileSystem, final String preferedComponentsListFromResource, final String preferedComponentsListFromFileSystem, final String requestedHascoInterface, final String requestedBasicProblemInterface, final IDeterministicPredictionPerformanceMeasure<?, ?> performanceMetricForSearchPhase, final IDeterministicPredictionPerformanceMeasure<?, ?> performanceMetricForSelectionPhase, final ILearnerFactory<IScikitLearnWrapper> learnerFactory, final IFoldSizeConfigurableRandomDatasetSplitter<ILabeledDataset<?>> searchSelectionDatasetSplitter, final PipelineValidityCheckingNodeEvaluator validityCheckingNodeEvaluator) { this.problemType = problemType; this.searchSpaceConfigFileFromResource = searchSpaceConfigFileFromResource; this.systemSearchSpaceConfigFromFileSystem = systemSearchSpaceConfigFromFileSystem; this.preferedComponentsListFromResource = preferedComponentsListFromResource; this.preferedComponentsListFromFileSystem = preferedComponentsListFromFileSystem; this.requestedHascoInterface = requestedHascoInterface; this.requestedBasicProblemInterface = requestedBasicProblemInterface; this.performanceMetricForSearchPhase = performanceMetricForSearchPhase; this.performanceMetricForSelectionPhase = performanceMetricForSelectionPhase; this.learnerFactory = learnerFactory; this.searchSelectionDatasetSplitter = searchSelectionDatasetSplitter; this.validityCheckingNoteEvaluator = validityCheckingNodeEvaluator; } public EScikitLearnProblemType getSkLearnProblemType() { return this.problemType; } @Override public String getSearchSpaceConfigFileFromResource() { return this.searchSpaceConfigFileFromResource; } @Override public String getSearchSpaceConfigFromFileSystem() { return this.systemSearchSpaceConfigFromFileSystem; } @Override public String getPreferredComponentListFromResource() { return this.preferedComponentsListFromResource; } @Override public String getPreferredComponentListFromFileSystem() { return this.preferedComponentsListFromFileSystem; } @Override public String getRequestedInterface() { return this.requestedHascoInterface; } public String getRequestedBasicProblemInterface() { return this.requestedBasicProblemInterface; } @Override public String getLastHASCOMethodPriorToParameterRefinementOfBareLearner() { return this.getPreferredComponentName(this.requestedHascoInterface); } @Override public String getLastHASCOMethodPriorToParameterRefinementOfPipeline() { return this.getPreferredComponentName(this.requestedBasicProblemInterface); 
} private String getPreferredComponentName(final String requestedInterface) { return "resolve" + requestedInterface + "With"; } @Override public IDeterministicPredictionPerformanceMeasure<?, ?> getPerformanceMetricForSearchPhase() { return this.performanceMetricForSearchPhase; } @Override public IDeterministicPredictionPerformanceMeasure<?, ?> getPerformanceMetricForSelectionPhase() { return this.performanceMetricForSelectionPhase; } @Override public String getName() { return this.getClass().getSimpleName() + "." + this.toString(); } @Override public ILearnerFactory<IScikitLearnWrapper> getLearnerFactory() { return this.learnerFactory; } @Override public IFoldSizeConfigurableRandomDatasetSplitter<ILabeledDataset<?>> getSearchSelectionDatasetSplitter() { return this.searchSelectionDatasetSplitter; } @Override public PipelineValidityCheckingNodeEvaluator getValidityCheckingNodeEvaluator() { return this.validityCheckingNoteEvaluator; } }
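The HASCO method names above are derived purely from strings; the following small illustration (the sketch class name is an assumption) prints what the multiclass problem type reports.

// Illustration of the string-based accessors of EMLPlanScikitLearnProblemType.
import ai.libs.mlplan.sklearn.EMLPlanScikitLearnProblemType;

public class ProblemTypeSketch {
    public static void main(final String[] args) {
        EMLPlanScikitLearnProblemType t = EMLPlanScikitLearnProblemType.CLASSIFICATION_MULTICLASS;
        System.out.println(t.getName());                 // EMLPlanScikitLearnProblemType.CLASSIFICATION_MULTICLASS
        System.out.println(t.getRequestedInterface());   // AbstractClassifier
        System.out.println(t.getLastHASCOMethodPriorToParameterRefinementOfBareLearner()); // resolveAbstractClassifierWith
        System.out.println(t.getLastHASCOMethodPriorToParameterRefinementOfPipeline());    // resolveBasicClassifierWith
    }
}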
0
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan/sklearn/MLPlan4ScikitLearn.java
package ai.libs.mlplan.sklearn; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import ai.libs.jaicore.ml.scikitwrapper.IScikitLearnWrapper; import ai.libs.mlplan.core.MLPlan; import ai.libs.mlplan.sklearn.builder.MLPlanScikitLearnBuilder; public class MLPlan4ScikitLearn extends MLPlan<IScikitLearnWrapper> { public MLPlan4ScikitLearn(final MLPlanScikitLearnBuilder builder, final ILabeledDataset<?> data) { super(builder, data); } }
0
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan/sklearn/MLPlanScikitLearnClassifierConfig.java
package ai.libs.mlplan.sklearn; import org.aeonbits.owner.Config.LoadPolicy; import org.aeonbits.owner.Config.LoadType; import org.aeonbits.owner.Config.Sources; import ai.libs.hasco.twophase.TwoPhaseHASCOConfig; @LoadPolicy(LoadType.MERGE) @Sources({ "file:conf/hasco/hasco.properties", "file:conf/mlplan/scikitlearn.properties" }) public interface MLPlanScikitLearnClassifierConfig extends TwoPhaseHASCOConfig { }
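A short sketch of how such an OWNER config interface is typically instantiated, mirroring the ConfigFactory.create call used in AScikitLearnLearnerFactory above; the sketch class is an assumption.

// Hypothetical loading sketch for MLPlanScikitLearnClassifierConfig via the OWNER library.
import org.aeonbits.owner.ConfigFactory;
import ai.libs.mlplan.sklearn.MLPlanScikitLearnClassifierConfig;

public class ConfigSketch {
    public static void main(final String[] args) {
        // Values are merged from the two property files declared in @Sources, where present.
        MLPlanScikitLearnClassifierConfig config = ConfigFactory.create(MLPlanScikitLearnClassifierConfig.class);
        System.out.println(config);
    }
}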
0
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan/sklearn/ScikitLearnClassifierFactory.java
package ai.libs.mlplan.sklearn; import java.io.IOException; import ai.libs.jaicore.ml.scikitwrapper.IScikitLearnWrapper; import ai.libs.jaicore.ml.scikitwrapper.simple.SimpleScikitLearnClassifier; public class ScikitLearnClassifierFactory extends ATwoStepPipelineScikitLearnFactory { public ScikitLearnClassifierFactory() { super("classifier"); } @Override public IScikitLearnWrapper getScikitLearnWrapper(final String constructionString, final String imports) throws IOException, InterruptedException { return new SimpleScikitLearnClassifier(constructionString, imports); } }
0
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan/sklearn/ScikitLearnPipelineValidityCheckingNodeEvaluator.java
package ai.libs.mlplan.sklearn; import java.util.Collection; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import org.api4.java.common.control.ILoggingCustomizable; import org.api4.java.datastructure.graph.ILabeledPath; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.core.HASCOUtil; import ai.libs.jaicore.components.api.IComponentInstance; import ai.libs.jaicore.components.model.Component; import ai.libs.jaicore.components.model.ComponentInstance; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNode; import ai.libs.jaicore.search.algorithms.standard.bestfirst.exceptions.ControlledNodeEvaluationException; import ai.libs.mlplan.core.PipelineValidityCheckingNodeEvaluator; public class ScikitLearnPipelineValidityCheckingNodeEvaluator extends PipelineValidityCheckingNodeEvaluator implements ILoggingCustomizable { private Logger logger = LoggerFactory.getLogger(ScikitLearnPipelineValidityCheckingNodeEvaluator.class); public ScikitLearnPipelineValidityCheckingNodeEvaluator() { super(); } public ScikitLearnPipelineValidityCheckingNodeEvaluator(final Collection<Component> components, final ILabeledDataset<?> data) { super(components, data); } @Override public Double evaluate(final ILabeledPath<TFDNode, String> path) throws ControlledNodeEvaluationException { if (!this.propertiesDetermined) { this.extractDatasetProperties(); } /* get partial component */ ComponentInstance instance = HASCOUtil.getSolutionCompositionFromState(this.getComponents(), path.getHead().getState(), false); if (instance != null) { /* check invalid classifiers for this kind of dataset */ IComponentInstance classifier; if (instance.getComponent().getName().toLowerCase().contains("pipeline")) { classifier = instance.getSatisfactionOfRequiredInterface("classifier").iterator().next(); } else { classifier = instance; } if (classifier != null) { this.checkValidity(classifier); } } return null; } private void checkValidity(final IComponentInstance classifier) throws ControlledNodeEvaluationException { String classifierName = classifier.getComponent().getName().toLowerCase(); if (this.containsNegativeValues && classifierName.matches("(.*)(multinomialnb)(.*)")) { throw new ControlledNodeEvaluationException("Negative numeric attribute values are not supported by the classifier."); } } @Override public String getLoggerName() { return this.logger.getName(); } @Override public void setLoggerName(final String name) { this.logger = LoggerFactory.getLogger(name); } }
0
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan/sklearn/ScikitLearnRegressorFactory.java
package ai.libs.mlplan.sklearn; import java.io.IOException; import ai.libs.jaicore.ml.scikitwrapper.IScikitLearnWrapper; import ai.libs.jaicore.ml.scikitwrapper.simple.SimpleScikitLearnRegressor; public class ScikitLearnRegressorFactory extends ATwoStepPipelineScikitLearnFactory { public ScikitLearnRegressorFactory() { super("regressor"); } @Override public IScikitLearnWrapper getScikitLearnWrapper(final String constructionString, final String imports) throws IOException, InterruptedException { return new SimpleScikitLearnRegressor(constructionString, imports); } }
0
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan/sklearn/ScikitLearnTimeSeriesRegressionFactory.java
package ai.libs.mlplan.sklearn; import java.io.IOException; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.StringJoiner; import java.util.stream.Collectors; import org.api4.java.ai.ml.core.evaluation.IPrediction; import org.api4.java.ai.ml.core.evaluation.IPredictionBatch; import ai.libs.jaicore.components.api.IComponentInstance; import ai.libs.jaicore.components.model.ComponentInstance; import ai.libs.jaicore.ml.scikitwrapper.AScikitLearnWrapper; import ai.libs.jaicore.ml.scikitwrapper.ScikitLearnTimeSeriesRegressionWrapper; public class ScikitLearnTimeSeriesRegressionFactory extends AScikitLearnLearnerFactory { public ScikitLearnTimeSeriesRegressionFactory() { super(); } @Override public String getPipelineBuildString(final IComponentInstance groundComponent, final Set<String> importSet) { StringBuilder sb = new StringBuilder(); List<IComponentInstance> timeseriesFeatureGenerator = groundComponent.getSatisfactionOfRequiredInterface("timeseries_feature_generator"); sb.append(this.getTimeseriesConstructionInstruction(timeseriesFeatureGenerator, importSet)); sb.append(","); sb.append(this.extractSKLearnConstructInstruction(groundComponent.getSatisfactionOfRequiredInterface("regressor").iterator().next(), importSet)); return sb.toString(); } private String getTimeseriesConstructionInstruction(final List<IComponentInstance> timeseriesComponentInstances, final Set<String> importSet) { StringJoiner stringJoiner = new StringJoiner(","); int numberOfComponentInstancesFound = 0; for (IComponentInstance componentInstance : timeseriesComponentInstances.stream().sorted((o1, o2) -> o1.getComponent().getName().compareTo(o2.getComponent().getName())).collect(Collectors.toList())) { if (componentInstance.getComponent().getName().endsWith("UniToMultivariateNumpyBasedFeatureGenerator")) { for (IComponentInstance satCI : componentInstance.getSatisfactionOfRequiredInterface("univariate_ts_feature_generator").stream().sorted((o1, o2) -> o1.getComponent().getName().compareTo(o2.getComponent().getName())) .collect(Collectors.toList())) { Map<String, List<IComponentInstance>> satisfactionOfRequiredInterfaces = new HashMap<>(); satisfactionOfRequiredInterfaces.put("univariate_ts_feature_generator", Arrays.asList(satCI)); IComponentInstance newCI = new ComponentInstance(componentInstance.getComponent(), componentInstance.getParameterValues(), satisfactionOfRequiredInterfaces); stringJoiner.add(this.extractSKLearnConstructInstruction(newCI, importSet)); numberOfComponentInstancesFound++; } } else { numberOfComponentInstancesFound++; stringJoiner.add(this.extractSKLearnConstructInstruction(componentInstance, importSet)); } } if (numberOfComponentInstancesFound > 1) { return "make_union(" + stringJoiner.toString() + ")"; } return stringJoiner.toString(); } @Override public AScikitLearnWrapper<IPrediction, IPredictionBatch> getScikitLearnWrapper(final String constructionString, final String imports) throws IOException, InterruptedException { return new ScikitLearnTimeSeriesRegressionWrapper<>(constructionString, imports); } }
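A hedged sketch of the build-string logic above; the ground pipeline instance is assumed to be supplied by ML-Plan, and the example only surfaces the string that getPipelineBuildString assembles (a make_union(...) expression when several feature generators are present).

// Hypothetical sketch of the time-series pipeline build string (assumes a ground component instance).
import java.util.HashSet;
import java.util.Set;
import ai.libs.jaicore.components.api.IComponentInstance;
import ai.libs.mlplan.sklearn.ScikitLearnTimeSeriesRegressionFactory;

public class RulPipelineSketch {
    public static String buildString(final IComponentInstance groundPipeline) {
        Set<String> imports = new HashSet<>();
        // Yields e.g. "make_union(Gen1(),Gen2()),SomeRegressor(...)" and fills `imports`
        // with the corresponding python import lines.
        return new ScikitLearnTimeSeriesRegressionFactory().getPipelineBuildString(groundPipeline, imports);
    }
}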
0
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan/sklearn
java-sources/ai/libs/mlplan-sklearn/0.2.7/ai/libs/mlplan/sklearn/builder/MLPlanScikitLearnBuilder.java
package ai.libs.mlplan.sklearn.builder; import java.io.IOException; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance; import org.api4.java.algorithm.Timeout; import ai.libs.jaicore.ml.core.evaluation.evaluator.factory.AMonteCarloCrossValidationBasedEvaluatorFactory; import ai.libs.jaicore.ml.core.evaluation.evaluator.factory.ISupervisedLearnerEvaluatorFactory; import ai.libs.jaicore.ml.scikitwrapper.AScikitLearnWrapper; import ai.libs.jaicore.ml.scikitwrapper.IScikitLearnWrapper; import ai.libs.mlplan.core.AMLPlanBuilder; import ai.libs.mlplan.core.IProblemType; import ai.libs.mlplan.core.MLPlan; import ai.libs.mlplan.sklearn.AScikitLearnLearnerFactory; import ai.libs.mlplan.sklearn.EMLPlanScikitLearnProblemType; import ai.libs.python.IPythonConfig; import ai.libs.python.PythonRequirementDefinition; public class MLPlanScikitLearnBuilder extends AMLPlanBuilder<IScikitLearnWrapper, MLPlanScikitLearnBuilder> { private IPythonConfig pythonConfig; private final boolean skipSetupCheck; public static MLPlanScikitLearnBuilder forClassification() throws IOException { return new MLPlanScikitLearnBuilder(EMLPlanScikitLearnProblemType.CLASSIFICATION_MULTICLASS); } public static MLPlanScikitLearnBuilder forClassificationWithUnlimitedLength() throws IOException { return new MLPlanScikitLearnBuilder(EMLPlanScikitLearnProblemType.CLASSIFICATION_MULTICLASS_UNLIMITED_LENGTH_PIPELINES); } public static MLPlanScikitLearnBuilder forRegression() throws IOException { return new MLPlanScikitLearnBuilder(EMLPlanScikitLearnProblemType.REGRESSION); } public static MLPlanScikitLearnBuilder forRUL() throws IOException { return new MLPlanScikitLearnBuilder(EMLPlanScikitLearnProblemType.RUL); } /** * Creates a new ML-Plan Builder for scikit-learn. * * @throws IOException * Thrown if configuration files cannot be read. */ protected MLPlanScikitLearnBuilder(final EMLPlanScikitLearnProblemType problemType) throws IOException { this(problemType, false); } /** * Creates a new ML-Plan Builder for scikit-learn. * * @param skipSetupCheck * Flag whether to skip the system's setup check, which examines whether the operating system has python installed in the required version and all the required python modules are installed. * @throws IOException * Thrown if configuration files cannot be read. */ public MLPlanScikitLearnBuilder(final EMLPlanScikitLearnProblemType problemType, final boolean skipSetupCheck) throws IOException { super(problemType); this.skipSetupCheck = skipSetupCheck; } @Override public MLPlanScikitLearnBuilder withProblemType(final IProblemType<IScikitLearnWrapper> problemType) throws IOException { super.withProblemType(problemType); return this.getSelf(); } @Override public MLPlanScikitLearnBuilder withSeed(final long seed) { super.withSeed(seed); if (this.getLearnerFactory() != null) { this.getLearnerFactory().setSeed(seed); } return this.getSelf(); } @Override public MLPlanScikitLearnBuilder withCandidateEvaluationTimeOut(final Timeout timeout) { super.withCandidateEvaluationTimeOut(timeout); if (this.getLearnerFactory() != null) { this.getLearnerFactory().setTimeout(timeout); } return this.getSelf(); } @Override public AScikitLearnLearnerFactory getLearnerFactory() { return (AScikitLearnLearnerFactory) super.getLearnerFactory(); } @Override public MLPlanScikitLearnBuilder getSelf() { return this; } private void setDeterministicDatasetSplitter(final ISupervisedLearnerEvaluatorFactory<ILabeledInstance, ILabeledDataset<? 
extends ILabeledInstance>> factory) { if (factory instanceof AMonteCarloCrossValidationBasedEvaluatorFactory<?>) { ((AMonteCarloCrossValidationBasedEvaluatorFactory<?>) factory).withCacheSplitSets(true); } } @Override public MLPlan<IScikitLearnWrapper> build() throws InterruptedException { if (!this.skipSetupCheck) { new PythonRequirementDefinition(AScikitLearnWrapper.PYTHON_MINIMUM_REQUIRED_VERSION_REL, AScikitLearnWrapper.PYTHON_MINIMUM_REQUIRED_VERSION_MAJ, AScikitLearnWrapper.PYTHON_MINIMUM_REQUIRED_VERSION_MIN).check(this.pythonConfig); } this.setDeterministicDatasetSplitter(this.getLearnerEvaluationFactoryForSearchPhase()); this.setDeterministicDatasetSplitter(this.getLearnerEvaluationFactoryForSelectionPhase()); return super.build(); } }
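A hedged end-to-end sketch for the scikit-learn builder above; the ARFF path, seed, and timeouts are assumptions, and WekaInstances is used here merely as a convenient ILabeledDataset implementation.

// Hypothetical usage sketch for MLPlanScikitLearnBuilder (file name and budgets are assumptions).
import java.io.FileReader;
import java.util.concurrent.TimeUnit;
import org.api4.java.algorithm.Timeout;
import ai.libs.jaicore.ml.scikitwrapper.IScikitLearnWrapper;
import ai.libs.jaicore.ml.weka.dataset.WekaInstances;
import ai.libs.mlplan.core.MLPlan;
import ai.libs.mlplan.sklearn.builder.MLPlanScikitLearnBuilder;
import weka.core.Instances;

public class MLPlanSkLearnUsageSketch {
    public static void main(final String[] args) throws Exception {
        Instances data = new Instances(new FileReader("dataset.arff")); // assumed input file
        data.setClassIndex(data.numAttributes() - 1);
        MLPlanScikitLearnBuilder builder = MLPlanScikitLearnBuilder.forClassification();
        builder.withSeed(42); // propagated to the learner factory
        builder.withCandidateEvaluationTimeOut(new Timeout(60, TimeUnit.SECONDS)); // per-candidate budget
        builder.withDataset(new WekaInstances(data));
        MLPlan<IScikitLearnWrapper> mlplan = builder.build(); // also runs the python setup check
        mlplan.setTimeout(new Timeout(10, TimeUnit.MINUTES)); // assumed overall budget
        IScikitLearnWrapper bestLearner = mlplan.call();
        System.out.println("Selected pipeline: " + bestLearner);
    }
}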
0
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan/weka/EMLPlanWekaProblemType.java
package ai.libs.mlplan.weka; import java.util.Random; import org.api4.java.ai.ml.core.dataset.splitter.IFoldSizeConfigurableRandomDatasetSplitter; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import org.api4.java.ai.ml.core.evaluation.supervised.loss.IDeterministicPredictionPerformanceMeasure; import ai.libs.jaicore.ml.classification.loss.dataset.EClassificationPerformanceMeasure; import ai.libs.jaicore.ml.core.dataset.splitter.RandomHoldoutSplitter; import ai.libs.jaicore.ml.core.filter.FilterBasedDatasetSplitter; import ai.libs.jaicore.ml.core.filter.sampling.inmemory.factories.LabelBasedStratifiedSamplingFactory; import ai.libs.jaicore.ml.regression.loss.dataset.RootMeanSquaredError; import ai.libs.jaicore.ml.weka.classification.learner.IWekaClassifier; import ai.libs.mlplan.core.ILearnerFactory; import ai.libs.mlplan.core.IProblemType; import ai.libs.mlplan.core.PipelineValidityCheckingNodeEvaluator; import ai.libs.mlplan.weka.weka.WekaPipelineFactory; import ai.libs.mlplan.weka.weka.WekaPipelineValidityCheckingNodeEvaluator; import ai.libs.mlplan.weka.weka.WekaRegressorFactory; public enum EMLPlanWekaProblemType implements IProblemType<IWekaClassifier> { CLASSIFICATION_MULTICLASS("automl/searchmodels/weka/weka-full.json", "conf/mlplan-weka.json", "mlplan/weka-preferenceList-classification.txt", "conf/preferenceList.txt", "AbstractClassifier", new WekaPipelineFactory(), EClassificationPerformanceMeasure.ERRORRATE, EClassificationPerformanceMeasure.ERRORRATE, new FilterBasedDatasetSplitter<>(new LabelBasedStratifiedSamplingFactory<>()), "Classifier"), // CLASSIFICATION_MULTICLASS_REDUCED("automl/searchmodels/weka/weka-reduced.json", EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS.getSearchSpaceConfigFromFileSystem(), EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS.getPreferredComponentListFromResource(), EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS.getPreferredComponentListFromFileSystem(), EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS.getRequestedInterface(), new WekaPipelineFactory(), EClassificationPerformanceMeasure.ERRORRATE, EClassificationPerformanceMeasure.ERRORRATE, new FilterBasedDatasetSplitter<>(new LabelBasedStratifiedSamplingFactory<>()), "Classifier"), // CLASSIFICATION_MULTICLASS_BASE("automl/searchmodels/weka/base/index.json", "conf/mlplan-weka.json", "mlplan/weka-preferenceList-classification.txt", "conf/preferenceList.txt", EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS.getRequestedInterface(), new WekaPipelineFactory(), EClassificationPerformanceMeasure.ERRORRATE, EClassificationPerformanceMeasure.ERRORRATE, new FilterBasedDatasetSplitter<>(new LabelBasedStratifiedSamplingFactory<>()), "Classifier"), // REGRESSION("automl/searchmodels/weka/weka-full-regression.json", EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS.getSearchSpaceConfigFromFileSystem(), "mlplan/weka-preferenceList-regression.txt", EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS.getPreferredComponentListFromFileSystem(), "AbstractRegressor", new WekaRegressorFactory(), new RootMeanSquaredError(), new RootMeanSquaredError(), new RandomHoldoutSplitter<>(new Random(0), .7), "Regressor"), // CLASSIFICATION_MULTICLASS_TINY("automl/searchmodels/weka/weka-small.json", EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS.getSearchSpaceConfigFromFileSystem(), "mlplan/weka-preferenceList-tiny.txt", EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS.getPreferredComponentListFromFileSystem(), EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS.getRequestedInterface(), 
EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS.getLearnerFactory(), EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS.getPerformanceMetricForSearchPhase(), EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS.getPerformanceMetricForSelectionPhase(), EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS.getSearchSelectionDatasetSplitter(), "Classifier"); private final String preferredComponentName; private final String searchSpaceConfigFileFromResource; private final String systemSearchSpaceConfigFromFileSystem; private final String preferedComponentsListFromResource; private final String preferedComponentsListFromFileSystem; private final String requestedHascoInterface; private final ILearnerFactory<IWekaClassifier> learnerFactory; private final IDeterministicPredictionPerformanceMeasure<?, ?> performanceMetricForSearchPhase; private final IDeterministicPredictionPerformanceMeasure<?, ?> performanceMetricForSelectionPhase; private final IFoldSizeConfigurableRandomDatasetSplitter<ILabeledDataset<?>> searchSelectionDatasetSplitter; private EMLPlanWekaProblemType(final String searchSpaceConfigFileFromResource, final String systemSearchSpaceConfigFromFileSystem, final String preferedComponentsListFromResource, final String preferedComponentsListFromFileSystem, final String requestedHascoInterface, final ILearnerFactory<IWekaClassifier> learnerFactory, final IDeterministicPredictionPerformanceMeasure<?, ?> performanceMetricForSearchPhase, final IDeterministicPredictionPerformanceMeasure<?, ?> performanceMetricForSelectionPhase, final IFoldSizeConfigurableRandomDatasetSplitter<ILabeledDataset<?>> searchSelectionDatasetSplitter, final String preferredComponentName) { this.searchSpaceConfigFileFromResource = searchSpaceConfigFileFromResource; this.systemSearchSpaceConfigFromFileSystem = systemSearchSpaceConfigFromFileSystem; this.preferedComponentsListFromResource = preferedComponentsListFromResource; this.preferedComponentsListFromFileSystem = preferedComponentsListFromFileSystem; this.requestedHascoInterface = requestedHascoInterface; this.learnerFactory = learnerFactory; this.performanceMetricForSearchPhase = performanceMetricForSearchPhase; this.performanceMetricForSelectionPhase = performanceMetricForSelectionPhase; this.searchSelectionDatasetSplitter = searchSelectionDatasetSplitter; this.preferredComponentName = preferredComponentName; } @Override public String getSearchSpaceConfigFileFromResource() { return this.searchSpaceConfigFileFromResource; } @Override public String getSearchSpaceConfigFromFileSystem() { return this.systemSearchSpaceConfigFromFileSystem; } @Override public String getPreferredComponentListFromResource() { return this.preferedComponentsListFromResource; } @Override public String getPreferredComponentListFromFileSystem() { return this.preferedComponentsListFromFileSystem; } @Override public String getRequestedInterface() { return this.requestedHascoInterface; } @Override public String getLastHASCOMethodPriorToParameterRefinementOfBareLearner() { return this.getPreferredComponentName(this.requestedHascoInterface); } @Override public String getLastHASCOMethodPriorToParameterRefinementOfPipeline() { return this.getPreferredComponentName(this.preferredComponentName); } private String getPreferredComponentName(final String requestedInterface) { return "resolve" + requestedInterface + "With"; } @Override public IDeterministicPredictionPerformanceMeasure<?, ?> getPerformanceMetricForSearchPhase() { return this.performanceMetricForSearchPhase; } @Override public 
IDeterministicPredictionPerformanceMeasure<?, ?> getPerformanceMetricForSelectionPhase() { return this.performanceMetricForSelectionPhase; } @Override public String getName() { return this.getClass().getSimpleName() + "." + this.toString(); } @Override public IFoldSizeConfigurableRandomDatasetSplitter<ILabeledDataset<?>> getSearchSelectionDatasetSplitter() { return this.searchSelectionDatasetSplitter; } @Override public ILearnerFactory<IWekaClassifier> getLearnerFactory() { return this.learnerFactory; } @Override public PipelineValidityCheckingNodeEvaluator getValidityCheckingNodeEvaluator() { return new WekaPipelineValidityCheckingNodeEvaluator(); } }
0
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan/weka/MLPlan4Weka.java
package ai.libs.mlplan.weka; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import ai.libs.jaicore.ml.weka.classification.learner.IWekaClassifier; import ai.libs.mlplan.core.MLPlan; public class MLPlan4Weka extends MLPlan<IWekaClassifier> { MLPlan4Weka(final MLPlanWekaBuilder builder, final ILabeledDataset<?> data) { super(builder, data); } }
0
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan/weka/MLPlanWekaBuilder.java
package ai.libs.mlplan.weka; import java.io.IOException; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.twophase.TwoPhaseHASCOConfig; import ai.libs.jaicore.ml.core.evaluation.evaluator.factory.LearningCurveExtrapolationEvaluatorFactory; import ai.libs.jaicore.ml.core.evaluation.evaluator.factory.MonteCarloCrossValidationEvaluatorFactory; import ai.libs.jaicore.ml.core.filter.sampling.inmemory.ASamplingAlgorithm; import ai.libs.jaicore.ml.core.filter.sampling.inmemory.factories.interfaces.ISamplingAlgorithmFactory; import ai.libs.jaicore.ml.functionprediction.learner.learningcurveextrapolation.LearningCurveExtrapolationMethod; import ai.libs.jaicore.ml.weka.classification.learner.IWekaClassifier; import ai.libs.jaicore.ml.weka.dataset.WekaInstances; import ai.libs.mlplan.core.AMLPlanBuilder; public class MLPlanWekaBuilder extends AMLPlanBuilder<IWekaClassifier, MLPlanWekaBuilder> { private Logger logger = LoggerFactory.getLogger(MLPlanWekaBuilder.class); public static MLPlanWekaBuilder forClassification() throws IOException { return new MLPlanWekaBuilder(EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS); } public static MLPlanWekaBuilder forClassificationReduced() throws IOException { return new MLPlanWekaBuilder(EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS_REDUCED); } public static MLPlanWekaBuilder forRegression() throws IOException { return new MLPlanWekaBuilder(EMLPlanWekaProblemType.REGRESSION); } public static MLPlanWekaBuilder forClassificationWithTinySearchSpace() throws IOException { return new MLPlanWekaBuilder(EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS_TINY); } public MLPlanWekaBuilder() throws IOException { this(EMLPlanWekaProblemType.CLASSIFICATION_MULTICLASS); } public MLPlanWekaBuilder(final EMLPlanWekaProblemType problemType) throws IOException { super(problemType); } /** * Allows using learning curve extrapolation for predicting the quality of candidate solutions. * @param anchorpoints The anchor points for which samples are actually evaluated on the respective data. * @param subsamplingAlgorithmFactory The factory for the sampling algorithm that is to be used to randomly draw training instances. * @param trainSplitForAnchorpointsMeasurement The training fold size for measuring the anchor points. * @param extrapolationMethod The method to be used in order to extrapolate the learning curve from the anchor points. */ public void withLearningCurveExtrapolationEvaluation(final int[] anchorpoints, final ISamplingAlgorithmFactory<ILabeledDataset<?>, ? extends ASamplingAlgorithm<ILabeledDataset<?>>> subsamplingAlgorithmFactory, final double trainSplitForAnchorpointsMeasurement, final LearningCurveExtrapolationMethod extrapolationMethod) { this.withSearchPhaseEvaluatorFactory(new LearningCurveExtrapolationEvaluatorFactory(anchorpoints, subsamplingAlgorithmFactory, trainSplitForAnchorpointsMeasurement, extrapolationMethod)); this.withSelectionPhaseEvaluatorFactory(new MonteCarloCrossValidationEvaluatorFactory().withNumMCIterations(3).withTrainFoldSize(.7)); this.getAlgorithmConfig().setProperty(TwoPhaseHASCOConfig.K_BLOWUP_SELECTION, "" + 10); throw new UnsupportedOperationException("Learning Curve Prediction based ML-Plan runs are not supported in this release. They will be activated again in the upcoming release."); } @Override public MLPlanWekaBuilder withDataset(final ILabeledDataset<?> dataset) { WekaInstances instances = dataset instanceof WekaInstances ?
(WekaInstances) dataset : new WekaInstances(dataset); super.withDataset(instances); this.logger.info("Setting dataset as WekaInstances object."); return this.getSelf(); } @Override public MLPlanWekaBuilder getSelf() { return this; } @Override public MLPlan4Weka build() { this.checkPreconditionsForInitialization(); return new MLPlan4Weka(this, this.getDataset()); } }
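A minimal usage sketch for the WEKA builder above; the ARFF path and the timeout are assumptions, everything else follows the builder and runner defined in this dump.

// Hypothetical usage sketch for MLPlanWekaBuilder (file name and budget are assumptions).
import java.io.FileReader;
import java.util.concurrent.TimeUnit;
import org.api4.java.algorithm.Timeout;
import ai.libs.jaicore.ml.weka.classification.learner.IWekaClassifier;
import ai.libs.jaicore.ml.weka.dataset.WekaInstances;
import ai.libs.mlplan.weka.MLPlan4Weka;
import ai.libs.mlplan.weka.MLPlanWekaBuilder;
import weka.core.Instances;

public class MLPlanWekaUsageSketch {
    public static void main(final String[] args) throws Exception {
        Instances data = new Instances(new FileReader("dataset.arff")); // assumed input file
        data.setClassIndex(data.numAttributes() - 1);
        MLPlanWekaBuilder builder = MLPlanWekaBuilder.forClassification();
        builder.withDataset(new WekaInstances(data)); // the builder converts other datasets to WekaInstances anyway
        MLPlan4Weka mlplan = builder.build();
        mlplan.setTimeout(new Timeout(5, TimeUnit.MINUTES)); // assumed overall budget
        IWekaClassifier best = mlplan.call();
        System.out.println("Chosen classifier: " + best.getClassifier());
    }
}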
0
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan/weka/MLPlanWekaClassifier.java
package ai.libs.mlplan.weka; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Enumeration; import java.util.List; import java.util.Map; import java.util.Objects; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledInstance; import org.api4.java.ai.ml.core.evaluation.IPrediction; import org.api4.java.ai.ml.core.evaluation.IPredictionBatch; import org.api4.java.ai.ml.core.exception.LearnerConfigurationFailedException; import org.api4.java.ai.ml.core.exception.PredictionException; import org.api4.java.ai.ml.core.exception.TrainingException; import org.api4.java.algorithm.Timeout; import org.api4.java.algorithm.exceptions.AlgorithmException; import org.api4.java.algorithm.exceptions.AlgorithmExecutionCanceledException; import org.api4.java.algorithm.exceptions.AlgorithmTimeoutedException; import org.api4.java.common.control.ILoggingCustomizable; import org.api4.java.common.event.IEventEmitter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.components.api.IComponent; import ai.libs.jaicore.ml.weka.classification.learner.IWekaClassifier; import ai.libs.jaicore.ml.weka.dataset.WekaInstances; import ai.libs.mlplan.core.MLPlan; import ai.libs.mlplan.multiclass.IMLPlanClassifierConfig; import weka.classifiers.Classifier; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.CapabilitiesHandler; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; /** * A WEKA classifier wrapping the functionality of ML-Plan where the constructed object is a WEKA classifier. * * It implements the algorithm interface, returning itself (with modified state) as the output. * * @author wever, fmohr * */ @SuppressWarnings("serial") public class MLPlanWekaClassifier implements Classifier, CapabilitiesHandler, OptionHandler, ILoggingCustomizable, IWekaClassifier, IEventEmitter<Object> { /* Logger for controlled output. */ private transient Logger logger = LoggerFactory.getLogger(MLPlanWekaClassifier.class); private String loggerName; /* MLPlan Builder and the instance of mlplan */ private final transient MLPlanWekaBuilder builder; /* The timeout for selecting a classifier. */ private Timeout timeout; /* The output of mlplan, i.e., the selected classifier and the internal validation error measured on the given data. */ private IWekaClassifier classifierFoundByMLPlan; private double internalValidationErrorOfSelectedClassifier; private final transient List<Object> listeners = new ArrayList<>(); public MLPlanWekaClassifier(final MLPlanWekaBuilder builder) { this.builder = builder; this.timeout = builder.getTimeOut(); } @Override public void buildClassifier(final Instances data) throws Exception { this.fit(new WekaInstances(data)); } public double[] classifyInstances(final Instances instances) throws PredictionException, InterruptedException { double[] predictionsAsDoubles = new double[instances.size()]; List<?
extends IPrediction> predictions = this.classifierFoundByMLPlan.predict(new WekaInstances(instances)).getPredictions(); for (int i = 0; i < instances.size(); i++) { predictionsAsDoubles[i] = (double) predictions.get(i).getPrediction(); } return predictionsAsDoubles; } @Override public double classifyInstance(final Instance instance) throws Exception { if (this.classifierFoundByMLPlan == null) { throw new IllegalStateException("Classifier has not been built yet."); } return this.classifierFoundByMLPlan.getClassifier().classifyInstance(instance); } @Override public double[] distributionForInstance(final Instance instance) throws Exception { if (this.classifierFoundByMLPlan == null) { throw new IllegalStateException("Classifier has not been built yet."); } return this.classifierFoundByMLPlan.getClassifier().distributionForInstance(instance); } @Override public Capabilities getCapabilities() { Capabilities result = new Capabilities(this); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.STRING_ATTRIBUTES); result.enable(Capability.RELATIONAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(1); return result; } @Override public Enumeration<Option> listOptions() { /* As there are no options, simply return null. */ return null; } @Override public void setOptions(final String[] options) throws Exception { /* Intentionally left blank. */ } @Override public String[] getOptions() { /* As there are no options, simply return an empty array. */ return new String[] {}; } public void setTimeout(final Timeout timeout) { this.timeout = timeout; } public IMLPlanClassifierConfig getMLPlanConfig() { return this.builder.getAlgorithmConfig(); } public Collection<IComponent> getComponents() throws IOException { return this.builder.getComponents(); } /** * @return An object of the classifier ML-Plan has selected during the build. */ public Classifier getSelectedWekaClassifier() { return this.classifierFoundByMLPlan.getClassifier(); } /** * @return The internal validation error (during selection phase) of the selected classifier. */ public double getInternalValidationErrorOfSelectedClassifier() { return this.internalValidationErrorOfSelectedClassifier; } @Override public void setLoggerName(final String name) { this.loggerName = name; this.logger.info("Switching logger name to {}", name); this.logger = LoggerFactory.getLogger(name); this.logger.info("Switched ML-Plan logger to {}", name); } @Override public String getLoggerName() { return this.loggerName; } @Override public void registerListener(final Object listener) { this.listeners.add(listener); } @Override public IPrediction fitAndPredict(final ILabeledDataset<? extends ILabeledInstance> dTrain, final ILabeledInstance xTest) throws TrainingException, PredictionException, InterruptedException { this.fit(dTrain); return this.predict(xTest); } @Override public IPredictionBatch fitAndPredict(final ILabeledDataset<? extends ILabeledInstance> dTrain, final ILabeledInstance[] xTest) throws TrainingException, PredictionException, InterruptedException { this.fit(dTrain); return this.predict(xTest); } @Override public IPredictionBatch fitAndPredict(final ILabeledDataset<? 
extends ILabeledInstance> dTrain, final ILabeledDataset<? extends ILabeledInstance> dTest) throws TrainingException, PredictionException, InterruptedException { this.fit(dTrain); return this.predict(dTest); } @Override public void fit(final ILabeledDataset<? extends ILabeledInstance> dTrain) throws TrainingException, InterruptedException { Objects.requireNonNull(this.timeout, "Timeout must be set before running ML-Plan."); MLPlan<IWekaClassifier> mlplan = this.builder.withDataset(dTrain).build(); this.listeners.forEach(mlplan::registerListener); mlplan.setTimeout(this.timeout); if (this.loggerName != null) { mlplan.setLoggerName(this.loggerName + "." + "mlplan"); } try { this.classifierFoundByMLPlan = mlplan.call(); } catch (AlgorithmTimeoutedException | AlgorithmException | AlgorithmExecutionCanceledException e) { throw new TrainingException("Could not finish ML-Plan training.", e); } } @Override public IPrediction predict(final ILabeledInstance xTest) throws PredictionException, InterruptedException { return this.classifierFoundByMLPlan.predict(xTest); } @Override public IPredictionBatch predict(final ILabeledDataset<? extends ILabeledInstance> dTest) throws PredictionException, InterruptedException { return this.classifierFoundByMLPlan.predict(dTest); } @Override public IPredictionBatch predict(final ILabeledInstance[] dTest) throws PredictionException, InterruptedException { return this.classifierFoundByMLPlan.predict(dTest); } @Override public void setConfig(final Map<String, Object> config) throws LearnerConfigurationFailedException, InterruptedException { throw new UnsupportedOperationException(); } @Override public Map<String, Object> getConfig() { throw new UnsupportedOperationException(); } @Override public Classifier getClassifier() { return this.getSelectedWekaClassifier(); } }
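A hypothetical sketch showing how MLPlanWekaClassifier can be used as a drop-in WEKA classifier; the ARFF path and the timeout are assumptions.

// Hypothetical sketch: MLPlanWekaClassifier as an ordinary WEKA classifier.
import java.io.FileReader;
import java.util.concurrent.TimeUnit;
import org.api4.java.algorithm.Timeout;
import ai.libs.mlplan.weka.MLPlanWekaBuilder;
import ai.libs.mlplan.weka.MLPlanWekaClassifier;
import weka.core.Instances;

public class WekaDropInSketch {
    public static void main(final String[] args) throws Exception {
        Instances data = new Instances(new FileReader("dataset.arff")); // assumed input file
        data.setClassIndex(data.numAttributes() - 1);
        MLPlanWekaClassifier classifier = new MLPlanWekaClassifier(MLPlanWekaBuilder.forClassification());
        classifier.setTimeout(new Timeout(5, TimeUnit.MINUTES)); // must be set before building
        classifier.buildClassifier(data); // runs the full ML-Plan search internally
        System.out.println(classifier.classifyInstance(data.firstInstance()));
        System.out.println("Selected: " + classifier.getSelectedWekaClassifier());
    }
}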
0
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan/weka
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan/weka/weka/MLPipelineComponentInstanceFactory.java
package ai.libs.mlplan.weka.weka; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import org.aeonbits.owner.util.Collections; import org.apache.commons.lang3.math.NumberUtils; import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; import ai.libs.jaicore.components.api.IComponent; import ai.libs.jaicore.components.api.IComponentInstance; import ai.libs.jaicore.components.exceptions.ComponentNotFoundException; import ai.libs.jaicore.components.model.ComponentInstance; import ai.libs.jaicore.components.model.ComponentUtil; import ai.libs.jaicore.ml.weka.classification.pipeline.MLPipeline; import ai.libs.jaicore.ml.weka.classification.pipeline.SupervisedFilterSelector; import weka.core.OptionHandler; /** * A factory that provides the ability to convert a given MLPipeline into a ComponentInstance * * @author Helena Graf * */ public class MLPipelineComponentInstanceFactory { private Collection<IComponent> components; /** * Creates a new factory object using the given collection of components * * @param components * The components of the loaded configuration */ public MLPipelineComponentInstanceFactory(final Collection<? extends IComponent> components) { this.components = new ArrayList<>(components); } /** * Converts the given MLPipeline object to a ComponentInstance. * * @param pipeline * The pipeline to convert * @return The converted pipeline as a ComponentInstance * @throws ComponentNotFoundException * When the pipeline contains elements that are not in the loaded configuration */ @SuppressWarnings("unchecked") public ComponentInstance convertToComponentInstance(final MLPipeline pipeline) throws ComponentNotFoundException { if (pipeline.getPreprocessors() != null && !pipeline.getPreprocessors().isEmpty()) { // Pipeline has preprocessor SupervisedFilterSelector preprocessor = pipeline.getPreprocessors().get(0); // CI for searcher ComponentInstance searcherCI = this.getComponentInstanceForPipelineElement(preprocessor.getSearcher()); // CI for evaluator ComponentInstance evaluatorCI = this.getComponentInstanceForPipelineElement(preprocessor.getEvaluator()); // CI for preprocessor ComponentInstance preprocessorCI = this.getComponentInstanceForPipelineElement(preprocessor.getSelector(), new ImmutablePair<>("eval", evaluatorCI), new ImmutablePair<>("search", searcherCI)); // CI for classifier ComponentInstance classifierCI = this.getComponentInstanceForPipelineElement(pipeline.getBaseClassifier()); // Pipeline HashMap<String, List<IComponentInstance>> satisfactionOfRequiredInterfaces = new HashMap<>(); satisfactionOfRequiredInterfaces.put("preprocessor", Arrays.asList(preprocessorCI)); satisfactionOfRequiredInterfaces.put("classifier", Arrays.asList(classifierCI)); return new ComponentInstance(ComponentUtil.getComponentByName("pipeline", this.components), new HashMap<>(), satisfactionOfRequiredInterfaces); } else { // Pipeline is only classifier return new ComponentInstance(ComponentUtil.getComponentByName(pipeline.getBaseClassifier().getClass().getName(), this.components), this.getParametersForPipelineElement(pipeline.getBaseClassifier()), new HashMap<>()); } } /** * Converts a single element of the pipeline to a ComponentInstance, e.g. a classifier.
* * @param pipelineElement * The pipeline element to convert * @param requiredInterfaceSatisfactions * Pairs of required interface names and the component instances satisfying them; omitted for elements without required interfaces * @return The converted ComponentInstance * @throws ComponentNotFoundException * If the pipeline element contains elements that are not in the loaded configuration */ private ComponentInstance getComponentInstanceForPipelineElement(final Object pipelineElement, @SuppressWarnings("unchecked") final Pair<String, ComponentInstance>... requiredInterfaceSatisfactions) throws ComponentNotFoundException { HashMap<String, List<IComponentInstance>> satisfactionOfRequiredInterfaces = new HashMap<>(); Arrays.stream(requiredInterfaceSatisfactions).forEach(entry -> satisfactionOfRequiredInterfaces.put(entry.getKey(), Arrays.asList(entry.getValue()))); return new ComponentInstance(ComponentUtil.getComponentByName(pipelineElement.getClass().getName(), this.components), this.getParametersForPipelineElement(pipelineElement), satisfactionOfRequiredInterfaces); } /** * Gets the parameters for the given pipeline element as a map from parameter name to value * * @param classifier * The pipeline element for which to get the parameters * @return The parameter map */ private Map<String, String> getParametersForPipelineElement(final Object classifier) { if (classifier instanceof OptionHandler) { OptionHandler handler = (OptionHandler) classifier; HashMap<String, String> parametersWithValues = new HashMap<>(handler.getOptions().length); String optionName = null; boolean previousStringWasAValue = true; for (String option : handler.getOptions()) { if (option.equals("--")) { break; } if (previousStringWasAValue || (!(NumberUtils.isCreatable(option) || NumberUtils.isParsable(option)) && option.startsWith("-"))) { // Current String is option if (!previousStringWasAValue) { parametersWithValues.put(optionName, "true"); } previousStringWasAValue = false; optionName = option.equals("") ? option : option.substring(1, option.length()); } else { // Current String is value previousStringWasAValue = true; parametersWithValues.put(optionName, option); } } if (!previousStringWasAValue) { parametersWithValues.put(optionName, Collections.list(handler.getOptions()).get(handler.getOptions().length - 1)); } return parametersWithValues; } return new HashMap<>(0); } }
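Illustrative usage sketch for the factory above (not part of the original sources): converting a concrete MLPipeline back into a ComponentInstance. The MLPipeline constructor (searcher, evaluator, classifier) matches its use in WekaPipelineFactory further below; Ranker, InfoGainAttributeEval and J48 are standard WEKA classes chosen here purely as an example, and the component collection is assumed to stem from the loaded ML-Plan configuration.
import java.util.Collection;
import ai.libs.jaicore.components.api.IComponent;
import ai.libs.jaicore.components.model.ComponentInstance;
import ai.libs.jaicore.ml.weka.classification.pipeline.MLPipeline;
import weka.attributeSelection.InfoGainAttributeEval;
import weka.attributeSelection.Ranker;
import weka.classifiers.trees.J48;

public class MLPipelineConversionSketch {
	public static ComponentInstance convertExamplePipeline(final Collection<IComponent> components) throws Exception {
		// Hypothetical pipeline: InfoGain attribute selection followed by a J48 decision tree.
		MLPipeline pipeline = new MLPipeline(new Ranker(), new InfoGainAttributeEval(), new J48());
		MLPipelineComponentInstanceFactory factory = new MLPipelineComponentInstanceFactory(components);
		return factory.convertToComponentInstance(pipeline); // throws ComponentNotFoundException for unknown elements
	}
}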
0
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan/weka
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan/weka/weka/WekaMLPlanWekaClassifier.java
package ai.libs.mlplan.weka.weka; import java.io.IOException; import ai.libs.mlplan.weka.MLPlanWekaBuilder; import ai.libs.mlplan.weka.MLPlanWekaClassifier; public class WekaMLPlanWekaClassifier extends MLPlanWekaClassifier { /** * Automatically generated version uid for serialization. */ private static final long serialVersionUID = 985257791846750757L; public WekaMLPlanWekaClassifier(final MLPlanWekaBuilder builder) { super(builder); } public WekaMLPlanWekaClassifier() throws IOException { this(new MLPlanWekaBuilder()); } }
0
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan/weka
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan/weka/weka/WekaPipelineFactory.java
package ai.libs.mlplan.weka.weka; import java.util.LinkedList; import java.util.List; import java.util.Map.Entry; import java.util.Objects; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.components.api.IComponentInstance; import ai.libs.jaicore.components.api.IParameter; import ai.libs.jaicore.components.api.IParameterDomain; import ai.libs.jaicore.components.exceptions.ComponentInstantiationFailedException; import ai.libs.jaicore.components.model.NumericParameterDomain; import ai.libs.jaicore.ml.weka.classification.learner.IWekaClassifier; import ai.libs.jaicore.ml.weka.classification.learner.WekaClassifier; import ai.libs.jaicore.ml.weka.classification.pipeline.MLPipeline; import ai.libs.mlplan.core.ILearnerFactory; import weka.attributeSelection.ASEvaluation; import weka.attributeSelection.ASSearch; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.SingleClassifierEnhancer; import weka.classifiers.functions.SMO; import weka.classifiers.functions.SMOreg; import weka.classifiers.functions.supportVector.Kernel; import weka.classifiers.meta.Stacking; import weka.classifiers.meta.Vote; import weka.core.OptionHandler; public class WekaPipelineFactory implements ILearnerFactory<IWekaClassifier> { private Logger logger = LoggerFactory.getLogger(WekaPipelineFactory.class); private static final String L_CLASSIFIER = "learner"; @Override public IWekaClassifier getComponentInstantiation(final IComponentInstance groundComponent) throws ComponentInstantiationFailedException { this.logger.debug("Instantiate weka classifier from component instance {}.", groundComponent); try { if (groundComponent.getComponent().getName().equals("pipeline")) { IComponentInstance preprocessorCI = null; /* Retrieve component instances of pipeline */ preprocessorCI = groundComponent.getSatisfactionOfRequiredInterface("preprocessor").iterator().next(); IComponentInstance evaluatorCI = preprocessorCI.getSatisfactionOfRequiredInterface("eval").iterator().next(); IComponentInstance searcherCI = preprocessorCI.getSatisfactionOfRequiredInterface("search").iterator().next(); ASEvaluation eval = ASEvaluation.forName(evaluatorCI.getComponent().getName(), this.getParameterList(evaluatorCI).toArray(new String[0])); ASSearch search = ASSearch.forName(searcherCI.getComponent().getName(), this.getParameterList(searcherCI).toArray(new String[0])); IWekaClassifier c = this.getComponentInstantiation(groundComponent.getSatisfactionOfRequiredInterface(L_CLASSIFIER).iterator().next()); this.logger.debug("Returning a MLPipeline object (aseval: {}, assearch: {}, classifier: {})", eval != null, search != null, c != null); return new WekaClassifier(new MLPipeline(search, eval, c.getClassifier())); } else { Classifier c = AbstractClassifier.forName(groundComponent.getComponent().getName(), this.getParameterList(groundComponent).toArray(new String[0])); List<String> options = this.getParameterList(groundComponent); options.add("-do-not-check-capabilities"); if (c instanceof OptionHandler) { ((OptionHandler) c).setOptions(options.toArray(new String[0])); } for (Entry<String, List<IComponentInstance>> reqI : groundComponent.getSatisfactionOfRequiredInterfaces().entrySet()) { switch (reqI.getKey()) { case "W": if (c instanceof SingleClassifierEnhancer) { // suppose that this defines a base classifier ((SingleClassifierEnhancer) c).setClassifier(this.getComponentInstantiation(reqI.getValue().iterator().next()).getClassifier()); } 
else { this.logger.error("Got required interface W but classifier {} is not single classifier enhancer", c.getClass().getName()); } break; case "K": if (c instanceof SMO || c instanceof SMOreg) { IComponentInstance kernel = reqI.getValue().iterator().next(); Kernel k = (Kernel) Class.forName(kernel.getComponent().getName()).getDeclaredConstructor().newInstance(); k.setOptions(this.getParameterList(kernel).toArray(new String[0])); if (c instanceof SMO) { ((SMO) c).setKernel(k); } else if (c instanceof SMOreg) { ((SMOreg) c).setKernel(k); } } else { this.logger.error("Got required interface K but classifier {} is not SMO", c.getClass().getName()); } break; case "B": // suppose that this defines a base classifier List<Classifier> baseClassifierList = reqI.getValue().stream().map(x -> { try { return this.getComponentInstantiation(x).getClassifier(); } catch (ComponentInstantiationFailedException e) { return null; } }).filter(Objects::nonNull).collect(Collectors.toList()); if (baseClassifierList.size() != reqI.getValue().size()) { this.logger.error("Could not instantiate base learners correctly."); throw new ComponentInstantiationFailedException("Could not instantiate base learner list of Stacking."); } if (c instanceof Stacking) { ((Stacking) c).setClassifiers(baseClassifierList.toArray(new Classifier[] {})); } else if (c instanceof Vote) { ((Vote) c).setClassifiers(baseClassifierList.toArray(new Classifier[] {})); } else { this.logger.error("Unsupported option B for classifier {}", c.getClass().getName()); } break; case "M": if (c instanceof Stacking) { Classifier baseClassifier = this.getComponentInstantiation(reqI.getValue().iterator().next()).getClassifier(); ((Stacking) c).setMetaClassifier(baseClassifier); } else { this.logger.error("Unsupported option B for classifier {}", c.getClass().getName()); } break; default: this.logger.error("Got required interface {} for classifier {}. 
Dont know what to do with it...", reqI.getKey(), c.getClass().getName()); break; } } return new WekaClassifier(c); } } catch (Exception e) { throw new ComponentInstantiationFailedException(e, "Could not instantiate component."); } } private List<IWekaClassifier> getListOfBaseLearners(final IComponentInstance ci) throws ComponentInstantiationFailedException { List<IWekaClassifier> baseLearnerList = new LinkedList<>(); if (ci.getComponent().getName().equals("MultipleBaseLearnerListElement")) { baseLearnerList.add(this.getComponentInstantiation(ci.getSatisfactionOfRequiredInterface(L_CLASSIFIER).iterator().next())); } else if (ci.getComponent().getName().equals("MultipleBaseLearnerListChain")) { baseLearnerList.add(this.getComponentInstantiation(ci.getSatisfactionOfRequiredInterface(L_CLASSIFIER).iterator().next())); baseLearnerList.addAll(this.getListOfBaseLearners(ci.getSatisfactionOfRequiredInterface("chain").iterator().next())); } return baseLearnerList; } private List<String> getParameterList(final IComponentInstance ci) { List<String> parameters = new LinkedList<>(); for (Entry<String, String> parameterValues : ci.getParameterValues().entrySet()) { IParameter param = ci.getComponent().getParameter(parameterValues.getKey()); boolean isDefault = param.isDefaultValue(parameterValues.getValue()); if (parameterValues.getKey().toLowerCase().endsWith("activator") || parameterValues.getValue().equals("REMOVED") || isDefault) { continue; } if (!parameterValues.getValue().equals("false")) { parameters.add("-" + parameterValues.getKey()); } IParameterDomain domain = ci.getComponent().getParameter(parameterValues.getKey()).getDefaultDomain(); if (parameterValues.getValue() != null && !parameterValues.getValue().equals("") && !parameterValues.getValue().equals("true") && !parameterValues.getValue().equals("false")) { if (domain instanceof NumericParameterDomain && ((NumericParameterDomain) domain).isInteger()) { parameters.add(((int) Double.parseDouble(parameterValues.getValue())) + ""); } else { parameters.add(parameterValues.getValue()); } } } return parameters; } }
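A minimal standalone sketch (not part of the original sources) of the option convention that getParameterList above implements: each non-default component parameter p with value v becomes the WEKA option pair ("-p", v), parameters set to "true" become a bare flag "-p", and values of "false" or "REMOVED" are dropped. The original's default-value and activator handling is omitted here for brevity.
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class WekaOptionConventionSketch {
	public static List<String> toOptions(final Map<String, String> parameterValues) {
		List<String> options = new ArrayList<>();
		for (Map.Entry<String, String> entry : parameterValues.entrySet()) {
			String value = entry.getValue();
			if (value == null || value.equals("false") || value.equals("REMOVED")) {
				continue; // dropped entirely, as in getParameterList above
			}
			options.add("-" + entry.getKey()); // option names always carry a leading dash
			if (!value.equals("true")) {
				options.add(value); // boolean flags contribute no value token
			}
		}
		return options;
	}
}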
0
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan/weka
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan/weka/weka/WekaPipelineValidityCheckingNodeEvaluator.java
package ai.libs.mlplan.weka.weka; import java.util.Collection; import org.api4.java.ai.ml.core.dataset.supervised.ILabeledDataset; import org.api4.java.common.control.ILoggingCustomizable; import org.api4.java.datastructure.graph.ILabeledPath; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.core.HASCOUtil; import ai.libs.jaicore.components.api.IComponentInstance; import ai.libs.jaicore.components.model.Component; import ai.libs.jaicore.components.model.ComponentInstance; import ai.libs.jaicore.ml.weka.WekaUtil; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNode; import ai.libs.jaicore.search.algorithms.standard.bestfirst.exceptions.ControlledNodeEvaluationException; import ai.libs.mlplan.core.PipelineValidityCheckingNodeEvaluator; public class WekaPipelineValidityCheckingNodeEvaluator extends PipelineValidityCheckingNodeEvaluator implements ILoggingCustomizable { private Logger logger = LoggerFactory.getLogger(WekaPipelineValidityCheckingNodeEvaluator.class); public static final String COMPNAME_PREPROCESSOR = "preprocessor"; public static final String COMPNAME_SEARCH = "search"; public static final String COMPNAME_EVAL = "eval"; public static final String COMPNAME_LEARNER = "learner"; public WekaPipelineValidityCheckingNodeEvaluator() { super(); } public WekaPipelineValidityCheckingNodeEvaluator(final Collection<Component> components, final ILabeledDataset<?> data) { super(components, data); } @Override public Double evaluate(final ILabeledPath<TFDNode, String> path) throws ControlledNodeEvaluationException { if (!this.propertiesDetermined) { this.extractDatasetProperties(); } /* get partial component */ ComponentInstance instance = HASCOUtil.getSolutionCompositionFromState(this.getComponents(), path.getHead().getState(), false); if (instance != null) { /* check invalid preprocessor combinations */ if (instance.getSatisfactionOfRequiredInterfaces().containsKey(COMPNAME_PREPROCESSOR) && !instance.getSatisfactionOfRequiredInterface(COMPNAME_PREPROCESSOR).isEmpty()) { IComponentInstance pp = instance.getSatisfactionOfRequiredInterface(COMPNAME_PREPROCESSOR).iterator().next(); if (pp != null && pp.getComponent().getName().contains("AttributeSelection") && !pp.getSatisfactionOfRequiredInterface(COMPNAME_SEARCH).isEmpty() && !pp.getSatisfactionOfRequiredInterface(COMPNAME_EVAL).isEmpty()) { IComponentInstance search = pp.getSatisfactionOfRequiredInterface(COMPNAME_SEARCH).iterator().next(); IComponentInstance eval = pp.getSatisfactionOfRequiredInterface(COMPNAME_EVAL).iterator().next(); if (search != null && eval != null && !WekaUtil.isValidPreprocessorCombination(search.getComponent().getName(), eval.getComponent().getName())) { throw new ControlledNodeEvaluationException("The given combination of searcher and evaluator cannot be benchmarked since they are incompatible."); } } } /* check invalid classifiers for this kind of dataset */ IComponentInstance classifier; if (instance.getComponent().getName().toLowerCase().contains("pipeline")) { if (instance.getSatisfactionOfRequiredInterfaces().containsKey(COMPNAME_LEARNER) && !instance.getSatisfactionOfRequiredInterface(COMPNAME_LEARNER).isEmpty()) { classifier = instance.getSatisfactionOfRequiredInterface(COMPNAME_LEARNER).iterator().next(); } else { classifier = null; } } else { classifier = instance; } if (classifier != null) { this.checkValidity(classifier); } } return null; }
private void checkValidity(final IComponentInstance classifier) throws ControlledNodeEvaluationException { String classifierName = classifier.getComponent().getName().toLowerCase(); /* forbid M5/regression-style algorithms on non-binary datasets */ boolean binaryClassifierMatch = classifierName.matches("(.*)(additiveregression|simplelinearregression|m5rules|votedperceptron|m5p)(.*)"); if (!this.binaryClass && binaryClassifierMatch) { throw new ControlledNodeEvaluationException(classifierName + " cannot be applied to non-binary datasets."); } boolean noBinaryClassifierMatch = classifierName.matches("(.*)(additiveregression|m5p|m5rules|simplelinearregression)(.*)"); if (this.binaryClass && noBinaryClassifierMatch) { throw new ControlledNodeEvaluationException(classifierName + " cannot be applied to binary classification tasks."); } /* forbid NaiveBayesMultinomial on multi-valued nominal attributes */ if (this.multiValuedNominalAttributes && (classifierName.matches("(.*)(naivebayesmultinomial|simplelinearregression)(.*)"))) { throw new ControlledNodeEvaluationException(classifierName + " cannot be applied to datasets with multi-valued nominal attributes."); } boolean noMulticlassClassifierMatch = classifierName.matches("(.*)(votedperceptron)(.*)"); if (this.multiClass && noMulticlassClassifierMatch) { throw new ControlledNodeEvaluationException(classifierName + " cannot be applied to multi-class classification datasets."); } if (this.containsNegativeValues && classifierName.matches("(.*)(naivebayesmultinomial)(.*)")) { throw new ControlledNodeEvaluationException("Negative numeric attribute values are not supported by the classifier."); } /* Exclude some learners for regression problems */ if (this.regression && classifierName.matches("(.*)(oner|smo|j48|jrip|naivebayes|logistic|lmt|bayesnet)(.*)")) { throw new ControlledNodeEvaluationException("Learner does not support regression"); } } @Override public String getLoggerName() { return this.logger.getName(); } @Override public void setLoggerName(final String name) { this.logger = LoggerFactory.getLogger(name); } }
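A standalone sketch (not part of the original sources) of the rule pattern checkValidity above relies on: each rule is a regex blacklist over the lower-cased learner class name, guarded by a dataset property. The flag name and the exception type here are simplifications.
public class ValidityRuleSketch {
	/** Mirrors the multi-class rule above: VotedPerceptron only handles binary problems. */
	public static void checkMultiClassRule(final String learnerClassName, final boolean isMultiClass) {
		String name = learnerClassName.toLowerCase();
		if (isMultiClass && name.matches("(.*)(votedperceptron)(.*)")) {
			throw new IllegalArgumentException(name + " cannot be applied to multi-class classification datasets.");
		}
	}
}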
0
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan/weka
java-sources/ai/libs/mlplan-weka/0.2.7/ai/libs/mlplan/weka/weka/WekaRegressorFactory.java
package ai.libs.mlplan.weka.weka; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.components.api.IComponentInstance; import ai.libs.jaicore.components.exceptions.ComponentInstantiationFailedException; import ai.libs.jaicore.ml.weka.classification.learner.IWekaClassifier; import ai.libs.jaicore.ml.weka.regression.learner.WekaRegressor; import ai.libs.mlplan.core.ILearnerFactory; public class WekaRegressorFactory implements ILearnerFactory<IWekaClassifier> { private Logger logger = LoggerFactory.getLogger(WekaRegressorFactory.class); private final WekaPipelineFactory classifierFactory = new WekaPipelineFactory(); @Override public IWekaClassifier getComponentInstantiation(final IComponentInstance groundComponent) throws ComponentInstantiationFailedException { this.logger.debug("Instantiate weka regressor from component instance {}.", groundComponent); return new WekaRegressor(this.classifierFactory.getComponentInstantiation(groundComponent).getClassifier()); } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlpipeline_evaluation/CacheEvaluatorMeasureBridge.java
package ai.libs.mlpipeline_evaluation; import java.time.Duration; import java.time.Instant; import java.util.Optional; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.model.ComponentInstance; import ai.libs.jaicore.basic.algorithm.exceptions.ObjectEvaluationFailedException; import ai.libs.jaicore.ml.cache.ReproducibleInstances; import ai.libs.jaicore.ml.core.evaluation.measure.IMeasure; import ai.libs.jaicore.ml.evaluation.evaluators.weka.splitevaluation.AbstractSplitBasedClassifierEvaluator; import ai.libs.jaicore.ml.evaluation.evaluators.weka.splitevaluation.SimpleSLCSplitBasedClassifierEvaluator; import weka.classifiers.Classifier; import weka.core.Instances; /** * Implements a cache for the {@link AbstractSplitBasedClassifierEvaluator}. If no cache entry is found, {@link SimpleSLCSplitBasedClassifierEvaluator} is used. * * @author mirko * */ public class CacheEvaluatorMeasureBridge extends AbstractSplitBasedClassifierEvaluator<Double, Double> { /** Logger for controlled output. */ private static final Logger logger = LoggerFactory.getLogger(CacheEvaluatorMeasureBridge.class); private ComponentInstance evaluatedComponent; /* Used for evaluating, when no cache entry could be found. */ private SimpleSLCSplitBasedClassifierEvaluator simpleEvaluatorMeasureBridge; /* Used for looking up cache entries. */ private PerformanceDBAdapter performanceDBAdapter; public CacheEvaluatorMeasureBridge(final IMeasure<Double, Double> basicEvaluator, final PerformanceDBAdapter performanceDBAdapter) { super(basicEvaluator); this.performanceDBAdapter = performanceDBAdapter; this.simpleEvaluatorMeasureBridge = new SimpleSLCSplitBasedClassifierEvaluator(basicEvaluator); } @Override public Double evaluateSplit(final Classifier pl, final Instances trainingData, final Instances validationData) throws ObjectEvaluationFailedException, InterruptedException { if (trainingData instanceof ReproducibleInstances) { if (((ReproducibleInstances) trainingData).isCacheLookup()) { // check in the cache if the result exists already Optional<Double> potentialCache = this.performanceDBAdapter.exists(this.evaluatedComponent, (ReproducibleInstances) trainingData, (ReproducibleInstances) validationData, this.simpleEvaluatorMeasureBridge.getBasicEvaluator().getClass().getName()); if (potentialCache.isPresent()) { logger.debug("Cache hit"); return potentialCache.get(); } } logger.debug("Cache miss"); // query the underlying loss function Instant start = Instant.now(); double performance = this.simpleEvaluatorMeasureBridge.evaluateSplit(pl, trainingData, validationData); Instant end = Instant.now(); Duration delta = Duration.between(start, end); // cache it if (((ReproducibleInstances) trainingData).isCacheStorage()) { this.performanceDBAdapter.store(this.evaluatedComponent, (ReproducibleInstances) trainingData, (ReproducibleInstances) validationData, performance, this.simpleEvaluatorMeasureBridge.getBasicEvaluator().getClass().getName(), delta.toMillis()); } return performance; } else { return this.simpleEvaluatorMeasureBridge.evaluateSplit(pl, trainingData, validationData); } } /** * Returns a lightweight copy of this object. That is, the database * connection stays established and only the component instance is updated.
* * @param componentInstance * @return the lightweight copy */ public CacheEvaluatorMeasureBridge getShallowCopy(final ComponentInstance componentInstance) { CacheEvaluatorMeasureBridge bridge = new CacheEvaluatorMeasureBridge(this.getBasicEvaluator(), this.performanceDBAdapter); bridge.evaluatedComponent = componentInstance; return bridge; } }
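Illustrative usage sketch (not part of the original sources): the bridge is intended to be shallow-copied once per candidate composition, so the cache key is bound to that composition while the database connection is reused across evaluations.
import ai.libs.hasco.model.ComponentInstance;
import weka.classifiers.Classifier;
import weka.core.Instances;

public class CacheBridgeUsageSketch {
	public static double evaluateWithCache(final CacheEvaluatorMeasureBridge templateBridge, final ComponentInstance candidate, final Classifier pipeline, final Instances train, final Instances validation) throws Exception {
		CacheEvaluatorMeasureBridge bridge = templateBridge.getShallowCopy(candidate); // binds the composition that keys the cache lookup
		return bridge.evaluateSplit(pipeline, train, validation); // hit: cached score; miss: evaluate, then store if caching is enabled
	}
}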
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlpipeline_evaluation/ConsistentMLPipelineEvaluator.java
package ai.libs.mlpipeline_evaluation; import java.util.Collection; import java.util.List; import java.util.Random; import org.apache.commons.lang.NotImplementedException; import ai.libs.jaicore.ml.WekaUtil; import ai.libs.jaicore.ml.core.evaluation.measure.singlelabel.ZeroOneLoss; import ai.libs.jaicore.ml.evaluation.evaluators.weka.IClassifierEvaluator; import ai.libs.jaicore.ml.evaluation.evaluators.weka.MonteCarloCrossValidationEvaluator; import ai.libs.jaicore.ml.evaluation.evaluators.weka.splitevaluation.SimpleSLCSplitBasedClassifierEvaluator; import weka.classifiers.Classifier; import weka.classifiers.Evaluation; import weka.core.Instances; /** * For consistent evaluations of MLPipelines. * * @author Helena Graf * @author Lukas * @author Joshua * */ public class ConsistentMLPipelineEvaluator { private ConsistentMLPipelineEvaluator() { /* Private c'tor to prevent instantiation. */ } /** * Get the error rate of the classifier according to the given info about the * split and evaluation technique. * * @param testSplitTechnique * @param testEvaluationTechnique * @param testSeed * @param valSplitTechnique * @param valEvaluationTechnique * @param valSeed * @param data * @param classifier * @return * @throws Exception */ public static double evaluateClassifier(final String testSplitTechnique, final String testEvaluationTechnique, final int testSeed, final String valSplitTechnique, final String valEvaluationTechnique, final int valSeed, final Instances data, final Classifier classifier) throws Exception { switch (testEvaluationTechnique) { case "single": return evaluateClassifier(valSplitTechnique, valEvaluationTechnique, valSeed, getTrainSplit(testSplitTechnique, data, testSeed), classifier); case "multi": throw new NotImplementedException("\"multi\" not yet supported!"); default: throw new IllegalArgumentException("Unknown evaluation technique."); } } /** * Get the error rate of the classifier according to the given info about the * split and evaluation technique. * * @param splitTechnique * @param evaluationTechnique * @param seed * @param data * @param classifier * @return * @throws Exception */ public static double evaluateClassifier(final String splitTechnique, final String evaluationTechnique, final int seed, final Instances data, final Classifier classifier) throws Exception { switch (evaluationTechnique) { case "single": Instances trainSplit = ConsistentMLPipelineEvaluator.getTrainSplit(splitTechnique, data, seed); Evaluation eval = new Evaluation(trainSplit); classifier.buildClassifier(trainSplit); eval.evaluateModel(classifier, ConsistentMLPipelineEvaluator.getTestSplit(splitTechnique, data, seed)); return (1 - eval.pctCorrect() / 100.0d); case "multi": IClassifierEvaluator evaluator = ConsistentMLPipelineEvaluator.getEvaluatorForSplitTechnique(splitTechnique, data, seed); if (evaluator != null) { return evaluator.evaluate(classifier); } else { throw new IllegalArgumentException("Could not find classifier evaluator."); } default: throw new IllegalArgumentException("Unknown evaluation technique: " + evaluationTechnique); } } /** * Get an evaluator object for the given split configuration for the datasets, * which can then be used to evaluate a classifier.
* * @param split_technique * @param data * @param seed * @return */ public static IClassifierEvaluator getEvaluatorForSplitTechnique(final String split_technique, final Instances data, final int seed) { String[] techniqueAndDescription = split_technique.split("_"); if (techniqueAndDescription[0].equals("3MCCV")) { return new MonteCarloCrossValidationEvaluator(new SimpleSLCSplitBasedClassifierEvaluator(new ZeroOneLoss()), 3, data, Float.parseFloat(techniqueAndDescription[1]), seed); } return null; } /** * Split the dataset according to the given parameters and return the train * portion of the split. * * @param split_technique * @param data * @param seed * @return */ public static Instances getTrainSplit(final String split_technique, final Instances data, final int seed) { String[] techniqueAndDescription = split_technique.split("_"); if (techniqueAndDescription[0].equals("MCCV")) { Collection<Integer>[] instancesInFolds = WekaUtil.getArbitrarySplit(data, new Random(seed), Double.parseDouble(techniqueAndDescription[1])); List<Instances> folds = WekaUtil.realizeSplit(data, instancesInFolds); return folds.get(0); } return null; } /** * Split the dataset according to the given parameters and return the test * portion of the split. * * @param split_technique * @param data * @param seed * @return */ public static Instances getTestSplit(final String split_technique, final Instances data, final int seed) { String[] techniqueAndDescription = split_technique.split("_"); if (techniqueAndDescription[0].equals("MCCV")) { Collection<Integer>[] instancesInFolds = WekaUtil.getArbitrarySplit(data, new Random(seed), Double.parseDouble(techniqueAndDescription[1])); List<Instances> folds = WekaUtil.realizeSplit(data, instancesInFolds); return folds.get(1); } return null; } }
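Illustrative usage sketch (not part of the original sources): split techniques are encoded as "&lt;name&gt;_&lt;ratio&gt;" strings that the helpers above parse via split("_"), e.g. "MCCV_0.7" for a 70/30 Monte Carlo split; the seed makes the split reproducible. Data and classifier are assumed given.
import weka.classifiers.Classifier;
import weka.core.Instances;

public class ConsistentEvaluationSketch {
	public static double errorRate(final Instances data, final Classifier classifier) throws Exception {
		// "single": build on the train fold of an MCCV_0.7 split and measure the error rate on the test fold
		return ConsistentMLPipelineEvaluator.evaluateClassifier("MCCV_0.7", "single", 42, data, classifier);
	}
}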
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlpipeline_evaluation/DatasetOrigin.java
package ai.libs.mlpipeline_evaluation; /** * Allowed origins for datasets. Must be in db mapping table. * * @author Helena Graf * @author Lukas * @author Joshua * */ public enum DatasetOrigin { OPENML_DATASET_ID, CLUSTER_LOCATION_NEW, LOCAL; /** * Maps the dataset origin to its column name in the database for the dataset * equivalence mappings. * * @param origin * The dataset origin to map * @return The column name for the dataset origin */ @SuppressWarnings("incomplete-switch") static String mapOriginToColumnIdentifier(DatasetOrigin origin) { switch (origin) { case OPENML_DATASET_ID: return "openML_dataset_id"; case CLUSTER_LOCATION_NEW: return "cluster_location_new"; } throw new IllegalArgumentException("Invalid dataset origin."); } }
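A small sketch (not part of the original sources) of resolving the database column for an origin, assuming same-package access since mapOriginToColumnIdentifier is package-private; LOCAL deliberately has no column and leads to an exception.
public class DatasetOriginSketch {
	public static void demo() {
		// Yields "openML_dataset_id"; calling this with LOCAL throws an IllegalArgumentException.
		System.out.println(DatasetOrigin.mapOriginToColumnIdentifier(DatasetOrigin.OPENML_DATASET_ID));
	}
}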
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlpipeline_evaluation/InvalidDatasetOriginException.java
package ai.libs.mlpipeline_evaluation; public class InvalidDatasetOriginException extends Exception { /** * Automatically generated version UID for serialization. */ private static final long serialVersionUID = 4082480943566557877L; public InvalidDatasetOriginException(final String msg) { super(msg); } public InvalidDatasetOriginException(final Throwable cause) { super(cause); } public InvalidDatasetOriginException(final String msg, final Throwable cause) { super(msg, cause); } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlpipeline_evaluation/PerformanceDBAdapter.java
package ai.libs.mlpipeline_evaluation; import java.io.Closeable; import java.io.IOException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.sql.ResultSet; import java.sql.SQLException; import java.text.SimpleDateFormat; import java.time.Instant; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.Map; import java.util.Optional; import javax.xml.bind.annotation.adapters.HexBinaryAdapter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import ai.libs.hasco.model.ComponentInstance; import ai.libs.jaicore.basic.SQLAdapter; import ai.libs.jaicore.ml.cache.ReproducibleInstances; /** * Database adapter for performance data. Provides functionality to store and retrieve performance values in a database. The JSON needed to reproduce the {@link ReproducibleInstances} is saved, as well as the solution that produced the performance value. * * @author jmhansel * */ public class PerformanceDBAdapter implements Closeable { /** Logger for controlled output. */ private static final Logger logger = LoggerFactory.getLogger(PerformanceDBAdapter.class); private final SQLAdapter sqlAdapter; private final String performanceSampleTableName; public PerformanceDBAdapter(final SQLAdapter sqlAdapter, final String performanceSampleTableName) { this.sqlAdapter = sqlAdapter; this.performanceSampleTableName = performanceSampleTableName; /* initialize tables if not existent */ try { ResultSet rs = sqlAdapter.getResultsOfQuery("SHOW TABLES"); boolean hasPerformanceTable = false; while (rs.next()) { String tableName = rs.getString(1); if (tableName.equals(this.performanceSampleTableName)) { hasPerformanceTable = true; } } // if there is no performance table, create it. we hash the composition and // trajectory and use the hash value as primary key for performance reasons. if (!hasPerformanceTable) { logger.info("Creating table for evaluations"); sqlAdapter.update( "CREATE TABLE `" + this.performanceSampleTableName + "` (\r\n" + " `evaluation_id` int(10) NOT NULL AUTO_INCREMENT,\r\n" + " `composition` json NOT NULL,\r\n" + " `train_trajectory` json NOT NULL,\r\n" + " `test_trajectory` json NOT NULL,\r\n" + " `loss_function` varchar(200) NOT NULL,\r\n" + " `score` double NOT NULL,\r\n" + " `evaluation_time_ms` bigint NOT NULL,\r\n" + "`evaluation_date` timestamp NULL DEFAULT NULL," + "`hash_value` char(64) NOT NULL," + " PRIMARY KEY (`evaluation_id`)\r\n" + ") ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 COLLATE=utf8_bin", new ArrayList<>()); } } catch (SQLException e) { logger.error("Exception: {}", e); } } /** * Checks whether there is an entry for the composition and corresponding evaluation specified by the reproducible instances. If so, it returns the corresponding performance score. * * * @param composition * - Solution composition. * @param reproducibleInstances * - Instances object that includes the trajectory, i.e. all operations that have been applied to the instances like loading, splitting etc. * @param testData * - The reproducible instances of the test data used for this evaluation process * @param className * - the fully qualified Java class name of the loss function that was used * @return opt - Optional that contains the score corresponding to the composition and the reproducible instances or is empty if no suitable entry is found in the database.
*/ public Optional<Double> exists(final ComponentInstance composition, final ReproducibleInstances reproducibleInstances, final ReproducibleInstances testData, final String className) { Optional<Double> opt = Optional.empty(); ObjectMapper mapper = new ObjectMapper(); try { String compositionString = mapper.writeValueAsString(composition); String trainTrajectoryString = mapper.writeValueAsString(reproducibleInstances.getInstructions()); String testTrajectoryString = mapper.writeValueAsString(testData.getInstructions()); MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(compositionString.getBytes()); md.update(trainTrajectoryString.getBytes()); md.update(testTrajectoryString.getBytes()); md.update(className.getBytes()); byte[] digest = md.digest(); String hexHash = (new HexBinaryAdapter()).marshal(digest); ResultSet rs = this.sqlAdapter.getResultsOfQuery("SELECT score FROM " + this.performanceSampleTableName + " WHERE hash_value = '" + hexHash + "'"); while (rs.next()) { double score = rs.getDouble("score"); opt = Optional.of(score); } } catch (JsonProcessingException | SQLException | NoSuchAlgorithmException e) { logger.error("Observed exception during existence check: {}", e); } return opt; } /** * Stores the composition, the trajectory and the achieved score in the database. * * @param composition * - Solution composition * @param reproducibleInstances * - Instances object that includes the trajectory, i.e. all operations that have been applied to the instances like loading, splitting etc. * @param testData * - The reproducible instances of the test data used for this evaluation process * @param score * - Score achieved by the composition on the reproducible instances * @param className * - The fully qualified Java class name of the loss function that was used * @param evaluationTime * - The time it took for the corresponding evaluation in milliseconds */ public void store(final ComponentInstance composition, final ReproducibleInstances reproducibleInstances, final ReproducibleInstances testData, final double score, final String className, final long evaluationTime) { ObjectMapper mapper = new ObjectMapper(); try { String compositionString = mapper.writeValueAsString(composition); String trainTrajectoryString = mapper.writeValueAsString(reproducibleInstances.getInstructions()); String testTrajectoryString = mapper.writeValueAsString(testData.getInstructions()); MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(compositionString.getBytes()); md.update(trainTrajectoryString.getBytes()); md.update(testTrajectoryString.getBytes()); md.update(className.getBytes()); byte[] digest = md.digest(); String hexHash = (new HexBinaryAdapter()).marshal(digest); ResultSet rs = this.sqlAdapter.getResultsOfQuery("SELECT score FROM " + this.performanceSampleTableName + " WHERE hash_value = '" + hexHash + "'"); if (rs.next()) { return; } Map<String, String> valueMap = new HashMap<>(); valueMap.put("composition", compositionString); valueMap.put("train_trajectory", trainTrajectoryString); valueMap.put("test_trajectory", testTrajectoryString); valueMap.put("loss_function", className); valueMap.put("evaluation_date", new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(Date.from(Instant.now()))); valueMap.put("evaluation_time_ms", Long.toString(evaluationTime)); valueMap.put("hash_value", hexHash); valueMap.put("score", Double.toString(score)); this.sqlAdapter.insert(this.performanceSampleTableName, valueMap); } catch (JsonProcessingException | NoSuchAlgorithmException | SQLException
e) { logger.warn("Error while storing results: {}", e); } } @Override public void close() throws IOException { this.sqlAdapter.close(); } }
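A standalone sketch (not part of the original sources) of the cache key that exists() and store() above both compute: the hex-encoded SHA-256 digest over the composition JSON, the two trajectory JSONs, and the loss-function class name; this is the value kept in the hash_value column.
import java.security.MessageDigest;
import javax.xml.bind.annotation.adapters.HexBinaryAdapter;

public class CacheKeySketch {
	public static String cacheKey(final String compositionJson, final String trainTrajectoryJson, final String testTrajectoryJson, final String lossFunctionClassName) throws Exception {
		MessageDigest md = MessageDigest.getInstance("SHA-256");
		md.update(compositionJson.getBytes());
		md.update(trainTrajectoryJson.getBytes());
		md.update(testTrajectoryJson.getBytes());
		md.update(lossFunctionClassName.getBytes());
		return new HexBinaryAdapter().marshal(md.digest()); // hex string, 64 characters for SHA-256
	}
}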
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlpipeline_evaluation/PipelineEvaluationCache.java
package ai.libs.mlpipeline_evaluation; import java.sql.ResultSet; import java.sql.SQLException; import java.util.Arrays; import java.util.HashMap; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.model.ComponentInstance; import ai.libs.hasco.serialization.CompositionSerializer; import ai.libs.jaicore.ml.openml.OpenMLHelper; import ai.libs.mlplan.multiclass.wekamlplan.weka.WEKAPipelineFactory; import weka.classifiers.Classifier; import weka.core.converters.ConverterUtils.DataSource; /** * For caching and evaluating MLPipelines. * * @author Helena Graf * @author Joshua * @author Lukas * */ public class PipelineEvaluationCache { private Logger logger = LoggerFactory.getLogger(PipelineEvaluationCache.class); private static final String LOG_CANT_CONNECT_TO_CACHE = "Cannot connect to cache. Switching to offline mode."; private static final String INTERMEDIATE_RESULTS_TABLENAME = "pgotfml_hgraf.intermediate_results"; // Evaluation configuration private boolean useCache = true; private final PipelineEvaluationCacheConfigBuilder config; /** * Construct a new cache for evaluations. The valid split and evaluation * techniques can be looked up in {@link ConsistentMLPipelineEvaluator}. The * dataset origin can be looked up in {@link DatasetOrigin}. * * @param configBuilder * @throws NumberFormatException * @throws Exception * If the dataset cannot be loaded */ public PipelineEvaluationCache(final PipelineEvaluationCacheConfigBuilder configBuilder) throws Exception { super(); this.config = configBuilder; switch (this.config.getDatasetOrigin()) { case LOCAL: case CLUSTER_LOCATION_NEW: this.config.withDataset(new DataSource(this.config.getDatasetId()).getDataSet()); break; case OPENML_DATASET_ID: OpenMLHelper.setApiKey("4350e421cdc16404033ef1812ea38c01"); this.config.withDataset(OpenMLHelper.getInstancesById(Integer.parseInt(this.config.getDatasetId()))); break; default: throw new InvalidDatasetOriginException("Invalid dataset origin."); } } /** * Get an evaluation result for the given pipeline, represented by the component * instance, in the setting for which this cache is configured. * * @param cI * @return Either the looked-up value for the pipeline or the newly evaluated * result * @throws Exception * If the pipeline cannot be evaluated */ public double getResultOrExecuteEvaluation(final ComponentInstance cI) throws Exception { // Lookup String serializedCI = null; if (this.useCache && this.config.getDatasetOrigin() != DatasetOrigin.LOCAL) { this.logger.debug("DB Lookup"); serializedCI = CompositionSerializer.serializeComponentInstance(cI).toString(); this.logger.debug("Pipeline: {}", serializedCI); Double result = this.doDBLookUp(serializedCI); if (result != null) { this.logger.debug("Return DB result"); return result; } } // Execute this.logger.debug("Execute new evaluation"); double result = this.evaluate(cI); this.logger.debug("Score: {}", result); // Write back if (this.useCache && this.config.getDatasetOrigin() != DatasetOrigin.LOCAL) { this.logger.debug("Write new evaluation back into DB"); this.uploadResultToDB(serializedCI, result); } // Return result return result; } private Double doDBLookUp(final String serializedCI) { String query; List<String> values; if (this.doNotValidate()) { query = "SELECT error_rate FROM " + INTERMEDIATE_RESULTS_TABLENAME + " WHERE pipeline=? AND dataset_id=? AND dataset_origin=? AND test_evaluation_technique=? AND test_split_technique=? AND test_seed=?
AND val_evaluation_technique IS NULL AND val_split_technique IS NULL AND val_seed IS NULL"; values = Arrays.asList(serializedCI, this.config.getDatasetId(), DatasetOrigin.mapOriginToColumnIdentifier(this.config.getDatasetOrigin()), this.config.getTestEvaluationTechnique(), this.config.getTestSplitTechnique(), String.valueOf(this.config.getTestSeed())); } else { query = "SELECT error_rate FROM " + INTERMEDIATE_RESULTS_TABLENAME + " WHERE pipeline=? AND dataset_id=? AND dataset_origin=? AND test_evaluation_technique=? AND test_split_technique=? AND test_seed=? AND val_evaluation_technique=? AND val_split_technique=? AND val_seed=?"; values = Arrays.asList(serializedCI, this.config.getDatasetId(), DatasetOrigin.mapOriginToColumnIdentifier(this.config.getDatasetOrigin()), this.config.getTestEvaluationTechnique(), this.config.getTestSplitTechnique(), String.valueOf(this.config.getTestSeed()), this.config.getValEvaluationTechnique(), this.config.getValSplitTechnique(), String.valueOf(this.config.getValSeed())); } try { ResultSet resultSet = this.config.getAdapter().getResultsOfQuery(query, values); if (resultSet.next()) { return resultSet.getDouble("error_rate"); } else { return null; } } catch (SQLException e) { this.logger.warn(LOG_CANT_CONNECT_TO_CACHE, e); this.useCache = false; return null; } } private double evaluate(final ComponentInstance cI) throws Exception { // Get dataset Classifier classifier = new WEKAPipelineFactory().getComponentInstantiation(cI); if (this.doNotValidate()) { return ConsistentMLPipelineEvaluator.evaluateClassifier(this.config.getTestSplitTechnique(), this.config.getTestEvaluationTechnique(), this.config.getTestSeed(), this.config.getData(), classifier); } else { return ConsistentMLPipelineEvaluator.evaluateClassifier(this.config.getTestSplitTechnique(), this.config.getTestEvaluationTechnique(), this.config.getTestSeed(), this.config.getValSplitTechnique(), this.config.getValEvaluationTechnique(), this.config.getValSeed(), this.config.getData(), classifier); } } private void uploadResultToDB(final String serializedCI, final double result) { HashMap<String, Object> map = new HashMap<>(); map.put("pipeline", serializedCI); map.put("dataset_id", this.config.getDatasetId()); map.put("dataset_origin", DatasetOrigin.mapOriginToColumnIdentifier(this.config.getDatasetOrigin())); map.put("test_evaluation_technique", this.config.getTestEvaluationTechnique()); map.put("test_split_technique", this.config.getTestSplitTechnique()); map.put("test_seed", this.config.getTestSeed()); map.put("error_rate", result); if (!this.doNotValidate()) { map.put("val_split_technique", this.config.getValSplitTechnique()); map.put("val_evaluation_technique", this.config.getValEvaluationTechnique()); map.put("val_seed", this.config.getValSeed()); } try { this.config.getAdapter().insert(INTERMEDIATE_RESULTS_TABLENAME, map); } catch (SQLException e) { this.logger.warn(LOG_CANT_CONNECT_TO_CACHE, e); this.useCache = false; } } private boolean doNotValidate() { return this.config.getValSplitTechnique() == null || this.config.getValSplitTechnique().trim().equals(""); } public void configureValidation(final String valSplitTechnique, final String valEvaluationTechnique, final int valSeed) { this.config.withValEvaluationTechnique(valEvaluationTechnique); this.config.withValSplitTechnique(valSplitTechnique); this.config.withValSeed(valSeed); } public boolean usesCache() { return this.useCache; } public void setUseCache(final boolean useCache) { this.useCache = useCache; } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlpipeline_evaluation/PipelineEvaluationCacheConfigBuilder.java
package ai.libs.mlpipeline_evaluation; import ai.libs.jaicore.basic.SQLAdapter; import weka.core.Instances; public class PipelineEvaluationCacheConfigBuilder { // Evaluation configuration private String datasetId; private DatasetOrigin datasetOrigin; private String testEvaluationTechnique; private String testSplitTechnique; private int testSeed = 0; private String valSplitTechnique; private String valEvaluationTechnique; private int valSeed = 0; private Instances data; private SQLAdapter adapter; public PipelineEvaluationCacheConfigBuilder() { super(); } public PipelineEvaluationCacheConfigBuilder withDatasetID(final String datasetId) { this.datasetId = datasetId; return this; } public PipelineEvaluationCacheConfigBuilder withDatasetOrigin(final DatasetOrigin datasetOrigin) { this.datasetOrigin = datasetOrigin; return this; } public PipelineEvaluationCacheConfigBuilder withTestEvaluationTechnique(final String testEvaluationTechnique) { this.testEvaluationTechnique = testEvaluationTechnique; return this; } public PipelineEvaluationCacheConfigBuilder withTestSplitTechnique(final String testSplitTechnique) { this.testSplitTechnique = testSplitTechnique; return this; } public PipelineEvaluationCacheConfigBuilder withTestSeed(final int testSeed) { this.testSeed = testSeed; return this; } public PipelineEvaluationCacheConfigBuilder withValSplitTechnique(final String valSplitTechnique) { this.valSplitTechnique = valSplitTechnique; return this; } public PipelineEvaluationCacheConfigBuilder withValEvaluationTechnique(final String valEvaluationTechnique) { this.valEvaluationTechnique = valEvaluationTechnique; return this; } public PipelineEvaluationCacheConfigBuilder withValSeed(final int valSeed) { this.valSeed = valSeed; return this; } public PipelineEvaluationCacheConfigBuilder withDataset(final Instances data) { this.data = data; return this; } public PipelineEvaluationCacheConfigBuilder withSQLAdapter(final SQLAdapter sqlAdapter) { this.adapter = sqlAdapter; return this; } public String getDatasetId() { return this.datasetId; } public DatasetOrigin getDatasetOrigin() { return this.datasetOrigin; } public String getTestEvaluationTechnique() { return this.testEvaluationTechnique; } public String getTestSplitTechnique() { return this.testSplitTechnique; } public int getTestSeed() { return this.testSeed; } public String getValSplitTechnique() { return this.valSplitTechnique; } public String getValEvaluationTechnique() { return this.valEvaluationTechnique; } public int getValSeed() { return this.valSeed; } public Instances getData() { return this.data; } public SQLAdapter getAdapter() { return this.adapter; } }
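Illustrative usage sketch (not part of the original sources): assembling an evaluation cache from the builder above. The dataset id and technique strings are example values; the technique format follows the "&lt;name&gt;_&lt;ratio&gt;" convention parsed by ConsistentMLPipelineEvaluator, and the SQLAdapter is assumed to be constructed elsewhere.
import ai.libs.jaicore.basic.SQLAdapter;

public class PipelineEvaluationCacheUsageSketch {
	public static PipelineEvaluationCache createCache(final SQLAdapter adapter) throws Exception {
		PipelineEvaluationCacheConfigBuilder config = new PipelineEvaluationCacheConfigBuilder()
				.withDatasetID("40983") // hypothetical OpenML dataset id
				.withDatasetOrigin(DatasetOrigin.OPENML_DATASET_ID)
				.withTestSplitTechnique("MCCV_0.7")
				.withTestEvaluationTechnique("single")
				.withTestSeed(42)
				.withSQLAdapter(adapter);
		return new PipelineEvaluationCache(config); // loads the dataset according to its origin
	}
}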
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlpipeline_evaluation/SimpleResultsUploader.java
package ai.libs.mlpipeline_evaluation; import java.sql.SQLException; import java.util.Arrays; import java.util.HashMap; import java.util.Map; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.basic.SQLAdapter; import ai.libs.mlplan.multiclass.wekamlplan.weka.model.MLPipeline; import ai.libs.mlplan.multiclass.wekamlplan.weka.model.SupervisedFilterSelector; import weka.classifiers.Classifier; import weka.core.OptionHandler; /** * Uploads intermediate evaluations during a run of ML-Plan. * * @author Helena Graf * */ public class SimpleResultsUploader { private Logger logger = LoggerFactory.getLogger(SimpleResultsUploader.class); /** * db adapter for uploading */ private SQLAdapter adapter; /** * table to use for intermediate results */ private String table; /** * the name of the algorithm for which results are uploaded */ private String algorithmName; /** * the id of the experiment for which results are uploaded */ private int experimentId; /** * start time of the search */ private long timeStart = System.currentTimeMillis(); /** * Construct a new simple results uploader with the given configuration for the * entries and table. * * @param adapter * db adapter for uploading * @param table * table to use for intermediate results * @param algorithmName * the name of the algorithm for which results are uploaded * @param experimentId * the id of the experiment for which results are uploaded */ public SimpleResultsUploader(SQLAdapter adapter, String table, String algorithmName, int experimentId) { this.adapter = adapter; this.table = table; this.algorithmName = algorithmName; this.experimentId = experimentId; } /** * Uploads the given intermediate results * * @param classifier * the pipeline for which a result was found * @param evaluationTime * the time it took to evaluate the pipeline * @param solutionQuality * the error of the pipeline * @param phase * the phase in which the pipeline was found (search or selection) * @throws SQLException */ public void uploadResult(MLPipeline classifier, long evaluationTime, double solutionQuality, String phase) throws SQLException { String solutionString = getSolutionString(classifier); if (adapter == null) { logger.warn("Not uploading result after {}ms with value {}. Solution: {}", evaluationTime, solutionQuality, solutionString); } else { Map<String, Object> map = new HashMap<>(); map.put("classifier", solutionString); map.put("phase", phase); map.put("loss", solutionQuality); map.put("time", evaluationTime); map.put("found", System.currentTimeMillis() - timeStart); map.put("algorithm", algorithmName); map.put("experiment_id", experimentId); adapter.insert(table, map); } } /** * Converts the given pipeline to a simple string representation * * @param classifier * the pipeline to convert * @return the string representation */ public static String getSolutionString(MLPipeline classifier) { if (classifier == null) { return "error"; } Classifier baseClassifier = classifier.getBaseClassifier(); String[] classifierOptionsArray; String classifierOptionsString = ""; String classifierString = baseClassifier.getClass().getName(); if (baseClassifier instanceof OptionHandler) { classifierOptionsArray = ((OptionHandler) baseClassifier).getOptions(); classifierOptionsString = classifierOptionsArray.length > 0 ? Arrays.stream(classifierOptionsArray).collect(Collectors.joining(", ", "[", "]")) : ""; } SupervisedFilterSelector preprocessor = !classifier.getPreprocessors().isEmpty() ? 
classifier.getPreprocessors().get(0) : null; String preprocessorString = preprocessor != null ? preprocessor.getClass().getName() : ""; String[] preprocessorOptionsArray; String preprocessorOptionsString = ""; if (preprocessor instanceof OptionHandler) { preprocessorOptionsArray = !preprocessorString.equals("") ? ((OptionHandler) preprocessor).getOptions() : new String[0]; preprocessorOptionsString = preprocessorOptionsArray.length > 0 ? Arrays.stream(preprocessorOptionsArray).collect(Collectors.joining(",", "[", "]")) : ""; } return classifierString + " " + classifierOptionsString + " " + preprocessorString + " " + preprocessorOptionsString; } }
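Illustrative usage sketch (not part of the original sources): reporting one intermediate result; with a null adapter the uploader only logs the result instead of inserting it. Table name, algorithm name and experiment id are placeholders.
import ai.libs.jaicore.basic.SQLAdapter;
import ai.libs.mlplan.multiclass.wekamlplan.weka.model.MLPipeline;

public class ResultsUploadSketch {
	public static void report(final SQLAdapter adapter, final MLPipeline pipeline) throws Exception {
		SimpleResultsUploader uploader = new SimpleResultsUploader(adapter, "intermediate_results", "ML-Plan", 1);
		uploader.uploadResult(pipeline, 1500L, 0.05, "search"); // evaluation took 1500 ms, error 0.05, found in search phase
	}
}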
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlpipeline_evaluation/package-info.java
/** * Package containing classes for the evaluation of MLPipelines and the caching * of those evaluations in a database. * * @author Helena Graf * @author Joshua * @author Lukas * */ package ai.libs.mlpipeline_evaluation;
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/bigdata/MLPlan4BigFileInput.java
package ai.libs.mlplan.bigdata; import java.io.File; import java.io.FileReader; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.eventbus.Subscribe; import ai.libs.hasco.model.ComponentInstance; import ai.libs.jaicore.basic.ILoggingCustomizable; import ai.libs.jaicore.basic.StatisticsUtil; import ai.libs.jaicore.basic.TimeOut; import ai.libs.jaicore.basic.algorithm.AAlgorithm; import ai.libs.jaicore.basic.algorithm.AlgorithmExecutionCanceledException; import ai.libs.jaicore.basic.algorithm.events.AlgorithmEvent; import ai.libs.jaicore.basic.algorithm.exceptions.AlgorithmException; import ai.libs.jaicore.basic.algorithm.exceptions.AlgorithmTimeoutedException; import ai.libs.jaicore.ml.core.dataset.sampling.infiles.ReservoirSampling; import ai.libs.jaicore.ml.core.dataset.sampling.inmemory.factories.SimpleRandomSamplingFactory; import ai.libs.jaicore.ml.evaluation.evaluators.weka.events.MCCVSplitEvaluationEvent; import ai.libs.jaicore.ml.learningcurve.extrapolation.LearningCurveExtrapolatedEvent; import ai.libs.jaicore.ml.learningcurve.extrapolation.ipl.InversePowerLawExtrapolationMethod; import ai.libs.mlplan.core.AbstractMLPlanBuilder; import ai.libs.mlplan.core.MLPlan; import ai.libs.mlplan.core.MLPlanWekaBuilder; import ai.libs.mlplan.core.events.ClassifierCreatedEvent; import weka.classifiers.Classifier; import weka.classifiers.functions.LinearRegression; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; /** * This is a version of ML-Plan that tries to cope with medium-sized data in the form of big files. * That is, the data is still small enough to be kept in a single file, so no streaming is required. * The data is, however, in general too large to be entirely loaded into memory. * * We use simple sampling to create a relatively small subset of the data, then run info gain, and then ML-Plan with * learning curve prediction.
* * @author fmohr * */ public class MLPlan4BigFileInput extends AAlgorithm<File, Classifier> implements ILoggingCustomizable { private Logger logger = LoggerFactory.getLogger(MLPlan4BigFileInput.class); private File intermediateSizeDownsampledFile = new File("testrsc/sampled/intermediate/" + this.getInput().getName()); private final int[] anchorpointsTraining = new int[] { 8, 16, 64, 128 }; private Map<Classifier, ComponentInstance> classifier2modelMap = new HashMap<>(); private Map<ComponentInstance, int[]> trainingTimesDuringSearch = new HashMap<>(); private Map<ComponentInstance, List<Integer>> trainingTimesDuringSelection = new HashMap<>(); private int numTrainingInstancesUsedInSelection; private MLPlan mlplan; public MLPlan4BigFileInput(final File input) { super(input); } private void downsampleData(final File from, final File to, final int size) throws InterruptedException, AlgorithmExecutionCanceledException, AlgorithmException { ReservoirSampling sampler = new ReservoirSampling(new Random(0), this.getInput()); try { File outputFolder = to.getParentFile(); if (!outputFolder.exists()) { this.logger.info("Creating data output folder {}", outputFolder.getAbsolutePath()); outputFolder.mkdirs(); } this.logger.info("Starting sampler {} for data source {}", sampler.getClass().getName(), from.getAbsolutePath()); sampler.setOutputFileName(to.getAbsolutePath()); sampler.setSampleSize(size); sampler.call(); this.logger.info("Reduced dataset size to {}", size); } catch (IOException e) { throw new AlgorithmException(e, "Could not create a sub-sample of the given data."); } } @Override public AlgorithmEvent nextWithException() throws InterruptedException, AlgorithmExecutionCanceledException, AlgorithmTimeoutedException, AlgorithmException { switch (this.getState()) { case CREATED: /* first create an intermediate sized downsampled file (10k instances), which is the basis for further operations */ this.downsampleData(this.getInput(), this.intermediateSizeDownsampledFile, 10000); /* down-sample the intermediate sized input data again for ML-Plan */ File downsampledFile = new File("testrsc/sampled/" + this.getInput().getName()); this.downsampleData(this.intermediateSizeDownsampledFile, downsampledFile, 1000); if (!downsampledFile.exists()) { throw new AlgorithmException("The file " + downsampledFile.getAbsolutePath() + " that should be used for ML-Plan does not exist!"); } Instances data; try { data = new Instances(new FileReader(downsampledFile)); data.setClassIndex(data.numAttributes() - 1); this.logger.info("Loaded {}x{} dataset", data.size(), data.numAttributes()); } catch (IOException e) { throw new AlgorithmException(e, "Could not create a sub-sample of the given data."); } /* apply ML-Plan to reduced data */ MLPlanWekaBuilder builder; try { builder = AbstractMLPlanBuilder.forWeka(); builder.withLearningCurveExtrapolationEvaluation(this.anchorpointsTraining, new SimpleRandomSamplingFactory<>(), .7, new InversePowerLawExtrapolationMethod()); builder.withNodeEvaluationTimeOut(new TimeOut(15, TimeUnit.MINUTES)); builder.withCandidateEvaluationTimeOut(new TimeOut(5, TimeUnit.MINUTES)); this.mlplan = new MLPlan(builder, data); this.mlplan.setLoggerName(this.getLoggerName() + ".mlplan"); this.mlplan.registerListener(this); this.mlplan.setTimeout(new TimeOut(this.getTimeout().seconds() - 30, TimeUnit.SECONDS)); this.mlplan.setNumCPUs(3); this.mlplan.setBuildSelectedClasifierOnGivenData(false); // we will build the classifier, ML-Plan should not waste time with this this.logger.info("ML-Plan 
initialized, activation finished!"); return this.activate(); } catch (IOException e) { throw new AlgorithmException(e, "Could not initialize ML-Plan!"); } case ACTIVE: /* run ML-Plan */ this.logger.info("Starting ML-Plan."); this.mlplan.call(); this.logger.info("ML-Plan has finished. Selected classifier is {} with observed internal performance {}. Will now try to determine the portion of training data that may be used for final training.", this.mlplan.getSelectedClassifier(), this.mlplan.getInternalValidationErrorOfSelectedClassifier()); /* fit regression model to estimate the runtime behavior of the selected classifier */ int[] trainingTimesDuringSearch = this.trainingTimesDuringSearch.get(this.mlplan.getComponentInstanceOfSelectedClassifier()); List<Integer> trainingTimesDuringSelection = this.trainingTimesDuringSelection.get(this.mlplan.getComponentInstanceOfSelectedClassifier()); this.logger.info("Observed training times of selected classifier: {} (search) and {} (selection on {} training instances)", Arrays.toString(trainingTimesDuringSearch), trainingTimesDuringSelection, this.numTrainingInstancesUsedInSelection); Instances observedRuntimeData = this.getTrainingTimeInstancesForClassifier(this.mlplan.getComponentInstanceOfSelectedClassifier()); this.logger.info("Inferred the following data:\n{}", observedRuntimeData); LinearRegression lr = new LinearRegression(); try { lr.buildClassifier(observedRuntimeData); this.logger.info("Obtained the following output for the regression model: {}", lr); } catch (Exception e1) { throw new AlgorithmException(e1, "Could not build a regression model for the runtime."); } /* determine the number of instances that can be used for training with this classifier in the remaining time */ int numInstances = 500; int remainingTime = (int)this.getRemainingTimeToDeadline().milliseconds(); this.logger.info("Determining number of instances that can be used for training given that {}s are remaining.", (int)Math.round(remainingTime / 1000.0)); while (numInstances < 10000) { Instance low = this.getInstanceForRuntimeAnalysis(numInstances); try { double predictedRuntime = lr.classifyInstance(low); if (predictedRuntime > remainingTime) { this.logger.info("Obtained predicted runtime of {}ms for {} training instances, which is more time than we still have. Choosing this number.", predictedRuntime, numInstances); break; } else { this.logger.info("Obtained predicted runtime of {}ms for {} training instances, which still seems manageable.", predictedRuntime, numInstances); numInstances += 50; } } catch (Exception e) { throw new AlgorithmException(e, "Could not obtain a runtime prediction for " + numInstances + " instances."); } } this.logger.info("Estimated that {} instances can be used for training in time!", numInstances); /* train the classifier with the determined number of samples */ try { File finalDataFile = new File("testrsc/sampled/final/" + this.getInput().getName()); this.downsampleData(this.intermediateSizeDownsampledFile, finalDataFile, numInstances); Instances completeData = new Instances(new FileReader(finalDataFile)); completeData.setClassIndex(completeData.numAttributes() - 1); this.logger.info("Created final dataset with {} instances.
Now building the final classifier.", completeData.size()); long startFinalTraining = System.currentTimeMillis(); this.mlplan.getSelectedClassifier().buildClassifier(completeData); this.logger.info("Classifier has been fully trained within {}ms.", System.currentTimeMillis() - startFinalTraining); } catch (Exception e) { throw new AlgorithmException(e, "Could not train the final classifier with the full data."); } return this.terminate(); default: throw new IllegalStateException(); } } private Instances getTrainingTimeInstancesForClassifier(final ComponentInstance ci) { ArrayList<Attribute> attributes = new ArrayList<>(); attributes.add(new Attribute("numInstances")); // attributes.add(new Attribute("numInstancesSquared")); attributes.add(new Attribute("runtime")); Instances data = new Instances("Runtime Analysis Regression Data for " + ci, attributes, 0); /* create one instance for each data point during search phase */ for (int i = 0; i < this.anchorpointsTraining.length; i++) { Instance inst = this.getInstanceForRuntimeAnalysis(this.anchorpointsTraining[i]); inst.setValue(1, this.trainingTimesDuringSearch.get(ci)[i]); data.add(inst); } /* create one instance for the mean of the values observed in selection phase */ if (this.trainingTimesDuringSelection.containsKey(ci)) { Instance inst = this.getInstanceForRuntimeAnalysis(this.numTrainingInstancesUsedInSelection); inst.setValue(1, StatisticsUtil.mean(this.trainingTimesDuringSelection.get(ci))); data.add(inst); } else { this.logger.warn("Classifier {} has not been evaluated in selection phase. Cannot use this information to fit its regression model.", ci); } /* set target attribute and return data */ data.setClassIndex(1); return data; } private Instance getInstanceForRuntimeAnalysis(final int numberOfInstances) { Instance inst = new DenseInstance(2); /* one value per attribute (numInstances and runtime); this must be adapted if the numInstancesSquared attribute is re-enabled */ inst.setValue(0, numberOfInstances); // inst.setValue(1, Math.pow(numberOfInstances, 2)); return inst; } @Subscribe public void receiveClassifierCreatedEvent(final ClassifierCreatedEvent e) { this.logger.info("Binding component instance {} to classifier {}", e.getInstance(), e.getClassifier()); this.classifier2modelMap.put(e.getClassifier(), e.getInstance()); } @Subscribe public void receiveExtrapolationFinishedEvent(final LearningCurveExtrapolatedEvent e) { ComponentInstance ci = this.classifier2modelMap.get(e.getExtrapolator().getLearner()); this.logger.info("Storing training times {} for classifier {}", Arrays.toString(e.getExtrapolator().getTrainingTimes()), ci); this.trainingTimesDuringSearch.put(ci, e.getExtrapolator().getTrainingTimes()); } @Subscribe public void receiveMCCVFinishedEvent(final MCCVSplitEvaluationEvent e) { ComponentInstance ci = this.classifier2modelMap.get(e.getClassifier()); this.logger.info("Storing training time {} for classifier {} in selection phase with {} training instances and {} validation instances", e.getSplitEvaluationTime(), ci, e.getNumInstancesUsedForTraining(), e.getNumInstancesUsedForValidation()); if (this.numTrainingInstancesUsedInSelection == 0) { this.numTrainingInstancesUsedInSelection = e.getNumInstancesUsedForTraining(); } else if (this.numTrainingInstancesUsedInSelection != e.getNumInstancesUsedForTraining()) { this.logger.warn("Memorized {} as number of instances used for training in selection phase, but now observed one classifier using {} instances.", this.numTrainingInstancesUsedInSelection, e.getNumInstancesUsedForTraining()); } if (!this.trainingTimesDuringSelection.containsKey(ci)) { this.trainingTimesDuringSelection.put(ci, new
ArrayList<>()); } this.trainingTimesDuringSelection.get(ci).add(e.getSplitEvaluationTime()); } @Override public Classifier call() throws InterruptedException, AlgorithmExecutionCanceledException, AlgorithmTimeoutedException, AlgorithmException { while (this.hasNext()) { this.next(); } return this.mlplan.getSelectedClassifier(); } @Override public void setLoggerName(final String loggerName) { this.logger = LoggerFactory.getLogger(loggerName); } @Override public String getLoggerName() { return this.logger.getName(); } }
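// Hypothetical, self-contained sketch (not referenced anywhere in ML-Plan): it demonstrates in isolation the
// runtime-extrapolation idea used in nextWithException() above, namely fitting a Weka LinearRegression to
// (numInstances, runtime) observations and querying it for a larger sample size. All numbers are made up.
class RuntimeExtrapolationSketch {
	public static void main(final String[] args) throws Exception {
		java.util.ArrayList<weka.core.Attribute> attributes = new java.util.ArrayList<>();
		attributes.add(new weka.core.Attribute("numInstances"));
		attributes.add(new weka.core.Attribute("runtime"));
		weka.core.Instances observations = new weka.core.Instances("runtime observations", attributes, 0);
		observations.setClassIndex(1); // predict the runtime from the number of instances

		int[] anchors = { 8, 16, 64, 128 }; // anchor points as used during the search phase
		double[] runtimes = { 50, 90, 300, 600 }; // invented training times in ms
		for (int i = 0; i < anchors.length; i++) {
			weka.core.Instance inst = new weka.core.DenseInstance(2);
			inst.setValue(0, anchors[i]);
			inst.setValue(1, runtimes[i]);
			observations.add(inst);
		}

		weka.classifiers.functions.LinearRegression lr = new weka.classifiers.functions.LinearRegression();
		lr.buildClassifier(observations);

		weka.core.Instance query = new weka.core.DenseInstance(2);
		query.setValue(0, 1000); // how long would training on 1000 instances take?
		query.setDataset(observations);
		System.out.println("predicted runtime: " + lr.classifyInstance(query) + "ms");
	}
}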
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/cli/MLPlanCLI.java
package ai.libs.mlplan.cli; import java.io.BufferedWriter; import java.io.File; import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.io.PrintWriter; import java.util.Arrays; import java.util.concurrent.TimeUnit; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.DefaultParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.gui.statsplugin.HASCOModelStatisticsPlugin; import ai.libs.jaicore.basic.TimeOut; import ai.libs.jaicore.concurrent.GlobalTimer; import ai.libs.jaicore.graphvisualizer.plugin.graphview.GraphViewPlugin; import ai.libs.jaicore.graphvisualizer.plugin.nodeinfo.NodeInfoGUIPlugin; import ai.libs.jaicore.graphvisualizer.plugin.solutionperformanceplotter.SolutionPerformanceTimelinePlugin; import ai.libs.jaicore.graphvisualizer.window.AlgorithmVisualizationWindow; import ai.libs.jaicore.ml.core.evaluation.measure.multilabel.AutoMEKAGGPFitnessMeasureLoss; import ai.libs.jaicore.ml.core.evaluation.measure.multilabel.ExactMatchLoss; import ai.libs.jaicore.ml.core.evaluation.measure.multilabel.F1MacroAverageLLoss; import ai.libs.jaicore.ml.core.evaluation.measure.multilabel.HammingLoss; import ai.libs.jaicore.ml.core.evaluation.measure.multilabel.InstanceWiseF1AsLoss; import ai.libs.jaicore.ml.core.evaluation.measure.multilabel.JaccardLoss; import ai.libs.jaicore.ml.core.evaluation.measure.multilabel.RankLoss; import ai.libs.jaicore.ml.core.evaluation.measure.singlelabel.MeanSquaredErrorLoss; import ai.libs.jaicore.ml.core.evaluation.measure.singlelabel.PrecisionAsLoss; import ai.libs.jaicore.ml.core.evaluation.measure.singlelabel.RootMeanSquaredErrorLoss; import ai.libs.jaicore.ml.core.evaluation.measure.singlelabel.ZeroOneLoss; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNodeInfoGenerator; import ai.libs.jaicore.search.gui.plugins.rollouthistograms.SearchRolloutHistogramPlugin; import ai.libs.jaicore.search.model.travesaltree.JaicoreNodeInfoGenerator; import ai.libs.mlplan.core.AbstractMLPlanBuilder; import ai.libs.mlplan.core.AbstractMLPlanSingleLabelBuilder; import ai.libs.mlplan.core.MLPlan; import ai.libs.mlplan.core.MLPlanMekaBuilder; import ai.libs.mlplan.gui.outofsampleplots.OutOfSampleErrorPlotPlugin; import ai.libs.mlplan.multiclass.wekamlplan.weka.model.MLPipeline; import javafx.application.Platform; import javafx.embed.swing.JFXPanel; import meka.classifiers.multilabel.MultiLabelClassifier; import meka.core.MLUtils; import meka.core.Metrics; import meka.core.Result; import weka.classifiers.Classifier; import weka.classifiers.evaluation.Evaluation; import weka.core.Instances; import weka.core.SerializationHelper; /** * Enables command-line usage of ML-Plan. 
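 * <p>
 * A hypothetical invocation (assuming the CLI has been packaged as an executable jar named mlplan.jar; all file names
 * and values are illustrative):
 *
 * <pre>
 * java -jar mlplan.jar -t train.arff -T test.arff -tt 300 -em ERRORRATE -v
 * </pre>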
 * * @author Helena Graf * */ @SuppressWarnings("restriction") public class MLPlanCLI { // CLI variables private static Logger logger = LoggerFactory.getLogger("MLPlanCLI"); // MLPlan options private static String trainOption = "train"; private static String testOption = "test"; private static String totalTimeoutOption = "timeoutTotal"; private static String nodeEvaluationTimeoutOption = "timeoutNodeEval"; private static String solutionEvaluationTimeoutOption = "timeoutSolutionEval"; private static String algorithmConfigurationOption = "algorithmConfig"; private static String searchSpaceConfigurationOption = "searchSpaceConfig"; private static String evaluationMeasureOption = "evaluationMeasure"; private static String numCPUsOption = "numCPUS"; private static String randomSeedOption = "randomSeed"; private static String multiLabelOption = "multilabel"; private static String positiveClassIndex = "positiveClassIndex"; // MLPlan options standard values private static String totalTimeout = "150"; private static String nodeEvaluationTimeout = "60"; private static String solutionEvaluationTimeout = "60"; private static String numCPUS = "4"; private static String randomSeed = "0"; // Communication options private static String modelFileOption = "modelFile"; private static String resultsFileOption = "resultsFile"; private static String printModelOption = "printModel"; private static String visualizeOption = "visualize"; private static String helpOption = "help"; // Communication options standard values private static String modelFile = "model.txt"; private static String resultsFile = "results.txt"; private MLPlanCLI() { // Intentionally left blank } private static Options generateOptions() { // MLPLan options final Option train = Option.builder("t").required(false).hasArg().longOpt(trainOption).desc("location of the .arff training data file").build(); final Option test = Option.builder("T").required(false).longOpt(testOption).hasArg().desc("location of the .arff test data file").build(); final Option totalTimeout = Option.builder("tt").longOpt(totalTimeoutOption).required(false).hasArg().desc("timeout for the complete run of mlplan in seconds").build(); final Option nodeEvaluationTimeout = Option.builder("tne").longOpt(nodeEvaluationTimeoutOption).required(false).hasArg().desc("timeout for the evaluation of a single node in seconds").build(); final Option solutionEvaluation = Option.builder("tse").longOpt(solutionEvaluationTimeoutOption).required(false).hasArg().desc("timeout for the evaluation of a solution in seconds").build(); final Option algorithmConfiguration = Option.builder("ac").longOpt(algorithmConfigurationOption).required(false).hasArg().desc("configuration file for mlplan").build(); final Option searchSpaceConfiguration = Option.builder("sc").longOpt(searchSpaceConfigurationOption).required(false).hasArg().desc("search space configuration file, or alternatively: weka, weka-tiny, sklearn, sklearn-ul, meka") .build(); final Option evaluationMeasure = Option.builder("em").longOpt(evaluationMeasureOption).required(false).hasArg().desc( "measure for assessing solution quality, allowed values: \nsinglelabel: \nERRORRATE, MEAN_SQUARED_ERROR, PRECISION, ROOT_MEAN_SQUARED_ERROR \nmultilabel: \nAUTO_MEKA_GGP_FITNESS, AUTO_MEKA_GGP_FITNESS_LOSS, EXACT_MATCH_ACCURACY, EXACT_MATCH_LOSS, F1_MACRO_AVG_D, F1_MACRO_AVG_D_LOSS, F1_MACRO_AVG_L, F1_MACRO_AVG_L_LOSS, HAMMING_ACCURACY, HAMMING_LOSS, JACCARD_LOSS, JACCARD_SCORE, RANK_LOSS, RANK_SCORE") .build(); final Option positiveClass =
Option.builder("pci").longOpt(positiveClassIndex).required(false).hasArg(true).desc("Index of the class (in the list of classes) which is to be considered as the positive class").build(); final Option numCPUS = Option.builder("ncpus").longOpt(numCPUsOption).required(false).hasArg().desc("number of used CPUs, default: " + MLPlanCLI.numCPUS).build(); final Option randomSeed = Option.builder("rs").longOpt(randomSeedOption).required(false).hasArg().desc("randomization seed, default: " + MLPlanCLI.randomSeed).build(); final Option multiLabel = Option.builder("ml").longOpt(multiLabelOption).required(false).hasArg(false).desc("enable for multilabel settings").build(); // Communication options final Option modelFile = Option.builder("mf").longOpt(modelFileOption).required(false).hasArg() .desc("serialize model to the given output file, \"off\" if no model file shall be written; turn off for search spaces that contain non-serializable models").build(); final Option resultsFile = Option.builder("rf").longOpt(resultsFileOption).required(false).hasArg().desc("serialize model to the given output file, \"off\" if no results file shall be written").build(); final Option visualize = Option.builder("v").longOpt(visualizeOption).required(false).hasArg(false).desc("enable visualization").build(); final Option printModel = Option.builder("p").longOpt(printModelOption).required(false).hasArg(false).desc("whether a visual representation of the final model shall be added to the model file").build(); final Option help = Option.builder("h").longOpt(helpOption).required(false).hasArg(false).desc("supply help").build(); // Add options to Options final Options options = new Options(); options.addOption(train); options.addOption(test); options.addOption(totalTimeout); options.addOption(nodeEvaluationTimeout); options.addOption(solutionEvaluation); options.addOption(algorithmConfiguration); options.addOption(searchSpaceConfiguration); options.addOption(evaluationMeasure); options.addOption(numCPUS); options.addOption(randomSeed); options.addOption(multiLabel); options.addOption(resultsFile); options.addOption(modelFile); options.addOption(visualize); options.addOption(printModel); options.addOption(help); options.addOption(positiveClass); return options; } private static CommandLine generateCommandLine(final Options options, final String[] commandLineArguments) { final CommandLineParser cmdLineParser = new DefaultParser(); CommandLine commandLine = null; try { commandLine = cmdLineParser.parse(options, commandLineArguments); } catch (ParseException parseException) { logger.error("ERROR: Unable to parse command-line arguments {} due to {}", Arrays.toString(commandLineArguments), parseException); } return commandLine; } private static void printUsage(final Options options) { final HelpFormatter formatter = new HelpFormatter(); final String syntax = "mlplan"; final PrintWriter pw = new PrintWriter(System.out); formatter.printUsage(pw, 400, syntax, options); pw.println("use -h or --help for help"); pw.flush(); } private static void printHelp(final Options options) { final HelpFormatter formatter = new HelpFormatter(); final String syntax = "mlplan [options]"; formatter.printHelp(syntax, options); } private static void runMLPlan(final CommandLine commandLine) throws Exception { File trainDataFile = new File(commandLine.getOptionValue(trainOption)); logger.info("Load train data file: {}", trainDataFile.getAbsolutePath()); Instances trainData = new Instances(new FileReader(trainDataFile)); if 
(commandLine.hasOption(multiLabelOption)) { MLUtils.prepareData(trainData); } else { trainData.setClassIndex(trainData.numAttributes() - 1); } AbstractMLPlanBuilder builder; if (commandLine.hasOption(searchSpaceConfigurationOption)) { switch (commandLine.getOptionValue(searchSpaceConfigurationOption)) { case "weka": builder = AbstractMLPlanBuilder.forWeka(); break; case "weka-tiny": builder = AbstractMLPlanBuilder.forWeka().withTinyWekaSearchSpace(); break; case "sklearn": builder = AbstractMLPlanBuilder.forSKLearn(); break; case "sklearn-ul": builder = AbstractMLPlanBuilder.forSKLearn().withUnlimitedLengthPipelineSearchSpace(); break; case "meka": builder = AbstractMLPlanBuilder.forMeka(); break; default: throw new IllegalArgumentException("Could not identify search space configuration"); } } else { builder = AbstractMLPlanBuilder.forWeka(); } if (commandLine.hasOption(multiLabelOption)) { MLPlanMekaBuilder mekaBuilder = (MLPlanMekaBuilder) builder; switch (commandLine.getOptionValue(evaluationMeasureOption, "AUTO_MEKA_GGP_FITNESS")) { case "AUTO_MEKA_GGP_FITNESS": mekaBuilder.withPerformanceMeasure(new AutoMEKAGGPFitnessMeasureLoss()); break; case "EXACT_MATCH": mekaBuilder.withPerformanceMeasure(new ExactMatchLoss()); break; case "F1_MACRO_AVG_D": mekaBuilder.withPerformanceMeasure(new InstanceWiseF1AsLoss()); break; case "F1_MACRO_AVG_L": mekaBuilder.withPerformanceMeasure(new F1MacroAverageLLoss()); break; case "HAMMING": mekaBuilder.withPerformanceMeasure(new HammingLoss()); break; case "JACCARD": mekaBuilder.withPerformanceMeasure(new JaccardLoss()); break; case "RANK_LOSS": mekaBuilder.withPerformanceMeasure(new RankLoss()); break; default: throw new IllegalArgumentException("Invalid multilabel measure " + commandLine.getOptionValue(evaluationMeasureOption)); } } else { AbstractMLPlanSingleLabelBuilder slcBuilder = (AbstractMLPlanSingleLabelBuilder) builder; switch (commandLine.getOptionValue(evaluationMeasureOption, "ERRORRATE")) { case "ERRORRATE": slcBuilder.withPerformanceMeasure(new ZeroOneLoss()); break; case "MEAN_SQUARED_ERROR": slcBuilder.withPerformanceMeasure(new MeanSquaredErrorLoss()); break; case "PRECISION": int classIndex = Integer.parseInt(commandLine.getOptionValue(positiveClassIndex, "0")); slcBuilder.withPerformanceMeasure(new PrecisionAsLoss(classIndex)); break; case "ROOT_MEAN_SQUARED_ERROR": slcBuilder.withPerformanceMeasure(new RootMeanSquaredErrorLoss()); break; default: throw new IllegalArgumentException("Invalid singlelabel measure " + commandLine.getOptionValue(evaluationMeasureOption)); } } if (commandLine.hasOption(algorithmConfigurationOption)) { File algoConfigFile = new File(commandLine.getOptionValue(algorithmConfigurationOption)); builder.withAlgorithmConfigFile(algoConfigFile); } builder.withNodeEvaluationTimeOut(new TimeOut(Integer.parseInt(commandLine.getOptionValue(nodeEvaluationTimeoutOption, nodeEvaluationTimeout)), TimeUnit.SECONDS)); builder.withCandidateEvaluationTimeOut(new TimeOut(Integer.parseInt(commandLine.getOptionValue(solutionEvaluationTimeoutOption, solutionEvaluationTimeout)), TimeUnit.SECONDS)); builder.withTimeOut(new TimeOut(Integer.parseInt(commandLine.getOptionValue(totalTimeoutOption, totalTimeout)), TimeUnit.SECONDS)); builder.withNumCpus(Integer.parseInt(commandLine.getOptionValue(numCPUsOption, numCPUS))); MLPlan mlplan = builder.build(trainData); mlplan.setLoggerName("mlplan"); mlplan.setRandomSeed(Integer.parseInt(commandLine.getOptionValue(randomSeedOption, randomSeed))); Instances testData = null; if (commandLine.hasOption(testOption)) { File
testDataFile = new File(commandLine.getOptionValue(testOption)); logger.info("Load test data file: {}", testDataFile.getAbsolutePath()); testData = new Instances(new FileReader(testDataFile)); if (commandLine.hasOption(multiLabelOption)) { MLUtils.prepareData(testData); } else { testData.setClassIndex(testData.numAttributes() - 1); } } if (commandLine.hasOption(visualizeOption)) { new JFXPanel(); AlgorithmVisualizationWindow window; if (commandLine.hasOption(testOption)) { window = new AlgorithmVisualizationWindow(mlplan, new GraphViewPlugin(), new NodeInfoGUIPlugin<>(new JaicoreNodeInfoGenerator<>(new TFDNodeInfoGenerator())), new SearchRolloutHistogramPlugin<>(), new SolutionPerformanceTimelinePlugin(), new HASCOModelStatisticsPlugin(), new OutOfSampleErrorPlotPlugin(trainData, testData)); } else { window = new AlgorithmVisualizationWindow(mlplan, new GraphViewPlugin(), new NodeInfoGUIPlugin<>(new JaicoreNodeInfoGenerator<>(new TFDNodeInfoGenerator())), new SearchRolloutHistogramPlugin<>(), new SolutionPerformanceTimelinePlugin(), new HASCOModelStatisticsPlugin()); } Platform.runLater(window); } logger.info("Build mlplan classifier"); Classifier optimizedClassifier = mlplan.call(); logger.info("Open timeout tasks: {}", GlobalTimer.getInstance().getActiveTasks()); if (!"off".equals(commandLine.getOptionValue(modelFileOption))) { serializeModel(commandLine, mlplan.getSelectedClassifier()); } if (commandLine.hasOption(testOption)) { double error = -1; if (commandLine.hasOption(multiLabelOption)) { logger.info("Assess test performance..."); Result result = meka.classifiers.multilabel.Evaluation.evaluateModel((MultiLabelClassifier) mlplan.getSelectedClassifier(), trainData, testData); switch (commandLine.getOptionValue(evaluationMeasureOption, "AUTO_MEKA_GGP_FITNESS_LOSS")) { case "AUTO_MEKA_GGP_FITNESS": error = (Metrics.P_ExactMatch(result.allTrueValues(), result.allPredictions(0.5)) + (1 - Metrics.L_Hamming(result.allTrueValues(), result.allPredictions(0.5))) + Metrics.P_FmacroAvgL(result.allTrueValues(), result.allPredictions(0.5)) + (1 - Metrics.L_RankLoss(result.allTrueValues(), result.allPredictions()))) / 4.0; break; case "AUTO_MEKA_GGP_FITNESS_LOSS": error = 1 - (Metrics.P_ExactMatch(result.allTrueValues(), result.allPredictions(0.5)) + (1 - Metrics.L_Hamming(result.allTrueValues(), result.allPredictions(0.5))) + Metrics.P_FmacroAvgL(result.allTrueValues(), result.allPredictions(0.5)) + (1 - Metrics.L_RankLoss(result.allTrueValues(), result.allPredictions()))) / 4.0; break; case "EXACT_MATCH_ACCURACY": error = Metrics.P_ExactMatch(result.allTrueValues(), result.allPredictions(0.5)); break; case "EXACT_MATCH_LOSS": error = 1 - Metrics.P_ExactMatch(result.allTrueValues(), result.allPredictions(0.5)); break; case "F1_MACRO_AVG_D": error = Metrics.P_FmacroAvgD(result.allTrueValues(), result.allPredictions(0.5)); break; case "F1_MACRO_AVG_D_LOSS": error = 1 - Metrics.P_FmacroAvgD(result.allTrueValues(), result.allPredictions(0.5)); break; case "F1_MACRO_AVG_L": error = Metrics.P_FmacroAvgL(result.allTrueValues(), result.allPredictions(0.5)); break; case "F1_MACRO_AVG_L_LOSS": error = 1 - Metrics.P_FmacroAvgL(result.allTrueValues(), result.allPredictions(0.5)); break; case "HAMMING_ACCURACY": error = Metrics.P_Hamming(result.allTrueValues(), result.allPredictions(0.5)); break; case "HAMMING_LOSS": error = Metrics.L_Hamming(result.allTrueValues(), result.allPredictions(0.5)); break; case "JACCARD_LOSS": error = Metrics.L_JaccardDist(result.allTrueValues(), result.allPredictions(0.5));
break; case "JACCARD_SCORE": error = Metrics.P_JaccardIndex(result.allTrueValues(), result.allPredictions(0.5)); break; case "RANK_LOSS": error = Metrics.L_RankLoss(result.allTrueValues(), result.allPredictions()); break; case "RANK_SCORE": error = 1 - Metrics.L_RankLoss(result.allTrueValues(), result.allPredictions()); break; default: throw new IllegalArgumentException("Invalid multilabel measure " + commandLine.getOptionValue(evaluationMeasureOption)); } if (!"off".equals(commandLine.getOptionValue(resultsFileOption))) { writeMultiLabelEvaluationFile(result, mlplan.getInternalValidationErrorOfSelectedClassifier(), commandLine, mlplan.getSelectedClassifier()); } } else { Evaluation eval = new Evaluation(trainData); logger.info("Assess test performance..."); eval.evaluateModel(optimizedClassifier, testData); switch (commandLine.getOptionValue(evaluationMeasureOption, "ERRORRATE")) { case "ERRORRATE": error = eval.errorRate(); break; case "MEAN_SQUARED_ERROR": error = Math.pow(eval.rootMeanSquaredError(), 2); break; case "ROOT_MEAN_SQUARED_ERROR": error = eval.rootMeanSquaredError(); break; case "PRECISION": error = 1 - eval.precision(Integer.parseInt(commandLine.getOptionValue(positiveClassIndex, "0"))); break; default: throw new IllegalArgumentException("Invalid singlelabel measure " + commandLine.getOptionValue(evaluationMeasureOption)); } if (!"off".equals(commandLine.getOptionValue(resultsFileOption))) { writeSingleLabelEvaluationFile(eval, mlplan.getInternalValidationErrorOfSelectedClassifier(), commandLine, mlplan.getSelectedClassifier()); } } logger.info("Test error was {}. Internally estimated error for this model was {}", error, mlplan.getInternalValidationErrorOfSelectedClassifier()); } logger.info("Experiment done."); } private static void serializeModel(final CommandLine commandLine, final Classifier bestClassifier) throws Exception { SerializationHelper.write(commandLine.getOptionValue(modelFileOption, modelFile), bestClassifier); } private static void writeMultiLabelEvaluationFile(final Result result, final double internalError, final CommandLine commandLine, final Classifier bestModel) { StringBuilder builder = new StringBuilder(); builder.append("Internally believed error: "); builder.append(internalError); builder.append(System.lineSeparator()); builder.append(System.lineSeparator()); builder.append("Best Model: "); builder.append(System.lineSeparator()); builder.append(bestModel.toString()); builder.append(System.lineSeparator()); builder.append(System.lineSeparator()); builder.append(result.toString()); builder.append(System.lineSeparator()); builder.append(System.lineSeparator()); if (commandLine.hasOption(printModelOption)) { builder.append("Classifier Representation: "); builder.append(System.lineSeparator()); builder.append(System.lineSeparator()); if (bestModel instanceof MLPipeline) { builder.append(((MLPipeline) bestModel).getBaseClassifier().toString()); } else { builder.append(bestModel.toString()); } } writeFile(commandLine.getOptionValue(resultsFileOption, resultsFile), builder.toString()); } private static void writeSingleLabelEvaluationFile(final Evaluation eval, final double internalError, final CommandLine commandLine, final Classifier bestModel) throws Exception { StringBuilder builder = new StringBuilder(); builder.append("Internally believed error: "); builder.append(internalError); builder.append(System.lineSeparator()); builder.append(System.lineSeparator()); builder.append("Best Model: "); builder.append(System.lineSeparator()); 
builder.append(bestModel.toString()); builder.append(System.lineSeparator()); builder.append(System.lineSeparator()); builder.append(eval.toSummaryString("Summary", true)); builder.append(System.lineSeparator()); builder.append(eval.toClassDetailsString("Class Details")); builder.append(System.lineSeparator()); builder.append("Evaluation Overview"); builder.append(System.lineSeparator()); builder.append(eval.toCumulativeMarginDistributionString()); builder.append(System.lineSeparator()); builder.append(eval.toMatrixString("Matrix")); if (commandLine.hasOption(printModelOption)) { builder.append("Classifier Representation: "); builder.append(System.lineSeparator()); builder.append(System.lineSeparator()); if (bestModel instanceof MLPipeline) { builder.append(((MLPipeline) bestModel).getBaseClassifier().toString()); } else { builder.append(bestModel.toString()); } } writeFile(commandLine.getOptionValue(resultsFileOption, resultsFile), builder.toString()); } private static void writeFile(final String fileName, final String value) { try (BufferedWriter bw = new BufferedWriter(new FileWriter(new File(fileName)))) { bw.write(value); } catch (IOException e) { logger.error("Could not write value to file {}: {}", fileName, value, e); } } public static void main(final String[] args) throws Exception { final Options options = generateOptions(); if (args.length == 0) { printUsage(options); } else { CommandLine commandLine = generateCommandLine(options, args); if (commandLine != null) { if (commandLine.hasOption(helpOption)) { printHelp(options); } else { runMLPlan(commandLine); } } } } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/core/AbstractMLPlanBuilder.java
package ai.libs.mlplan.core; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.TimeUnit; import java.util.function.Predicate; import org.aeonbits.owner.ConfigFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.core.HASCOFactory; import ai.libs.hasco.model.Component; import ai.libs.hasco.model.Parameter; import ai.libs.hasco.model.ParameterRefinementConfiguration; import ai.libs.hasco.serialization.ComponentLoader; import ai.libs.hasco.variants.forwarddecomposition.HASCOViaFDAndBestFirstFactory; import ai.libs.hasco.variants.forwarddecomposition.HASCOViaFDFactory; import ai.libs.jaicore.basic.FileUtil; import ai.libs.jaicore.basic.ILoggingCustomizable; import ai.libs.jaicore.basic.ResourceFile; import ai.libs.jaicore.basic.ResourceUtil; import ai.libs.jaicore.basic.TimeOut; import ai.libs.jaicore.basic.algorithm.reduction.AlgorithmicProblemReduction; import ai.libs.jaicore.ml.evaluation.evaluators.weka.IClassifierEvaluator; import ai.libs.jaicore.ml.evaluation.evaluators.weka.LearningCurveExtrapolationEvaluator; import ai.libs.jaicore.ml.evaluation.evaluators.weka.factory.ClassifierEvaluatorConstructionFailedException; import ai.libs.jaicore.ml.evaluation.evaluators.weka.factory.IClassifierEvaluatorFactory; import ai.libs.jaicore.ml.weka.dataset.splitter.IDatasetSplitter; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNode; import ai.libs.jaicore.search.algorithms.standard.bestfirst.StandardBestFirstFactory; import ai.libs.jaicore.search.algorithms.standard.bestfirst.nodeevaluation.AlternativeNodeEvaluator; import ai.libs.jaicore.search.algorithms.standard.bestfirst.nodeevaluation.INodeEvaluator; import ai.libs.jaicore.search.core.interfaces.IOptimalPathInORGraphSearchFactory; import ai.libs.jaicore.search.probleminputs.GraphSearchWithPathEvaluationsInput; import ai.libs.jaicore.search.problemtransformers.GraphSearchProblemInputToGraphSearchWithSubpathEvaluationInputTransformerViaRDFS; import ai.libs.mlpipeline_evaluation.PerformanceDBAdapter; import ai.libs.mlplan.multiclass.MLPlanClassifierConfig; import ai.libs.mlplan.multiclass.wekamlplan.IClassifierFactory; import ai.libs.mlplan.multiclass.wekamlplan.weka.PreferenceBasedNodeEvaluator; import weka.core.Instances; /** * The MLPlanBuilder helps to easily configure and initialize ML-Plan with specific parameter settings. * For convenient use, the MLPlanBuilder also offers methods for initializing ML-Plan with default * configuration to use ML-Plan for single label classification in combination with WEKA or scikit-learn * or for multi-label classification in combination with MEKA and consequently with WEKA (for baselearners * of multi-label reduction strategies). 
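 * <p>
 * A minimal usage sketch (data loading is only indicated, and all values are illustrative):
 *
 * <pre>{@code
 * Instances data = ...; // load an ARFF file and set the class index
 * MLPlan mlplan = AbstractMLPlanBuilder.forWeka().withTimeOut(new TimeOut(60, TimeUnit.SECONDS)).withNumCpus(4).build(data);
 * Classifier classifier = mlplan.call();
 * }</pre>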
* * @author mwever, fmohr */ public abstract class AbstractMLPlanBuilder implements IMLPlanBuilder, ILoggingCustomizable { /* Logging */ private Logger logger = LoggerFactory.getLogger(AbstractMLPlanBuilder.class); private String loggerName = AbstractMLPlanBuilder.class.getName(); private static final String RES_ALGORITHM_CONFIG = "mlplan/mlplan.properties"; private static final String FS_ALGORITHM_CONFIG = "conf/mlplan.properties"; /* Default configuration values */ private static final File DEF_ALGORITHM_CONFIG = FileUtil.getExistingFileWithHighestPriority(RES_ALGORITHM_CONFIG, FS_ALGORITHM_CONFIG); /* Builder (self) status variables */ private boolean factoryPreparedWithData = false; /* Data for initializing ML-Plan */ private MLPlanClassifierConfig algorithmConfig; @SuppressWarnings("rawtypes") private HASCOViaFDFactory hascoFactory = new HASCOViaFDFactory<GraphSearchWithPathEvaluationsInput<TFDNode, String, Double>, Double>(); private Predicate<TFDNode> priorizingPredicate = null; private File searchSpaceFile; private String requestedHASCOInterface; private IClassifierFactory classifierFactory; private INodeEvaluator<TFDNode, Double> preferredNodeEvaluator = null; private PipelineValidityCheckingNodeEvaluator pipelineValidityCheckingNodeEvaluator; /* The splitter is used to create the split for separating search and selection data */ private IDatasetSplitter searchSelectionDatasetSplitter; private IClassifierEvaluatorFactory factoryForPipelineEvaluationInSearchPhase = null; private IClassifierEvaluatorFactory factoryForPipelineEvaluationInSelectionPhase = null; private Collection<Component> components = new LinkedList<>(); private String performanceMeasureName; /* Use caching */ private boolean useCache; private PerformanceDBAdapter dbAdapter = null; /* The problem input for ML-Plan. */ private Instances dataset; protected AbstractMLPlanBuilder() { super(); this.withAlgorithmConfigFile(DEF_ALGORITHM_CONFIG); this.withRandomCompletionBasedBestFirstSearch(); } public static MLPlanSKLearnBuilder forSKLearn() throws IOException { return new MLPlanSKLearnBuilder(); } public static MLPlanWekaBuilder forWeka() throws IOException { return new MLPlanWekaBuilder(); } public static MLPlanMekaBuilder forMeka() throws IOException { return new MLPlanMekaBuilder(); } /** * This ADDs a new preferred node evaluator; requires that the search will be a best-first search. * * It is possible to specify several preferred node evaluators, which will be ordered by the order in which they are specified. The latest given evaluator is the most preferred one. * * @param preferredNodeEvaluator * @return */ public AbstractMLPlanBuilder withPreferredNodeEvaluator(final INodeEvaluator<TFDNode, Double> preferredNodeEvaluator) { if (this.factoryPreparedWithData) { throw new IllegalStateException("The method prepareNodeEvaluatorInFactoryWithData has already been called. 
No changes to the preferred node evaluator possible anymore"); } /* first update the preferred node evaluator */ if (this.preferredNodeEvaluator == null) { this.preferredNodeEvaluator = preferredNodeEvaluator; } else { this.preferredNodeEvaluator = new AlternativeNodeEvaluator<>(preferredNodeEvaluator, this.preferredNodeEvaluator); } this.update(); return this; } @SuppressWarnings("unchecked") public AbstractMLPlanBuilder withSearchFactory(@SuppressWarnings("rawtypes") final IOptimalPathInORGraphSearchFactory searchFactory, @SuppressWarnings("rawtypes") final AlgorithmicProblemReduction transformer) { this.hascoFactory.setSearchFactory(searchFactory); this.hascoFactory.setSearchProblemTransformer(transformer); return this; } @SuppressWarnings("unchecked") public AbstractMLPlanBuilder withRandomCompletionBasedBestFirstSearch() { this.hascoFactory.setSearchFactory(new StandardBestFirstFactory<TFDNode, String, Double>()); this.update(); return this; } public Collection<Component> getComponents() throws IOException { return new ComponentLoader(this.searchSpaceFile).getComponents(); } public Map<Component, Map<Parameter, ParameterRefinementConfiguration>> getComponentParameterConfigurations() throws IOException { return new ComponentLoader(this.searchSpaceFile).getParamConfigs(); } /***********************************************************************************************************************************************************************************************************************/ /***********************************************************************************************************************************************************************************************************************/ /***********************************************************************************************************************************************************************************************************************/ /***********************************************************************************************************************************************************************************************************************/ /** * Loads the MLPlanClassifierConfig with default values and replaces all properties according to the properties defined in the given config file. * * @param algorithmConfigFile The file specifying the property values to replace the default configuration. * @return The MLPlanBuilder object. * @throws IOException An IOException is thrown if there are issues reading the config file. */ public AbstractMLPlanBuilder withAlgorithmConfigFile(final File algorithmConfigFile) { return this.withAlgorithmConfig((MLPlanClassifierConfig) ConfigFactory.create(MLPlanClassifierConfig.class).loadPropertiesFromFile(algorithmConfigFile)); } /** * Loads the MLPlanClassifierConfig with default values and replaces all properties according to the properties defined in the given config file. * * @param config The algorithm configuration. * @return The MLPlanBuilder object. * @throws IOException An IOException is thrown if there are issues reading the config file. */ public AbstractMLPlanBuilder withAlgorithmConfig(final MLPlanClassifierConfig config) { this.algorithmConfig = config; this.hascoFactory.withAlgorithmConfig(this.algorithmConfig); this.update(); return this; } /** * Creates a preferred node evaluator that can be used to prefer components over other components. * * @param preferredComponentsFile The file containing a priority list of component names. 
* @param preferableComponentMethodPrefix The prefix of a method's name for refining a complex task to preferable components. * @return The builder object. * @throws IOException Thrown if a problem occurs while trying to read the file containing the priority list. */ public AbstractMLPlanBuilder withPreferredComponentsFile(final File preferredComponentsFile, final String preferableComponentMethodPrefix) throws IOException { this.getAlgorithmConfig().setProperty(MLPlanClassifierConfig.PREFERRED_COMPONENTS, preferredComponentsFile.getAbsolutePath()); List<String> ordering; if (preferredComponentsFile instanceof ResourceFile) { ordering = ResourceUtil.readResourceFileToStringList((ResourceFile) preferredComponentsFile); } else if (!preferredComponentsFile.exists()) { this.logger.warn("The configured file for preferred components \"{}\" does not exist. Not using any particular ordering.", preferredComponentsFile.getAbsolutePath()); ordering = new ArrayList<>(); } else { ordering = FileUtil.readFileAsList(preferredComponentsFile); } return this.withPreferredNodeEvaluator(new PreferenceBasedNodeEvaluator(this.components, ordering, preferableComponentMethodPrefix)); } /** * Sets the name of the performance measure that is used. * * @param name The name of the performance measure. */ public void setPerformanceMeasureName(final String name) { this.performanceMeasureName = name; } /** * Set the data for which ML-Plan is supposed to find the best pipeline. * * @param dataset The dataset for which ML-Plan is to be run. * @return The builder object. */ public AbstractMLPlanBuilder withDataset(final Instances dataset) { this.dataset = dataset; return this; } /** * Specify the search space in which ML-Plan is required to work. * * @param searchSpaceConfig The file of the search space configuration. * @return The builder object. * @throws IOException Thrown if the given file does not exist. */ public AbstractMLPlanBuilder withSearchSpaceConfigFile(final File searchSpaceConfig) throws IOException { FileUtil.requireFileExists(searchSpaceConfig); this.searchSpaceFile = searchSpaceConfig; this.components.clear(); this.components.addAll(new ComponentLoader(this.searchSpaceFile).getComponents()); return this; } /** * Set the classifier factory that translates <code>CompositionInstance</code> objects to classifiers that can be evaluated. * * @param classifierFactory The classifier factory to be used to translate CompositionInstance objects to classifiers. * @return The builder object. */ public AbstractMLPlanBuilder withClassifierFactory(final IClassifierFactory classifierFactory) { this.classifierFactory = classifierFactory; return this; } /** * Set the dataset splitter that is used for generating the holdout data portion that is put aside during search. * * @param datasetSplitter The dataset splitter to be used. * @return The builder object. */ public AbstractMLPlanBuilder withDatasetSplitterForSearchSelectionSplit(final IDatasetSplitter datasetSplitter) { this.searchSelectionDatasetSplitter = datasetSplitter; return this; } public AbstractMLPlanBuilder withRequestedInterface(final String requestedInterface) { this.requestedHASCOInterface = requestedInterface; return this; } /** * @param timeout The timeout for ML-Plan to search for the best classifier. * @return The builder object.
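 *
 * For example (the values are illustrative):
 *
 * <pre>{@code
 * builder.withTimeOut(new TimeOut(300, TimeUnit.SECONDS)).withNodeEvaluationTimeOut(new TimeOut(60, TimeUnit.SECONDS)).withCandidateEvaluationTimeOut(new TimeOut(30, TimeUnit.SECONDS));
 * }</pre>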
*/ public AbstractMLPlanBuilder withTimeOut(final TimeOut timeout) { this.algorithmConfig.setProperty(MLPlanClassifierConfig.K_TIMEOUT, timeout.milliseconds() + ""); this.update(); return this; } /** * @return The timeout for ML-Plan to search for the best classifier. */ public TimeOut getTimeOut() { return new TimeOut(this.algorithmConfig.timeout(), TimeUnit.MILLISECONDS); } /** * @param timeout The timeout for the evaluation of a single node. * @return The builder object. */ public AbstractMLPlanBuilder withNodeEvaluationTimeOut(final TimeOut timeout) { this.algorithmConfig.setProperty(MLPlanClassifierConfig.K_RANDOM_COMPLETIONS_TIMEOUT_NODE, timeout.milliseconds() + ""); this.update(); return this; } /** * @return The timeout for the evaluation of a single node. */ public TimeOut getNodeEvaluationTimeOut() { return new TimeOut(this.algorithmConfig.timeoutForNodeEvaluation(), TimeUnit.MILLISECONDS); } /** * @param timeout The timeout for a single candidate evaluation. * @return The builder object. */ public AbstractMLPlanBuilder withCandidateEvaluationTimeOut(final TimeOut timeout) { this.algorithmConfig.setProperty(MLPlanClassifierConfig.K_RANDOM_COMPLETIONS_TIMEOUT_PATH, timeout.milliseconds() + ""); this.update(); return this; } /** * @return The timeout for a single candidate evaluation. */ public TimeOut getCandidateEvaluationTimeOut() { return new TimeOut(this.algorithmConfig.timeoutForCandidateEvaluation(), TimeUnit.MILLISECONDS); } @Override public PipelineEvaluator getClassifierEvaluationInSearchPhase(final Instances data, final int seed, final int fullDatasetSize) throws ClassifierEvaluatorConstructionFailedException { Objects.requireNonNull(this.factoryForPipelineEvaluationInSearchPhase, "No factory for pipeline evaluation in search phase has been set!"); IClassifierEvaluator evaluator = this.factoryForPipelineEvaluationInSearchPhase.getIClassifierEvaluator(data, seed); if (evaluator instanceof LearningCurveExtrapolationEvaluator) { ((LearningCurveExtrapolationEvaluator) evaluator).setFullDatasetSize(fullDatasetSize); } return new PipelineEvaluator(this.getClassifierFactory(), evaluator, this.getAlgorithmConfig().timeoutForCandidateEvaluation()); } @Override public PipelineEvaluator getClassifierEvaluationInSelectionPhase(final Instances data, final int seed) throws ClassifierEvaluatorConstructionFailedException { if (this.factoryForPipelineEvaluationInSelectionPhase == null) { throw new IllegalStateException("No factory for pipeline evaluation in selection phase has been set!"); } return new PipelineEvaluator(this.getClassifierFactory(), this.factoryForPipelineEvaluationInSelectionPhase.getIClassifierEvaluator(data, seed), Integer.MAX_VALUE); } /** * Sets the evaluator factory for the search phase. * * @param evaluatorFactory The evaluator factory for the search phase. */ public void withSearchPhaseEvaluatorFactory(final IClassifierEvaluatorFactory evaluatorFactory) { this.factoryForPipelineEvaluationInSearchPhase = evaluatorFactory; } /** * @return The factory for the classifier evaluator of the search phase. */ protected IClassifierEvaluatorFactory getSearchEvaluatorFactory() { return this.factoryForPipelineEvaluationInSearchPhase; } /** * Sets the evaluator factory for the selection phase. * * @param evaluatorFactory The evaluator factory for the selection phase. * @return The builder object.
*/ public AbstractMLPlanBuilder withSelectionPhaseEvaluatorFactory(final IClassifierEvaluatorFactory evaluatorFactory) { this.factoryForPipelineEvaluationInSelectionPhase = evaluatorFactory; return this; } /** * Sets the number of cpus that may be used by ML-Plan. * * @param numCpus The number of cpus to use. * @return The builder object. */ public AbstractMLPlanBuilder withNumCpus(final int numCpus) { this.algorithmConfig.setProperty(MLPlanClassifierConfig.K_CPUS, numCpus + ""); this.update(); return this; } /** * @return The factory for the classifier evaluator of the selection phase. */ protected IClassifierEvaluatorFactory getSelectionEvaluatorFactory() { return this.factoryForPipelineEvaluationInSelectionPhase; } @Override public String getPerformanceMeasureName() { return this.performanceMeasureName; } @Override @SuppressWarnings("rawtypes") public HASCOFactory getHASCOFactory() { return this.hascoFactory; } @Override public IClassifierFactory getClassifierFactory() { return this.classifierFactory; } @Override public String getLoggerName() { return this.loggerName; } @Override public void setLoggerName(final String name) { this.logger = LoggerFactory.getLogger(name); this.loggerName = name; } @Override public String getRequestedInterface() { return this.requestedHASCOInterface; } @Override public IDatasetSplitter getSearchSelectionDatasetSplitter() { return this.searchSelectionDatasetSplitter; } @Override public File getSearchSpaceConfigFile() { return this.searchSpaceFile; } @Override public MLPlanClassifierConfig getAlgorithmConfig() { return this.algorithmConfig; } @Override public boolean getUseCache() { return this.useCache; } @Override public PerformanceDBAdapter getDBAdapter() { return this.dbAdapter; } @Override public void prepareNodeEvaluatorInFactoryWithData(final Instances data) { if (!(this.hascoFactory instanceof HASCOViaFDAndBestFirstFactory)) { return; } if (this.factoryPreparedWithData) { throw new IllegalStateException("Factory has already been prepared with data. This can only be done once!"); } this.factoryPreparedWithData = true; /* nothing to do if there are no preferred node evaluators */ if (this.pipelineValidityCheckingNodeEvaluator == null && this.preferredNodeEvaluator == null) { return; } /* now determine the real node evaluator to be used. 
A semantic node evaluator has highest priority */ INodeEvaluator<TFDNode, Double> actualNodeEvaluator; if (this.pipelineValidityCheckingNodeEvaluator != null) { this.pipelineValidityCheckingNodeEvaluator.setComponents(this.components); this.pipelineValidityCheckingNodeEvaluator.setData(data); if (this.preferredNodeEvaluator != null) { actualNodeEvaluator = new AlternativeNodeEvaluator<>(this.pipelineValidityCheckingNodeEvaluator, this.preferredNodeEvaluator); } else { actualNodeEvaluator = this.pipelineValidityCheckingNodeEvaluator; } } else { actualNodeEvaluator = this.preferredNodeEvaluator; } /* update the preferred node evaluator in the HascoFactory */ this.preferredNodeEvaluator = actualNodeEvaluator; this.update(); } @SuppressWarnings("unchecked") private void update() { this.hascoFactory.setSearchProblemTransformer(new GraphSearchProblemInputToGraphSearchWithSubpathEvaluationInputTransformerViaRDFS<TFDNode, String, Double>(this.preferredNodeEvaluator, this.priorizingPredicate, this.algorithmConfig.randomSeed(), this.algorithmConfig.numberOfRandomCompletions(), this.algorithmConfig.timeoutForCandidateEvaluation(), this.algorithmConfig.timeoutForNodeEvaluation())); this.hascoFactory.withAlgorithmConfig(this.getAlgorithmConfig()); } /** * Builds an ML-Plan object for the given dataset as input. * * @param dataset The dataset for which an ML-Plan object is to be built. * @return The ML-Plan object configured with this builder. */ public MLPlan build(final Instances dataset) { this.dataset = dataset; return this.build(); } /** * Builds an ML-Plan object with the dataset provided earlier to this builder. * * @return The ML-Plan object configured with this builder. */ public MLPlan build() { Objects.requireNonNull(this.dataset, "A dataset needs to be provided as input to ML-Plan"); MLPlan mlplan = new MLPlan(this, this.dataset); mlplan.setTimeout(this.getTimeOut()); return mlplan; } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/core/AbstractMLPlanSingleLabelBuilder.java
package ai.libs.mlplan.core; import ai.libs.jaicore.ml.core.evaluation.measure.IMeasure; import ai.libs.jaicore.ml.core.evaluation.measure.singlelabel.ZeroOneLoss; import ai.libs.jaicore.ml.evaluation.evaluators.weka.factory.MonteCarloCrossValidationEvaluatorFactory; import ai.libs.jaicore.ml.evaluation.evaluators.weka.splitevaluation.SimpleSLCSplitBasedClassifierEvaluator; import ai.libs.jaicore.ml.weka.dataset.splitter.IDatasetSplitter; import ai.libs.jaicore.ml.weka.dataset.splitter.MulticlassClassStratifiedSplitter; public abstract class AbstractMLPlanSingleLabelBuilder extends AbstractMLPlanBuilder { /* Default configuration values. */ protected static final int SEARCH_NUM_MC_ITERATIONS = 5; protected static final double SEARCH_TRAIN_FOLD_SIZE = 0.7; protected static final int SELECTION_NUM_MC_ITERATIONS = 5; protected static final double SELECTION_TRAIN_FOLD_SIZE = 0.7; protected static final IMeasure<Double, Double> LOSS_FUNCTION = new ZeroOneLoss(); protected AbstractMLPlanSingleLabelBuilder() { super(); } /** * Configure ML-Plan to use MCCV for the given number of iterations, train fold size and loss function in the search phase. * @param numIterations The number of iterations of the MCCV. * @param trainFoldSize The portion of the data that is to be used as training data. * @param lossFunction The loss function to evaluate the performance of the classifier. * @return The builder object. */ public AbstractMLPlanSingleLabelBuilder withMonteCarloCrossValidationInSearchPhase(final int numIterations, final double trainFoldSize, final IMeasure<Double, Double> lossFunction) { if (!(this.getSearchEvaluatorFactory() instanceof MonteCarloCrossValidationEvaluatorFactory)) { this.withSearchPhaseEvaluatorFactory(new MonteCarloCrossValidationEvaluatorFactory().withDatasetSplitter(new MulticlassClassStratifiedSplitter())); } ((MonteCarloCrossValidationEvaluatorFactory) this.getSearchEvaluatorFactory()).withNumMCIterations(numIterations).withTrainFoldSize(trainFoldSize).withSplitBasedEvaluator(new SimpleSLCSplitBasedClassifierEvaluator(lossFunction)); return this; } /** * Configure ML-Plan to use MCCV for the given number of iterations, train fold size and loss function in the selection phase. * @param numIterations The number of iterations of the MCCV. * @param trainFoldSize The portion of the data that is to be used as training data. * @param lossFunction The loss function to evaluate the performance of the classifier. * @return The builder object. */ public AbstractMLPlanSingleLabelBuilder withMonteCarloCrossValidationInSelectionPhase(final int numIterations, final double trainFoldSize, final IMeasure<Double, Double> lossFunction) { if (!(this.getSelectionEvaluatorFactory() instanceof MonteCarloCrossValidationEvaluatorFactory)) { this.withSelectionPhaseEvaluatorFactory(new MonteCarloCrossValidationEvaluatorFactory().withDatasetSplitter(new MulticlassClassStratifiedSplitter())); } ((MonteCarloCrossValidationEvaluatorFactory) this.getSelectionEvaluatorFactory()).withNumMCIterations(numIterations).withTrainFoldSize(trainFoldSize).withSplitBasedEvaluator(new SimpleSLCSplitBasedClassifierEvaluator(lossFunction)); return this; } /** * Sets the performance measure to evaluate a candidate solution's generalization performance. Caution: This resets the evaluators to MCCV for both search and selection phase if these are not already MCCVs. * @param lossFunction The loss function to be used. * @return The builder object. 
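 *
 * For example (the concrete loss function is only an illustration):
 *
 * <pre>{@code
 * builder.withPerformanceMeasure(new ZeroOneLoss());
 * }</pre>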
*/ public AbstractMLPlanSingleLabelBuilder withPerformanceMeasure(final IMeasure<Double, Double> lossFunction) { /* make sure both phases use an MCCV evaluator and equip both with the given loss function */ if (!(this.getSearchEvaluatorFactory() instanceof MonteCarloCrossValidationEvaluatorFactory)) { this.withSearchPhaseEvaluatorFactory( new MonteCarloCrossValidationEvaluatorFactory().withDatasetSplitter(new MulticlassClassStratifiedSplitter()).withNumMCIterations(SEARCH_NUM_MC_ITERATIONS).withTrainFoldSize(SEARCH_TRAIN_FOLD_SIZE)); } if (!(this.getSelectionEvaluatorFactory() instanceof MonteCarloCrossValidationEvaluatorFactory)) { this.withSelectionPhaseEvaluatorFactory( new MonteCarloCrossValidationEvaluatorFactory().withDatasetSplitter(new MulticlassClassStratifiedSplitter()).withNumMCIterations(SELECTION_NUM_MC_ITERATIONS).withTrainFoldSize(SELECTION_TRAIN_FOLD_SIZE)); } ((MonteCarloCrossValidationEvaluatorFactory) this.getSearchEvaluatorFactory()).withSplitBasedEvaluator(new SimpleSLCSplitBasedClassifierEvaluator(lossFunction)); ((MonteCarloCrossValidationEvaluatorFactory) this.getSelectionEvaluatorFactory()).withSplitBasedEvaluator(new SimpleSLCSplitBasedClassifierEvaluator(lossFunction)); return this; } protected IDatasetSplitter getDefaultDatasetSplitter() { return new MulticlassClassStratifiedSplitter(); } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/core/IMLPlanBuilder.java
package ai.libs.mlplan.core; import java.io.File; import ai.libs.hasco.core.HASCOFactory; import ai.libs.jaicore.ml.evaluation.evaluators.weka.factory.ClassifierEvaluatorConstructionFailedException; import ai.libs.jaicore.ml.weka.dataset.splitter.IDatasetSplitter; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNode; import ai.libs.jaicore.search.probleminputs.GraphSearchWithPathEvaluationsInput; import ai.libs.mlpipeline_evaluation.PerformanceDBAdapter; import ai.libs.mlplan.multiclass.MLPlanClassifierConfig; import ai.libs.mlplan.multiclass.wekamlplan.IClassifierFactory; import weka.core.Instances; /** * The IMLPlanBuilder provides the general interface of an ML-Plan builder independent * of the problem domain or specific library that is used for the configuration of machine * learning pipelines. * * @author mwever * */ public interface IMLPlanBuilder { public IDatasetSplitter getSearchSelectionDatasetSplitter(); public PipelineEvaluator getClassifierEvaluationInSearchPhase(Instances dataShownToSearch, int randomSeed, int size) throws ClassifierEvaluatorConstructionFailedException; public PipelineEvaluator getClassifierEvaluationInSelectionPhase(Instances dataShownToSearch, int randomSeed) throws ClassifierEvaluatorConstructionFailedException; public String getPerformanceMeasureName(); public String getRequestedInterface(); public File getSearchSpaceConfigFile(); public IClassifierFactory getClassifierFactory(); public HASCOFactory<GraphSearchWithPathEvaluationsInput<TFDNode, String, Double>, TFDNode, String, Double> getHASCOFactory(); public MLPlanClassifierConfig getAlgorithmConfig(); public void prepareNodeEvaluatorInFactoryWithData(Instances data); public PerformanceDBAdapter getDBAdapter(); public boolean getUseCache(); }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/core/MLPlan.java
package ai.libs.mlplan.core; import java.io.IOException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.eventbus.Subscribe; import ai.libs.hasco.core.HASCOFactory; import ai.libs.hasco.core.HASCOSolutionCandidate; import ai.libs.hasco.events.HASCOSolutionEvent; import ai.libs.hasco.exceptions.ComponentInstantiationFailedException; import ai.libs.hasco.model.ComponentInstance; import ai.libs.hasco.optimizingfactory.OptimizingFactory; import ai.libs.hasco.optimizingfactory.OptimizingFactoryProblem; import ai.libs.hasco.variants.forwarddecomposition.twophase.TwoPhaseHASCO; import ai.libs.hasco.variants.forwarddecomposition.twophase.TwoPhaseHASCOFactory; import ai.libs.hasco.variants.forwarddecomposition.twophase.TwoPhaseSoftwareConfigurationProblem; import ai.libs.jaicore.basic.ILoggingCustomizable; import ai.libs.jaicore.basic.MathExt; import ai.libs.jaicore.basic.algorithm.AAlgorithm; import ai.libs.jaicore.basic.algorithm.AlgorithmExecutionCanceledException; import ai.libs.jaicore.basic.algorithm.events.AlgorithmEvent; import ai.libs.jaicore.basic.algorithm.events.AlgorithmFinishedEvent; import ai.libs.jaicore.basic.algorithm.events.AlgorithmInitializedEvent; import ai.libs.jaicore.basic.algorithm.exceptions.AlgorithmException; import ai.libs.jaicore.basic.algorithm.exceptions.AlgorithmTimeoutedException; import ai.libs.jaicore.ml.evaluation.evaluators.weka.events.MCCVSplitEvaluationEvent; import ai.libs.jaicore.ml.evaluation.evaluators.weka.factory.ClassifierEvaluatorConstructionFailedException; import ai.libs.jaicore.ml.learningcurve.extrapolation.LearningCurveExtrapolatedEvent; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNode; import ai.libs.jaicore.search.core.interfaces.GraphGenerator; import ai.libs.jaicore.search.probleminputs.GraphSearchInput; import ai.libs.jaicore.search.probleminputs.GraphSearchWithPathEvaluationsInput; import ai.libs.mlplan.core.events.ClassifierCreatedEvent; import ai.libs.mlplan.core.events.ClassifierFoundEvent; import ai.libs.mlplan.multiclass.MLPlanClassifierConfig; import weka.classifiers.Classifier; import weka.core.Instances; public class MLPlan extends AAlgorithm<Instances, Classifier> implements ILoggingCustomizable { /** Logger for controlled output. 
*/ private Logger logger = LoggerFactory.getLogger(MLPlan.class); private String loggerName; private Classifier selectedClassifier; private double internalValidationErrorOfSelectedClassifier = Double.MAX_VALUE; private ComponentInstance componentInstanceOfSelectedClassifier; private final IMLPlanBuilder builder; private final Instances data; private TwoPhaseHASCOFactory<GraphSearchWithPathEvaluationsInput<TFDNode, String, Double>, TFDNode, String> twoPhaseHASCOFactory; private OptimizingFactory<TwoPhaseSoftwareConfigurationProblem, Classifier, HASCOSolutionCandidate<Double>, Double> optimizingFactory; private boolean buildSelectedClasifierOnGivenData = true; public MLPlan(final IMLPlanBuilder builder, final Instances data) { super(builder.getAlgorithmConfig(), data); builder.prepareNodeEvaluatorInFactoryWithData(data); /* sanity checks */ if (builder.getSearchSpaceConfigFile() == null || !builder.getSearchSpaceConfigFile().exists()) { throw new IllegalArgumentException("The search space configuration file must be set in MLPlanBuilder, and it must be set to a file that exists!"); } if (builder.getClassifierFactory() == null) { throw new IllegalArgumentException("ClassifierFactory must be set in MLPlanBuilder!"); } /* store builder and data for main algorithm */ this.builder = builder; this.data = data; } @Override public AlgorithmEvent nextWithException() throws AlgorithmException, InterruptedException, AlgorithmExecutionCanceledException, AlgorithmTimeoutedException { switch (this.getState()) { case CREATED: this.logger.info("Starting an ML-Plan instance."); AlgorithmInitializedEvent event = this.activate(); /* check number of CPUs assigned */ if (this.getConfig().cpus() < 1) { throw new IllegalStateException("Cannot generate search where number of CPUs is " + this.getConfig().cpus()); } /* set up exact splits */ final double dataPortionUsedForSelection = this.getConfig().dataPortionForSelection(); this.logger.debug("Splitting given {} data points into search data ({}%) and selection data ({}%).", this.data.size(), MathExt.round((1 - dataPortionUsedForSelection) * 100, 2), MathExt.round(dataPortionUsedForSelection * 100, 2)); Instances dataShownToSearch; if (dataPortionUsedForSelection > 0) { dataShownToSearch = this.builder.getSearchSelectionDatasetSplitter().split(this.getInput(), this.getConfig().randomSeed(), dataPortionUsedForSelection).get(1); } else { dataShownToSearch = this.getInput(); } if (dataShownToSearch.isEmpty()) { throw new IllegalStateException("Cannot search on no data."); } /* dynamically compute blow-ups */ if (Double.isNaN(this.getConfig().expectedBlowupInSelection())) { double blowUpInSelectionPhase = 1; this.getConfig().setProperty(MLPlanClassifierConfig.K_BLOWUP_SELECTION, String.valueOf(blowUpInSelectionPhase)); this.logger.info("No expected blow-up for selection phase has been defined. Automatically configuring {}", blowUpInSelectionPhase); } if (!this.buildSelectedClasifierOnGivenData) { this.getConfig().setProperty(MLPlanClassifierConfig.K_BLOWUP_POSTPROCESS, String.valueOf(0)); this.logger.info("Selected classifier won't be built, so no blow-up is calculated."); } else if (Double.isNaN(this.getConfig().expectedBlowupInPostprocessing())) { double blowUpInPostprocessing = 1; this.getConfig().setProperty(MLPlanClassifierConfig.K_BLOWUP_POSTPROCESS, String.valueOf(blowUpInPostprocessing)); this.logger.info("No expected blow-up for postprocessing phase has been defined. 
Automatically configuring {}", blowUpInPostprocessing); } /* setup the pipeline evaluators */ this.logger.debug("Setting up the pipeline evaluators."); PipelineEvaluator classifierEvaluatorForSearch; PipelineEvaluator classifierEvaluatorForSelection; try { classifierEvaluatorForSearch = this.builder.getClassifierEvaluationInSearchPhase(dataShownToSearch, this.getConfig().randomSeed(), MLPlan.this.getInput().size()); classifierEvaluatorForSelection = this.builder.getClassifierEvaluationInSelectionPhase(dataShownToSearch, this.getConfig().randomSeed()); } catch (ClassifierEvaluatorConstructionFailedException e2) { throw new AlgorithmException(e2, "Could not create the pipeline evaluator"); } classifierEvaluatorForSearch.registerListener(this); // events will be forwarded classifierEvaluatorForSelection.registerListener(this); // events will be forwarded /* communicate the parameters with which ML-Plan will run */ if (this.logger.isInfoEnabled()) { this.logger.info( "Starting ML-Plan with the following setup:\n\tDataset: {}\n\tTarget: {}\n\tCPUs: {}\n\tTimeout: {}s\n\tTimeout for single candidate evaluation: {}s\n\tTimeout for node evaluation: {}s\n\tRandom Completions per node evaluation: {}\n\tPortion of data for selection phase: {}%\n\tPipeline evaluation during search: {}\n\tPipeline evaluation during selection: {}\n\tBlow-ups are {} for selection phase and {} for post-processing phase.", this.getInput().hashCode(), this.builder.getPerformanceMeasureName(), this.getConfig().cpus(), this.getTimeout().seconds(), this.getConfig().timeoutForCandidateEvaluation() / 1000, this.getConfig().timeoutForNodeEvaluation() / 1000, this.getConfig().numberOfRandomCompletions(), MathExt.round(this.getConfig().dataPortionForSelection() * 100, 2), classifierEvaluatorForSearch.getBenchmark(), classifierEvaluatorForSelection.getBenchmark(), this.getConfig().expectedBlowupInSelection(), this.getConfig().expectedBlowupInPostprocessing()); } /* create 2-phase software configuration problem */ this.logger.debug("Creating 2-phase software configuration problem."); TwoPhaseSoftwareConfigurationProblem problem = null; try { problem = new TwoPhaseSoftwareConfigurationProblem(this.builder.getSearchSpaceConfigFile(), this.builder.getRequestedInterface(), classifierEvaluatorForSearch, classifierEvaluatorForSelection); } catch (IOException e1) { throw new AlgorithmException(e1, "Could not activate ML-Plan!"); } /* create 2-phase HASCO */ this.logger.info("Creating the twoPhaseHASCOFactory."); OptimizingFactoryProblem<TwoPhaseSoftwareConfigurationProblem, Classifier, Double> optimizingFactoryProblem = new OptimizingFactoryProblem<>(this.builder.getClassifierFactory(), problem); HASCOFactory<GraphSearchWithPathEvaluationsInput<TFDNode, String, Double>, TFDNode, String, Double> hascoFactory = this.builder.getHASCOFactory(); this.twoPhaseHASCOFactory = new TwoPhaseHASCOFactory<>(hascoFactory); this.twoPhaseHASCOFactory.setConfig(this.getConfig()); this.optimizingFactory = new OptimizingFactory<>(optimizingFactoryProblem, this.twoPhaseHASCOFactory); this.logger.info("Setting logger of {} to {}.optimizingfactory", this.optimizingFactory.getClass().getName(), this.loggerName); this.optimizingFactory.setLoggerName(this.loggerName + ".optimizingfactory"); this.optimizingFactory.registerListener(new Object() { @Subscribe public void receiveEventFromFactory(final AlgorithmEvent event) { if (event instanceof AlgorithmInitializedEvent || event instanceof AlgorithmFinishedEvent) { return; } if (event instanceof HASCOSolutionEvent) { 
@SuppressWarnings("unchecked") HASCOSolutionCandidate<Double> solution = ((HASCOSolutionEvent<Double>) event).getSolutionCandidate(); try { MLPlan.this.logger.info("Received new solution {} with score {} and evaluation time {}ms", solution.getComponentInstance().getNestedComponentDescription(), solution.getScore(), solution.getTimeToEvaluateCandidate()); } catch (Exception e) { MLPlan.this.logger.warn("Could not print log due to exception while preparing the log message.", e); } if (dataPortionUsedForSelection == 0.0 && solution.getScore() < MLPlan.this.internalValidationErrorOfSelectedClassifier) { try { MLPlan.this.selectedClassifier = MLPlan.this.builder.getClassifierFactory().getComponentInstantiation(solution.getComponentInstance()); MLPlan.this.internalValidationErrorOfSelectedClassifier = solution.getScore(); MLPlan.this.componentInstanceOfSelectedClassifier = solution.getComponentInstance(); } catch (ComponentInstantiationFailedException e) { MLPlan.this.logger.error("Could not update selectedClassifier with newly best seen solution due to issues building the classifier from its ComponentInstance description.", e); } } try { MLPlan.this.post( new ClassifierFoundEvent(MLPlan.this.getId(), solution.getComponentInstance(), MLPlan.this.builder.getClassifierFactory().getComponentInstantiation(solution.getComponentInstance()), solution.getScore())); } catch (ComponentInstantiationFailedException e) { MLPlan.this.logger.error("An issue occurred while preparing the description for the post of a ClassifierFoundEvent", e); } } else { MLPlan.this.post(event); } } }); this.logger.info("Initializing the optimization factory."); this.optimizingFactory.init(); this.logger.info("Started and activated ML-Plan."); return event; case ACTIVE: /* train the classifier returned by the optimizing factory */ long startOptimizationTime = System.currentTimeMillis(); try { this.selectedClassifier = this.optimizingFactory.call(); this.logger.info("2-Phase-HASCO has chosen classifier {}, which will now be built on the entire data given, i.e. {} data points.", this.selectedClassifier, this.getInput().size()); } catch (AlgorithmException | InterruptedException | AlgorithmExecutionCanceledException | AlgorithmTimeoutedException e) { this.terminate(); // send the termination event throw e; } this.internalValidationErrorOfSelectedClassifier = this.optimizingFactory.getPerformanceOfObject(); this.componentInstanceOfSelectedClassifier = this.optimizingFactory.getComponentInstanceOfObject(); if (this.buildSelectedClasifierOnGivenData) { long startBuildTime = System.currentTimeMillis(); try { this.selectedClassifier.buildClassifier(this.getInput()); } catch (Exception e) { throw new AlgorithmException(e, "Training the classifier failed!"); } long endBuildTime = System.currentTimeMillis(); this.logger.info("Selected model has been built on entire dataset. Build time of chosen model was {}ms. Total construction time was {}ms. The chosen classifier is: {}", endBuildTime - startBuildTime, endBuildTime - startOptimizationTime, this.selectedClassifier); } else { this.logger.info("Selected model has not been built, since model building has been disabled. 
Total construction time was {}ms.", System.currentTimeMillis() - startOptimizationTime); } return this.terminate(); default: throw new IllegalStateException("Cannot do anything in state " + this.getState()); } } @Override public Classifier call() throws AlgorithmException, InterruptedException, AlgorithmExecutionCanceledException, AlgorithmTimeoutedException { while (this.hasNext()) { this.nextWithException(); } return this.selectedClassifier; } @Override public void setLoggerName(final String name) { this.loggerName = name; this.logger.info("Switching logger name to {}", name); this.logger = LoggerFactory.getLogger(name); this.logger.info("Activated ML-Plan logger {}. Now setting logger of the optimizing factory to {}.optimizingfactory", name, name); if (this.optimizingFactory != null) { this.logger.info("Setting logger of {} to {}.optimizingfactory", this.optimizingFactory.getClass().getName(), this.loggerName); this.optimizingFactory.setLoggerName(this.loggerName + ".optimizingfactory"); } else { this.logger.debug("Optimizingfactory has not been set yet, so not customizing its logger."); } this.logger.info("Switched ML-Plan logger to {}", name); } public void setPortionOfDataForPhase2(final float portion) { this.getConfig().setProperty(MLPlanClassifierConfig.SELECTION_PORTION, String.valueOf(portion)); } @Override public String getLoggerName() { return this.loggerName; } @Override public MLPlanClassifierConfig getConfig() { return (MLPlanClassifierConfig) super.getConfig(); } public void setRandomSeed(final int seed) { this.getConfig().setProperty(MLPlanClassifierConfig.K_RANDOM_SEED, String.valueOf(seed)); } public Classifier getSelectedClassifier() { return this.selectedClassifier; } public ComponentInstance getComponentInstanceOfSelectedClassifier() { return this.componentInstanceOfSelectedClassifier; } @SuppressWarnings("unchecked") public GraphGenerator<TFDNode, String> getGraphGenerator() { return ((TwoPhaseHASCO<? extends GraphSearchInput<TFDNode, String>, TFDNode, String>) this.optimizingFactory.getOptimizer()).getGraphGenerator(); } public double getInternalValidationErrorOfSelectedClassifier() { return this.internalValidationErrorOfSelectedClassifier; } @Override public synchronized void cancel() { this.logger.info("Received cancel. First canceling optimizer, then invoking general shutdown."); this.optimizingFactory.cancel(); this.logger.debug("Now canceling main ML-Plan routine"); super.cancel(); assert this.isCanceled() : "Canceled-flag is not positive at the end of the cancel routine!"; this.logger.info("Completed cancellation of ML-Plan. 
Cancel status is {}", this.isCanceled()); } public OptimizingFactory<TwoPhaseSoftwareConfigurationProblem, Classifier, HASCOSolutionCandidate<Double>, Double> getOptimizingFactory() { return this.optimizingFactory; } @Subscribe public void receiveClassifierCreatedEvent(final ClassifierCreatedEvent e) { this.post(e); } @Subscribe public void receiveLearningCurveExtrapolatedEvent(final LearningCurveExtrapolatedEvent e) { this.post(e); } @Subscribe public void receiveMCCVSplitEvaluationEvent(final MCCVSplitEvaluationEvent e) { this.post(e); } public TwoPhaseHASCOFactory<GraphSearchWithPathEvaluationsInput<TFDNode, String, Double>, TFDNode, String> getTwoPhaseHASCOFactory() { return this.twoPhaseHASCOFactory; } public boolean isBuildSelectedClasifierOnGivenData() { return this.buildSelectedClasifierOnGivenData; } public void setBuildSelectedClasifierOnGivenData(final boolean buildSelectedClasifierOnGivenData) { this.buildSelectedClasifierOnGivenData = buildSelectedClasifierOnGivenData; } }
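A minimal end-to-end sketch of the MLPlan lifecycle shown above, assuming the MLPlanWekaBuilder from this corpus and a hypothetical ARFF file; all calls on the MLPlan object are methods defined in the class above:

import weka.classifiers.Classifier;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class MLPlanRunExample {
    public static void main(final String[] args) throws Exception {
        Instances data = new DataSource("data/iris.arff").getDataSet(); // hypothetical dataset path
        data.setClassIndex(data.numAttributes() - 1);
        MLPlan mlplan = new MLPlan(new MLPlanWekaBuilder(), data);
        mlplan.setLoggerName("mlplan");         // route logs under a custom logger name
        mlplan.setRandomSeed(42);               // make the search reproducible
        mlplan.setPortionOfDataForPhase2(0.3f); // reserve 30% of the data for the selection phase
        Classifier chosen = mlplan.call();      // runs search + selection, then builds the final model
        System.out.println("Chosen classifier: " + chosen);
        System.out.println("Internal validation error: " + mlplan.getInternalValidationErrorOfSelectedClassifier());
    }
}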
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/core/MLPlanBuilder.java
package ai.libs.mlplan.core; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.function.Predicate; import org.aeonbits.owner.ConfigFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.core.HASCOFactory; import ai.libs.hasco.model.Component; import ai.libs.hasco.serialization.ComponentLoader; import ai.libs.hasco.variants.forwarddecomposition.HASCOViaFDAndBestFirstFactory; import ai.libs.hasco.variants.forwarddecomposition.HASCOViaFDFactory; import ai.libs.jaicore.basic.FileUtil; import ai.libs.jaicore.basic.MathExt; import ai.libs.jaicore.basic.TimeOut; import ai.libs.jaicore.basic.algorithm.reduction.AlgorithmicProblemReduction; import ai.libs.jaicore.logging.ToJSONStringUtil; import ai.libs.jaicore.ml.core.dataset.sampling.inmemory.ASamplingAlgorithm; import ai.libs.jaicore.ml.core.dataset.sampling.inmemory.factories.interfaces.ISamplingAlgorithmFactory; import ai.libs.jaicore.ml.core.dataset.weka.WekaInstances; import ai.libs.jaicore.ml.core.evaluation.measure.IMeasure; import ai.libs.jaicore.ml.core.evaluation.measure.multilabel.AutoMEKAGGPFitnessMeasureLoss; import ai.libs.jaicore.ml.core.evaluation.measure.multilabel.EMultilabelPerformanceMeasure; import ai.libs.jaicore.ml.core.evaluation.measure.singlelabel.EMultiClassPerformanceMeasure; import ai.libs.jaicore.ml.core.evaluation.measure.singlelabel.MultiClassMeasureBuilder; import ai.libs.jaicore.ml.core.evaluation.measure.singlelabel.ZeroOneLoss; import ai.libs.jaicore.ml.evaluation.evaluators.weka.IClassifierEvaluator; import ai.libs.jaicore.ml.evaluation.evaluators.weka.LearningCurveExtrapolationEvaluator; import ai.libs.jaicore.ml.evaluation.evaluators.weka.factory.ClassifierEvaluatorConstructionFailedException; import ai.libs.jaicore.ml.evaluation.evaluators.weka.factory.IClassifierEvaluatorFactory; import ai.libs.jaicore.ml.evaluation.evaluators.weka.factory.LearningCurveExtrapolationEvaluatorFactory; import ai.libs.jaicore.ml.evaluation.evaluators.weka.factory.MonteCarloCrossValidationEvaluatorFactory; import ai.libs.jaicore.ml.evaluation.evaluators.weka.splitevaluation.ISplitBasedClassifierEvaluator; import ai.libs.jaicore.ml.evaluation.evaluators.weka.splitevaluation.SimpleMLCSplitBasedClassifierEvaluator; import ai.libs.jaicore.ml.evaluation.evaluators.weka.splitevaluation.SimpleSLCSplitBasedClassifierEvaluator; import ai.libs.jaicore.ml.learningcurve.extrapolation.LearningCurveExtrapolationMethod; import ai.libs.jaicore.ml.weka.dataset.splitter.ArbitrarySplitter; import ai.libs.jaicore.ml.weka.dataset.splitter.IDatasetSplitter; import ai.libs.jaicore.ml.weka.dataset.splitter.MulticlassClassStratifiedSplitter; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNode; import ai.libs.jaicore.search.algorithms.standard.bestfirst.StandardBestFirstFactory; import ai.libs.jaicore.search.algorithms.standard.bestfirst.nodeevaluation.AlternativeNodeEvaluator; import ai.libs.jaicore.search.algorithms.standard.bestfirst.nodeevaluation.INodeEvaluator; import ai.libs.jaicore.search.core.interfaces.IOptimalPathInORGraphSearchFactory; import ai.libs.jaicore.search.problemtransformers.GraphSearchProblemInputToGraphSearchWithSubpathEvaluationInputTransformerViaRDFS; import ai.libs.mlpipeline_evaluation.CacheEvaluatorMeasureBridge; import ai.libs.mlpipeline_evaluation.PerformanceDBAdapter; import 
ai.libs.mlplan.multiclass.MLPlanClassifierConfig; import ai.libs.mlplan.multiclass.wekamlplan.IClassifierFactory; import ai.libs.mlplan.multiclass.wekamlplan.sklearn.SKLearnClassifierFactory; import ai.libs.mlplan.multiclass.wekamlplan.weka.PreferenceBasedNodeEvaluator; import ai.libs.mlplan.multiclass.wekamlplan.weka.WEKAPipelineFactory; import ai.libs.mlplan.multiclass.wekamlplan.weka.WekaPipelineValidityCheckingNodeEvaluator; import ai.libs.mlplan.multilabel.MekaPipelineFactory; import weka.core.Instances; /** * The MLPlanBuilder helps to easily configure and initialize ML-Plan with specific parameter settings. * For convenient use, the MLPlanBuilder also offers methods for initializing ML-Plan with default * configuration to use ML-Plan for single label classification in combination with WEKA or scikit-learn * or for multi-label classification in combination with MEKA and consequently with WEKA (for baselearners * of multi-label reduction strategies). * * @author mwever, fmohr */ public class MLPlanBuilder { /* Logging */ private Logger logger = LoggerFactory.getLogger(MLPlanBuilder.class); private static final String SLC_REQUESTED_HASCO_INTERFACE = "AbstractClassifier"; private static final String MLC_REQUESTED_HASCO_INTERFACE = "MLClassifier"; /* Search space configuration files for default configurations */ private static final File SPC_TINYTEST = new File("resources/automl/searchmodels/weka/tinytest.json"); private static final File SPC_AUTO_WEKA = new File("resources/automl/searchmodels/weka/weka-all-autoweka.json"); private static final File SPC_SKLEARN = new File("resources/automl/searchmodels/sklearn/sklearn-mlplan.json"); private static final File SPC_SKLEARN_UL = new File("resources/automl/searchmodels/sklearn/ml-plan-ul.json"); private static final File SPC_MEKA = new File("resources/automl/searchmodels/meka/meka-multilabel.json"); /* Preferred classifier lists to define an order for the classifiers to be evaluated. */ private static final File PREFC_AUTO_WEKA = new File("resources/mlplan/weka-precedenceList.txt"); private static final File PREFC_SKLEARN = new File("resources/mlplan/sklearn-precedenceList.txt"); private static final File PREFC_SKLEARN_UL = new File("resources/mlplan/sklearn-ul-precedenceList.txt"); private static final File PREFC_MEKA = new File("resources/mlplan/meka-preferenceList.txt"); /* Default values initially set when creating a builder. */ private static final File DEFAULT_ALGORITHM_CONFIG_FILE = new File("conf/mlplan.properties"); private static final boolean DEFAULT_USE_CACHE = false; private static final Predicate<TFDNode> DEFAULT_PRIORIZING_PREDICATE = null; private static final String DEFAULT_REQUESTED_HASCO_INTERFACE = SLC_REQUESTED_HASCO_INTERFACE; /** * Default configurations including search space configuration files and lists of preferences with respect to the classifiers to be evaluated. * * @author mwever */ private enum EDefaultConfig { TINYTEST(SPC_TINYTEST, PREFC_AUTO_WEKA), AUTO_WEKA(SPC_AUTO_WEKA, PREFC_AUTO_WEKA), SKLEARN(SPC_SKLEARN, PREFC_SKLEARN), SKLEARN_UL(SPC_SKLEARN_UL, PREFC_SKLEARN_UL), MEKA(SPC_MEKA, PREFC_MEKA); /* Search space configuration file */ private final File searchSpaceConfigurationFile; /* File containing a list of components defining an ordering of preference. 
*/ private final File preferredComponentsFile; private EDefaultConfig(final File spcFile, final File preferredComponentsFile) { this.searchSpaceConfigurationFile = spcFile; this.preferredComponentsFile = preferredComponentsFile; } public File getSearchSpaceConfigFile() { return this.searchSpaceConfigurationFile; } public File getPreferredComponentsFile() { return this.preferredComponentsFile; } } private boolean factoryPreparedWithData = false; private MLPlanClassifierConfig algorithmConfig; @SuppressWarnings("rawtypes") private HASCOViaFDFactory hascoFactory = new HASCOViaFDFactory<>(); private File searchSpaceConfigFile; private Collection<Component> components; private IClassifierFactory classifierFactory; private String requestedHASCOInterface; private PipelineValidityCheckingNodeEvaluator pipelineValidityCheckingNodeEvaluator; private INodeEvaluator<TFDNode, Double> preferredNodeEvaluator = null; /* The splitter is used to create the split for separating search and selection data */ private IDatasetSplitter searchSelectionDatasetSplitter = new MulticlassClassStratifiedSplitter(); private IDatasetSplitter searchPhaseDatasetSplitter = new MulticlassClassStratifiedSplitter(); private IDatasetSplitter selectionPhaseDatasetSplitter = new MulticlassClassStratifiedSplitter(); private boolean useCache; private PerformanceDBAdapter dbAdapter = null; private EMultiClassPerformanceMeasure singleLabelPerformanceMeasure; private EMultilabelPerformanceMeasure multiLabelPerformanceMeasure; private ISplitBasedClassifierEvaluator<Double> splitBasedClassifierEvaluator; private Predicate<TFDNode> priorizingPredicate = null; private IClassifierEvaluatorFactory factoryForPipelineEvaluationInSearchPhase = null; private IClassifierEvaluatorFactory factoryForPipelineEvaluationInSelectionPhase = null; public MLPlanBuilder() { super(); /* Setting up all generic default values. */ try { this.withAlgorithmConfigFile(DEFAULT_ALGORITHM_CONFIG_FILE); } catch (IllegalArgumentException e) { this.logger.error("The default algorithm configuration file could not be loaded.", e); } this.useCache = DEFAULT_USE_CACHE; this.priorizingPredicate = DEFAULT_PRIORIZING_PREDICATE; this.requestedHASCOInterface = DEFAULT_REQUESTED_HASCO_INTERFACE; } public MLPlanBuilder(final File searchSpaceConfigFile, final File algorithmConfigFile, final EMultiClassPerformanceMeasure performanceMeasure) { this(); this.withAlgorithmConfigFile(algorithmConfigFile); this.searchSpaceConfigFile = searchSpaceConfigFile; this.singleLabelPerformanceMeasure = performanceMeasure; this.useCache = false; } public MLPlanBuilder(final File searchSpaceConfigFile, final File algorithmConfigFile, final EMultiClassPerformanceMeasure performanceMeasure, final PerformanceDBAdapter dbAdapter) { this(searchSpaceConfigFile, algorithmConfigFile, performanceMeasure); this.useCache = true; this.dbAdapter = dbAdapter; } /** * Set the classifier factory that translates <code>CompositionInstance</code> objects to classifiers that can be evaluated. * * @param classifierFactory The classifier factory to be used to translate CompositionInstance objects to classifiers. 
*/ public void withClassifierFactory(final IClassifierFactory classifierFactory) { this.classifierFactory = classifierFactory; } public MLPlanBuilder withSearchSpaceConfigFile(final File searchSpaceConfig) throws IOException { FileUtil.requireFileExists(searchSpaceConfig); this.searchSpaceConfigFile = searchSpaceConfig; this.components = new ComponentLoader(searchSpaceConfig).getComponents(); return this; } public MLPlanBuilder withDatasetSplitterForSearchSelectionSplit(final IDatasetSplitter datasetSplitter) { this.searchSelectionDatasetSplitter = datasetSplitter; return this; } public MLPlanBuilder withSearchPhaseDatasetSplitter(final IDatasetSplitter datasetSplitter) { this.searchPhaseDatasetSplitter = datasetSplitter; return this; } public MLPlanBuilder withSelectionPhaseDatasetSplitter(final IDatasetSplitter datasetSplitter) { this.selectionPhaseDatasetSplitter = datasetSplitter; return this; } public MLPlanBuilder withRequestedInterface(final String requestedInterface) { this.requestedHASCOInterface = requestedInterface; return this; } /** * Configures the MLPlanBuilder to deal with the AutoSKLearn search space configuration. * * @return Returns the current MLPlanBuilder object with the AutoSKLearn search space configuration. * @throws IOException Throws an IOException if the search space config file could not be loaded. */ public MLPlanBuilder withAutoSKLearnConfig() throws IOException { this.classifierFactory = new SKLearnClassifierFactory(); return this.withDefaultConfiguration(EDefaultConfig.SKLEARN); } public MLPlanBuilder withTpotConfig() throws IOException { this.classifierFactory = new SKLearnClassifierFactory(); return this.withDefaultConfiguration(EDefaultConfig.SKLEARN_UL); } public MLPlanBuilder withAutoWEKAConfiguration() throws IOException { this.classifierFactory = new WEKAPipelineFactory(); this.pipelineValidityCheckingNodeEvaluator = new WekaPipelineValidityCheckingNodeEvaluator(); return this.withDefaultConfiguration(EDefaultConfig.AUTO_WEKA); } public MLPlanBuilder withTinyTestConfiguration() throws IOException { this.classifierFactory = new WEKAPipelineFactory(); this.pipelineValidityCheckingNodeEvaluator = new WekaPipelineValidityCheckingNodeEvaluator(); return this.withDefaultConfiguration(EDefaultConfig.TINYTEST); } public MLPlanBuilder withMekaDefaultConfiguration() throws IOException { this.withDefaultConfiguration(EDefaultConfig.MEKA); this.singleLabelPerformanceMeasure = null; this.multiLabelPerformanceMeasure = EMultilabelPerformanceMeasure.AUTO_MEKA_GGP_FITNESS_LOSS; this.splitBasedClassifierEvaluator = new SimpleMLCSplitBasedClassifierEvaluator(new AutoMEKAGGPFitnessMeasureLoss()); this.classifierFactory = new MekaPipelineFactory(); this.requestedHASCOInterface = MLC_REQUESTED_HASCO_INTERFACE; this.withDatasetSplitterForSearchSelectionSplit(new ArbitrarySplitter()); this.withSearchPhaseDatasetSplitter(new ArbitrarySplitter()); this.withSelectionPhaseDatasetSplitter(new ArbitrarySplitter()); return this; } private MLPlanBuilder withDefaultConfiguration(final EDefaultConfig defConfig) throws IOException { if (this.searchSpaceConfigFile == null) { this.withSearchSpaceConfigFile(defConfig.getSearchSpaceConfigFile()); } this.withPreferredComponentsFile(defConfig.preferredComponentsFile); this.withRandomCompletionBasedBestFirstSearch(); if (defConfig != EDefaultConfig.MEKA && this.singleLabelPerformanceMeasure == null) { this.singleLabelPerformanceMeasure = EMultiClassPerformanceMeasure.ERRORRATE; 
this.withSingleLabelClassificationMeasure(this.singleLabelPerformanceMeasure); } /* use MCCV for pipeline evaluation */ int mccvIterationsDuringSearch = 5; int mccvIterationsDuringSelection = 5; double mccvPortion = 0.7; if (!(this.factoryForPipelineEvaluationInSearchPhase instanceof MonteCarloCrossValidationEvaluatorFactory)) { this.factoryForPipelineEvaluationInSearchPhase = new MonteCarloCrossValidationEvaluatorFactory().withNumMCIterations(mccvIterationsDuringSearch).withTrainFoldSize(mccvPortion).withSplitBasedEvaluator(new SimpleSLCSplitBasedClassifierEvaluator(new ZeroOneLoss())); } if (!(this.factoryForPipelineEvaluationInSelectionPhase instanceof MonteCarloCrossValidationEvaluatorFactory)) { this.factoryForPipelineEvaluationInSelectionPhase = new MonteCarloCrossValidationEvaluatorFactory().withNumMCIterations(mccvIterationsDuringSelection).withTrainFoldSize(mccvPortion).withSplitBasedEvaluator(new SimpleSLCSplitBasedClassifierEvaluator(new ZeroOneLoss())); } /* configure blow-ups for MCCV */ double blowUpInSelectionPhase = MathExt.round(1f / mccvPortion * mccvIterationsDuringSelection / mccvIterationsDuringSearch, 2); this.algorithmConfig.setProperty(MLPlanClassifierConfig.K_BLOWUP_SELECTION, String.valueOf(blowUpInSelectionPhase)); double blowUpInPostprocessing = MathExt.round((1 / (1 - this.algorithmConfig.dataPortionForSelection())) / mccvIterationsDuringSelection, 2); this.algorithmConfig.setProperty(MLPlanClassifierConfig.K_BLOWUP_POSTPROCESS, String.valueOf(blowUpInPostprocessing)); return this; } public MLPlanBuilder withPreferredComponentsFile(final File preferredComponentsFile) throws IOException { this.getAlgorithmConfig().setProperty(MLPlanClassifierConfig.PREFERRED_COMPONENTS, preferredComponentsFile.getAbsolutePath()); List<String> ordering; if (!preferredComponentsFile.exists()) { this.logger.warn("The configured file for preferred components \"{}\" does not exist. Not using any particular ordering.", preferredComponentsFile.getAbsolutePath()); ordering = new ArrayList<>(); } else { ordering = FileUtil.readFileAsList(preferredComponentsFile); } return this.withPreferredNodeEvaluator(new PreferenceBasedNodeEvaluator(this.components, ordering)); } /** * Loads the MLPlanClassifierConfig with default values and replaces all properties according to the properties defined in the given config file. * * @param algorithmConfigFile The file specifying the property values to replace the default configuration. * @return The MLPlanBuilder object. 
*/ public MLPlanBuilder withAlgorithmConfigFile(final File algorithmConfigFile) { return this.withAlgorithmConfig((MLPlanClassifierConfig) ConfigFactory.create(MLPlanClassifierConfig.class).loadPropertiesFromFile(algorithmConfigFile)); } public MLPlanBuilder withAlgorithmConfig(final MLPlanClassifierConfig config) { this.algorithmConfig = config; this.hascoFactory.withAlgorithmConfig(this.algorithmConfig); this.updateEverything(); return this; } public MLPlanBuilder withSingleLabelClassificationMeasure(final EMultiClassPerformanceMeasure measure) { this.singleLabelPerformanceMeasure = measure; return this.withSplitBasedClassifierEvaluator(this.getSingleLabelEvaluationMeasurementBridge(new MultiClassMeasureBuilder().getEvaluator(measure))); } public MLPlanBuilder withMultiLabelClassificationMeasure(final EMultilabelPerformanceMeasure measure) { this.multiLabelPerformanceMeasure = measure; return this.withSplitBasedClassifierEvaluator(this.getMultiLabelEvaluationMeasurementBridge(new MultiClassMeasureBuilder().getEvaluator(measure))); } /** * This ADDs a new preferred node evaluator; requires that the search will be a best-first search. * * It is possible to specify several preferred node evaluators, which will be ordered by the order in which they are specified. The latest given evaluator is the most preferred one. * * @param preferredNodeEvaluator * @return */ public MLPlanBuilder withPreferredNodeEvaluator(final INodeEvaluator<TFDNode, Double> preferredNodeEvaluator) { if (this.factoryPreparedWithData) { throw new IllegalStateException("The method prepareNodeEvaluatorInFactoryWithData has already been called. No changes to the preferred node evaluator possible anymore"); } /* first update the preferred node evaluator */ if (this.preferredNodeEvaluator == null) { this.preferredNodeEvaluator = preferredNodeEvaluator; } else { this.preferredNodeEvaluator = new AlternativeNodeEvaluator<>(preferredNodeEvaluator, this.preferredNodeEvaluator); } this.updateEverything(); return this; } public MLPlanBuilder withSplitBasedClassifierEvaluator(final ISplitBasedClassifierEvaluator<Double> evaluator) { this.splitBasedClassifierEvaluator = evaluator; return this; } @SuppressWarnings("unchecked") public MLPlanBuilder withSearchFactory(@SuppressWarnings("rawtypes") final IOptimalPathInORGraphSearchFactory searchFactory, @SuppressWarnings("rawtypes") final AlgorithmicProblemReduction transformer) { this.hascoFactory.setSearchFactory(searchFactory); this.hascoFactory.setSearchProblemTransformer(transformer); return this; } @SuppressWarnings("unchecked") public MLPlanBuilder withRandomCompletionBasedBestFirstSearch() { this.hascoFactory.setSearchFactory(new StandardBestFirstFactory<TFDNode, String, Double>()); this.updateEverything(); return this; } public MLPlanBuilder withTimeoutForSingleSolutionEvaluation(final TimeOut timeout) { this.getAlgorithmConfig().setProperty(MLPlanClassifierConfig.K_RANDOM_COMPLETIONS_TIMEOUT_PATH, String.valueOf(timeout.milliseconds())); this.updateEverything(); return this; } public MLPlanBuilder withTimeoutForNodeEvaluation(final TimeOut timeout) { this.getAlgorithmConfig().setProperty(MLPlanClassifierConfig.K_RANDOM_COMPLETIONS_TIMEOUT_NODE, String.valueOf(timeout.milliseconds())); this.updateEverything(); return this; } public void prepareNodeEvaluatorInFactoryWithData(final Instances data) { if (!(this.hascoFactory instanceof HASCOViaFDAndBestFirstFactory)) { return; } if (this.factoryPreparedWithData) { throw new IllegalStateException("Factory has already been prepared 
with data. This can only be done once!"); } this.factoryPreparedWithData = true; /* nothing to do if there are no preferred node evaluators */ if (this.pipelineValidityCheckingNodeEvaluator == null && this.preferredNodeEvaluator == null) { return; } /* now determine the real node evaluator to be used. A semantic node evaluator has highest priority */ INodeEvaluator<TFDNode, Double> actualNodeEvaluator; if (this.pipelineValidityCheckingNodeEvaluator != null) { this.pipelineValidityCheckingNodeEvaluator.setComponents(this.components); this.pipelineValidityCheckingNodeEvaluator.setData(data); if (this.preferredNodeEvaluator != null) { actualNodeEvaluator = new AlternativeNodeEvaluator<>(this.pipelineValidityCheckingNodeEvaluator, this.preferredNodeEvaluator); } else { actualNodeEvaluator = this.pipelineValidityCheckingNodeEvaluator; } } else { actualNodeEvaluator = this.preferredNodeEvaluator; } /* update the preferred node evaluator in the HascoFactory */ this.preferredNodeEvaluator = actualNodeEvaluator; this.updateEverything(); } @SuppressWarnings("unchecked") private void updateSearchProblemTransformer() { this.hascoFactory.setSearchProblemTransformer(new GraphSearchProblemInputToGraphSearchWithSubpathEvaluationInputTransformerViaRDFS<TFDNode, String, Double>(this.preferredNodeEvaluator, this.priorizingPredicate, this.algorithmConfig.randomSeed(), this.algorithmConfig.numberOfRandomCompletions(), this.algorithmConfig.timeoutForCandidateEvaluation(), this.algorithmConfig.timeoutForNodeEvaluation())); } private void updateAlgorithmConfigOfHASCO() { this.hascoFactory.withAlgorithmConfig(this.getAlgorithmConfig()); } private void updateEverything() { this.updateSearchProblemTransformer(); this.updateAlgorithmConfigOfHASCO(); } /** * @return The dataset splitter that is used for separating search and selection data. */ public IDatasetSplitter getSearchSelectionDatasetSplitter() { return this.searchSelectionDatasetSplitter; } /** * @return The dataset splitter to be used in search phase for generating benchmark splits. */ public IDatasetSplitter getSearchPhaseDatasetSplitter() { return this.searchPhaseDatasetSplitter; } /** * @return The dataset splitter to be used in selection phase for generating benchmark splits. */ public IDatasetSplitter getSelectionPhaseDatasetSplitter() { return this.selectionPhaseDatasetSplitter; } /** * @return The interface that is requested to be provided by a solution candidate component instance. */ public String getRequestedInterface() { return this.requestedHASCOInterface; } // public void withExtrapolatedSaturationPointEvaluation(final int[] anchorpoints, final ISamplingAlgorithmFactory<IInstance, ? extends ASamplingAlgorithm<IInstance>> subsamplingAlgorithmFactory, // final double trainSplitForAnchorpointsMeasurement, final LearningCurveExtrapolationMethod extrapolationMethod) { // this.builderForPipelineEvaluationInSearchPhase = new ExtrapolatedSaturationPointEvaluatorFactory(anchorpoints, subsamplingAlgorithmFactory, trainSplitForAnchorpointsMeasurement, extrapolationMethod); // // } public void withLearningCurveExtrapolationEvaluation(final int[] anchorpoints, final ISamplingAlgorithmFactory<WekaInstances<Object>, ? 
extends ASamplingAlgorithm<WekaInstances<Object>>> subsamplingAlgorithmFactory, final double trainSplitForAnchorpointsMeasurement, final LearningCurveExtrapolationMethod extrapolationMethod) { this.factoryForPipelineEvaluationInSearchPhase = new LearningCurveExtrapolationEvaluatorFactory(anchorpoints, subsamplingAlgorithmFactory, trainSplitForAnchorpointsMeasurement, extrapolationMethod); // this.factoryForPipelineEvaluationInSelectionPhase = new LearningCurveExtrapolationEvaluatorFactory(anchorpoints, subsamplingAlgorithmFactory, trainSplitForAnchorpointsMeasurement, extrapolationMethod); this.factoryForPipelineEvaluationInSelectionPhase = new MonteCarloCrossValidationEvaluatorFactory().withNumMCIterations(3).withTrainFoldSize(.7).withSplitBasedEvaluator(new SimpleSLCSplitBasedClassifierEvaluator(new ZeroOneLoss())); this.algorithmConfig.setProperty(MLPlanClassifierConfig.K_BLOWUP_SELECTION, "" + 4); // evaluating on 1000 in selection MCCV is, assuming quadratic growth, roughly max 4 times costlier than search phase evaluations } public boolean getUseCache() { return this.useCache; } public PerformanceDBAdapter getDBAdapter() { return this.dbAdapter; } public IClassifierFactory getClassifierFactory() { return this.classifierFactory; } public Collection<Component> getComponents() { return this.components; } public File getSearchSpaceConfigFile() { return this.searchSpaceConfigFile; } public MLPlanClassifierConfig getAlgorithmConfig() { return this.algorithmConfig; } public EMultiClassPerformanceMeasure getSingleLabelPerformanceMeasure() { return this.singleLabelPerformanceMeasure; } public EMultilabelPerformanceMeasure getMultiLabelPerformanceMeasure() { return this.multiLabelPerformanceMeasure; } public ISplitBasedClassifierEvaluator<Double> getSingleLabelEvaluationMeasurementBridge(final IMeasure<Double, Double> measure) { if (this.splitBasedClassifierEvaluator == null) { if (this.getUseCache()) { return new CacheEvaluatorMeasureBridge(measure, this.getDBAdapter()); } else { return new SimpleSLCSplitBasedClassifierEvaluator(measure); } } else { return this.splitBasedClassifierEvaluator; } } public ISplitBasedClassifierEvaluator<Double> getMultiLabelEvaluationMeasurementBridge(final IMeasure<double[], Double> measure) { if (this.splitBasedClassifierEvaluator == null) { return new SimpleMLCSplitBasedClassifierEvaluator(measure); } else { return this.splitBasedClassifierEvaluator; } } @SuppressWarnings("rawtypes") public HASCOFactory getHASCOFactory() { return this.hascoFactory; } // public ISplitBasedClassifierEvaluator<Double> getEvaluationMeasurementBridge() { // if (this.splitBasedClassifierEvaluator != null) { // return this.splitBasedClassifierEvaluator; // } // // if (this.measure != null) { // return this.getSingleLabelEvaluationMeasurementBridge(this.measure); // } else { // throw new IllegalStateException("Can not create evaluator measure bridge without a measure."); // } // } @Override public String toString() { Map<String, Object> fields = new HashMap<>(); fields.put("algorithmConfig", this.getAlgorithmConfig()); fields.put("classifierFactory", this.classifierFactory); return ToJSONStringUtil.toJSONString(fields); } public IClassifierEvaluatorFactory getFactoryForPipelineEvaluationInSearchPhase() { return this.factoryForPipelineEvaluationInSearchPhase; } public IClassifierEvaluatorFactory getFactoryForPipelineEvaluationInSelectionPhase() { return this.factoryForPipelineEvaluationInSelectionPhase; } public PipelineEvaluator getClassifierEvaluationInSearchPhase(final Instances 
data, final int seed, final int fullDatasetSize) throws ClassifierEvaluatorConstructionFailedException { if (this.factoryForPipelineEvaluationInSearchPhase == null) { throw new IllegalStateException("No factory for pipeline evaluation in search phase has been set!"); } IClassifierEvaluator evaluator = this.factoryForPipelineEvaluationInSearchPhase.getIClassifierEvaluator(data, seed); if (evaluator instanceof LearningCurveExtrapolationEvaluator) { ((LearningCurveExtrapolationEvaluator) evaluator).setFullDatasetSize(fullDatasetSize); } return new PipelineEvaluator(this.getClassifierFactory(), evaluator, this.getAlgorithmConfig().timeoutForCandidateEvaluation()); } public PipelineEvaluator getClassifierEvaluationInSelectionPhase(final Instances data, final int seed) throws ClassifierEvaluatorConstructionFailedException { if (this.factoryForPipelineEvaluationInSelectionPhase == null) { throw new IllegalStateException("No factory for pipeline evaluation in selection phase has been set!"); } return new PipelineEvaluator(this.getClassifierFactory(), this.factoryForPipelineEvaluationInSelectionPhase.getIClassifierEvaluator(data, seed), Integer.MAX_VALUE); } }
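A short configuration sketch for this builder, assuming the TimeOut class offers a (long, TimeUnit) constructor; the concrete timeout values are illustrative only:

import java.util.concurrent.TimeUnit;
import ai.libs.jaicore.basic.TimeOut;

public class MLPlanBuilderConfigExample {
    static MLPlanBuilder autoWekaBuilder() throws Exception {
        MLPlanBuilder builder = new MLPlanBuilder().withAutoWEKAConfiguration();
        // Bound individual candidate and node evaluations during the search.
        builder.withTimeoutForSingleSolutionEvaluation(new TimeOut(30, TimeUnit.SECONDS)); // assumed constructor
        builder.withTimeoutForNodeEvaluation(new TimeOut(90, TimeUnit.SECONDS));
        return builder;
    }
}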
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/core/MLPlanMekaBuilder.java
package ai.libs.mlplan.core; import java.io.File; import java.io.IOException; import ai.libs.jaicore.basic.FileUtil; import ai.libs.jaicore.ml.core.evaluation.measure.IMeasure; import ai.libs.jaicore.ml.core.evaluation.measure.multilabel.AutoMEKAGGPFitnessMeasureLoss; import ai.libs.jaicore.ml.core.evaluation.measure.multilabel.InstanceWiseF1AsLoss; import ai.libs.jaicore.ml.evaluation.evaluators.weka.factory.MonteCarloCrossValidationEvaluatorFactory; import ai.libs.jaicore.ml.evaluation.evaluators.weka.factory.ProbabilisticMonteCarloCrossValidationEvaluatorFactory; import ai.libs.jaicore.ml.evaluation.evaluators.weka.splitevaluation.SimpleMLCSplitBasedClassifierEvaluator; import ai.libs.jaicore.ml.weka.dataset.splitter.ArbitrarySplitter; import ai.libs.jaicore.ml.weka.dataset.splitter.IDatasetSplitter; import ai.libs.mlplan.multiclass.wekamlplan.IClassifierFactory; import ai.libs.mlplan.multilabel.MekaPipelineFactory; public class MLPlanMekaBuilder extends AbstractMLPlanBuilder { private static final String RES_SSC_MEKA_COMPLETE = "automl/searchmodels/meka/mlplan-meka.json"; private static final String FS_SSC_MEKA_COMPLETE = "conf/mlplan-meka.json"; private static final String RES_PREFC_MEKA = "mlplan/meka-preferenceList.txt"; private static final String FS_PREFC_MEKA = "conf/mlpan-meka-preferenceList.txt"; /* Default configuration values. */ private static final int SEARCH_NUM_MC_ITERATIONS = 5; private static final double SEARCH_TRAIN_FOLD_SIZE = 0.7; private static final int SELECTION_NUM_MC_ITERATIONS = 5; private static final double SELECTION_TRAIN_FOLD_SIZE = 0.7; private static final IMeasure<double[], Double> LOSS_FUNCTION = new InstanceWiseF1AsLoss(); /* Default configurations */ private static final String DEF_REQUESTED_HASCO_INTERFACE = "MLClassifier"; private static final String DEF_PREFERRED_COMPONENT_NAME_PREFIX = "resolveMLClassifierWith"; private static final IDatasetSplitter DEF_SELECTION_HOLDOUT_SPLITTER = new ArbitrarySplitter(); private static final File DEF_SEARCH_SPACE_CONFIG = FileUtil.getExistingFileWithHighestPriority(RES_SSC_MEKA_COMPLETE, FS_SSC_MEKA_COMPLETE); private static final File DEF_PREFERRED_COMPONENTS_CONFIG = FileUtil.getExistingFileWithHighestPriority(RES_PREFC_MEKA, FS_PREFC_MEKA); private static final IClassifierFactory CLASSIFIER_FACTORY = new MekaPipelineFactory(); private static final ProbabilisticMonteCarloCrossValidationEvaluatorFactory DEF_SEARCH_PHASE_EVALUATOR = new ProbabilisticMonteCarloCrossValidationEvaluatorFactory().withNumMCIterations(SEARCH_NUM_MC_ITERATIONS) .withTrainFoldSize(SEARCH_TRAIN_FOLD_SIZE).withSplitBasedEvaluator(new SimpleMLCSplitBasedClassifierEvaluator(LOSS_FUNCTION)).withDatasetSplitter(new ArbitrarySplitter()); private static final ProbabilisticMonteCarloCrossValidationEvaluatorFactory DEF_SELECTION_PHASE_EVALUATOR = new ProbabilisticMonteCarloCrossValidationEvaluatorFactory().withNumMCIterations(SELECTION_NUM_MC_ITERATIONS) .withTrainFoldSize(SELECTION_TRAIN_FOLD_SIZE).withSplitBasedEvaluator(new SimpleMLCSplitBasedClassifierEvaluator(LOSS_FUNCTION)).withDatasetSplitter(new ArbitrarySplitter()); public MLPlanMekaBuilder() throws IOException { super(); this.withSearchSpaceConfigFile(DEF_SEARCH_SPACE_CONFIG); this.withRequestedInterface(DEF_REQUESTED_HASCO_INTERFACE); this.withPreferredComponentsFile(DEF_PREFERRED_COMPONENTS_CONFIG, DEF_PREFERRED_COMPONENT_NAME_PREFIX); this.withDatasetSplitterForSearchSelectionSplit(DEF_SELECTION_HOLDOUT_SPLITTER); this.withClassifierFactory(CLASSIFIER_FACTORY); 
this.withSearchPhaseEvaluatorFactory(DEF_SEARCH_PHASE_EVALUATOR); this.withSelectionPhaseEvaluatorFactory(DEF_SELECTION_PHASE_EVALUATOR); } /** * Configures ML-Plan with a configuration comparable to AutoMEKA_GGP and GA-Auto-MLC. * @return The builder object. */ public MLPlanMekaBuilder withAutoMEKADefaultConfiguration() { this.withPerformanceMeasure(new AutoMEKAGGPFitnessMeasureLoss()); return this; } /** * Sets the performance measure to evaluate a candidate solution's generalization performance. Caution: This resets the evaluators to MCCV for both search and selection phase if these are not already MCCVs. * @param lossFunction The loss function to be used. * @return The builder object. */ public MLPlanMekaBuilder withPerformanceMeasure(final IMeasure<double[], Double> lossFunction) { if (!(this.getSearchEvaluatorFactory() instanceof MonteCarloCrossValidationEvaluatorFactory)) { this.withSearchPhaseEvaluatorFactory(new MonteCarloCrossValidationEvaluatorFactory().withDatasetSplitter(this.getDefaultDatasetSplitter()).withNumMCIterations(SEARCH_NUM_MC_ITERATIONS).withTrainFoldSize(SEARCH_TRAIN_FOLD_SIZE)); } if (!(this.getSelectionEvaluatorFactory() instanceof MonteCarloCrossValidationEvaluatorFactory)) { this.withSelectionPhaseEvaluatorFactory( new MonteCarloCrossValidationEvaluatorFactory().withDatasetSplitter(this.getDefaultDatasetSplitter()).withNumMCIterations(SELECTION_NUM_MC_ITERATIONS).withTrainFoldSize(SELECTION_TRAIN_FOLD_SIZE)); } ((MonteCarloCrossValidationEvaluatorFactory) this.getSearchEvaluatorFactory()).withSplitBasedEvaluator(new SimpleMLCSplitBasedClassifierEvaluator(lossFunction)); ((MonteCarloCrossValidationEvaluatorFactory) this.getSelectionEvaluatorFactory()).withSplitBasedEvaluator(new SimpleMLCSplitBasedClassifierEvaluator(lossFunction)); return this; } protected IDatasetSplitter getDefaultDatasetSplitter() { return new ArbitrarySplitter(); } }
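A minimal usage sketch for the MEKA builder, assuming it can be handed to MLPlan via the IMLPlanBuilder interface; the ARFF file and label count are hypothetical, following the MEKA convention that label attributes come first:

import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class MLPlanMekaExample {
    public static void main(final String[] args) throws Exception {
        Instances data = new DataSource("data/flags-meka.arff").getDataSet(); // hypothetical multi-label ARFF
        data.setClassIndex(7); // hypothetical number of labels, placed at the front of the attribute list
        MLPlanMekaBuilder builder = new MLPlanMekaBuilder().withAutoMEKADefaultConfiguration();
        new MLPlan(builder, data).call();
    }
}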
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/core/MLPlanSKLearnBuilder.java
package ai.libs.mlplan.core; import java.io.BufferedReader; import java.io.File; import java.io.IOException; import java.io.InputStreamReader; import java.util.Arrays; import java.util.LinkedList; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.basic.FileUtil; import ai.libs.jaicore.basic.SystemRequirementsNotMetException; import ai.libs.jaicore.basic.sets.SetUtil; import ai.libs.jaicore.ml.evaluation.evaluators.weka.factory.MonteCarloCrossValidationEvaluatorFactory; import ai.libs.jaicore.ml.evaluation.evaluators.weka.splitevaluation.SimpleSLCSplitBasedClassifierEvaluator; import ai.libs.jaicore.ml.weka.dataset.splitter.IDatasetSplitter; import ai.libs.jaicore.ml.weka.dataset.splitter.MulticlassClassStratifiedSplitter; import ai.libs.mlplan.multiclass.wekamlplan.IClassifierFactory; import ai.libs.mlplan.multiclass.wekamlplan.sklearn.SKLearnClassifierFactory; public class MLPlanSKLearnBuilder extends AbstractMLPlanSingleLabelBuilder { private Logger logger = LoggerFactory.getLogger(MLPlanSKLearnBuilder.class); private static final String MSG_MODULE_NOT_AVAILABLE = "Could not load python module {}: {}"; private static final String PYTHON_MINIMUM_REQUIRED_VERSION = "Python 3.5.0"; private static final int PYTHON_MINIMUM_REQUIRED_VERSION_REL = 3; private static final int PYTHON_MINIMUM_REQUIRED_VERSION_MAJ = 5; private static final int PYTHON_MINIMUM_REQUIRED_VERSION_MIN = 0; private static final String[] PYTHON_REQUIRED_MODULES = { "arff", "numpy", "json", "pickle", "os", "sys", "warnings", "scipy", "sklearn" }; private static final String COMMAND_PYTHON = "python"; private static final String[] COMMAND_PYTHON_VERSION = { COMMAND_PYTHON, "--version" }; private static final String[] COMMAND_PYTHON_EXEC = { COMMAND_PYTHON, "-c" }; private static final String PYTHON_MODULE_NOT_FOUND_ERROR_MSG = "ModuleNotFoundError"; /* DEFAULT VALUES FOR THE SCIKIT-LEARN SETTING */ private static final String RES_SKLEARN_SEARCHSPACE_CONFIG = "automl/searchmodels/sklearn/sklearn-mlplan.json"; private static final String RES_SKLEARN_UL_SEARCHSPACE_CONFIG = "automl/searchmodels/sklearn/ml-plan-ul.json"; private static final String FS_SEARCH_SPACE_CONFIG = "conf/mlplan-sklearn.json"; private static final String RES_SKLEARN_PREFERRED_COMPONENTS = "mlplan/sklearn-preferenceList.txt"; private static final String FS_SKLEARN_PREFERRED_COMPONENTS = "conf/sklearn-preferenceList.txt"; private static final String DEF_REQUESTED_HASCO_INTERFACE = "AbstractClassifier"; private static final String DEF_PREFERRED_COMPONENT_NAME_PREFIX = "resolveAbstractClassifierWith"; private static final IDatasetSplitter DEF_SELECTION_HOLDOUT_SPLITTER = new MulticlassClassStratifiedSplitter(); private static final IClassifierFactory DEF_CLASSIFIER_FACTORY = new SKLearnClassifierFactory(); private static final File DEF_SEARCH_SPACE_CONFIG = FileUtil.getExistingFileWithHighestPriority(RES_SKLEARN_SEARCHSPACE_CONFIG, FS_SEARCH_SPACE_CONFIG); private static final File DEF_PREFERRED_COMPONENTS = FileUtil.getExistingFileWithHighestPriority(RES_SKLEARN_PREFERRED_COMPONENTS, FS_SKLEARN_PREFERRED_COMPONENTS); private static final MonteCarloCrossValidationEvaluatorFactory DEF_SEARCH_PHASE_EVALUATOR = new MonteCarloCrossValidationEvaluatorFactory().withNumMCIterations(SEARCH_NUM_MC_ITERATIONS).withTrainFoldSize(SEARCH_TRAIN_FOLD_SIZE) .withSplitBasedEvaluator(new SimpleSLCSplitBasedClassifierEvaluator(LOSS_FUNCTION)).withDatasetSplitter(new MulticlassClassStratifiedSplitter()); private static final 
MonteCarloCrossValidationEvaluatorFactory DEF_SELECTION_PHASE_EVALUATOR = new MonteCarloCrossValidationEvaluatorFactory().withNumMCIterations(SELECTION_NUM_MC_ITERATIONS).withTrainFoldSize(SELECTION_TRAIN_FOLD_SIZE) .withSplitBasedEvaluator(new SimpleSLCSplitBasedClassifierEvaluator(LOSS_FUNCTION)).withDatasetSplitter(new MulticlassClassStratifiedSplitter()); /** * Creates a new ML-Plan Builder for scikit-learn. * @throws IOException Thrown if configuration files cannot be read. */ public MLPlanSKLearnBuilder() throws IOException { this(false); } /** * Creates a new ML-Plan Builder for scikit-learn. * * @param skipSetupCheck Flag whether to skip the system's setup check, which examines whether the operating system has python installed in the required version and all the required python modules are installed. * @throws IOException Thrown if configuration files cannot be read. */ public MLPlanSKLearnBuilder(final boolean skipSetupCheck) throws IOException { super(); if (!skipSetupCheck) { this.checkPythonSetup(); } this.withSearchSpaceConfigFile(DEF_SEARCH_SPACE_CONFIG); this.withPreferredComponentsFile(DEF_PREFERRED_COMPONENTS, DEF_PREFERRED_COMPONENT_NAME_PREFIX); this.withRequestedInterface(DEF_REQUESTED_HASCO_INTERFACE); this.withClassifierFactory(DEF_CLASSIFIER_FACTORY); this.withDatasetSplitterForSearchSelectionSplit(DEF_SELECTION_HOLDOUT_SPLITTER); this.withSearchPhaseEvaluatorFactory(DEF_SEARCH_PHASE_EVALUATOR); this.withSelectionPhaseEvaluatorFactory(DEF_SELECTION_PHASE_EVALUATOR); this.setPerformanceMeasureName(LOSS_FUNCTION.getClass().getSimpleName()); } /** * Configures ML-Plan to use the search space with unlimited length preprocessing pipelines. * @return The builder object. * @throws IOException Thrown if the search space configuration file cannot be read. */ public MLPlanSKLearnBuilder withUnlimitedLengthPipelineSearchSpace() throws IOException { return (MLPlanSKLearnBuilder) this.withSearchSpaceConfigFile(FileUtil.getExistingFileWithHighestPriority(RES_SKLEARN_UL_SEARCHSPACE_CONFIG, FS_SEARCH_SPACE_CONFIG)); } private void checkPythonSetup() { try { /* Check whether we have python in the $PATH environment variable and whether the required python version is installed. 
*/ Process p = new ProcessBuilder().command(COMMAND_PYTHON_VERSION).start(); StringBuilder sb = new StringBuilder(); try (BufferedReader br = new BufferedReader(new InputStreamReader(p.getInputStream()))) { String line; while ((line = br.readLine()) != null) { sb.append(line); } } String versionString = sb.toString(); if (!versionString.startsWith("Python ")) { throw new SystemRequirementsNotMetException("Could not detect valid python version."); } String[] versionSplit = versionString.substring(7).split("\\."); if (versionSplit.length != 3) { throw new SystemRequirementsNotMetException("Could not parse python version to be of the shape X.X.X"); } int rel = Integer.parseInt(versionSplit[0]); int maj = Integer.parseInt(versionSplit[1]); int min = Integer.parseInt(versionSplit[2]); if (!this.isValidVersion(rel, maj, min)) { throw new SystemRequirementsNotMetException("Python version does not conform to the minimum required python version of " + PYTHON_MINIMUM_REQUIRED_VERSION); } /* Check whether we have all required python modules available */ List<String> checkAllModulesAvailableCommand = new LinkedList<>(Arrays.asList(COMMAND_PYTHON_EXEC)); StringBuilder imports = new StringBuilder(); for (String module : PYTHON_REQUIRED_MODULES) { if (!imports.toString().isEmpty()) { imports.append(";"); } imports.append("import " + module); } checkAllModulesAvailableCommand.add(imports.toString()); StringBuilder allModulesAvailableErrorSB = new StringBuilder(); Process allModulesCheckProcess = new ProcessBuilder().command(checkAllModulesAvailableCommand.toArray(new String[0])).start(); try (BufferedReader br = new BufferedReader(new InputStreamReader(allModulesCheckProcess.getErrorStream()))) { String line; while ((line = br.readLine()) != null) { allModulesAvailableErrorSB.append(line); } } if (!allModulesAvailableErrorSB.toString().isEmpty()) { List<String> modulesNotFound = new LinkedList<>(); for (String module : PYTHON_REQUIRED_MODULES) { Process moduleCheck = new ProcessBuilder().command(COMMAND_PYTHON_EXEC[0], COMMAND_PYTHON_EXEC[1], "import " + module).start(); StringBuilder errorSB = new StringBuilder(); try (BufferedReader br = new BufferedReader(new InputStreamReader(moduleCheck.getErrorStream()))) { String line; while ((line = br.readLine()) != null) { errorSB.append(line); } } if (!errorSB.toString().isEmpty() && errorSB.toString().contains(PYTHON_MODULE_NOT_FOUND_ERROR_MSG)) { if (module.equals("arff")) { this.logger.debug(MSG_MODULE_NOT_AVAILABLE, "liac-arff", errorSB); modulesNotFound.add("liac-arff"); } else if (module.equals("sklearn")) { this.logger.debug(MSG_MODULE_NOT_AVAILABLE, "scikit-learn", errorSB); modulesNotFound.add("scikit-learn"); } else { this.logger.debug(MSG_MODULE_NOT_AVAILABLE, module, errorSB); modulesNotFound.add(module); } } } if (!modulesNotFound.isEmpty()) { throw new SystemRequirementsNotMetException("Could not find required python modules: " + SetUtil.implode(modulesNotFound, ", ")); } } } catch (IOException e) { throw new SystemRequirementsNotMetException("Could not check whether python is installed in the required version. 
Is python available as a command on your command line?"); } } private boolean isValidVersion(final int rel, final int maj, final int min) { return ((rel > PYTHON_MINIMUM_REQUIRED_VERSION_REL) || (rel == PYTHON_MINIMUM_REQUIRED_VERSION_REL && maj > PYTHON_MINIMUM_REQUIRED_VERSION_MAJ) || (rel == PYTHON_MINIMUM_REQUIRED_VERSION_REL && maj == PYTHON_MINIMUM_REQUIRED_VERSION_MAJ && min >= PYTHON_MINIMUM_REQUIRED_VERSION_MIN)); } @Override protected IDatasetSplitter getDefaultDatasetSplitter() { return new MulticlassClassStratifiedSplitter(); } }
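A minimal usage sketch (not part of the original sources) of the builder above; both constructors are confirmed by the code, the rest is illustrative.
// Hypothetical usage of MLPlanSKLearnBuilder, assuming the API defined above.
MLPlanSKLearnBuilder builder = new MLPlanSKLearnBuilder(); // runs the python setup check
builder.withUnlimitedLengthPipelineSearchSpace(); // optional: unlimited-length preprocessing pipelines
// In environments where the python installation is known to be valid, the check can be skipped:
MLPlanSKLearnBuilder uncheckedBuilder = new MLPlanSKLearnBuilder(true);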
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/core/MLPlanWekaBuilder.java
package ai.libs.mlplan.core; import java.io.File; import java.io.IOException; import ai.libs.jaicore.basic.FileUtil; import ai.libs.jaicore.basic.MathExt; import ai.libs.jaicore.basic.ResourceUtil; import ai.libs.jaicore.ml.core.dataset.sampling.inmemory.ASamplingAlgorithm; import ai.libs.jaicore.ml.core.dataset.sampling.inmemory.factories.interfaces.ISamplingAlgorithmFactory; import ai.libs.jaicore.ml.core.dataset.weka.WekaInstances; import ai.libs.jaicore.ml.core.evaluation.measure.singlelabel.ZeroOneLoss; import ai.libs.jaicore.ml.evaluation.evaluators.weka.factory.LearningCurveExtrapolationEvaluatorFactory; import ai.libs.jaicore.ml.evaluation.evaluators.weka.factory.MonteCarloCrossValidationEvaluatorFactory; import ai.libs.jaicore.ml.evaluation.evaluators.weka.splitevaluation.SimpleSLCSplitBasedClassifierEvaluator; import ai.libs.jaicore.ml.learningcurve.extrapolation.LearningCurveExtrapolationMethod; import ai.libs.jaicore.ml.weka.dataset.splitter.IDatasetSplitter; import ai.libs.jaicore.ml.weka.dataset.splitter.MulticlassClassStratifiedSplitter; import ai.libs.mlplan.multiclass.MLPlanClassifierConfig; import ai.libs.mlplan.multiclass.wekamlplan.IClassifierFactory; import ai.libs.mlplan.multiclass.wekamlplan.weka.WEKAPipelineFactory; public class MLPlanWekaBuilder extends AbstractMLPlanSingleLabelBuilder { private static final String RES_SSC_TINY_WEKA = "automl/searchmodels/weka/tinytest.json"; private static final String RES_SSC_WEKA_COMPLETE = "automl/searchmodels/weka/weka-all-autoweka.json"; private static final String FS_SSC_WEKA = "conf/mlplan-weka.json"; private static final String RES_PREFERRED_COMPONENTS = "mlplan/weka-preferenceList.txt"; private static final String FS_PREFERRED_COMPONENTS = "conf/mlpan-weka-preferenceList.txt"; /* Default configuration values. 
*/ private static final String DEF_REQUESTED_HASCO_INTERFACE = "AbstractClassifier"; private static final String DEF_PREFERRED_COMPONENT_NAME_PREFIX = "resolveAbstractClassifierWith"; private static final IDatasetSplitter DEF_SELECTION_HOLDOUT_SPLITTER = new MulticlassClassStratifiedSplitter(); private static final IClassifierFactory DEF_CLASSIFIER_FACTORY = new WEKAPipelineFactory(); private static final File DEF_PREFERRED_COMPONENTS = FileUtil.getExistingFileWithHighestPriority(RES_PREFERRED_COMPONENTS, FS_PREFERRED_COMPONENTS); private static final File DEF_SEARCH_SPACE_CONFIG = FileUtil.getExistingFileWithHighestPriority(RES_SSC_WEKA_COMPLETE, FS_SSC_WEKA); private static final MonteCarloCrossValidationEvaluatorFactory DEF_SEARCH_PHASE_EVALUATOR = new MonteCarloCrossValidationEvaluatorFactory().withNumMCIterations(SEARCH_NUM_MC_ITERATIONS).withTrainFoldSize(SEARCH_TRAIN_FOLD_SIZE) .withSplitBasedEvaluator(new SimpleSLCSplitBasedClassifierEvaluator(LOSS_FUNCTION)).withDatasetSplitter(new MulticlassClassStratifiedSplitter()); private static final MonteCarloCrossValidationEvaluatorFactory DEF_SELECTION_PHASE_EVALUATOR = new MonteCarloCrossValidationEvaluatorFactory().withNumMCIterations(SELECTION_NUM_MC_ITERATIONS).withTrainFoldSize(SELECTION_TRAIN_FOLD_SIZE) .withSplitBasedEvaluator(new SimpleSLCSplitBasedClassifierEvaluator(LOSS_FUNCTION)).withDatasetSplitter(new MulticlassClassStratifiedSplitter()); public MLPlanWekaBuilder() throws IOException { super(); this.withSearchSpaceConfigFile(DEF_SEARCH_SPACE_CONFIG); this.withPreferredComponentsFile(DEF_PREFERRED_COMPONENTS, DEF_PREFERRED_COMPONENT_NAME_PREFIX); this.withRequestedInterface(DEF_REQUESTED_HASCO_INTERFACE); this.withClassifierFactory(DEF_CLASSIFIER_FACTORY); this.withDatasetSplitterForSearchSelectionSplit(DEF_SELECTION_HOLDOUT_SPLITTER); this.withSearchPhaseEvaluatorFactory(DEF_SEARCH_PHASE_EVALUATOR); this.withSelectionPhaseEvaluatorFactory(DEF_SELECTION_PHASE_EVALUATOR); this.setPerformanceMeasureName(LOSS_FUNCTION.getClass().getSimpleName()); /* Configure the blow-up factors for MCCV. */ double blowUpInSelectionPhase = MathExt.round(1f / SEARCH_TRAIN_FOLD_SIZE * SELECTION_NUM_MC_ITERATIONS / SEARCH_NUM_MC_ITERATIONS, 2); this.getAlgorithmConfig().setProperty(MLPlanClassifierConfig.K_BLOWUP_SELECTION, String.valueOf(blowUpInSelectionPhase)); double blowUpInPostprocessing = MathExt.round((1 / (1 - this.getAlgorithmConfig().dataPortionForSelection())) / SELECTION_NUM_MC_ITERATIONS, 2); this.getAlgorithmConfig().setProperty(MLPlanClassifierConfig.K_BLOWUP_POSTPROCESS, String.valueOf(blowUpInPostprocessing)); } /** * Sets the search space to a tiny weka search space configuration. * @return The builder object. * @throws IOException Thrown if the resource file cannot be read. */ public MLPlanWekaBuilder withTinyWekaSearchSpace() throws IOException { this.withSearchSpaceConfigFile(ResourceUtil.getResourceAsFile(RES_SSC_TINY_WEKA)); return this; } /** * Allows using learning curve extrapolation to predict the quality of candidate solutions. * @param anchorpoints The anchor points for which samples are actually evaluated on the respective data. * @param subsamplingAlgorithmFactory The factory for the sampling algorithm that is to be used to randomly draw training instances. * @param trainSplitForAnchorpointsMeasurement The training fold size for measuring the anchorpoints. * @param extrapolationMethod The method to be used in order to extrapolate the learning curve from the anchorpoints.
*/ public void withLearningCurveExtrapolationEvaluation(final int[] anchorpoints, final ISamplingAlgorithmFactory<WekaInstances<Object>, ? extends ASamplingAlgorithm<WekaInstances<Object>>> subsamplingAlgorithmFactory, final double trainSplitForAnchorpointsMeasurement, final LearningCurveExtrapolationMethod extrapolationMethod) { this.withSearchPhaseEvaluatorFactory(new LearningCurveExtrapolationEvaluatorFactory(anchorpoints, subsamplingAlgorithmFactory, trainSplitForAnchorpointsMeasurement, extrapolationMethod)); this.withSelectionPhaseEvaluatorFactory(new MonteCarloCrossValidationEvaluatorFactory().withNumMCIterations(3).withTrainFoldSize(.7).withSplitBasedEvaluator(new SimpleSLCSplitBasedClassifierEvaluator(new ZeroOneLoss()))); this.getAlgorithmConfig().setProperty(MLPlanClassifierConfig.K_BLOWUP_SELECTION, String.valueOf(10)); } }
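A usage sketch (not part of the original sources) for the WEKA builder above; samplingFactory and extrapolator are assumed to be constructed elsewhere, and the anchor points are illustrative only.
// Hypothetical configuration of MLPlanWekaBuilder with learning-curve extrapolation.
MLPlanWekaBuilder builder = new MLPlanWekaBuilder();
int[] anchorpoints = {8, 16, 64, 128}; // sample sizes at which candidates are actually evaluated
builder.withLearningCurveExtrapolationEvaluation(anchorpoints, samplingFactory, 0.7, extrapolator);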
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/core/PipelineEvaluator.java
package ai.libs.mlplan.core; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.eventbus.EventBus; import com.google.common.eventbus.Subscribe; import ai.libs.hasco.exceptions.ComponentInstantiationFailedException; import ai.libs.hasco.model.ComponentInstance; import ai.libs.jaicore.basic.IInformedObjectEvaluatorExtension; import ai.libs.jaicore.basic.ILoggingCustomizable; import ai.libs.jaicore.basic.algorithm.exceptions.ObjectEvaluationFailedException; import ai.libs.jaicore.basic.events.IEvent; import ai.libs.jaicore.basic.events.IEventEmitter; import ai.libs.jaicore.ml.evaluation.evaluators.weka.IClassifierEvaluator; import ai.libs.jaicore.ml.scikitwrapper.ScikitLearnWrapper; import ai.libs.jaicore.timing.TimedObjectEvaluator; import ai.libs.mlplan.core.events.ClassifierCreatedEvent; import ai.libs.mlplan.multiclass.wekamlplan.IClassifierFactory; import weka.classifiers.Classifier; /** * Evaluator used in the search phase of mlplan. * * @author fmohr */ public class PipelineEvaluator extends TimedObjectEvaluator<ComponentInstance, Double> implements IInformedObjectEvaluatorExtension<Double>, ILoggingCustomizable { private Logger logger = LoggerFactory.getLogger(PipelineEvaluator.class); private final EventBus eventBus = new EventBus(); private final IClassifierFactory classifierFactory; private final IClassifierEvaluator benchmark; private final int timeoutForEvaluation; private Double bestScore = 1.0; public PipelineEvaluator(final IClassifierFactory classifierFactory, final IClassifierEvaluator benchmark, final int timeoutForEvaluation) { super(); this.classifierFactory = classifierFactory; this.benchmark = benchmark; if (benchmark instanceof IEventEmitter) { ((IEventEmitter) benchmark).registerListener(this); } this.timeoutForEvaluation = timeoutForEvaluation; } @Override public String getLoggerName() { return this.logger.getName(); } @Override public void setLoggerName(final String name) { this.logger.info("Switching logger name from {} to {}", this.logger.getName(), name); this.logger = LoggerFactory.getLogger(name); if (this.benchmark instanceof ILoggingCustomizable) { this.logger.info("Setting logger name of actual benchmark {} to {}.benchmark", this.benchmark.getClass().getName(), name); ((ILoggingCustomizable) this.benchmark).setLoggerName(name + ".benchmark"); } else { this.logger.info("Benchmark {} does not implement ILoggingCustomizable, not customizing its logger.", this.benchmark.getClass().getName()); } } @SuppressWarnings("unchecked") @Override public Double evaluateSupervised(final ComponentInstance c) throws InterruptedException, ObjectEvaluationFailedException { this.logger.debug("Received request to evaluate component instance {}", c); try { if (this.benchmark instanceof IInformedObjectEvaluatorExtension) { ((IInformedObjectEvaluatorExtension<Double>) this.benchmark).updateBestScore(this.bestScore); } Classifier classifier = this.classifierFactory.getComponentInstantiation(c); this.eventBus.post(new ClassifierCreatedEvent(c, classifier)); // inform listeners about the creation of the classifier if (this.logger.isDebugEnabled()) { this.logger.debug("Starting benchmark {} for classifier {}", this.benchmark, (classifier instanceof ScikitLearnWrapper) ? classifier.toString() : classifier.getClass().getName()); } Double score = this.benchmark.evaluate(classifier); if (this.logger.isInfoEnabled()) { this.logger.info("Obtained score {} for classifier {}", score, (classifier instanceof ScikitLearnWrapper) ? 
classifier.toString() : classifier.getClass().getName()); } return score; } catch (ComponentInstantiationFailedException e) { throw new ObjectEvaluationFailedException("Evaluation of composition failed as the component instantiation could not be built.", e); } } @Override public void updateBestScore(final Double bestScore) { this.bestScore = bestScore; } @Override public long getTimeout(final ComponentInstance item) { return this.timeoutForEvaluation; } @Override public String getMessage(final ComponentInstance item) { return "Pipeline evaluation phase"; } public IClassifierEvaluator getBenchmark() { return this.benchmark; } /** * Here, we send a coupling event that informs the listener about which ComponentInstance has been used to create a classifier. * * @param listener */ public void registerListener(final Object listener) { this.eventBus.register(listener); } /** * Forwards every incoming event e * * @param e */ @Subscribe public void receiveEvent(final IEvent e) { this.eventBus.post(e); } }
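A wiring sketch (not part of the original sources) for the evaluator above, assuming a factory, a benchmark, and a ComponentInstance ci provided by the surrounding ML-Plan machinery, and assuming TimedObjectEvaluator exposes the usual evaluate entry point.
// Hypothetical wiring of the PipelineEvaluator.
PipelineEvaluator evaluator = new PipelineEvaluator(factory, benchmark, 30000); // 30s timeout per candidate
evaluator.updateBestScore(0.25); // forwarded to informed benchmarks before each evaluation
Double loss = evaluator.evaluate(ci); // builds the classifier, posts a ClassifierCreatedEvent, returns the benchmark score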
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/core/PipelineValidityCheckingNodeEvaluator.java
package ai.libs.mlplan.core; import java.util.Collection; import ai.libs.hasco.model.Component; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNode; import ai.libs.jaicore.search.algorithms.standard.bestfirst.nodeevaluation.INodeEvaluator; import weka.core.Instances; public abstract class PipelineValidityCheckingNodeEvaluator implements INodeEvaluator<TFDNode, Double> { private Instances data; private Collection<Component> components; public PipelineValidityCheckingNodeEvaluator() { } public PipelineValidityCheckingNodeEvaluator(final Collection<Component> components, final Instances data) { this.data = data; this.components = components; } public void setData(Instances data) { this.data = data; } public void setComponents(Collection<Component> components) { this.components = components; } public Instances getData() { return data; } public Collection<Component> getComponents() { return components; } }
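A minimal subclass sketch (hypothetical): the f signature mirrors the INodeEvaluator usage elsewhere in this codebase; a real implementation would derive the (partial) pipeline from the node's state and penalize or reject invalid ones.
// Hypothetical no-op validity check; Node and TFDNode are assumed to be imported
// from the jaicore search and planning packages referenced above.
public class NoOpValidityCheckingNodeEvaluator extends PipelineValidityCheckingNodeEvaluator {
	@Override
	public Double f(final Node<TFDNode, ?> node) {
		// Every node passes; returning null follows the convention used by other
		// node evaluators in this codebase for "no f-value contribution".
		return null;
	}
}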
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/core
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/core/events/ClassifierCreatedEvent.java
package ai.libs.mlplan.core.events; import ai.libs.hasco.model.ComponentInstance; import ai.libs.jaicore.basic.events.IEvent; import weka.classifiers.Classifier; public class ClassifierCreatedEvent implements IEvent { private final ComponentInstance instance; private final Classifier classifier; public ClassifierCreatedEvent(final ComponentInstance instance, final Classifier classifier) { super(); this.instance = instance; this.classifier = classifier; } public ComponentInstance getInstance() { return this.instance; } public Classifier getClassifier() { return this.classifier; } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/core
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/core/events/ClassifierFoundEvent.java
package ai.libs.mlplan.core.events; import java.util.HashMap; import java.util.Map; import ai.libs.hasco.model.ComponentInstance; import ai.libs.jaicore.basic.algorithm.events.ASolutionCandidateFoundEvent; import ai.libs.jaicore.basic.algorithm.events.ScoredSolutionCandidateFoundEvent; import ai.libs.jaicore.logging.ToJSONStringUtil; import weka.classifiers.Classifier; public class ClassifierFoundEvent extends ASolutionCandidateFoundEvent<Classifier> implements ScoredSolutionCandidateFoundEvent<Classifier, Double> { private final double inSampleError; private final ComponentInstance componentDescription; public ClassifierFoundEvent(final String algorithmId, final ComponentInstance componentDescription, final Classifier solutionCandidate, final double inSampleError) { super(algorithmId, solutionCandidate); this.inSampleError = inSampleError; this.componentDescription = componentDescription; } public double getInSampleError() { return this.inSampleError; } @Override public Double getScore() { return this.inSampleError; } public ComponentInstance getComponentDescription() { return this.componentDescription; } @Override public String toString() { Map<String, Object> fields = new HashMap<>(); fields.put("candidate", super.getSolutionCandidate()); fields.put("componentDescription", this.componentDescription); fields.put("inSampleError", this.inSampleError); return ToJSONStringUtil.toJSONString(fields); } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/gui
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/gui/outofsampleplots/OutOfSampleErrorPlotPlugin.java
package ai.libs.mlplan.gui.outofsampleplots; import ai.libs.jaicore.graphvisualizer.plugin.ASimpleMVCPlugin; import weka.core.Instances; public class OutOfSampleErrorPlotPlugin extends ASimpleMVCPlugin<OutOfSampleErrorPlotPluginModel, OutOfSampleErrorPlotPluginView, OutOfSampleErrorPlotPluginController> { private final Instances trainData; private final Instances testData; public OutOfSampleErrorPlotPlugin(Instances trainData, Instances testData) { super(); this.trainData = trainData; this.testData = testData; getController().setTrain(trainData); getController().setTest(testData); } public Instances getTrainData() { return trainData; } public Instances getTestData() { return testData; } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/gui
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/gui/outofsampleplots/OutOfSampleErrorPlotPluginController.java
package ai.libs.mlplan.gui.outofsampleplots; import java.util.ArrayList; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.basic.algorithm.events.AlgorithmEvent; import ai.libs.jaicore.graphvisualizer.events.gui.GUIEvent; import ai.libs.jaicore.graphvisualizer.plugin.ASimpleMVCPluginController; import ai.libs.jaicore.graphvisualizer.plugin.controlbar.ResetEvent; import ai.libs.jaicore.graphvisualizer.plugin.timeslider.GoToTimeStepEvent; import ai.libs.mlplan.core.events.ClassifierFoundEvent; import weka.classifiers.Classifier; import weka.classifiers.Evaluation; import weka.core.Instances; public class OutOfSampleErrorPlotPluginController extends ASimpleMVCPluginController<OutOfSampleErrorPlotPluginModel, OutOfSampleErrorPlotPluginView> { private Instances train; private Instances test; private Logger logger = LoggerFactory.getLogger(OutOfSampleErrorPlotPluginController.class); public OutOfSampleErrorPlotPluginController(final OutOfSampleErrorPlotPluginModel model, final OutOfSampleErrorPlotPluginView view) { super(model, view); } public Instances getTrain() { return this.train; } public void setTrain(final Instances train) { this.train = train; } public Instances getTest() { return this.test; } public void setTest(final Instances test) { this.test = test; } @Override public void handleGUIEvent(final GUIEvent guiEvent) { if (guiEvent instanceof ResetEvent || guiEvent instanceof GoToTimeStepEvent) { this.getModel().clear(); } } @Override public void handleAlgorithmEventInternally(final AlgorithmEvent algorithmEvent) { if (algorithmEvent instanceof ClassifierFoundEvent) { try { this.logger.debug("Received classifier found event {}", algorithmEvent); ClassifierFoundEvent event = (ClassifierFoundEvent) algorithmEvent; Classifier classifier = event.getSolutionCandidate(); this.logger.debug("Building classifier"); classifier.buildClassifier(this.train); Evaluation eval = new Evaluation(this.train); List<Double> performances = new ArrayList<>(); performances.add(event.getScore()); eval.evaluateModel(classifier, this.test); performances.add(eval.errorRate()); this.logger.debug("Adding solution to model and updating view."); this.getModel().addEntry(event.getTimestamp(), classifier, performances); } catch (Exception e) { this.logger.error("Could not train and evaluate classifier.", e); } } else { this.logger.trace("Received and ignored irrelevant event {}", algorithmEvent); } } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/gui
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/gui/outofsampleplots/OutOfSampleErrorPlotPluginModel.java
package ai.libs.mlplan.gui.outofsampleplots; import java.util.ArrayList; import java.util.List; import ai.libs.jaicore.graphvisualizer.plugin.ASimpleMVCPluginModel; import weka.classifiers.Classifier; /** * * @author fmohr * */ public class OutOfSampleErrorPlotPluginModel extends ASimpleMVCPluginModel<OutOfSampleErrorPlotPluginView, OutOfSampleErrorPlotPluginController> { private final List<Integer> timestamps = new ArrayList<>(); private final List<Classifier> classifiers = new ArrayList<>(); private final List<List<Double>> performances = new ArrayList<>(); private long timestampOfFirstEvent = -1; public final void addEntry(long timestamp, Classifier classifier, List<Double> performances) { int offset = 0; if (timestampOfFirstEvent == -1) { timestampOfFirstEvent = timestamp; } else { offset = (int) (timestamp - timestampOfFirstEvent); } this.timestamps.add(offset); this.classifiers.add(classifier); this.performances.add(performances); getView().update(); } public long getTimestampOfFirstEvent() { return timestampOfFirstEvent; } public void clear() { timestamps.clear(); classifiers.clear(); performances.clear(); timestampOfFirstEvent = -1; getView().clear(); } public List<Integer> getTimestamps() { return timestamps; } public List<Classifier> getClassifiers() { return classifiers; } public List<List<Double>> getPerformances() { return performances; } public void setTimestampOfFirstEvent(long timestampOfFirstEvent) { this.timestampOfFirstEvent = timestampOfFirstEvent; } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/gui
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/gui/outofsampleplots/OutOfSampleErrorPlotPluginView.java
package ai.libs.mlplan.gui.outofsampleplots; import java.util.ArrayList; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.jaicore.graphvisualizer.plugin.ASimpleMVCPluginView; import javafx.application.Platform; import javafx.scene.chart.LineChart; import javafx.scene.chart.NumberAxis; import javafx.scene.chart.XYChart.Data; import javafx.scene.chart.XYChart.Series; /** * * @author fmohr * */ public class OutOfSampleErrorPlotPluginView extends ASimpleMVCPluginView<OutOfSampleErrorPlotPluginModel, OutOfSampleErrorPlotPluginController, LineChart<Number, Number>> { private Logger logger = LoggerFactory.getLogger(OutOfSampleErrorPlotPluginView.class); private final Series<Number,Number> believedErrorSeries; private final Series<Number,Number> outOfSampleErrorSeries; private int nextIndexToDisplay = 0; public OutOfSampleErrorPlotPluginView(OutOfSampleErrorPlotPluginModel model) { super(model, new LineChart<>(new NumberAxis(), new NumberAxis())); getNode().getXAxis().setLabel("elapsed time (s)"); getNode().setTitle(getTitle()); believedErrorSeries = new Series<>(); believedErrorSeries.setName("Believed (internal) Error"); outOfSampleErrorSeries = new Series<>(); outOfSampleErrorSeries.setName("Out-of-Sample Error"); getNode().getData().add(believedErrorSeries); getNode().getData().add(outOfSampleErrorSeries); } @Override public void update() { /* compute data to add */ List<Integer> observedTimestamps = getModel().getTimestamps(); List<List<Double>> performances = getModel().getPerformances(); List<Data<Number, Number>> believedErrors = new ArrayList<>(); List<Data<Number, Number>> outOfSampleErrors = new ArrayList<>(); for (; nextIndexToDisplay < observedTimestamps.size(); nextIndexToDisplay++) { double timestamp = observedTimestamps.get(nextIndexToDisplay) / 1000.0; // convert the millisecond offset into seconds to match the axis label believedErrors.add(new Data<>(timestamp, performances.get(nextIndexToDisplay).get(0))); outOfSampleErrors.add(new Data<>(timestamp, performances.get(nextIndexToDisplay).get(1))); } logger.info("Adding {} values to chart.", believedErrors.size()); Platform.runLater(() -> { believedErrorSeries.getData().addAll(believedErrors); outOfSampleErrorSeries.getData().addAll(outOfSampleErrors); }); } @Override public String getTitle() { return "Out-of-Sample Error Timeline"; } public void clear() { nextIndexToDisplay = 0; believedErrorSeries.getData().clear(); outOfSampleErrorSeries.getData().clear(); } public int getNextIndexToDisplay() { return nextIndexToDisplay; } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/IntermediateSolutionEvent.java
package ai.libs.mlplan.metamining; import ai.libs.jaicore.basic.algorithm.events.AAlgorithmEvent; import ai.libs.mlplan.multiclass.wekamlplan.weka.model.MLPipeline; import weka.classifiers.Classifier; public class IntermediateSolutionEvent extends AAlgorithmEvent { private String classifier; private String searcher; private String evaluator; private double score; public IntermediateSolutionEvent(final String algorithmId, final Classifier classifier, final double score) { super(algorithmId); if (classifier instanceof MLPipeline) { MLPipeline pl = (MLPipeline) classifier; this.classifier = pl.getBaseClassifier().getClass().getName(); if (pl.getPreprocessors() != null && !pl.getPreprocessors().isEmpty()) { this.searcher = pl.getPreprocessors().get(0).getSearcher().getClass().getName(); this.evaluator = pl.getPreprocessors().get(0).getEvaluator().getClass().getName(); } } else { this.classifier = classifier.getClass().getName(); } this.score = score; } public String getClassifier() { return this.classifier; } public String getSearcher() { return this.searcher; } public String getEvaluator() { return this.evaluator; } public double getScore() { return this.score; } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/MetaMLPlan.java
package ai.libs.mlplan.metamining; import java.io.File; import java.io.IOException; import java.sql.SQLException; import java.util.Collection; import java.util.List; import java.util.NoSuchElementException; import java.util.Timer; import java.util.TimerTask; import org.apache.commons.lang3.time.StopWatch; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.eventbus.EventBus; import ai.libs.hasco.core.Util; import ai.libs.hasco.metamining.MetaMinerBasedSorter; import ai.libs.hasco.model.Component; import ai.libs.hasco.model.ComponentInstance; import ai.libs.jaicore.basic.algorithm.exceptions.AlgorithmException; import ai.libs.jaicore.ml.core.evaluation.measure.singlelabel.ZeroOneLoss; import ai.libs.jaicore.ml.evaluation.evaluators.weka.MonteCarloCrossValidationEvaluator; import ai.libs.jaicore.ml.evaluation.evaluators.weka.splitevaluation.SimpleSLCSplitBasedClassifierEvaluator; import ai.libs.jaicore.ml.metafeatures.GlobalCharacterizer; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNode; import ai.libs.jaicore.search.algorithms.standard.lds.BestFirstLimitedDiscrepancySearch; import ai.libs.jaicore.search.algorithms.standard.lds.BestFirstLimitedDiscrepancySearchFactory; import ai.libs.jaicore.search.algorithms.standard.lds.NodeOrderList; import ai.libs.jaicore.search.model.other.SearchGraphPath; import ai.libs.jaicore.search.model.travesaltree.ReducedGraphGenerator; import ai.libs.jaicore.search.probleminputs.GraphSearchWithNodeRecommenderInput; import ai.libs.mlplan.core.AbstractMLPlanBuilder; import ai.libs.mlplan.core.MLPlan; import ai.libs.mlplan.core.MLPlanWekaBuilder; import ai.libs.mlplan.metamining.databaseconnection.ExperimentRepository; import ai.libs.mlplan.multiclass.wekamlplan.weka.MLPipelineComponentInstanceFactory; import ai.libs.mlplan.multiclass.wekamlplan.weka.WEKAPipelineFactory; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.core.Instance; import weka.core.Instances; public class MetaMLPlan extends AbstractClassifier { private transient Logger logger = LoggerFactory.getLogger(MetaMLPlan.class); // ids private static final long serialVersionUID = 4772178784402396834L; private static final File resourceFile = new File("resources/automl/searchmodels/weka/weka-all-autoweka.json"); private String algorithmId = "MetaMLPlan"; // Search components private transient BestFirstLimitedDiscrepancySearch<TFDNode, String, NodeOrderList> lds; private transient WEKAMetaminer metaMiner; private transient WEKAPipelineFactory factory = new WEKAPipelineFactory(); // Search configuration private long timeoutInSeconds = 60; private long safetyInSeconds = 1; private int cpus = 1; private String metaFeatureSetName = "all"; private String datasetSetName = "all"; private int seed = 0; // Search results private Classifier bestModel; private transient Collection<Component> components; // For intermediate results private transient EventBus eventBus = new EventBus(); public MetaMLPlan(final Instances data) throws IOException { this(resourceFile, data); } public MetaMLPlan(final File configFile, final Instances data) throws IOException { // Prepare mlPlan to get a graphGenerator MLPlanWekaBuilder builder = AbstractMLPlanBuilder.forWeka(); builder.withSearchSpaceConfigFile(configFile); builder.withDataset(data); MLPlan mlPlan = builder.build(); mlPlan.next(); // Set search components except lds this.components = builder.getComponents(); this.metaMiner = new 
WEKAMetaminer(builder.getComponentParameterConfigurations()); // Get lds BestFirstLimitedDiscrepancySearchFactory<TFDNode, String, NodeOrderList> ldsFactory = new BestFirstLimitedDiscrepancySearchFactory<>(); GraphSearchWithNodeRecommenderInput<TFDNode, String> problemInput = new GraphSearchWithNodeRecommenderInput<>(new ReducedGraphGenerator<>(mlPlan.getGraphGenerator()), new MetaMinerBasedSorter(this.metaMiner, builder.getComponents())); ldsFactory.setProblemInput(problemInput); this.lds = ldsFactory.getAlgorithm(); } public void buildMetaComponents(final String host, final String user, final String password) throws AlgorithmException, InterruptedException, SQLException, IOException { ExperimentRepository repo = new ExperimentRepository(host, user, password, new MLPipelineComponentInstanceFactory(this.components), this.cpus, this.metaFeatureSetName, this.datasetSetName); this.metaMiner.build(repo.getDistinctPipelines(), repo.getDatasetCharacterizations(), repo.getPipelineResultsOnDatasets()); } public void buildMetaComponents(final String host, final String user, final String password, final int limit) throws AlgorithmException, InterruptedException, SQLException, IOException { this.logger.info("Get past experiment data from the database and build the MetaMiner."); ExperimentRepository repo = new ExperimentRepository(host, user, password, new MLPipelineComponentInstanceFactory(this.components), this.cpus, this.metaFeatureSetName, this.datasetSetName); repo.setLimit(limit); this.metaMiner.build(repo.getDistinctPipelines(), repo.getDatasetCharacterizations(), repo.getPipelineResultsOnDatasets()); } @Override public void buildClassifier(final Instances data) throws Exception { StopWatch totalTimer = new StopWatch(); totalTimer.start(); // Characterize data set and give to meta miner this.logger.info("Characterizing data set"); this.metaMiner.setDataSetCharacterization(new GlobalCharacterizer().characterize(data)); // Preparing the split for validating pipelines this.logger.info("Preparing validation split"); SimpleSLCSplitBasedClassifierEvaluator classifierEval = new SimpleSLCSplitBasedClassifierEvaluator(new ZeroOneLoss()); MonteCarloCrossValidationEvaluator mccv = new MonteCarloCrossValidationEvaluator(classifierEval, 5, data, .7f, this.seed); // Search for solutions this.logger.info("Searching for solutions"); StopWatch trainingTimer = new StopWatch(); this.bestModel = null; double bestScore = 1; double bestModelMaxTrainingTime = 0; boolean thereIsEnoughTime = true; boolean thereAreMoreElements = true; while (!this.lds.isCanceled() && thereIsEnoughTime && thereAreMoreElements) { try { SearchGraphPath<TFDNode, String> searchGraphPath = this.lds.nextSolutionCandidate(); List<TFDNode> solution = searchGraphPath.getNodes(); if (solution == null) { this.logger.info("Ran out of solutions.
Search is over."); break; } // Prepare pipeline ComponentInstance ci = Util.getSolutionCompositionFromState(this.components, solution.get(solution.size() - 1).getState(), true); Classifier pl = this.factory.getComponentInstantiation(ci); // Evaluate pipeline trainingTimer.reset(); trainingTimer.start(); this.logger.info("Evaluate Pipeline: {}", pl); double score = mccv.evaluate(pl); this.logger.info("Pipeline Score: {}", score); trainingTimer.stop(); this.eventBus.post(new IntermediateSolutionEvent(this.algorithmId, pl, score)); // Check if better than previous best if (score < bestScore) { this.bestModel = pl; bestScore = score; } if (trainingTimer.getTime() > bestModelMaxTrainingTime) { bestModelMaxTrainingTime = trainingTimer.getTime(); } thereIsEnoughTime = this.checkTermination(totalTimer, bestModelMaxTrainingTime, thereIsEnoughTime); } catch (NoSuchElementException e) { this.logger.info("Finished search (exhaustive search conducted)."); thereAreMoreElements = false; } catch (Exception e) { this.logger.warn("Continuing search despite an error.", e); } } Thread finalEval = new Thread() { @Override public void run() { MetaMLPlan.this.logger.info("Evaluating best model on whole training data ({})", MetaMLPlan.this.bestModel); try { MetaMLPlan.this.bestModel.buildClassifier(data); } catch (Exception e) { MetaMLPlan.this.bestModel = null; MetaMLPlan.this.logger.error("Evaluation of best model failed with an exception.", e); } } }; TimerTask newT = new TimerTask() { @Override public void run() { MetaMLPlan.this.logger.error("MetaMLPlan: Interrupt building of final classifier because time is running out."); finalEval.interrupt(); } }; // Start a daemon timer that interrupts the final training if the deadline is reached; it is cancelled once the training thread has finished. Timer timer = new Timer("MetaMLPlan-final-training-timeout", true); try { timer.schedule(newT, this.timeoutInSeconds * 1000 - this.safetyInSeconds * 1000 - totalTimer.getTime()); } catch (IllegalArgumentException e) { this.logger.error("No time left to start the evaluation of the final model. Aborting search."); return; } finalEval.start(); finalEval.join(); timer.cancel(); this.logger.info("Ready. Best solution: {}", this.bestModel); } private boolean checkTermination(final StopWatch totalTimer, final double bestModelMaxTrainingTime, boolean thereIsEnoughTime) { // Check if enough time remaining to re-train the current best model on the // whole training data if ((this.timeoutInSeconds - this.safetyInSeconds) * 1000 <= (totalTimer.getTime() + bestModelMaxTrainingTime)) { this.logger.info("Stopping search to train best model on whole training data which is expected to take {} ms", bestModelMaxTrainingTime); thereIsEnoughTime = false; } return thereIsEnoughTime; } @Override public double classifyInstance(final Instance instance) throws Exception { return this.bestModel.classifyInstance(instance); } public void registerListenerForIntermediateSolutions(final Object listener) { this.eventBus.register(listener); } public void setTimeOutInSeconds(final int timeOutInSeconds) { this.timeoutInSeconds = timeOutInSeconds; } public void setMetaFeatureSetName(final String metaFeatureSetName) { this.metaFeatureSetName = metaFeatureSetName; } public void setDatasetSetName(final String datasetSetName) { this.datasetSetName = datasetSetName; } public void setCPUs(final int cPUs) { this.cpus = cPUs; } public WEKAMetaminer getMetaMiner() { return this.metaMiner; } public void setSeed(final int seed) { this.seed = seed; } public String getAlgorithmId() { return this.algorithmId; } public void setAlgorithmId(final String algorithmId) { this.algorithmId = algorithmId; } }
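An end-to-end usage sketch (not part of the original sources); the credentials are placeholders, and trainData/testInstance are assumed to be WEKA objects loaded elsewhere.
// Hypothetical end-to-end use of MetaMLPlan, based on the methods defined above.
MetaMLPlan metaMLPlan = new MetaMLPlan(trainData);
metaMLPlan.setCPUs(4);
metaMLPlan.setTimeOutInSeconds(300);
metaMLPlan.buildMetaComponents("dbhost", "dbuser", "dbpassword"); // learn the meta miner from past experiments
metaMLPlan.buildClassifier(trainData); // LDS search, MCCV evaluation, final retraining of the best pipeline
double prediction = metaMLPlan.classifyInstance(testInstance);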
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/WEKAMetaminer.java
package ai.libs.mlplan.metamining; import java.util.Collections; import java.util.Enumeration; import java.util.List; import java.util.Map; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.factory.Nd4j; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.metamining.IMetaMiner; import ai.libs.hasco.model.Component; import ai.libs.hasco.model.ComponentInstance; import ai.libs.hasco.model.Parameter; import ai.libs.hasco.model.ParameterRefinementConfiguration; import ai.libs.jaicore.basic.algorithm.exceptions.AlgorithmException; import ai.libs.mlplan.metamining.pipelinecharacterizing.IPipelineCharacterizer; import ai.libs.mlplan.metamining.pipelinecharacterizing.WEKAPipelineCharacterizer; import ai.libs.mlplan.metamining.similaritymeasures.F3Optimizer; import ai.libs.mlplan.metamining.similaritymeasures.IHeterogenousSimilarityMeasureComputer; import ai.libs.mlplan.metamining.similaritymeasures.IRelativeRankMatrixComputer; import ai.libs.mlplan.metamining.similaritymeasures.RelativeRankMatricComputer; import weka.core.Attribute; import weka.core.Instances; /** * An implementation of the meta miner for pipelines consisting exclusively of WEKA components. * * @author Helena Graf * */ public class WEKAMetaminer implements IMetaMiner { private Logger logger = LoggerFactory.getLogger(WEKAMetaminer.class); private boolean hasBeenBuilt = false; private INDArray datasetMetafeatures; private Enumeration<Attribute> dataSetMetaFeaturesAttributes; private IHeterogenousSimilarityMeasureComputer similarityMeasure = new F3Optimizer(0.1); private IRelativeRankMatrixComputer similarityComputer = new RelativeRankMatricComputer(); private IPipelineCharacterizer pipelineCharacterizer; public WEKAMetaminer(Map<Component, Map<Parameter, ParameterRefinementConfiguration>> paramConfigs) { this.pipelineCharacterizer = new WEKAPipelineCharacterizer(paramConfigs); } @Override public double score(ComponentInstance componentInstance) { // Check if has been trained if (!hasBeenBuilt) { throw new WEKAMetaminerRuntimeException("Metaminer has not been built!"); } if (dataSetMetaFeaturesAttributes == null) { throw new WEKAMetaminerRuntimeException("Metaminer has not been given a data set characterization!"); } // Characterize pipeline and compute similarity with data set double[] pipelineMetafeatures = pipelineCharacterizer.characterize(componentInstance); return similarityMeasure.computeSimilarity(datasetMetafeatures, Nd4j.create(pipelineMetafeatures)); } public void build(List<ComponentInstance> distinctPipelines, Instances metaFeatureInformation, double[][][] performanceValues) throws AlgorithmException, InterruptedException { // Check whether has been built if (hasBeenBuilt) { throw new AlgorithmException("MetaMiner has already been built!"); } // ----- Data set Characterization ----- dataSetMetaFeaturesAttributes = metaFeatureInformation.enumerateAttributes(); // Convert to matrix (Matrix X with rows representing data sets) INDArray datasetsMetafeatures = Nd4j.create(metaFeatureInformation.size(), metaFeatureInformation.numAttributes()); for (int i = 0; i < metaFeatureInformation.size(); i++) { datasetsMetafeatures.putRow(i, Nd4j.create(metaFeatureInformation.get(i).toDoubleArray())); } logger.debug("Dataset metafeatures: {} x {}",datasetsMetafeatures.rows(),datasetsMetafeatures.columns()); // ----- Pipeline Characterization ----- // Compute relative performance ranks of pipelines on data sets logger.info("Computing relative performance Matrix."); INDArray rankMatrix = 
similarityComputer.computeRelativeRankMatrix(performanceValues); logger.info("Rank matrix: {} x {}",rankMatrix.rows(),rankMatrix.columns()); logger.debug("Rank Matrix: {}",rankMatrix); // Initialize PipelineCharacterizer with list of distinct pipelines logger.info("WEKAMetaminer: Initializing pipeline characterizer."); pipelineCharacterizer.build(distinctPipelines); // Get Characterization of base pipelines from PipelineCharacterizer (Matrix W) INDArray pipelinesMetafeatures = Nd4j.create(pipelineCharacterizer.getCharacterizationsOfTrainingExamples()); logger.debug("WEKAMetaminer: Pipeline Metafeatures: {} x {}",pipelinesMetafeatures.rows(),pipelinesMetafeatures.columns()); // Initialize HeterogenousSimilarityMeasures logger.info("WEKAMetaminer: Create similarity measure."); similarityMeasure.build(datasetsMetafeatures, pipelinesMetafeatures, rankMatrix); // Building is finished hasBeenBuilt = true; } public void setDataSetCharacterization(Map<String, Double> datasetCharacterization) { // Characterize the given data set with characterizer (set x) datasetMetafeatures = Nd4j.create(datasetCharacterization.size()); List<Attribute> attributes = Collections.list(dataSetMetaFeaturesAttributes); for (int i = 0; i < attributes.size(); i++) { datasetMetafeatures.putScalar(i, datasetCharacterization.get(attributes.get(i).name())); } } /** * Get the similarity measure used to determine the similarity of data set and pipeline characterizations. * * @return The similarity measure in use */ public IHeterogenousSimilarityMeasureComputer getSimilarityMeasure() { return similarityMeasure; } public void setSimilarityMeasure(IHeterogenousSimilarityMeasureComputer similarityMeasure) { this.similarityMeasure = similarityMeasure; } public IPipelineCharacterizer getPipelineCharacterizer() { return pipelineCharacterizer; } public void setPipelineCharacterizer(IPipelineCharacterizer pipelineCharacterizer) { this.pipelineCharacterizer = pipelineCharacterizer; } }
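A lifecycle sketch (not part of the original sources) of the meta miner above; paramConfigs, pipelines, metaFeatureData, performanceValues, characterization, and candidate are assumed to be obtained elsewhere, e.g. via the ExperimentRepository below.
// Hypothetical WEKAMetaminer lifecycle: build once, then score candidates.
WEKAMetaminer metaminer = new WEKAMetaminer(paramConfigs);
metaminer.build(pipelines, metaFeatureData, performanceValues); // learns the similarity measure from X, W, and the rank matrix
metaminer.setDataSetCharacterization(characterization); // sets the meta-feature vector x of the current data set
double score = metaminer.score(candidate); // similarity-based score for a candidate ComponentInstance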
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/WEKAMetaminerRuntimeException.java
package ai.libs.mlplan.metamining; /** * Indicates Runtime failures for the {@link WEKAMetaminer}. * * @author Helena Graf * */ public class WEKAMetaminerRuntimeException extends RuntimeException { private static final long serialVersionUID = 1246022912468302026L; public WEKAMetaminerRuntimeException(String message) { super(message); } public WEKAMetaminerRuntimeException(String message, Throwable cause) { super(message, cause); } public WEKAMetaminerRuntimeException(Throwable cause) { super(cause); } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/package-info.java
/** * Package containing an implementation of meta techniques for MLPlan with WEKA. * * @author Helena Graf * */ package ai.libs.mlplan.metamining;
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/databaseconnection/ComponentInstanceDatabaseGetter.java
package ai.libs.mlplan.metamining.databaseconnection; import java.sql.ResultSet; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.model.ComponentInstance; import ai.libs.hasco.serialization.ComponentNotFoundException; import ai.libs.jaicore.basic.SQLAdapter; import ai.libs.mlplan.multiclass.wekamlplan.weka.MLPipelineComponentInstanceFactory; import ai.libs.mlplan.multiclass.wekamlplan.weka.model.MLPipeline; import ai.libs.mlplan.multiclass.wekamlplan.weka.model.SupervisedFilterSelector; import weka.attributeSelection.ASEvaluation; import weka.attributeSelection.ASSearch; import weka.classifiers.AbstractClassifier; /** * A worker that gets a range of rows from a database with entries containing * String representations of MLPipelines. These representations are converted * back to ComponentInstances by the thread. Currently only pipelines that * exclusively contain elements from the autoweka-all configuration can be * parsed. All the setters have to be used before the thread is run. * * @author Helena Graf * */ public class ComponentInstanceDatabaseGetter extends Thread { private Logger logger = LoggerFactory.getLogger(ComponentInstanceDatabaseGetter.class); private List<ComponentInstance> pipelines; private List<HashMap<String, List<Double>>> pipelinePerformances; private int offset; private int limit; private SQLAdapter adapter; private MLPipelineComponentInstanceFactory factory; private boolean finishedSuccessfully = false; @Override public void run() { String query = "SELECT searcher, evaluator, classifier, GROUP_CONCAT( CONCAT (dataset_id, ':', dataset_origin, ',', error_rate) SEPARATOR ';') AS results FROM basePipelineEvals GROUP BY searcher, evaluator, classifier ORDER BY searcher, evaluator, classifier LIMIT " + limit + " OFFSET " + offset; try { pipelines = new ArrayList<>(limit); pipelinePerformances = new ArrayList<>(limit); ResultSet resultSet = adapter.getResultsOfQuery(query); logger.debug("ComponentInstanceDatabaseGetter: Thread {} got pipelines from the database.", this.getId()); while (resultSet.next()) { next(resultSet); } } catch (Exception e1) { logger.error("Thread {} could not finish getting all pipelines.", this.getId(), e1); return; } finishedSuccessfully = true; } private void next(ResultSet resultSet) throws Exception { try { // Get pipeline ComponentInstance ci; if (resultSet.getString("searcher") != null && resultSet.getString("evaluator") != null) { ci = factory.convertToComponentInstance( new MLPipeline(ASSearch.forName(resultSet.getString("searcher"), null), ASEvaluation.forName(resultSet.getString("evaluator"), null), AbstractClassifier.forName(resultSet.getString("classifier"), null))); } else { ci = factory .convertToComponentInstance(new MLPipeline(new ArrayList<SupervisedFilterSelector>(), AbstractClassifier.forName(resultSet.getString("classifier"), null))); } // Get pipeline performance values (errorRate,dataset array) String[] results = resultSet.getString("results").split(";"); HashMap<String, List<Double>> datasetPerformances = new HashMap<>(); for (int j = 0; j < results.length; j++) { String[] errorRatePerformance = results[j].split(","); if (!datasetPerformances.containsKey(errorRatePerformance[0])) { datasetPerformances.put(errorRatePerformance[0], new ArrayList<>()); } if (errorRatePerformance.length > 1) { datasetPerformances.get(errorRatePerformance[0]) .add(Double.parseDouble(errorRatePerformance[1])); } } pipelines.add(ci); pipelinePerformances.add(datasetPerformances); } catch (ComponentNotFoundException e) { // Could not convert pipeline - component not in loaded configuration logger.warn("Could not convert pipeline since a required component is not in the loaded configuration.", e); } } /** * Set the row of the table at which this thread should start. * * @param offset * The offset */ public void setOffset(int offset) { this.offset = offset; } /** * Set the limit of how many rows this thread shall get. * * @param limit * The limit */ public void setLimit(int limit) { this.limit = limit; } /** * Set the adapter this thread uses to get the data from the database. It has * to have an open connection. * * @param adapter * The used adapter */ public void setAdapter(SQLAdapter adapter) { this.adapter = adapter; } /** * Set the factory used to convert the MLPipelines instantiated from the String * representation in the database to ComponentInstances. * * @param factory * The converter factory */ public void setFactory(MLPipelineComponentInstanceFactory factory) { this.factory = factory; } /** * Get the converted pipelines the thread collected from the database. * * @return The list of converted pipelines */ public List<ComponentInstance> getPipelines() { return pipelines; } /** * Get the performances of the pipelines on the data sets for which values are * present. * * @return A list of mappings of data set ids to a list of performance values in * the same order as the returned list of pipelines */ public List<HashMap<String, List<Double>>> getPipelinePerformances() { return pipelinePerformances; } /** * Find out whether the thread finished successfully or aborted with an error. * * @return Whether the execution of the thread was successful */ public boolean isFinishedSuccessfully() { return finishedSuccessfully; } }
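A worked example (illustrative values, not from the database) of the results column format parsed in next() above:
// "31:openml,0.12;31:openml,0.15;42:local,0.30" is split into per-dataset error-rate lists:
String results = "31:openml,0.12;31:openml,0.15;42:local,0.30";
Map<String, List<Double>> datasetPerformances = new HashMap<>();
for (String entry : results.split(";")) {
	String[] errorRatePerformance = entry.split(","); // [dataset_id:dataset_origin, error_rate]
	datasetPerformances.computeIfAbsent(errorRatePerformance[0], k -> new ArrayList<>());
	if (errorRatePerformance.length > 1) {
		datasetPerformances.get(errorRatePerformance[0]).add(Double.parseDouble(errorRatePerformance[1]));
	}
}
// -> {31:openml=[0.12, 0.15], 42:local=[0.3]}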
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/databaseconnection/ExperimentRepository.java
package ai.libs.mlplan.metamining.databaseconnection; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.model.ComponentInstance; import ai.libs.jaicore.basic.SQLAdapter; import ai.libs.mlplan.multiclass.wekamlplan.weka.MLPipelineComponentInstanceFactory; import dataHandling.mySQL.MetaDataDataBaseConnection; import weka.core.Instances; /** * Manages a connection to experiment data of pipelines on data sets in a * database. * * @author Helena Graf * */ public class ExperimentRepository { private Logger logger = LoggerFactory.getLogger(ExperimentRepository.class); private SQLAdapter adapter; private String host; private String user; private String password; private MLPipelineComponentInstanceFactory factory; private int cpus; private String metaFeatureSetName; private String datasetSetName; private Integer limit; private List<HashMap<String, List<Double>>> pipelinePerformances = new ArrayList<>(); private MetaDataDataBaseConnection metaDataBaseConnection; public ExperimentRepository(String host, String user, String password, MLPipelineComponentInstanceFactory factory, int cpus, String metaFeatureSetName, String datasetSetName) { this.user = user; this.password = password; this.host = host; this.factory = factory; this.cpus = cpus; this.metaDataBaseConnection = new MetaDataDataBaseConnection(host, user, password, "hgraf"); this.metaFeatureSetName = metaFeatureSetName; this.datasetSetName = datasetSetName; } public List<ComponentInstance> getDistinctPipelines() throws SQLException, InterruptedException { connect(); String query = "SELECT (COUNT(DISTINCT searcher, evaluator, classifier)) FROM basePipelineEvals"; ResultSet resultSet = adapter.getResultsOfQuery(query); resultSet.next(); int distinctPipelineCount = resultSet.getInt("(COUNT(DISTINCT searcher, evaluator, classifier))"); distinctPipelineCount = limit == null ? distinctPipelineCount : limit; logger.info("Getting {} distinct pipelines.", distinctPipelineCount); int chunkSize = Math.floorDiv(distinctPipelineCount, cpus); int lastchunkSize = distinctPipelineCount - (chunkSize * (cpus - 1)); logger.debug("ExperimentRepository: Allocate Getter-Threads."); ComponentInstanceDatabaseGetter[] threads = new ComponentInstanceDatabaseGetter[cpus]; for (int i = 0; i < threads.length; i++) { threads[i] = new ComponentInstanceDatabaseGetter(); threads[i].setAdapter(adapter); threads[i].setOffset(i * chunkSize); threads[i].setLimit(i == (threads.length - 1) ?
lastchunkSize : chunkSize); threads[i].setFactory(factory); threads[i].start(); } List<ComponentInstance> pipelines = new ArrayList<>(); for (int i = 0; i < threads.length; i++) { threads[i].join(); pipelines.addAll(threads[i].getPipelines()); pipelinePerformances.addAll(threads[i].getPipelinePerformances()); } boolean allSuccessful = true; for (int i = 0; i < threads.length; i++) { logger.debug("Thread {} finished successfully: {}", threads[i].getId(), threads[i].isFinishedSuccessfully()); if (!threads[i].isFinishedSuccessfully()) { allSuccessful = false; } } if (!allSuccessful) { logger.error("Not all threads finished the download successfully!"); } else { logger.info("All threads finished successfully."); } disconnect(); return pipelines; } public Instances getDatasetCharacterizations() throws SQLException { // get data set characterizations logger.info("Downloading dataset characterizations."); Instances metaData = metaDataBaseConnection.getMetaDataSetForDataSetSet(datasetSetName, metaFeatureSetName); metaData.deleteAttributeAt(0); return metaData; } /** * Gets all the pipeline results for the distinct pipelines from * {@link #getDistinctPipelines()}, thus has to be called after that method. * * @return The results of pipelines on datasets: rows: data sets, columns: * pipelines, entries: array of results of pipeline on data set * @throws SQLException * If something goes wrong while connecting to the database */ public double[][][] getPipelineResultsOnDatasets() throws SQLException { logger.info("Downloading pipeline results for datasets."); // Get order of datasets List<String> datasets = metaDataBaseConnection.getMembersOfDatasetSet(datasetSetName); // Organize results into matrix double[][][] results = new double[datasets.size()][pipelinePerformances.size()][]; for (int j = 0; j < datasets.size(); j++) { String dataset = datasets.get(j); for (int i = 0; i < pipelinePerformances.size(); i++) { // Does the pipeline have a result for the dataset? List<Double> datasetResults = pipelinePerformances.get(i).get(dataset); if (datasetResults != null && !datasetResults.isEmpty()) { results[j][i] = datasetResults.stream().mapToDouble(value -> value).toArray(); } } } return results; } private void connect() { adapter = new SQLAdapter(host, user, password, "pgotfml_hgraf"); } private void disconnect() { adapter.close(); } public void setLimit(Integer limit) { this.limit = limit; } }
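A worked example (illustrative numbers) of the work partitioning in getDistinctPipelines() above:
// 10 distinct pipelines on 3 CPUs:
int distinctPipelineCount = 10;
int cpus = 3;
int chunkSize = Math.floorDiv(distinctPipelineCount, cpus); // 3
int lastchunkSize = distinctPipelineCount - (chunkSize * (cpus - 1)); // 4
// -> threads get offsets 0, 3, 6 with limits 3, 3, 4, so every row is fetched exactly once.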
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/databaseconnection/package-info.java
/** * Package containing classes managing the connection to databases that contain * knowledge needed by the meta miner. * * @author Helena Graf * */ package ai.libs.mlplan.metamining.databaseconnection;
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/dyadranking/DyadRankingBasedNodeEvaluator.java
package ai.libs.mlplan.metamining.dyadranking; import java.io.FileInputStream; import java.io.IOException; import java.io.ObjectInputStream; import java.nio.file.Paths; import java.time.Duration; import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; import java.util.concurrent.CompletionService; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; import org.aeonbits.owner.ConfigFactory; import org.apache.commons.collections.BidiMap; import org.apache.commons.collections.bidimap.DualHashBidiMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.core.Util; import ai.libs.hasco.model.Component; import ai.libs.hasco.model.ComponentInstance; import ai.libs.hasco.serialization.ComponentLoader; import ai.libs.jaicore.basic.IObjectEvaluator; import ai.libs.jaicore.basic.algorithm.AlgorithmExecutionCanceledException; import ai.libs.jaicore.basic.algorithm.events.AlgorithmInitializedEvent; import ai.libs.jaicore.basic.sets.Pair; import ai.libs.jaicore.math.linearalgebra.DenseDoubleVector; import ai.libs.jaicore.math.linearalgebra.Vector; import ai.libs.jaicore.ml.WekaUtil; import ai.libs.jaicore.ml.core.exception.PredictionException; import ai.libs.jaicore.ml.dyadranking.Dyad; import ai.libs.jaicore.ml.dyadranking.algorithm.PLNetDyadRanker; import ai.libs.jaicore.ml.dyadranking.dataset.DyadRankingDataset; import ai.libs.jaicore.ml.dyadranking.dataset.IDyadRankingInstance; import ai.libs.jaicore.ml.dyadranking.dataset.SparseDyadRankingInstance; import ai.libs.jaicore.ml.dyadranking.util.DyadMinMaxScaler; import ai.libs.jaicore.ml.evaluation.evaluators.weka.FixedSplitClassifierEvaluator; import ai.libs.jaicore.ml.metafeatures.LandmarkerCharacterizer; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNode; import ai.libs.jaicore.search.algorithms.standard.bestfirst.events.EvaluatedSearchSolutionCandidateFoundEvent; import ai.libs.jaicore.search.algorithms.standard.bestfirst.events.FValueEvent; import ai.libs.jaicore.search.algorithms.standard.bestfirst.exceptions.NodeEvaluationException; import ai.libs.jaicore.search.algorithms.standard.bestfirst.nodeevaluation.INodeEvaluator; import ai.libs.jaicore.search.algorithms.standard.bestfirst.nodeevaluation.IPotentiallyGraphDependentNodeEvaluator; import ai.libs.jaicore.search.algorithms.standard.bestfirst.nodeevaluation.IPotentiallySolutionReportingNodeEvaluator; import ai.libs.jaicore.search.algorithms.standard.bestfirst.nodeevaluation.RandomCompletionBasedNodeEvaluator; import ai.libs.jaicore.search.algorithms.standard.bestfirst.nodeevaluation.RandomizedDepthFirstNodeEvaluator; import ai.libs.jaicore.search.algorithms.standard.gbf.SolutionEventBus; import ai.libs.jaicore.search.algorithms.standard.random.RandomSearch; import ai.libs.jaicore.search.core.interfaces.GraphGenerator; import ai.libs.jaicore.search.model.other.EvaluatedSearchGraphPath; import ai.libs.jaicore.search.model.other.SearchGraphPath; import ai.libs.jaicore.search.model.travesaltree.Node; import ai.libs.jaicore.search.probleminputs.GraphSearchWithSubpathEvaluationsInput; import 
ai.libs.mlplan.metamining.pipelinecharacterizing.ComponentInstanceVectorFeatureGenerator; import ai.libs.mlplan.metamining.pipelinecharacterizing.IPipelineCharacterizer; import ai.libs.mlplan.multiclass.wekamlplan.IClassifierFactory; import weka.core.Instances; /** * This NodeEvaluator can calculate the f-value for nodes using dyad ranking. * To this end, a large number of random completions is drawn below a node; these * pipelines are then ranked using dyad ranking, and finally the top k pipelines * are evaluated, using the best observed score as the f-value of the node. * * @param <T> * the node type, typically it is {@link TFDNode} * @param <V> * the type of the score * @author Mirko Juergens * */ public class DyadRankingBasedNodeEvaluator<T, V extends Comparable<V>> implements IPotentiallyGraphDependentNodeEvaluator<T, V>, IPotentiallySolutionReportingNodeEvaluator<T, V> { private static final Logger logger = LoggerFactory.getLogger(DyadRankingBasedNodeEvaluator.class); /* Key is a path (hence, a List<T>); value is a ComponentInstance */ private BidiMap pathToPipelines = new DualHashBidiMap(); /* Used to draw random completions for nodes that are not in the final state */ private RandomSearch<T, String> randomPathCompleter; /* The evaluator that can be used to get the performance of the paths */ private IObjectEvaluator<ComponentInstance, V> pipelineEvaluator; /* Specifies the components of this MLPlan run. */ private Collection<Component> components; /* * Specifies the number of paths that are randomly completed for the computation * of the f-value */ private final int randomlyCompletedPaths; /* The dataset of this MLPlan run. */ private Instances evaluationDataset; /* * X in the paper, these are usually derived using landmarking algorithms on the * dataset */ private double[] datasetMetaFeatures; /* * Specifies the number of paths that will be evaluated after ranking the paths */ private final int evaluatedPaths; /* The Random instance used to randomly complete the paths */ private final Random random; /* The ranker to use for dyad ranking */ private PLNetDyadRanker dyadRanker = new PLNetDyadRanker(); /* The characterizer to use to derive meta features for pipelines */ private IPipelineCharacterizer characterizer; /* Only used if useLandmarkers is set to true */ /* * Defines how many evaluations for each of the landmarkers are performed, to * reduce variance */ private final int landmarkerSampleSize; /* Only used if useLandmarkers is set to true */ /* Defines the size of the different landmarkers */ private final int[] landmarkers; /* Only used if useLandmarkers is set to true */ /* * The concrete landmarker instances; this array has dimensions landmarkers.length * x landmarkerSampleSize */ private Instances[][] landmarkerSets; /* Only used if useLandmarkers is set to true */ /* * Used to create landmarker values for pipelines where no such landmarker has * yet been evaluated. */ private IClassifierFactory classifierFactory; /* * Defines whether a landmarking-based approach is used for defining the meta * features of the algorithm.
*/
private boolean useLandmarkers;
/* * Used to derive the time until a certain solution has been found; useful for * evaluations */
private Instant firstEvaluation = null;
private SolutionEventBus<T> eventBus;
private GraphGenerator<T, ?> graphGenerator;
private DyadMinMaxScaler scaler = null;
public void setClassifierFactory(final IClassifierFactory classifierFactory) { this.classifierFactory = classifierFactory; }
public DyadRankingBasedNodeEvaluator(final ComponentLoader loader) { this(loader, ConfigFactory.create(DyadRankingBasedNodeEvaluatorConfig.class)); }
public DyadRankingBasedNodeEvaluator(final ComponentLoader loader, final DyadRankingBasedNodeEvaluatorConfig config) {
this.eventBus = new SolutionEventBus<>();
this.components = loader.getComponents();
this.random = new Random(config.getSeed());
this.evaluatedPaths = config.getNumberOfEvaluations();
this.randomlyCompletedPaths = config.getNumberOfRandomSamples();
logger.debug("Initialized DyadRankingBasedNodeEvaluator with evalNum: {} and completionNum: {}", this.evaluatedPaths, this.randomlyCompletedPaths);
this.characterizer = new ComponentInstanceVectorFeatureGenerator(loader.getComponents());
this.landmarkers = config.getLandmarkers();
this.landmarkerSampleSize = config.getLandmarkerSampleSize();
this.useLandmarkers = config.useLandmarkers();
String scalerPath = config.scalerPath();
// load the PLNet dyad ranker from the config
try { this.dyadRanker.loadModelFromFile(Paths.get(config.getPlNetPath()).toString()); } catch (IOException e) { logger.error("Could not load PLNet model from {}", Paths.get(config.getPlNetPath())); }
// load the scaler from the config
try (ObjectInputStream oin = new ObjectInputStream(new FileInputStream(Paths.get(scalerPath).toFile()))) { this.scaler = (DyadMinMaxScaler) oin.readObject(); } catch (IOException e) { logger.error("Could not load scaler for PLNet from {}", Paths.get(config.scalerPath())); } catch (ClassNotFoundException e) { logger.error("Could not read scaler.", e); } }
@SuppressWarnings("unchecked") @Override public V f(final Node<T, ?> node) throws InterruptedException, NodeEvaluationException {
if (this.firstEvaluation == null) { this.firstEvaluation = Instant.now(); }
/* Let the random completer handle this use-case. */
if (node.isGoal()) { return null; }
/* Time measuring */
Instant startOfEvaluation = Instant.now();
/* Make sure that the completer knows the path until this node */
if (!this.randomPathCompleter.knowsNode(node.getPoint())) { synchronized (this.randomPathCompleter) { this.randomPathCompleter.appendPathToNode(node.externalPath()); } }
// draw N paths at random
List<List<T>> randomPaths = null;
try { randomPaths = this.getNRandomPaths(node); } catch (InterruptedException | TimeoutException e) { logger.error("Interrupted in path completion!"); Thread.currentThread().interrupt(); throw new InterruptedException(); }
// order them according to dyad ranking
List<ComponentInstance> allRankedPaths;
try { allRankedPaths = this.getDyadRankedPaths(randomPaths); } catch (PredictionException e1) { throw new NodeEvaluationException(e1, "Could not rank nodes"); }
// random search failed to find anything here
if (allRankedPaths.isEmpty()) {
// penalty value for nodes under which no completion could be found
return (V) ((Double) 9000.0d); }
// get the top k paths
List<ComponentInstance> topKRankedPaths = allRankedPaths.subList(0, Math.min(this.evaluatedPaths, allRankedPaths.size()));
// evaluate the top k paths
List<Pair<ComponentInstance, V>> allEvaluatedPaths = null;
try { allEvaluatedPaths = this.evaluateTopKPaths(topKRankedPaths); } catch (InterruptedException | TimeoutException e) { logger.error("Interrupted while predicting next best solution"); Thread.currentThread().interrupt(); throw new InterruptedException(); } catch (ExecutionException e2) { logger.error("Could not evaluate solution candidates. Returning null as f-value."); return null; }
Duration evaluationTime = Duration.between(startOfEvaluation, Instant.now());
logger.info("Evaluation took {}ms", evaluationTime.toMillis());
V bestSolution = this.getBestSolution(allEvaluatedPaths);
logger.info("Best solution is {}, {}", bestSolution, allEvaluatedPaths.stream().map(Pair::getY).collect(Collectors.toList()));
if (bestSolution == null) {
// penalty value for nodes under which no candidate could be evaluated
return (V) ((Double) 9000.0d); }
this.eventBus.post(new FValueEvent<V>(null, bestSolution, evaluationTime.toMillis()));
return bestSolution; }
/** * Adapted from {@link RandomCompletionBasedNodeEvaluator}; this should perhaps be * refactored into a shared pattern. * * @param node * the starting node * @return the randomPaths, described by their final node * @throws InterruptedException * @throws TimeoutException */
private List<List<T>> getNRandomPaths(final Node<T, ?> node) throws InterruptedException, TimeoutException {
List<List<T>> completedPaths = new ArrayList<>();
for (int currentPath = 0; currentPath < this.randomlyCompletedPaths; currentPath++) {
/* * complete the current path by the dfs-solution; we assume that this goes in * almost constant time */
List<T> pathCompletion = null;
List<T> completedPath = null;
synchronized (this.randomPathCompleter) {
if (this.randomPathCompleter.isCanceled()) { logger.info("Completer has been canceled (perhaps due to a cancel on the evaluator). Canceling RDFS"); break; }
completedPath = new ArrayList<>(node.externalPath());
SearchGraphPath<T, String> solutionPathFromN = null;
try { solutionPathFromN = this.randomPathCompleter.nextSolutionUnderNode(node.getPoint()); } catch (AlgorithmExecutionCanceledException e) { logger.info("Completer has been canceled. Returning control.");
Returning control."); break; } if (solutionPathFromN == null) { logger.info("No completion was found for path {}.", node.externalPath()); break; } pathCompletion = new ArrayList<>(solutionPathFromN.getNodes()); pathCompletion.remove(0); completedPath.addAll(pathCompletion); } completedPaths.add(completedPath); } logger.info("Returning {} paths", completedPaths.size()); return completedPaths; } private List<ComponentInstance> getDyadRankedPaths(final List<List<T>> randomPaths) throws PredictionException { Map<Vector, ComponentInstance> pipelineToCharacterization = new HashMap<>(); // extract componentInstances that we can rank for (List<T> randomPath : randomPaths) { TFDNode goalNode = (TFDNode) randomPath.get(randomPath.size() - 1); ComponentInstance cI = Util.getSolutionCompositionFromState(this.components, goalNode.getState(), true); this.pathToPipelines.put(randomPath, cI); // fill the y with landmarkers if (this.useLandmarkers) { Vector yPrime = this.evaluateLandmarkersForAlgorithm(cI); pipelineToCharacterization.put(yPrime, cI); } else { Vector y = new DenseDoubleVector(this.characterizer.characterize(cI)); if (this.scaler != null) { List<IDyadRankingInstance> asList = Arrays.asList(new SparseDyadRankingInstance(new DenseDoubleVector(this.datasetMetaFeatures), Arrays.asList(y))); DyadRankingDataset dataset = new DyadRankingDataset(asList); this.scaler.transformAlternatives(dataset); } pipelineToCharacterization.put(y, cI); } } // invoke dyad ranker return this.rankRandomPipelines(pipelineToCharacterization); } /** * Calculates the landmarkers for the given Pipeline, if the value * {@link DyadRankingBasedNodeEvaluator#useLandmarkers} is set to * <code>true</code>. * * @param y * the pipeline characterization * @param cI * the pipeline to characterize * @return the meta features of the pipeline with appended landmarking features */ private Vector evaluateLandmarkersForAlgorithm(final ComponentInstance cI) { double[] y = this.characterizer.characterize(cI); int sizeOfYPrime = this.characterizer.getLengthOfCharacterization() + this.landmarkers.length; double[] yPrime = new double[sizeOfYPrime]; System.arraycopy(y, 0, yPrime, 0, y.length); for (int i = 0; i < this.landmarkers.length; i++) { Instances[] subsets = this.landmarkerSets[i]; double score = 0d; for (Instances train : subsets) { FixedSplitClassifierEvaluator evaluator = new FixedSplitClassifierEvaluator(train, this.evaluationDataset); try { score += evaluator.evaluate(this.classifierFactory.getComponentInstantiation(cI)); } catch (Exception e) { logger.error("Couldn't get classifier for {}", cI); } } // average the score if (score != 0) { score = score / subsets.length; } yPrime[y.length + i] = score; } return new DenseDoubleVector(yPrime); } private List<ComponentInstance> rankRandomPipelines(final Map<Vector, ComponentInstance> randomPipelines) throws PredictionException { List<Vector> alternatives = new ArrayList<>(randomPipelines.keySet()); /* Use a sparse instance for ranking */ SparseDyadRankingInstance toRank = new SparseDyadRankingInstance(new DenseDoubleVector(this.datasetMetaFeatures), alternatives); IDyadRankingInstance rankedInstance; rankedInstance = this.dyadRanker.predict(toRank); List<ComponentInstance> rankedPipelines = new ArrayList<>(); for (Dyad dyad : rankedInstance) { rankedPipelines.add(randomPipelines.get(dyad.getAlternative())); } return rankedPipelines; } /** * Invokes the solution-evaluator to get the performances of the best k paths. 
* * @param topKRankedPaths * the paths, after ranking * @return the list of scores * @throws InterruptedException * @throws ExecutionException * @throws TimeoutException */
private List<Pair<ComponentInstance, V>> evaluateTopKPaths(final List<ComponentInstance> topKRankedPaths) throws InterruptedException, ExecutionException, TimeoutException {
// we use the completion-service mechanism to make sure we wait at most 20 seconds for an evaluation
java.util.concurrent.ExecutorService executor = Executors.newFixedThreadPool(1);
CompletionService<Pair<ComponentInstance, V>> completionService = new ExecutorCompletionService<>(executor);
List<Pair<ComponentInstance, V>> evaluatedSolutions = new ArrayList<>();
// schedule the tasks
for (ComponentInstance node : topKRankedPaths) {
completionService.submit(() -> {
try { Instant startTime = Instant.now(); V score = this.pipelineEvaluator.evaluate(node); Duration evalTime = Duration.between(startTime, Instant.now()); this.postSolution(node, evalTime.toMillis(), score); return new Pair<>(node, score); } catch (Exception e) { logger.error("Couldn't evaluate {}", node, e); return null; } }); }
// collect the results, but do not wait longer than 20 seconds for a result to appear
for (int i = 0; i < topKRankedPaths.size(); i++) {
logger.info("Got {} solutions. Waiting for iteration {} of max iterations {}", evaluatedSolutions.size(), i + 1, topKRankedPaths.size());
Future<Pair<ComponentInstance, V>> evaluatedPipe = completionService.poll(20, TimeUnit.SECONDS);
if (evaluatedPipe == null) { logger.info("Didn't receive a future in time (expected {} futures)", topKRankedPaths.size()); continue; }
try { Pair<ComponentInstance, V> solution = evaluatedPipe.get(20, TimeUnit.SECONDS);
if (solution != null) { logger.info("Evaluation was successful. Adding {} to solutions", solution.getY()); evaluatedSolutions.add(solution); } else { logger.info("No solution was found while waiting up to 20s."); evaluatedPipe.cancel(true); } } catch (Exception e) { logger.info("Got exception while evaluating: {}", e.getMessage()); } }
// make sure the worker thread does not outlive this evaluation
executor.shutdownNow();
return evaluatedSolutions; }
/** * Aggregates a list of found solutions to an f-value. Currently, this is the * minimum value found. * * @param allEvaluatedPaths * all evaluated solutions * @return the best (minimum) score, or null if no solution was evaluated */
private V getBestSolution(final List<Pair<ComponentInstance, V>> allEvaluatedPaths) { return allEvaluatedPaths.stream().map(Pair::getY).min(V::compareTo).orElse(null); }
@Override public void setGenerator(final GraphGenerator<T, ?> generator) { this.graphGenerator = generator; this.initializeRandomSearch(); }
/** * Initializes the random search used to draw completions; invoked whenever a new * graph generator is set. */
private void initializeRandomSearch() {
INodeEvaluator<T, Double> nodeEvaluator = new RandomizedDepthFirstNodeEvaluator<>(this.random);
@SuppressWarnings("unchecked") GraphSearchWithSubpathEvaluationsInput<T, String, Double> completionProblem = new GraphSearchWithSubpathEvaluationsInput<>((GraphGenerator<T, String>) this.graphGenerator, nodeEvaluator);
this.randomPathCompleter = new RandomSearch<>(completionProblem, null, this.random);
while (!(this.randomPathCompleter.next() instanceof AlgorithmInitializedEvent)) { /* do not do anything, just skip until the AlgorithmInitializedEvent is observed */ } }
/** * Sets the data set in the node evaluator and calculates its meta features.
* * @param dataset * the dataset to be characterized */
public void setDataset(final Instances dataset) {
if (this.useLandmarkers) {
// split the dataset into train & test data for evaluating the landmarkers
List<Instances> split = WekaUtil.getStratifiedSplit(dataset, 42L, 0.8d);
Instances trainData = split.get(0);
this.evaluationDataset = split.get(1);
Map<String, Double> metaFeatures;
try { metaFeatures = new LandmarkerCharacterizer().characterize(dataset); this.datasetMetaFeatures = metaFeatures.entrySet().stream().mapToDouble(Map.Entry::getValue).toArray(); } catch (Exception e) { logger.error("Failed to characterize the dataset", e); }
this.setUpLandmarkingDatasets(dataset, trainData); } else {
try { Map<String, Double> metaFeatures = new LandmarkerCharacterizer().characterize(dataset); this.datasetMetaFeatures = metaFeatures.entrySet().stream().mapToDouble(Map.Entry::getValue).toArray(); } catch (Exception e) { logger.error("Failed to characterize the dataset", e); } } }
/** * Sets up the training data for the landmarkers that should be used. */
private void setUpLandmarkingDatasets(final Instances dataset, final Instances trainData) {
this.landmarkerSets = new Instances[this.landmarkers.length][this.landmarkerSampleSize];
// draw instances used for the landmarkers
for (int i = 0; i < this.landmarkers.length; i++) {
int landmarker = this.landmarkers[i];
for (int j = 0; j < this.landmarkerSampleSize; j++) {
Instances instances = new Instances(dataset, landmarker);
for (int k = 0; k < landmarker; k++) { int randomEntry = this.random.nextInt(trainData.size()); instances.add(trainData.get(randomEntry)); }
this.landmarkerSets[i][j] = instances; } } }
/** * Posts the solution to the EventBus of the search. * * @param solution * the evaluated pipeline * @param time * the time the evaluation took * @param score * the observed score */
protected void postSolution(final ComponentInstance solution, final long time, final V score) {
try {
@SuppressWarnings("unchecked") List<T> pathToSolution = (List<T>) this.pathToPipelines.getKey(solution);
EvaluatedSearchGraphPath<T, ?, V> solutionObject = new EvaluatedSearchGraphPath<>(pathToSolution, null, score);
solutionObject.setAnnotation("fTime", time);
solutionObject.setAnnotation("timeToSolution", Duration.between(this.firstEvaluation, Instant.now()).toMillis());
solutionObject.setAnnotation("nodesEvaluatedToSolution", this.randomlyCompletedPaths);
logger.debug("Posting solution {}", solutionObject);
this.eventBus.post(new EvaluatedSearchSolutionCandidateFoundEvent<>("DyadRankingBasedCompletion", solutionObject)); } catch (Exception e) { logger.error("Couldn't post solution to event bus.", e); } }
public void setPipelineEvaluator(final IObjectEvaluator<ComponentInstance, V> wrappedSearchBenchmark) { this.pipelineEvaluator = wrappedSearchBenchmark; }
@Override public boolean requiresGraphGenerator() { return true; }
@Override public void registerSolutionListener(final Object listener) { this.eventBus.register(listener); }
@Override public boolean reportsSolutions() { return true; } }
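A minimal wiring sketch for the evaluator above. All parameter names (loader, data, pipelineEvaluator, generator, node) are assumptions introduced for illustration; the calls themselves are the public API shown in the class.

import ai.libs.hasco.model.ComponentInstance;
import ai.libs.hasco.serialization.ComponentLoader;
import ai.libs.jaicore.basic.IObjectEvaluator;
import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNode;
import ai.libs.jaicore.search.core.interfaces.GraphGenerator;
import ai.libs.jaicore.search.model.travesaltree.Node;
import weka.core.Instances;

public class DyadRankingNodeEvaluatorUsageSketch {
	// Hypothetical helper: wires the node evaluator and computes one f-value.
	public static Double evaluateNode(final ComponentLoader loader, final Instances data,
			final IObjectEvaluator<ComponentInstance, Double> pipelineEvaluator,
			final GraphGenerator<TFDNode, String> generator, final Node<TFDNode, ?> node) throws Exception {
		DyadRankingBasedNodeEvaluator<TFDNode, Double> nodeEvaluator = new DyadRankingBasedNodeEvaluator<>(loader);
		nodeEvaluator.setDataset(data); // derives the dataset meta features (and landmarkers, if enabled)
		nodeEvaluator.setPipelineEvaluator(pipelineEvaluator); // benchmark used for the top-k candidates
		nodeEvaluator.setGenerator(generator); // also initializes the internal random completer
		return nodeEvaluator.f(node); // rank random completions, evaluate the top k, return the best score
	}
}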
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/dyadranking/DyadRankingBasedNodeEvaluatorConfig.java
package ai.libs.mlplan.metamining.dyadranking; import org.aeonbits.owner.Config.Sources; import org.aeonbits.owner.Mutable;
@Sources({ "file:conf/draco/dyadranking/nodeevaluator.properties" })
public interface DyadRankingBasedNodeEvaluatorConfig extends Mutable {
/* The number of top-ranked pipelines that are evaluated. */
public static final String NUM_EVALUATIONS_KEY = "numEval";
/* The number of random samples that are drawn in each f-value computation. */
public static final String NUM_SAMPLES_KEY = "numSamples";
/* The seed of the random completer. */
public static final String SEED_KEY = "seed";
public static final String PLNET_ZIP_KEY = "plnetPath";
public static final String SCALER_SER_KEY = "scalerPath";
public static final String LANDMARKERS_KEY = "landmarkers";
public static final String LANDMARKERS_SAMPLE_SIZE_KEY = "landmarkerSampleSize";
public static final String USE_LANDMARKERS = "useLandmarkers";
/* * Specifies whether the dyad ranker should evaluate the top k pipelines or just * return the score that is predicted by the PLNet. */
public static final String USE_EVALUATIONS = "useEvaluations";
@Key(NUM_EVALUATIONS_KEY) @DefaultValue("10") public int getNumberOfEvaluations();
@Key(NUM_SAMPLES_KEY) @DefaultValue("20") public int getNumberOfRandomSamples();
@Key(SEED_KEY) @DefaultValue("42") public int getSeed();
@Key(PLNET_ZIP_KEY) @DefaultValue("resources/draco/plnet/final_plnet_minmax.zip") public String getPlNetPath();
@Key(LANDMARKERS_KEY) @Separator(";") @DefaultValue("4; 8; 16") public int[] getLandmarkers();
@Key(LANDMARKERS_SAMPLE_SIZE_KEY) @DefaultValue("10") public int getLandmarkerSampleSize();
@Key(USE_LANDMARKERS) @DefaultValue("false") public boolean useLandmarkers();
@Key(USE_EVALUATIONS) @DefaultValue("true") public boolean useEvaluations();
@Key(SCALER_SER_KEY) @DefaultValue("resources/draco/plnet/minmaxscaler.ser") public String scalerPath(); }
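Since the interface extends Mutable, its properties can also be set programmatically, which is handy in tests. A small sketch, assuming only the owner library on the classpath; the override value is illustrative.

import org.aeonbits.owner.ConfigFactory;

public class NodeEvaluatorConfigSketch {
	public static void main(final String[] args) {
		// falls back to the @DefaultValue annotations if the properties file is absent
		DyadRankingBasedNodeEvaluatorConfig config = ConfigFactory.create(DyadRankingBasedNodeEvaluatorConfig.class);
		System.out.println(config.getNumberOfEvaluations()); // 10 (default)
		// Mutable configs support runtime overrides (illustrative value)
		config.setProperty(DyadRankingBasedNodeEvaluatorConfig.NUM_EVALUATIONS_KEY, "5");
		System.out.println(config.getNumberOfEvaluations()); // 5
	}
}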
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/dyadranking/WEKADyadRankedNodeQueue.java
package ai.libs.mlplan.metamining.dyadranking; import java.util.Collection; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.core.Util; import ai.libs.hasco.model.Component; import ai.libs.hasco.model.ComponentInstance; import ai.libs.jaicore.math.linearalgebra.DenseDoubleVector; import ai.libs.jaicore.math.linearalgebra.Vector; import ai.libs.jaicore.ml.dyadranking.algorithm.IDyadRanker; import ai.libs.jaicore.ml.dyadranking.search.ADyadRankedNodeQueue; import ai.libs.jaicore.ml.dyadranking.util.AbstractDyadScaler; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNode; import ai.libs.jaicore.search.model.travesaltree.Node; import ai.libs.mlplan.metamining.pipelinecharacterizing.IPipelineCharacterizer; /** * A queue that uses a dyad ranker to rank WEKA pipelines. * * @author Helena Graf * */ public class WEKADyadRankedNodeQueue extends ADyadRankedNodeQueue<TFDNode, Double> { private Logger logger = LoggerFactory.getLogger(WEKADyadRankedNodeQueue.class); /** * the allowed components of the pipelines */ private Collection<Component> components; /** * the characterizer for characterizing (partial) pipelines */ private IPipelineCharacterizer characterizer; /** * Construct a new WEKA dyad ranked node queue that ranks WEKA pipelines * constructed from the given components in the given context. * * @param contextCharacterization * the characterization of the dataset (the context) * @param components * the search space components * @param ranker * the ranker to use to rank the dyads - must be pre-trained * @param scaler * the scaler to use to scale the dataset - must have been fit to * data already */ public WEKADyadRankedNodeQueue(final Vector contextCharacterization, final Collection<Component> components, final IDyadRanker ranker, final AbstractDyadScaler scaler, final IPipelineCharacterizer characterizer) { super(contextCharacterization, ranker, scaler); this.components = components; this.characterizer = characterizer; } @Override protected Vector characterize(final Node<TFDNode, Double> node) { ComponentInstance cI = Util.getComponentInstanceFromState(this.components, node.getPoint().getState(), "solution", true); if (cI != null) { this.logger.debug("Characterizing new node."); return new DenseDoubleVector(this.characterizer.characterize(cI)); } else { this.logger.debug("CI from node for characterization is null."); return new DenseDoubleVector(this.characterizer.getLengthOfCharacterization(), 0); } } }
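A construction sketch for this queue, under the assumption that the ranker has been pre-trained and the scaler pre-fitted, as the constructor javadoc requires; the variables datasetMetaFeatures, components, pretrainedRanker, and prefittedScaler are hypothetical.

import java.util.Collection;
import ai.libs.hasco.model.Component;
import ai.libs.jaicore.math.linearalgebra.DenseDoubleVector;
import ai.libs.jaicore.ml.dyadranking.algorithm.PLNetDyadRanker;
import ai.libs.jaicore.ml.dyadranking.util.AbstractDyadScaler;
import ai.libs.mlplan.metamining.pipelinecharacterizing.ComponentInstanceVectorFeatureGenerator;

public class DyadRankedNodeQueueSketch {
	// Hypothetical factory method for the dyad-ranked OPEN list.
	static WEKADyadRankedNodeQueue buildOpenList(final double[] datasetMetaFeatures, final Collection<Component> components,
			final PLNetDyadRanker pretrainedRanker, final AbstractDyadScaler prefittedScaler) {
		return new WEKADyadRankedNodeQueue(
				new DenseDoubleVector(datasetMetaFeatures), // dataset characterization (the context)
				components, // search space components
				pretrainedRanker, // must be pre-trained
				prefittedScaler, // must already be fit to data
				new ComponentInstanceVectorFeatureGenerator(components)); // pipeline characterizer
	}
}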
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/dyadranking/WEKADyadRankedNodeQueueConfig.java
package ai.libs.mlplan.metamining.dyadranking; import java.io.IOException; import java.util.Collection; import java.util.Map; import org.openml.webapplication.fantail.dc.Characterizer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.model.Component; import ai.libs.jaicore.math.linearalgebra.DenseDoubleVector; import ai.libs.jaicore.ml.dyadranking.search.ADyadRankedNodeQueueConfig; import ai.libs.jaicore.ml.metafeatures.DatasetCharacterizerInitializationFailedException; import ai.libs.jaicore.ml.metafeatures.LandmarkerCharacterizer; import ai.libs.jaicore.planning.hierarchical.algorithms.forwarddecomposition.graphgenerators.tfd.TFDNode; import ai.libs.jaicore.search.algorithms.standard.bestfirst.BestFirst; import ai.libs.mlplan.metamining.pipelinecharacterizing.ComponentInstanceVectorFeatureGenerator; import ai.libs.mlplan.metamining.pipelinecharacterizing.IPipelineCharacterizer; import weka.core.Instances;
/** * A configuration class that contains configurable variables for using ML-Plan * with best-first search and a dyad-ranked OPEN list instead of random * completions. * * @author Helena Graf * */
public class WEKADyadRankedNodeQueueConfig extends ADyadRankedNodeQueueConfig<TFDNode> {
private Logger logger = LoggerFactory.getLogger(WEKADyadRankedNodeQueueConfig.class);
/** * the characterizer used to characterize new datasets; it must produce dataset * meta data in the same format the dyad ranker was trained with */
private Characterizer datasetCharacterizer;
/** * the characterizer used to characterize new pipelines; it must produce pipeline * meta features in the same format the dyad ranker was trained with */
private IPipelineCharacterizer pipelineCharacterizer;
/** * characterization of the dataset the WEKA classifiers are applied to */
private double[] contextCharacterization;
/** * components used during the search; necessary so that the pipeline * characterizer can translate nodes to component instances */
private Collection<Component> components;
/** * Create a new configuration for a WEKA dyad ranked node queue. * * @throws IOException * if the default ranker or scaler cannot be loaded * @throws ClassNotFoundException * if the default ranker or scaler cannot be instantiated * @throws DatasetCharacterizerInitializationFailedException * if the default dataset characterizer cannot be instantiated */
public WEKADyadRankedNodeQueueConfig() throws ClassNotFoundException, IOException, DatasetCharacterizerInitializationFailedException { super(); this.datasetCharacterizer = new LandmarkerCharacterizer(); }
@SuppressWarnings({ "rawtypes", "unchecked" }) @Override public void configureBestFirst(final BestFirst bestFirst) {
this.logger.trace("Configuring OPEN list of BF");
bestFirst.setOpen(new WEKADyadRankedNodeQueue(new DenseDoubleVector(this.contextCharacterization), this.components, this.ranker, this.scaler, this.pipelineCharacterizer)); }
/** * Configure the dataset whose meta features form the context in which the dyad * ranker ranks the pipelines. * * @param data * the data to use */
public void setData(final Instances data) {
this.logger.trace("Setting data to instances of size {}", data.size());
this.contextCharacterization = this.datasetCharacterizer.characterize(data).entrySet().stream() .mapToDouble(Map.Entry::getValue).toArray(); }
/** * Configure the dyad ranked node queue to use the given components for the * pipeline characterizer to transform nodes to component instances.
* * @param components * the components to use for the pipeline characterizer */
public void setComponents(final Collection<Component> components) {
this.components = components;
if (this.pipelineCharacterizer == null) { this.pipelineCharacterizer = new ComponentInstanceVectorFeatureGenerator(components); } }
/** * Set the dataset characterizer to be used. It must produce dataset * meta data in the same format the dyad ranker was trained with. * * @param datasetCharacterizer * the dataset characterizer to use */
public void setDatasetCharacterizer(final Characterizer datasetCharacterizer) { this.datasetCharacterizer = datasetCharacterizer; }
/** * Set the pipeline characterizer to be used. It must produce pipeline * meta features in the same format the dyad ranker was trained with. * * @param pipelineCharacterizer the pipeline characterizer to use */
public void setPipelineCharacterizer(final IPipelineCharacterizer pipelineCharacterizer) { this.pipelineCharacterizer = pipelineCharacterizer; } }
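Putting the configuration to use: a sketch of how a best-first search might be equipped with the dyad-ranked OPEN list, assuming bestFirst, data, and components already exist elsewhere; exception handling is elided.

import java.util.Collection;
import ai.libs.hasco.model.Component;
import ai.libs.jaicore.search.algorithms.standard.bestfirst.BestFirst;
import weka.core.Instances;

public class DyadRankedBestFirstSetupSketch {
	@SuppressWarnings("rawtypes")
	static void setUp(final BestFirst bestFirst, final Instances data, final Collection<Component> components) throws Exception {
		WEKADyadRankedNodeQueueConfig config = new WEKADyadRankedNodeQueueConfig(); // loads the default ranker, scaler, and dataset characterizer
		config.setData(data); // characterize the dataset (the ranking context)
		config.setComponents(components); // also creates the default pipeline characterizer
		config.configureBestFirst(bestFirst); // replaces the OPEN list with the dyad-ranked queue
	}
}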
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/dyadranking/package-info.java
/** * Package containing ML-Plan related dyad ranking implementations. * * @author Helena Graf, Mirko Jürgens * */ package ai.libs.mlplan.metamining.dyadranking;
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/pipelinecharacterizing/AWEKAPerformanceDecisionTreeBasedFeatureGenerator.java
package ai.libs.mlplan.metamining.pipelinecharacterizing; import java.util.ArrayList; import java.util.Map; import ai.libs.jaicore.basic.algorithm.exceptions.AlgorithmException; import ai.libs.jaicore.math.linearalgebra.Vector; import ai.libs.jaicore.ml.core.exception.TrainingException; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.Instances;
/** * A {@link IPerformanceDecisionTreeBasedFeatureGenerator} that uses a WEKA * implementation of a decision tree. * * @author Helena Graf * */
public abstract class AWEKAPerformanceDecisionTreeBasedFeatureGenerator implements IPerformanceDecisionTreeBasedFeatureGenerator {
@Override public void train(final Map<Vector, Double> intermediatePipelineRepresentationsWithPerformanceValues) throws TrainingException {
// Step 1: Transform to Instances Object
ArrayList<Attribute> attInfo = new ArrayList<>();
int numberOfFeatures = intermediatePipelineRepresentationsWithPerformanceValues.keySet().iterator().next().length();
for (int i = 0; i < numberOfFeatures; i++) { attInfo.add(new Attribute("Attribute-" + i)); }
attInfo.add(new Attribute("Target"));
Instances train = new Instances("train", attInfo, intermediatePipelineRepresentationsWithPerformanceValues.size());
train.setClassIndex(train.numAttributes() - 1);
intermediatePipelineRepresentationsWithPerformanceValues.forEach((features, value) -> {
double[] values = new double[features.length() + 1];
for (int i = 0; i < features.length(); i++) { values[i] = features.getValue(i); }
values[values.length - 1] = value;
train.add(new DenseInstance(1, values)); });
try { this.train(train); } catch (AlgorithmException e) { throw new TrainingException("Could not train the " + this.getClass().getName() + ".", e); } }
/** * Constructs an internal decision tree based on the Instances object so that * the feature generator can be used in the future to predict features for some * new vector ({@link #predict(Vector)}). * * @param data * the training data from which to build the decision tree * @throws AlgorithmException * if the tree cannot be built */
public abstract void train(Instances data) throws AlgorithmException; }
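For reference, the toy snippet below builds by hand the Instances layout that train(Map) produces for two 2-dimensional pipeline representations: numeric attributes "Attribute-0" and "Attribute-1" plus the class attribute "Target". The feature vectors and performance values are made up.

import java.util.ArrayList;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instances;

public class FeatureGeneratorDataLayoutSketch {
	public static void main(final String[] args) {
		ArrayList<Attribute> attInfo = new ArrayList<>();
		attInfo.add(new Attribute("Attribute-0"));
		attInfo.add(new Attribute("Attribute-1"));
		attInfo.add(new Attribute("Target")); // the performance value becomes the class attribute
		Instances train = new Instances("train", attInfo, 2);
		train.setClassIndex(train.numAttributes() - 1);
		train.add(new DenseInstance(1, new double[] { 0.0, 1.0, 0.85 })); // made-up pipeline, performance 0.85
		train.add(new DenseInstance(1, new double[] { 1.0, 0.0, 0.70 })); // made-up pipeline, performance 0.70
		System.out.println(train);
	}
}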
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/pipelinecharacterizing/ComponentInstanceStringConverter.java
package ai.libs.mlplan.metamining.pipelinecharacterizing; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.regex.Pattern; import java.util.stream.Collectors; import org.apache.commons.math3.geometry.euclidean.oned.Interval; import org.apache.commons.math3.geometry.partitioning.Region.Location; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.core.Util; import ai.libs.hasco.model.Component; import ai.libs.hasco.model.ComponentInstance; import ai.libs.hasco.model.NumericParameterDomain; import ai.libs.hasco.model.Parameter; import ai.libs.hasco.model.ParameterRefinementConfiguration; import treeminer.util.TreeRepresentationUtils;
public class ComponentInstanceStringConverter extends Thread {
private static final String WEKA_LABEL_FILE = "weka-labels.properties";
private static final Logger log = LoggerFactory.getLogger(ComponentInstanceStringConverter.class);
/** * The name of the top node for all pipelines */
private String pipelineTreeName = "0";
private IOntologyConnector ontologyConnector;
private List<ComponentInstance> cIs;
private Properties wekaLabels;
private List<String> convertedPipelines;
private Map<Component, Map<Parameter, ParameterRefinementConfiguration>> componentParameters;
public ComponentInstanceStringConverter(final IOntologyConnector ontologyConnector, final List<ComponentInstance> cIs, final Map<Component, Map<Parameter, ParameterRefinementConfiguration>> componentParameters) {
this.ontologyConnector = ontologyConnector;
this.cIs = cIs;
this.convertedPipelines = new ArrayList<>(cIs.size());
this.componentParameters = componentParameters;
InputStream fis = this.getClass().getClassLoader().getResourceAsStream(WEKA_LABEL_FILE);
// guard against a missing resource; getResourceAsStream returns null in that case
if (fis == null) { log.warn("Could not find {} on the classpath.", WEKA_LABEL_FILE); throw new ComponentInstanceStringConverterIntializeException(new IOException("Resource " + WEKA_LABEL_FILE + " not found")); }
this.wekaLabels = new Properties();
try { this.wekaLabels.load(fis); } catch (IOException e) { log.warn("Could not load weka labels."); throw new ComponentInstanceStringConverterIntializeException(e); } }
@Override public void run() { for (ComponentInstance cI : this.cIs) { String pipeline = this.makeStringTreeRepresentation(cI); this.convertedPipelines.add(pipeline); } }
/** * Converts the given MLPipeline to a String representation of its components * using the ontology.
* * @param pipeline * The pipeline to convert * @return The string representation of the tree deduced from the pipeline */
public String makeStringTreeRepresentation(final ComponentInstance pipeline) {
List<String> pipelineBranches = new ArrayList<>();
ComponentInstance classifierCI;
if (pipeline == null) { log.warn("Tried to characterize a null pipeline"); return null; }
// Component is a full pipeline
if (pipeline.getComponent().getName().equals("pipeline")) {
ComponentInstance preprocessorCI = pipeline.getSatisfactionOfRequiredInterfaces().get("preprocessor");
if (preprocessorCI != null) {
// Characterize searcher
this.addCharacterizationOfPipelineElement(pipelineBranches, preprocessorCI.getSatisfactionOfRequiredInterfaces().get("search"));
// Characterize evaluator
this.addCharacterizationOfPipelineElement(pipelineBranches, preprocessorCI.getSatisfactionOfRequiredInterfaces().get("eval")); }
classifierCI = pipeline.getSatisfactionOfRequiredInterfaces().get("classifier");
// Component is just a classifier
} else { classifierCI = pipeline; }
// Characterize classifier
this.addCharacterizationOfPipelineElement(pipelineBranches, classifierCI);
// Put tree together
String toReturn = TreeRepresentationUtils.addChildrenToNode(this.pipelineTreeName, pipelineBranches);
// if we have a properties file that maps our WEKA labels to integers, use it
if (this.wekaLabels != null) {
Pattern p = Pattern.compile(" ");
return p.splitAsStream(toReturn).filter(s -> !"".equals(s)).map(s -> this.wekaLabels.getProperty(s, s)).collect(Collectors.joining(" "));
} else { log.error("Did not find label property mapper."); throw new IllegalStateException("No WEKA label mapping available."); } }
/** * Gets the ontology characterization and selected parameters of the given * ComponentInstance and adds its characterization (the branch of the tree that * represents this pipeline element) to the pipeline tree by appending its branch * representation to the list of branches. * * @param pipelineBranches * The current branches of the pipeline. * @param componentInstance * The pipeline element to be characterized */
protected void addCharacterizationOfPipelineElement(final List<String> pipelineBranches, final ComponentInstance componentInstance) {
if (componentInstance != null) {
String wekaName = componentInstance.getComponent().getName();
// Get generalization
List<String> branchComponents = this.ontologyConnector.getAncestorsOfAlgorithm(wekaName);
// Get parameters
branchComponents.set(branchComponents.size() - 1, TreeRepresentationUtils.addChildrenToNode(branchComponents.get(branchComponents.size() - 1), this.getParametersForComponentInstance(componentInstance)));
// Serialize
String branch = TreeRepresentationUtils.makeRepresentationForBranch(branchComponents);
pipelineBranches.add(branch); } }
/** * Get String representations of the parameters of the given ComponentInstance * (representing a pipeline element). Numerical parameters are refined.
* * @param componentInstance * The ComponentInstance for which to get the parameters * @return A list of parameter descriptions represented as Strings */
protected List<String> getParametersForComponentInstance(final ComponentInstance componentInstance) {
List<String> parameters = new ArrayList<>();
// Get parameters of the base classifier if this is a meta classifier
if (componentInstance.getSatisfactionOfRequiredInterfaces() != null && !componentInstance.getSatisfactionOfRequiredInterfaces().isEmpty()) {
componentInstance.getSatisfactionOfRequiredInterfaces().forEach((requiredInterface, component) -> {
// so far, only the kernel interface "K" exists and it has no parameters, so its characterization can be retrieved directly
List<String> kernelFunctionCharacterisation = new ArrayList<>();
kernelFunctionCharacterisation.add(requiredInterface);
kernelFunctionCharacterisation.addAll(this.ontologyConnector.getAncestorsOfAlgorithm(component.getComponent().getName()));
parameters.add(TreeRepresentationUtils.addChildrenToNode(requiredInterface, Arrays.asList(TreeRepresentationUtils.makeRepresentationForBranch(kernelFunctionCharacterisation)))); }); }
// Get other parameters
for (Parameter parameter : componentInstance.getComponent().getParameters()) {
// Skip parameters for which no value is set
String parameterName = parameter.getName();
if (!componentInstance.getParameterValues().containsKey(parameterName)) { continue; }
List<String> parameterRefinement = new ArrayList<>();
parameterRefinement.add(parameterName);
if (parameter.isNumeric()) {
// Numeric parameter - needs to be refined
this.resolveNumericParameter(componentInstance, parameter, parameterName, parameterRefinement);
} else if (parameter.isCategorical()) {
// Categorical parameter - used as-is
parameterRefinement.add(componentInstance.getParameterValues().get(parameterName)); }
parameters.add(TreeRepresentationUtils.makeRepresentationForBranch(parameterRefinement)); }
return parameters; }
private void resolveNumericParameter(final ComponentInstance componentInstance, final Parameter parameter, final String parameterName, final List<String> parameterRefinement) {
ParameterRefinementConfiguration parameterRefinementConfiguration = this.componentParameters.get(componentInstance.getComponent()).get(parameter);
NumericParameterDomain parameterDomain = ((NumericParameterDomain) parameter.getDefaultDomain());
Interval currentInterval = null;
Interval nextInterval = new Interval(parameterDomain.getMin(), parameterDomain.getMax());
double parameterValue = Double.parseDouble(componentInstance.getParameterValues().get(parameterName));
double precision = parameterValue == 0 ? 0 : Math.ulp(parameterValue);
while (true) {
currentInterval = nextInterval;
parameterRefinement.add(this.serializeInterval(currentInterval));
List<Interval> refinement = Util.getNumericParameterRefinement(nextInterval, parameterValue, parameterDomain.isInteger(), parameterRefinementConfiguration);
if (refinement.isEmpty()) { break; }
for (Interval interval : refinement) { if (interval.checkPoint(parameterValue, precision) == Location.INSIDE || interval.checkPoint(parameterValue, precision) == Location.BOUNDARY) { nextInterval = interval; break; } } }
parameterRefinement.add(String.valueOf(parameterValue)); }
/** * Helper method for serializing an interval so that it can be used in String * representations of parameters of pipeline elements.
* * @param interval * The interval to be serialized * @return The String representation of the interval */ protected String serializeInterval(final Interval interval) { StringBuilder builder = new StringBuilder(); builder.append("["); builder.append(interval.getInf()); builder.append(","); builder.append(interval.getSup()); builder.append("]"); return builder.toString(); } public List<String> getConvertedPipelines() { return this.convertedPipelines; } }
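The interval serialization above yields strings of the form "[inf,sup]". A self-contained sketch of just that step (the surrounding refinement sequence depends on HASCO's Util.getNumericParameterRefinement and is not reproduced here):

import org.apache.commons.math3.geometry.euclidean.oned.Interval;

public class IntervalSerializationSketch {
	// Mirrors ComponentInstanceStringConverter#serializeInterval.
	static String serialize(final Interval interval) {
		return "[" + interval.getInf() + "," + interval.getSup() + "]";
	}

	public static void main(final String[] args) {
		System.out.println(serialize(new Interval(0.0, 1.0))); // prints [0.0,1.0]
	}
}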
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/pipelinecharacterizing/ComponentInstanceStringConverterIntializeException.java
package ai.libs.mlplan.metamining.pipelinecharacterizing;
/** * An exception signaling that the {@link ComponentInstanceStringConverter} * could not be properly initialized. * * @author Helena Graf * */
public class ComponentInstanceStringConverterIntializeException extends RuntimeException {
/** * generated id */
private static final long serialVersionUID = 5483934746870892252L;
/** * Create a new generic exception. */
public ComponentInstanceStringConverterIntializeException() { super(); }
/** * Create a new exception with the given message. * * @param message * a message describing the exception */
public ComponentInstanceStringConverterIntializeException(String message) { super(message); }
/** * Create a new exception with the given cause. * * @param cause * the cause of the exception */
public ComponentInstanceStringConverterIntializeException(Throwable cause) { super(cause); }
/** * Create a new exception with a given message and cause. * * @param message * a message describing the exception * @param cause * the cause of the exception */
public ComponentInstanceStringConverterIntializeException(String message, Throwable cause) { super(message, cause); } }
0
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining
java-sources/ai/libs/mlplancli/0.1.4/ai/libs/mlplan/metamining/pipelinecharacterizing/ComponentInstanceVectorFeatureGenerator.java
package ai.libs.mlplan.metamining.pipelinecharacterizing; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import ai.libs.hasco.model.CategoricalParameterDomain; import ai.libs.hasco.model.Component; import ai.libs.hasco.model.ComponentInstance; import ai.libs.hasco.model.Parameter; import ai.libs.jaicore.math.linearalgebra.DenseDoubleVector; import ai.libs.jaicore.math.linearalgebra.Vector;
/** * Characterizes a pipeline by the components that occur in it and the * parameters that are set for them. * * @author Mirko Jürgens * */
public class ComponentInstanceVectorFeatureGenerator implements IPipelineCharacterizer {
private static final Logger logger = LoggerFactory.getLogger(ComponentInstanceVectorFeatureGenerator.class);
/** * Maps the name of a component to a map that maps the name of the hyper * parameter to its index in the dyad vector. */
private Map<String, Map<String, Integer>> componentNameToParameterDyadIndex = new HashMap<>();
/** * Maps the name of a component to its index in the dyad vector. */
private Map<String, Integer> componentNameToDyadIndex = new HashMap<>();
/** * Number of found patterns. */
private int patternCount;
/** * Construct a ComponentInstanceVectorFeatureGenerator that is able to * characterize pipelines consisting of the given components and parameters. * * @param collection * the components to use */
public ComponentInstanceVectorFeatureGenerator(final Collection<Component> collection) {
int counter = 0;
logger.debug("Got {} components as input.", collection.size());
for (Component component : collection) {
logger.debug("Inserting {} at position {}", component.getName(), counter);
this.componentNameToDyadIndex.put(component.getName(), counter++);
Map<String, Integer> parameterIndices = new HashMap<>();
logger.debug("{} has {} parameters.", component.getName(), component.getParameters().size());
for (Parameter param : component.getParameters()) {
if (param.isNumeric()) { parameterIndices.put(param.getName(), counter++); } else if (param.isCategorical()) { parameterIndices.put(param.getName(), counter); CategoricalParameterDomain domain = (CategoricalParameterDomain) param.getDefaultDomain(); counter += domain.getValues().length; } }
this.componentNameToParameterDyadIndex.put(component.getName(), parameterIndices); }
this.patternCount = counter; }
/** * Recursively resolves the components.
* * @param cI * the component instance to resolve * @param patterns * the patterns found so far * @return the characterization */
public double[] characterize(final ComponentInstance cI, final Vector patterns) {
// first: get the encapsulated component
Component c = cI.getComponent();
String componentName = c.getName();
// mark the used algorithm with 1.0
int index = this.componentNameToDyadIndex.get(componentName);
patterns.setValue(index, 1.0d);
// now resolve the parameters
Map<String, Integer> parameterIndices = this.componentNameToParameterDyadIndex.get(componentName);
// assumption: a value is always set in the parameter vector
for (Parameter param : c.getParameters()) {
String parameterName = param.getName();
int parameterIndex = parameterIndices.get(parameterName);
if (param.isNumeric()) { this.handleNumericalParameter(cI, patterns, param, parameterIndex); } else if (param.isCategorical()) { this.handleCategoricalParameter(cI, patterns, param, parameterIndex); } }
// recursively resolve the patterns for the requiredInterfaces
for (ComponentInstance requiredInterface : cI.getSatisfactionOfRequiredInterfaces().values()) { this.characterize(requiredInterface, patterns); }
return patterns.asArray(); }
private void handleNumericalParameter(final ComponentInstance cI, final Vector patterns, final Parameter param, final int parameterIndex) {
if (cI.getParameterValue(param) != null) { patterns.setValue(parameterIndex, Double.parseDouble(cI.getParameterValue(param))); } else {
// fall back to the default; parse via String to avoid a ClassCastException for non-Double defaults
patterns.setValue(parameterIndex, Double.parseDouble(String.valueOf(param.getDefaultValue()))); } }
private void handleCategoricalParameter(final ComponentInstance cI, final Vector patterns, final Parameter param, final int parameterIndex) {
// the parameters are one-hot-encoded, where parameterIndex specifies the
// one-hot index for the first categorical value, parameterIndex+1 is the
// one-hot index for the second value, etc.
String parameterValue = cI.getParameterValue(param);
if (parameterValue == null) { if (param.getDefaultValue() instanceof String) { parameterValue = (String) param.getDefaultValue(); } else { parameterValue = String.valueOf(param.getDefaultValue()); } }
CategoricalParameterDomain domain = (CategoricalParameterDomain) param.getDefaultDomain();
for (int i = 0; i < domain.getValues().length; i++) { if (domain.getValues()[i].equals(parameterValue)) { patterns.setValue(parameterIndex + i, 1); } else { patterns.setValue(parameterIndex + i, 0); } } }
@Override public void build(final List<ComponentInstance> pipelines) throws InterruptedException { throw new UnsupportedOperationException("This characterizer does not need to be trained."); }
@Override public double[] characterize(final ComponentInstance pipeline) { return this.characterize(pipeline, new DenseDoubleVector(this.patternCount, 0.0d)); }
@Override public double[][] getCharacterizationsOfTrainingExamples() { throw new UnsupportedOperationException("This characterizer is not trained and stores no training examples."); }
@Override public int getLengthOfCharacterization() { return this.patternCount; } }
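The constructor above reserves one vector slot per component, one slot per numeric parameter, and one slot per value of each categorical parameter (one-hot). The toy calculation below replays that counting scheme without the HASCO classes, for a hypothetical component with one numeric parameter and one categorical parameter over three values.

public class DyadVectorLayoutSketch {
	public static void main(final String[] args) {
		int counter = 0;
		int componentIndex = counter++; // slot 0: set to 1.0 when the component occurs
		int numericParamIndex = counter++; // slot 1: holds the numeric parameter value
		int oneHotBase = counter; // slots 2..4: one-hot encoding of the 3 categories
		counter += 3;
		System.out.printf("component=%d, numeric=%d, oneHot=%d..%d, vectorLength=%d%n",
				componentIndex, numericParamIndex, oneHotBase, oneHotBase + 2, counter);
		// prints: component=0, numeric=1, oneHot=2..4, vectorLength=5
	}
}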