Columns: index (int64) | repo_id (string) | file_path (string) | content (string)
index: 0
repo_id: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
file_path: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/tokenizers/AlphabeticTokenizer.java
content:
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AlphabeticStringTokenizer.java
* Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core.tokenizers;
import java.util.NoSuchElementException;
import weka.core.RevisionUtils;
/**
* <!-- globalinfo-start --> Alphabetic string tokenizer, tokens are to be
* formed only from contiguous alphabetic sequences.
* <p/>
* <!-- globalinfo-end -->
*
* @author Ashraf M. Kibriya (amk14@cs.waikato.ac.nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class AlphabeticTokenizer extends Tokenizer {
/** for serialization */
private static final long serialVersionUID = 6705199562609861697L;
/** the characters of the string */
protected char[] m_Str;
/** the current position */
protected int m_CurrentPos;
/**
* Returns a string describing the tokenizer
*
* @return a description suitable for displaying in the explorer/experimenter
* gui
*/
@Override
public String globalInfo() {
return "Alphabetic string tokenizer, tokens are to be formed only from "
+ "contiguous alphabetic sequences.";
}
/**
* returns whether there are more elements still
*
* @return true if there are still more elements
*/
@Override
public boolean hasMoreElements() {
int beginpos = m_CurrentPos;
while ((beginpos < m_Str.length)
&& ((m_Str[beginpos] < 'a') || (m_Str[beginpos] > 'z'))
&& ((m_Str[beginpos] < 'A') || (m_Str[beginpos] > 'Z'))) {
beginpos++;
}
m_CurrentPos = beginpos;
if ((beginpos < m_Str.length)
&& (((m_Str[beginpos] >= 'a') && (m_Str[beginpos] <= 'z')) || ((m_Str[beginpos] >= 'A') && (m_Str[beginpos] <= 'Z')))) {
return true;
} else {
return false;
}
}
/**
* returns the next element
*
* @return the next element
*/
@Override
public String nextElement() {
int beginpos, endpos;
beginpos = m_CurrentPos;
while ((beginpos < m_Str.length)
&& ((m_Str[beginpos] < 'a') || (m_Str[beginpos] > 'z'))
&& ((m_Str[beginpos] < 'A') || (m_Str[beginpos] > 'Z'))) {
beginpos++;
}
m_CurrentPos = endpos = beginpos;
if (beginpos >= m_Str.length) {
throw new NoSuchElementException("No more tokens present");
}
while ((endpos < m_Str.length)
&& (((m_Str[endpos] >= 'a') && (m_Str[endpos] <= 'z')) || ((m_Str[endpos] >= 'A') && (m_Str[endpos] <= 'Z')))) {
endpos++;
}
String s = new String(m_Str, beginpos, endpos - m_CurrentPos);
m_CurrentPos = endpos;
return s;
}
/**
* Sets the string to tokenize. Tokenization happens immediately.
*
* @param s the string to tokenize
*/
@Override
public void tokenize(String s) {
m_CurrentPos = 0;
m_Str = new char[s.length()];
s.getChars(0, s.length(), m_Str, 0);
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Runs the tokenizer with the given options and strings to tokenize. The
* tokens are printed to stdout.
*
* @param args the commandline options and strings to tokenize
*/
public static void main(String[] args) {
runTokenizer(new AlphabeticTokenizer(), args);
}
}
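// A minimal usage sketch for the tokenizer defined above; the demo class name and
// the input string are illustrative assumptions, not part of Weka.
import weka.core.tokenizers.AlphabeticTokenizer;

public class AlphabeticTokenizerDemo {
  public static void main(String[] args) {
    AlphabeticTokenizer tokenizer = new AlphabeticTokenizer();
    tokenizer.tokenize("weka 3.8, tokenizers!");   // hypothetical input
    while (tokenizer.hasMoreElements()) {
      // only contiguous alphabetic runs are returned: "weka", "tokenizers"
      System.out.println(tokenizer.nextElement());
    }
  }
}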
index: 0
repo_id: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
file_path: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/tokenizers/CharacterDelimitedTokenizer.java
content:
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* DelimitedTokenizer.java
* Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core.tokenizers;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Option;
import weka.core.Utils;
/**
* Abstract superclass for tokenizers that take characters as delimiters.
*
* @author fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class CharacterDelimitedTokenizer extends Tokenizer {
/** Added to prevent warning */
private static final long serialVersionUID = -3091468793633408477L;
/** Delimiters used in tokenization */
protected String m_Delimiters = " \r\n\t.,;:'\"()?!";
/**
* Returns an enumeration of all the available options.
*
* @return an enumeration of all available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = new Vector<Option>();
result.addElement(new Option("\tThe delimiters to use\n"
+ "\t(default ' \\r\\n\\t.,;:'\"()?!').", "delimiters", 1,
"-delimiters <value>"));
return result.elements();
}
/**
* Gets the current option settings for the OptionHandler.
*
* @return the list of current option settings as an array of strings
*/
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
result.add("-delimiters");
result.add(getDelimiters());
return result.toArray(new String[result.size()]);
}
/**
* Sets the OptionHandler's options using the given list. All options will be
* set (or reset) during this call (i.e. incremental setting of options is not
* possible).
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr = Utils.getOption("delimiters", options);
if (tmpStr.length() != 0) {
setDelimiters(tmpStr);
} else {
setDelimiters(" \r\n\t.,;:'\"()?!");
}
}
/**
* Get the value of delimiters (not backquoted).
*
* @return Value of delimiters.
*/
public String getDelimiters() {
return m_Delimiters;
}
/**
* Set the value of delimiters. For convenience, the strings "\r", "\n", "\t",
* "\'", "\\" are automatically translated into their character
* representations '\r', '\n', '\t', '\'', '\\'. This means one can either
* use <code>setDelimiters("\r\n\t\\");</code> or
* <code>setDelimiters("\\r\\n\\t\\\\");</code>.
*
* @param value Value to assign to delimiters.
* @see Utils#unbackQuoteChars(String)
*/
public void setDelimiters(String value) {
m_Delimiters = Utils.unbackQuoteChars(value);
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String delimitersTipText() {
return "Set of delimiter characters to use in tokenizing (\\r, \\n and \\t can be used for carriage-return, line-feed and tab)";
}
}
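// A minimal sketch of the delimiter handling defined above, using the concrete
// WordTokenizer subclass (shown later in this dump); the demo class name and the
// inputs are illustrative assumptions.
import weka.core.tokenizers.WordTokenizer;

public class DelimiterDemo {
  public static void main(String[] args) throws Exception {
    WordTokenizer tokenizer = new WordTokenizer();             // a CharacterDelimitedTokenizer
    tokenizer.setDelimiters("\\r\\n\\t ,;");                   // backquoted form is un-backquoted
    tokenizer.setOptions(new String[]{"-delimiters", " ,"});   // equivalent, via the option handler
    tokenizer.tokenize("a, b,c");
    while (tokenizer.hasMoreElements()) {
      System.out.println(tokenizer.nextElement());             // a / b / c
    }
  }
}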
index: 0
repo_id: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
file_path: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/tokenizers/CharacterNGramTokenizer.java
content:
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NGramTokenizer.java
* Copyright (C) 2007-2012 University of Waikato
*/
package weka.core.tokenizers;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> Splits a string into an n-gram with min and max
* grams.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -max <int>
* The max size of the Ngram (default = 3).
* </pre>
*
* <pre>
* -min <int>
* The min size of the Ngram (default = 1).
* </pre>
*
* <!-- options-end -->
*
* @author Sebastian Germesin (sebastian.germesin@dfki.de)
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
*
* @version $Revision: 10971 $
*/
public class CharacterNGramTokenizer extends Tokenizer {
/** for serialization */
private static final long serialVersionUID = -1181896253171647218L;
/** the maximum number of N */
protected int m_NMax = 3;
/** the minimum number of N */
protected int m_NMin = 1;
/** the current length of the N-grams */
protected int m_N;
/** the current position for returning elements */
protected int m_CurrentPosition;
/** the string to tokenize */
protected String m_String;
/**
* Returns a string describing the tokenizer
*
* @return a description suitable for displaying in the explorer/experimenter
* GUI
*/
@Override
public String globalInfo() {
return "Splits a string into all character n-grams it contains based on the given maximum and minimum for n.";
}
/**
* Returns an enumeration of all the available options.
*
* @return an enumeration of all available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = new Vector<Option>();
result.addElement(new Option("\tThe maximum number of characters (default = 3).",
"max", 1, "-max <int>"));
result.addElement(new Option("\tThe minimum number of characters (default = 1).",
"min", 1, "-min <int>"));
result.addAll(Collections.list(super.listOptions()));
return result.elements();
}
/**
* Gets the current option settings for the OptionHandler.
*
* @return the list of current option settings as an array of strings
*/
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
result.add("-max");
result.add("" + getNGramMaxSize());
result.add("-min");
result.add("" + getNGramMinSize());
Collections.addAll(result, super.getOptions());
return result.toArray(new String[result.size()]);
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -max <int>
* The max size of the Ngram (default = 3).
* </pre>
*
* <pre>
* -min <int>
* The min size of the Ngram (default = 1).
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String value;
value = Utils.getOption("max", options);
if (value.length() != 0) {
setNGramMaxSize(Integer.parseInt(value));
} else {
setNGramMaxSize(3);
}
value = Utils.getOption("min", options);
if (value.length() != 0) {
setNGramMinSize(Integer.parseInt(value));
} else {
setNGramMinSize(1);
}
super.setOptions(options);
}
/**
* Gets the max N of the NGram.
*
* @return the size (N) of the NGram.
*/
public int getNGramMaxSize() {
return m_NMax;
}
/**
* Sets the max size of the Ngram.
*
* @param value the size of the NGram.
*/
public void setNGramMaxSize(int value) {
if (value < 1) {
m_NMax = 1;
} else {
m_NMax = value;
}
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String NGramMaxSizeTipText() {
return "The maximum size of an n-gram.";
}
/**
* Sets the min size of the Ngram.
*
* @param value the size of the NGram.
*/
public void setNGramMinSize(int value) {
if (value < 1) {
m_NMin = 1;
} else {
m_NMin = value;
}
}
/**
* Gets the min N of the NGram.
*
* @return the size (N) of the NGram.
*/
public int getNGramMinSize() {
return m_NMin;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String NGramMinSizeTipText() {
return "The minimum size of an n-gram.";
}
/**
* returns true if there are more elements available
*
* @return true if there are more elements available
*/
@Override
public boolean hasMoreElements() {
return (m_CurrentPosition + m_N <= m_String.length());
}
/**
* Returns N-grams and also (N-1)-grams and ....
*
* @return the next element
*/
@Override
public String nextElement() {
String result = null;
try {
result = m_String.substring(m_CurrentPosition, m_CurrentPosition + m_N);
} catch (StringIndexOutOfBoundsException ex) {
// Just return null;
}
m_N++;
if ((m_N > m_NMax) || (m_CurrentPosition + m_N > m_String.length())) {
m_N = m_NMin;
m_CurrentPosition++;
}
return result;
}
/**
* Sets the string to tokenize.
*
* @param s the string to tokenize
*/
@Override
public void tokenize(String s) {
m_CurrentPosition = 0;
m_String = s;
m_N = m_NMin;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision: 10971 $");
}
/**
* Runs the tokenizer with the given options and strings to tokenize. The
* tokens are printed to stdout.
*
* @param args the commandline options and strings to tokenize
*/
public static void main(String[] args) {
runTokenizer(new CharacterNGramTokenizer(), args);
}
}
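// A minimal usage sketch for the character n-gram tokenizer above; the demo class
// name and the input string are illustrative assumptions.
import weka.core.tokenizers.CharacterNGramTokenizer;

public class CharacterNGramDemo {
  public static void main(String[] args) {
    CharacterNGramTokenizer tokenizer = new CharacterNGramTokenizer();
    tokenizer.setNGramMinSize(1);
    tokenizer.setNGramMaxSize(2);
    tokenizer.tokenize("abc");
    while (tokenizer.hasMoreElements()) {
      // emits, per position, grams of length min..max: a, ab, b, bc, c
      System.out.println(tokenizer.nextElement());
    }
  }
}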
index: 0
repo_id: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
file_path: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/tokenizers/NGramTokenizer.java
content:
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NGramTokenizer.java
* Copyright (C) 2007-2012 University of Waikato
*/
package weka.core.tokenizers;
import java.util.Collections;
import java.util.Enumeration;
import java.util.LinkedList;
import java.util.Vector;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> Splits a string into an n-gram with min and max
* grams.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -delimiters <value>
* The delimiters to use
* (default ' \r\n\t.,;:'"()?!').
* </pre>
*
* <pre>
* -max <int>
* The max size of the Ngram (default = 3).
* </pre>
*
* <pre>
* -min <int>
* The min size of the Ngram (default = 1).
* </pre>
*
* <!-- options-end -->
*
* @author Sebastian Germesin (sebastian.germesin@dfki.de)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class NGramTokenizer extends CharacterDelimitedTokenizer {
/** for serialization */
private static final long serialVersionUID = -2181896254171647219L;
/** the maximum number of N */
protected int m_NMax = 3;
/** the minimum number of N */
protected int m_NMin = 1;
/** the current length of the N-grams */
protected int m_N;
/** the number of strings available */
protected int m_MaxPosition;
/** the current position for returning elements */
protected int m_CurrentPosition;
/** all the available grams */
protected String[] m_SplitString;
/**
* Returns a string describing the tokenizer
*
* @return a description suitable for displaying in the explorer/experimenter
* gui
*/
@Override
public String globalInfo() {
return "Splits a string into an n-gram with min and max grams.";
}
/**
* Returns an enumeration of all the available options.
*
* @return an enumeration of all available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = new Vector<Option>();
result.addElement(new Option("\tThe max size of the Ngram (default = 3).",
"max", 1, "-max <int>"));
result.addElement(new Option("\tThe min size of the Ngram (default = 1).",
"min", 1, "-min <int>"));
result.addAll(Collections.list(super.listOptions()));
return result.elements();
}
/**
* Gets the current option settings for the OptionHandler.
*
* @return the list of current option settings as an array of strings
*/
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
result.add("-max");
result.add("" + getNGramMaxSize());
result.add("-min");
result.add("" + getNGramMinSize());
Collections.addAll(result, super.getOptions());
return result.toArray(new String[result.size()]);
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -delimiters <value>
* The delimiters to use
* (default ' \r\n\t.,;:'"()?!').
* </pre>
*
* <pre>
* -max <int>
* The max size of the Ngram (default = 3).
* </pre>
*
* <pre>
* -min <int>
* The min size of the Ngram (default = 1).
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String value;
value = Utils.getOption("max", options);
if (value.length() != 0) {
setNGramMaxSize(Integer.parseInt(value));
} else {
setNGramMaxSize(3);
}
value = Utils.getOption("min", options);
if (value.length() != 0) {
setNGramMinSize(Integer.parseInt(value));
} else {
setNGramMinSize(1);
}
super.setOptions(options);
}
/**
* Gets the max N of the NGram.
*
* @return the size (N) of the NGram.
*/
public int getNGramMaxSize() {
return m_NMax;
}
/**
* Sets the max size of the Ngram.
*
* @param value the size of the NGram.
*/
public void setNGramMaxSize(int value) {
if (value < 1) {
m_NMax = 1;
} else {
m_NMax = value;
}
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String NGramMaxSizeTipText() {
return "The max N of the NGram.";
}
/**
* Sets the min size of the Ngram.
*
* @param value the size of the NGram.
*/
public void setNGramMinSize(int value) {
if (value < 1) {
m_NMin = 1;
} else {
m_NMin = value;
}
}
/**
* Gets the min N of the NGram.
*
* @return the size (N) of the NGram.
*/
public int getNGramMinSize() {
return m_NMin;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String NGramMinSizeTipText() {
return "The min N of the NGram.";
}
/**
* returns true if there are more elements available
*
* @return true if there are more elements available
*/
@Override
public boolean hasMoreElements() {
// return (m_CurrentPosition < m_MaxPosition
// && m_N - 1 + m_CurrentPosition < m_MaxPosition && m_N >= m_NMin);
//
return (m_N >= m_NMin);
}
/**
* Returns N-grams and also (N-1)-grams and .... and 1-grams.
*
* @return the next element
*/
@Override
public String nextElement() {
String retValue = "";
// for (int i = 0; i < m_N && i + m_CurrentPosition < m_MaxPosition; i++) {
// retValue += " " + m_SplitString[m_CurrentPosition + i];
// }
for (int i = 0; i < m_N; i++) {
retValue += " " + m_SplitString[m_CurrentPosition + i];
}
m_CurrentPosition++;
if (m_CurrentPosition + m_N - 1 == m_MaxPosition) {
m_CurrentPosition = 0;
m_N--;
}
return retValue.trim();
}
/**
* filters out empty strings in m_SplitString and replaces m_SplitString with
* the cleaned version.
*
* @see #m_SplitString
*/
protected void filterOutEmptyStrings() {
String[] newSplit;
LinkedList<String> clean = new LinkedList<String>();
for (int i = 0; i < m_SplitString.length; i++) {
if (!m_SplitString[i].equals("")) {
clean.add(m_SplitString[i]);
}
}
newSplit = new String[clean.size()];
for (int i = 0; i < clean.size(); i++) {
newSplit[i] = clean.get(i);
}
m_SplitString = newSplit;
}
/**
* Sets the string to tokenize. Tokenization happens immediately.
*
* @param s the string to tokenize
*/
@Override
public void tokenize(String s) {
m_N = m_NMax;
m_SplitString = s.split("[" + getDelimiters() + "]");
filterOutEmptyStrings();
m_CurrentPosition = 0;
m_MaxPosition = m_SplitString.length;
if (m_SplitString.length < m_NMax) {
m_N = m_SplitString.length;
}
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Runs the tokenizer with the given options and strings to tokenize. The
* tokens are printed to stdout.
*
* @param args the commandline options and strings to tokenize
*/
public static void main(String[] args) {
runTokenizer(new NGramTokenizer(), args);
}
}
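// A minimal usage sketch for the word-based n-gram tokenizer above; the demo class
// name and the input string are illustrative assumptions.
import weka.core.tokenizers.NGramTokenizer;

public class NGramDemo {
  public static void main(String[] args) {
    NGramTokenizer tokenizer = new NGramTokenizer();
    tokenizer.setNGramMinSize(1);
    tokenizer.setNGramMaxSize(2);
    tokenizer.tokenize("the quick fox");
    while (tokenizer.hasMoreElements()) {
      // longest grams first: "the quick", "quick fox", then "the", "quick", "fox"
      System.out.println(tokenizer.nextElement());
    }
  }
}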
index: 0
repo_id: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
file_path: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/tokenizers/Tokenizer.java
content:
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Tokenizer.java
* Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core.tokenizers;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.Serializable;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
/**
* A superclass for all tokenizer algorithms.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class Tokenizer implements Enumeration<String>, OptionHandler,
Serializable, RevisionHandler {
/** Added to avoid warning */
private static final long serialVersionUID = 7781271062738973996L;
/**
* Returns a string describing the tokenizer
*
* @return a description suitable for displaying in the explorer/experimenter
* gui
*/
public abstract String globalInfo();
/**
* Returns an enumeration of all the available options.
*
* @return an enumeration of all available options.
*/
@Override
public Enumeration<Option> listOptions() {
return (new Vector<Option>()).elements();
}
/**
* Gets the current option settings for the OptionHandler.
*
* @return the list of current option settings as an array of strings
*/
@Override
public String[] getOptions() {
return new String[0];
}
/**
* Sets the OptionHandler's options using the given list. All options will be
* set (or reset) during this call (i.e. incremental setting of options is not
* possible).
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
// nothing in this class
}
/**
* Tests if this enumeration contains more elements.
*
* @return true if and only if this enumeration object contains at least one
* more element to provide; false otherwise.
*/
@Override
public abstract boolean hasMoreElements();
/**
* Returns the next element of this enumeration if this enumeration object has
* at least one more element to provide.
*
* @return the next element of this enumeration.
*/
@Override
public abstract String nextElement();
/**
* Sets the string to tokenize. Tokenization happens immediately.
*
* @param s the string to tokenize
*/
public abstract void tokenize(String s);
/**
* initializes the given tokenizer with the given options and runs the
* tokenizer over all the remaining strings in the options array. If no
* strings remained in the option string then data is read from stdin, line by
* line.
*
* @param tokenizer the tokenizer to use
* @param options the options for the tokenizer
* @return the tokenized strings
* @throws Exception if setting of options or tokenization fails
*/
public static String[] tokenize(Tokenizer tokenizer, String[] options)
throws Exception {
Vector<String> result;
Vector<String> tmpResult;
Vector<String> data;
int i;
boolean processed;
BufferedReader reader;
String line;
result = new Vector<String>();
// init tokenizer
tokenizer.setOptions(options);
// for storing the data to process
data = new Vector<String>();
// run over all un-processed strings in the options array
processed = false;
for (i = 0; i < options.length; i++) {
if (options[i].length() != 0) {
processed = true;
data.add(options[i]);
}
}
// if no strings in option string then read from stdin
if (!processed) {
reader = new BufferedReader(new InputStreamReader(System.in));
while ((line = reader.readLine()) != null) {
data.add(line);
}
}
// process data
for (i = 0; i < data.size(); i++) {
tmpResult = new Vector<String>();
tokenizer.tokenize(data.get(i));
while (tokenizer.hasMoreElements()) {
tmpResult.add(tokenizer.nextElement());
}
// add to result
result.addAll(tmpResult);
}
return result.toArray(new String[result.size()]);
}
/**
* initializes the given tokenizer with the given options and runs the
* tokenizer over all the remaining strings in the options array. The
* generated tokens are then printed to stdout. If no strings remained in the
* option string then data is read from stdin, line by line.
*
* @param tokenizer the tokenizer to use
* @param options the options for the tokenizer
*/
public static void runTokenizer(Tokenizer tokenizer, String[] options) {
String[] result;
int i;
try {
result = tokenize(tokenizer, options);
for (i = 0; i < result.length; i++) {
System.out.println(result[i]);
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
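// A minimal sketch of the static tokenize helper above. It relies on Utils.getOption
// blanking out consumed options so that the remaining array entries are treated as
// input strings; the delimiter value and input strings are illustrative assumptions.
import weka.core.tokenizers.Tokenizer;
import weka.core.tokenizers.WordTokenizer;

public class TokenizeHelperDemo {
  public static void main(String[] args) throws Exception {
    String[] tokens = Tokenizer.tokenize(new WordTokenizer(),
      new String[]{"-delimiters", " ", "hello world", "foo bar"});
    for (String token : tokens) {
      System.out.println(token);   // hello / world / foo / bar
    }
  }
}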
index: 0
repo_id: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
file_path: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/tokenizers/WordTokenizer.java
content:
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SimpleStringTokenizer.java
* Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core.tokenizers;
import java.util.StringTokenizer;
import weka.core.RevisionUtils;
/**
* <!-- globalinfo-start --> A simple tokenizer that is using the
* java.util.StringTokenizer class to tokenize the strings.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -delimiters <value>
* The delimiters to use
* (default ' \r\n\t.,;:'"()?!').
* </pre>
*
* <!-- options-end -->
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class WordTokenizer extends CharacterDelimitedTokenizer {
/** for serialization */
private static final long serialVersionUID = -930893034037880773L;
/** the actual tokenizer */
protected transient StringTokenizer m_Tokenizer;
/**
* Returns a string describing the tokenizer
*
* @return a description suitable for displaying in the explorer/experimenter
* gui
*/
@Override
public String globalInfo() {
return "A simple tokenizer that is using the java.util.StringTokenizer "
+ "class to tokenize the strings.";
}
/**
* Tests if this enumeration contains more elements.
*
* @return true if and only if this enumeration object contains at least one
* more element to provide; false otherwise.
*/
@Override
public boolean hasMoreElements() {
return m_Tokenizer.hasMoreElements();
}
/**
* Returns the next element of this enumeration if this enumeration object has
* at least one more element to provide.
*
* @return the next element of this enumeration.
*/
@Override
public String nextElement() {
return m_Tokenizer.nextToken();
}
/**
* Sets the string to tokenize. Tokenization happens immediately.
*
* @param s the string to tokenize
*/
@Override
public void tokenize(String s) {
m_Tokenizer = new StringTokenizer(s, getDelimiters());
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Runs the tokenizer with the given options and strings to tokenize. The
* tokens are printed to stdout.
*
* @param args the commandline options and strings to tokenize
*/
public static void main(String[] args) {
runTokenizer(new WordTokenizer(), args);
}
}
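// A minimal usage sketch for the tokenizer above, using its default delimiter set;
// the demo class name and the input string are illustrative assumptions.
import weka.core.tokenizers.WordTokenizer;

public class WordTokenizerDemo {
  public static void main(String[] args) {
    WordTokenizer tokenizer = new WordTokenizer();
    tokenizer.tokenize("The quick, brown fox.");
    while (tokenizer.hasMoreElements()) {
      System.out.println(tokenizer.nextElement());   // The / quick / brown / fox
    }
  }
}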
index: 0
repo_id: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
file_path: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/xml/KOML.java
content:
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* KOML.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core.xml;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
/**
* This class is a helper class for XML serialization using <a
* href="http://koala.ilog.fr/XML/serialization/" target="_blank">KOML</a> .
* KOML does not need to be present on the classpath, since the calls to its
* classes are made generically via reflection.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision 1.0$
*/
public class KOML implements RevisionHandler {
/**
* indicates whether <a href="http://koala.ilog.fr/XML/serialization/"
* target="_blank">KOML</a> (Koala Object Markup Language) is present
*/
protected static boolean m_Present = false;
/** the extension for KOML files (including '.') */
public final static String FILE_EXTENSION = ".koml";
/** check for KOML statically (needs only to be done once) */
static {
checkForKOML();
}
/**
* checks whether the KOML is present in the class path
*/
private static void checkForKOML() {
try {
Class.forName("fr.dyade.koala.xml.koml.KOMLSerializer");
m_Present = true;
} catch (Exception e) {
m_Present = false;
}
}
/**
* returns whether KOML is present or not, i.e. whether the classes are in the
* classpath or not
*
* @return whether KOML is available
*/
public static boolean isPresent() {
return m_Present;
}
/**
* reads the XML-serialized object from the given file
*
* @param filename the file to deserialize the object from
* @return the deserialized object
* @throws Exception if something goes wrong while reading from the file
*/
public static Object read(String filename) throws Exception {
return read(new FileInputStream(filename));
}
/**
* reads the XML-serialized object from the given file
*
* @param file the file to deserialize the object from
* @return the deserialized object
* @throws Exception if something goes wrong while reading from the file
*/
public static Object read(File file) throws Exception {
return read(new FileInputStream(file));
}
/**
* reads the XML-serialized object from a stream
*
* @param stream the stream to deserialize the object from
* @return the deserialized object
* @throws Exception if something goes wrong while reading from the stream
*/
public static Object read(InputStream stream) throws Exception {
Class<?> komlClass;
Class<?>[] komlClassArgs;
Object[] komlArgs;
java.lang.reflect.Constructor<?> constructor;
Object koml;
java.lang.reflect.Method methodRead;
java.lang.reflect.Method methodClose;
Class<?>[] readArgsClasses;
Class<?>[] closeArgsClasses;
Object[] readArgs;
Object[] closeArgs;
Object result;
result = null;
// get Deserializer
komlClass = Class.forName("fr.dyade.koala.xml.koml.KOMLDeserializer");
komlClassArgs = new Class[2];
komlClassArgs[0] = java.io.InputStream.class;
komlClassArgs[1] = Boolean.TYPE;
komlArgs = new Object[2];
komlArgs[0] = stream;
komlArgs[1] = new Boolean(false);
constructor = komlClass.getConstructor(komlClassArgs);
koml = constructor.newInstance(komlArgs);
readArgsClasses = new Class[0];
methodRead = komlClass.getMethod("readObject", readArgsClasses);
readArgs = new Object[0];
closeArgsClasses = new Class[0];
methodClose = komlClass.getMethod("close", closeArgsClasses);
closeArgs = new Object[0];
// execute it
try {
result = methodRead.invoke(koml, readArgs);
} catch (Exception e) {
result = null;
} finally {
methodClose.invoke(koml, closeArgs);
}
return result;
}
/**
* writes the XML-serialized object to the given file
*
* @param filename the file to serialize the object to
* @param o the object to write to the file
* @return whether writing was successful or not
* @throws Exception if something goes wrong while writing to the file
*/
public static boolean write(String filename, Object o) throws Exception {
return write(new FileOutputStream(filename), o);
}
/**
* write the XML-serialized object to the given file
*
* @param file the file to serialize the object to
* @param o the object to write to the file
* @return whether writing was successful or not
* @throws Exception if something goes wrong while writing to the file
*/
public static boolean write(File file, Object o) throws Exception {
return write(new FileOutputStream(file), o);
}
/**
* writes the XML-serialized object to a stream
*
* @param stream the stream to serialize the object to
* @param o the object to write to the stream
* @return whether writing was successful or not
* @throws Exception if something goes wrong while writing to the stream
*/
public static boolean write(OutputStream stream, Object o) throws Exception {
Class<?> komlClass;
Class<?>[] komlClassArgs;
Object[] komlArgs;
java.lang.reflect.Constructor<?> constructor;
Object koml;
java.lang.reflect.Method methodAdd;
java.lang.reflect.Method methodClose;
Class<?>[] addArgsClasses;
Class<?>[] closeArgsClasses;
Object[] addArgs;
Object[] closeArgs;
boolean result;
result = false;
// get Serializer
komlClass = Class.forName("fr.dyade.koala.xml.koml.KOMLSerializer");
komlClassArgs = new Class[2];
komlClassArgs[0] = java.io.OutputStream.class;
komlClassArgs[1] = Boolean.TYPE;
komlArgs = new Object[2];
komlArgs[0] = stream;
komlArgs[1] = new Boolean(false);
constructor = komlClass.getConstructor(komlClassArgs);
koml = constructor.newInstance(komlArgs);
addArgsClasses = new Class[1];
addArgsClasses[0] = Object.class;
methodAdd = komlClass.getMethod("addObject", addArgsClasses);
addArgs = new Object[1];
addArgs[0] = o;
closeArgsClasses = new Class[0];
methodClose = komlClass.getMethod("close", closeArgsClasses);
closeArgs = new Object[0];
// execute it
try {
methodAdd.invoke(koml, addArgs);
result = true;
} catch (Exception e) {
result = false;
} finally {
methodClose.invoke(koml, closeArgs);
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
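// A minimal sketch of the reflective KOML helper above. It assumes the optional KOML
// library is on the classpath (isPresent() guards for that); the file name and the
// data are illustrative assumptions.
import java.io.File;
import java.util.Vector;
import weka.core.xml.KOML;

public class KOMLDemo {
  public static void main(String[] args) throws Exception {
    if (!KOML.isPresent()) {
      System.err.println("KOML classes not found on the classpath");
      return;
    }
    Vector<String> data = new Vector<String>();
    data.add("hello");
    KOML.write("data.koml", data);                  // XML-serialize to a file
    Object restored = KOML.read(new File("data.koml"));
    System.out.println(restored);
  }
}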
index: 0
repo_id: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
file_path: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/xml/MethodHandler.java
content:
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* MethodHandler.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core.xml;
import java.lang.reflect.Method;
import java.util.Enumeration;
import java.util.Hashtable;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
/**
* This class handles relationships between display names of properties (or
* classes) and Methods that are associated with them.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class MethodHandler implements RevisionHandler {
/**
* stores the properties/class - Method relationship
*
* @see #keys()
* @see #add(Class, Method)
* @see #add(String, Method)
* @see #remove(Class)
* @see #remove(String)
* @see #get(Class)
* @see #get(String)
* @see #contains(Class)
* @see #contains(String)
*/
protected Hashtable<Object, Method> m_Methods = null;
/**
* initializes the handler
*/
public MethodHandler() {
super();
m_Methods = new Hashtable<Object, Method>();
}
/**
* returns an enumeration over all currently stored custom methods, i.e. it
* returns the display names/classes in the enumeration.
*
* @return the currently stored methods
* @see #m_Methods
*/
public Enumeration<Object> keys() {
return m_Methods.keys();
}
/**
* adds the specified method for the property with the given displayname to
* its internal list.
*
* @param displayName the display name of the property to handle manually
* @param method the method, which will be invoked by reflection to handle the
* property manually
* @see #m_Methods
*/
public void add(String displayName, Method method) {
if (method != null) {
m_Methods.put(displayName, method);
}
}
/**
* adds the specified method for the given class to its internal list.
*
* @param c the class to handle manually
* @param method the method, which will be invoked by reflection to handle the
* property manually
* @see #m_Methods
*/
public void add(Class<?> c, Method method) {
if (method != null) {
m_Methods.put(c, method);
}
}
/**
* removes the method for the property specified by the display name from its
* internal list.
*
* @param displayName the display name of the property to remove the custom
* method for
* @return whether the method was stored in the list at all
* @see #m_Methods
*/
public boolean remove(String displayName) {
return (m_Methods.remove(displayName) != null);
}
/**
* removes the method for the specified class from its internal list.
*
* @param c the class to remove the custom method for
* @return whether the method was stored in the list at all
* @see #m_Methods
*/
public boolean remove(Class<?> c) {
return (m_Methods.remove(c) != null);
}
/**
* checks whether a method is stored for the given property
*
* @param displayName the display name of the property to check for a method
* @return whether a method is currently stored
* @see #m_Methods
*/
public boolean contains(String displayName) {
return m_Methods.containsKey(displayName);
}
/**
* checks whether a method is stored for the given class
*
* @param c the class to check for a method
* @return whether a method is currently stored
* @see #m_Methods
*/
public boolean contains(Class<?> c) {
return m_Methods.containsKey(c);
}
/**
* returns the stored method for the given property
*
* @param displayName the display name of the property to retrieve the method
* for
* @return the method associated with the display name, can be
* <code>null</code>
* @see #m_Methods
*/
public Method get(String displayName) {
return m_Methods.get(displayName);
}
/**
* returns the stored method for the given class
*
* @param c the class to retrieve the method for
* @return the method associated with the class, can be <code>null</code>
* @see #m_Methods
*/
public Method get(Class<?> c) {
return m_Methods.get(c);
}
/**
* returns the number of currently stored Methods
*
* @return the number of methods
*/
public int size() {
return m_Methods.size();
}
/**
* removes all mappings
*/
public void clear() {
m_Methods.clear();
}
/**
* returns the internal Hashtable (property/class - method relationship) in a
* string representation
*
* @return the object as string
*/
@Override
public String toString() {
return m_Methods.toString();
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
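// A minimal sketch of the display-name/class-to-Method bookkeeping above; the chosen
// display name, class, and method are illustrative assumptions.
import java.lang.reflect.Method;
import weka.core.xml.MethodHandler;

public class MethodHandlerDemo {
  public static void main(String[] args) throws Exception {
    MethodHandler handler = new MethodHandler();
    Method method = String.class.getMethod("length");
    handler.add("options", method);                   // custom handler for a display name
    handler.add(java.awt.Color.class, method);        // custom handler for a class
    System.out.println(handler.contains("options"));  // true
    System.out.println(handler.get(java.awt.Color.class));
    handler.remove("options");
    System.out.println(handler.size());               // 1
  }
}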
index: 0
repo_id: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
file_path: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/xml/PropertyHandler.java
content:
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* PropertyHandler.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core.xml;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Hashtable;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
/**
* This class stores information about properties to ignore or properties that
* are allowed for a certain class.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class PropertyHandler implements RevisionHandler {
/**
* contains display names of properties to ignore in the serialization process
*
* @see #ignored()
* @see #addIgnored(String)
* @see #removeIgnored(String)
* @see #isIgnored(String)
*/
protected Hashtable<Object, HashSet<String>> m_Ignored = null;
/**
* lists for a class the properties allowed to use for setting and getting. if
* a class is not listed, then all get/set-methods are allowed.<br>
* Mapping: classname (String) - properties (HashSet, containing the
* Property-Names)
*
* @see #allowed()
* @see #addAllowed(Class,String)
* @see #removeAllowed(Class,String)
* @see #isAllowed(Class,String)
*/
protected Hashtable<Object, HashSet<String>> m_Allowed = null;
/**
* initializes the handling
*/
public PropertyHandler() {
super();
m_Ignored = new Hashtable<Object, HashSet<String>>();
m_Allowed = new Hashtable<Object, HashSet<String>>();
}
/**
* returns an enumeration of the stored display names and classes of
* properties to ignore.<br>
* <b>NOTE:</b> String and Class Objects are mixed in this enumeration,
* depending on whether it is a global property to ignore or just one for a
* certain class!
*
* @return the display names and classes
* @see #m_Ignored
*/
public Enumeration<Object> ignored() {
return m_Ignored.keys();
}
/**
* adds the given display name of a property to the ignore list. Can either be
* a complete path (e.g. <code>__root__.options</code>) or only a property
* name (e.g. <code>options</code>). In the latter case it matches all
* occurrences of this display name.
*
* @param displayName the property to ignore
* @see #m_Ignored
*/
public void addIgnored(String displayName) {
HashSet<String> list;
list = new HashSet<String>();
list.add(displayName);
m_Ignored.put(displayName, list);
}
/**
* adds the given class with the display name of a property to the ignore
* list. I.e. this property is only ignored for this class.
*
* @param c the class for which a property is to be ignored
* @param displayName the property to ignore
* @see #m_Ignored
*/
public void addIgnored(Class<?> c, String displayName) {
HashSet<String> list;
// retrieve list
if (m_Ignored.containsKey(c)) {
list = m_Ignored.get(c);
} else {
list = new HashSet<String>();
m_Ignored.put(c, list);
}
list.add(displayName);
}
/**
* removes the given display name from the ignore list. returns whether the
* removal was successful, i.e. whether the display name was in the list.
*
* @param displayName the property to remove from the ignore list
* @return whether the ignore list contained the specified property
* @see #m_Ignored
*/
public boolean removeIgnored(String displayName) {
return (m_Ignored.remove(displayName) != null);
}
/**
* removes the given display name from the ignore list of the class. returns
* whether the removal was successful, i.e. whether the display name was in
* the list.
*
* @param c the class to remove the property from
* @param displayName the property to remove from the ignore list
* @return whether the ignore list contained the specified property
* @see #m_Ignored
*/
public boolean removeIgnored(Class<?> c, String displayName) {
HashSet<String> list;
// retrieve list
if (m_Ignored.containsKey(c)) {
list = m_Ignored.get(c);
} else {
list = new HashSet<String>();
}
return list.remove(displayName);
}
/**
* checks whether the given display name is an ignored property
*
* @param displayName the property to check whether it is on the ignore list
* @return whether the property is in the ignored list
* @see #m_Ignored
*/
public boolean isIgnored(String displayName) {
return m_Ignored.containsKey(displayName);
}
/**
* checks whether the given display name of a certain class is an ignored
* property. It only checks for this certain class and no derivative classes.
* If you also want to check for derivative classes, use
* <code>isIgnored(Object,String)</code>.
*
* @param c the class to check the property for
* @param displayName the property to check whether it is on the ignore list
* @return whether the property is in the ignored list
* @see #m_Ignored
* @see #isIgnored(Object, String)
*/
public boolean isIgnored(Class<?> c, String displayName) {
HashSet<String> list;
// retrieve list
if (m_Ignored.containsKey(c)) {
list = m_Ignored.get(c);
} else {
list = new HashSet<String>();
}
return list.contains(displayName);
}
/**
* checks whether the given display name of a given object is an ignored
* property. The object is checked for each stored class whether it is an
* <code>instanceof</code>. If the class is not stored then it will default to
* <code>false</code>, since there are no restrictions for this class.
*
* @param o the object to check the property for
* @param displayName the property to check whether it is on the ignore list
* @return whether the property is in the ignored list
* @see #m_Ignored
*/
public boolean isIgnored(Object o, String displayName) {
Enumeration<Object> enm;
Class<?> c;
Object element;
boolean result;
HashSet<String> list;
result = false;
enm = ignored();
while (enm.hasMoreElements()) {
element = enm.nextElement();
// has to be class! not a display name
if (!(element instanceof Class)) {
continue;
}
c = (Class<?>) element;
// is it an instance of this class?
if (c.isInstance(o)) {
list = m_Ignored.get(c);
result = list.contains(displayName);
break;
}
}
return result;
}
/**
* returns an enumeration of the classnames for which only certain properties
* (display names) are allowed
*
* @return the classnames with restriction to properties
*/
public Enumeration<Object> allowed() {
return m_Allowed.keys();
}
/**
* adds the given property (display name) to the list of allowed properties
* for the specified class.
*
* @param c the class to add a property for
* @param displayName the property to allow for the class
* @see #m_Allowed
*/
public void addAllowed(Class<?> c, String displayName) {
HashSet<String> list;
// retrieve list
list = m_Allowed.get(c);
if (list == null) {
list = new HashSet<String>();
m_Allowed.put(c, list);
}
// add property
list.add(displayName);
}
/**
* removes the given property (display name) for the specified class from the
* list of allowed properties.
*
* @param c the class to remove the property for
* @param displayName the property to remove
* @return whether the property was found
* @see #m_Allowed
*/
public boolean removeAllowed(Class<?> c, String displayName) {
boolean result;
HashSet<String> list;
result = false;
// retrieve list
list = m_Allowed.get(c);
// remove property
if (list != null) {
result = list.remove(displayName);
}
return result;
}
/**
* returns whether the given property (display name) is allowed for the given
* class. It only checks for this certain class and no derivative classes. If
* you also want to check for derivative classes, use
* <code>isAllowed(Object,String)</code>.
*
* @param c the class to check the property for
* @param displayName the property (display name) to check
* @return whether the property is allowed in that context
* @see #m_Allowed
* @see #isAllowed(Object, String)
*/
public boolean isAllowed(Class<?> c, String displayName) {
boolean result;
HashSet<String> list;
result = true;
// retrieve list
list = m_Allowed.get(c);
// check list
if (list != null) {
result = list.contains(displayName);
}
return result;
}
/**
* returns whether the given property (display name) is allowed for the given
* object. The object is checked for each stored class whether it is an
* <code>instanceof</code>. If the class is not stored then it will default to
* <code>true</code>, since there are no restrictions for this class.
*
* @param o the object to check the property for
* @param displayName the property (display name) to check
* @return whether the property is allowed in that context
*/
public boolean isAllowed(Object o, String displayName) {
Enumeration<Object> enm;
Class<?> c;
boolean result;
HashSet<String> list;
result = true;
enm = allowed();
while (enm.hasMoreElements()) {
c = (Class<?>) enm.nextElement();
// is it an instance of this class?
if (c.isInstance(o)) {
list = m_Allowed.get(c);
result = list.contains(displayName);
break;
}
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
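// A minimal sketch of the ignore/allow bookkeeping above; the property names and the
// java.awt.Color example class are illustrative assumptions.
import weka.core.xml.PropertyHandler;

public class PropertyHandlerDemo {
  public static void main(String[] args) {
    PropertyHandler handler = new PropertyHandler();
    handler.addIgnored("debug");                                // ignored globally
    handler.addIgnored(java.awt.Color.class, "alpha");          // ignored only for Color
    handler.addAllowed(java.awt.Color.class, "red");            // Color restricted to "red"
    System.out.println(handler.isIgnored("debug"));                       // true
    System.out.println(handler.isIgnored(java.awt.Color.BLUE, "alpha"));  // true (instanceof match)
    System.out.println(handler.isAllowed(java.awt.Color.BLUE, "green"));  // false
    System.out.println(handler.isAllowed(new Object(), "anything"));      // true (no restrictions)
  }
}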
index: 0
repo_id: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
file_path: java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/xml/SerialUIDChanger.java
content:
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SerialUIDChanger.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core.xml;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
/**
* This class enables one to change the UID of a serialized object without
* losing the data stored in the binary format.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class SerialUIDChanger
implements RevisionHandler {
/**
* checks whether KOML is present
*
* @return returns <code>true</code> if KOML is present
* @throws Exception if KOML is not present
*/
protected static boolean checkKOML() throws Exception {
if (!KOML.isPresent())
throw new Exception("KOML is not present!");
else
return true;
}
/**
* checks whether the given filename ends with ".koml"
*
* @param filename the filename to check
* @return whether it is a KOML file or not
* @see KOML#FILE_EXTENSION
*/
public static boolean isKOML(String filename) {
return filename.toLowerCase().endsWith(KOML.FILE_EXTENSION);
}
/**
* loads a serialized object and returns it
*
* @param binary the filename that points to the file containing the
* serialized object
* @return the object from the file
* @throws Exception if reading fails
*/
protected static Object readBinary(String binary) throws Exception {
FileInputStream fi;
ObjectInputStream oi;
Object o;
fi = new FileInputStream(binary);
oi = new ObjectInputStream(new BufferedInputStream(fi));
o = oi.readObject();
oi.close();
return o;
}
/**
* serializes the given object into the given file
*
* @param binary the file to store the object in
* @param o the object to serialize
* @throws Exception if saving fails
*/
protected static void writeBinary(String binary, Object o) throws Exception {
FileOutputStream fo;
ObjectOutputStream oo;
fo = new FileOutputStream(binary);
oo = new ObjectOutputStream(new BufferedOutputStream(fo));
oo.writeObject(o);
oo.close();
}
/**
* converts a binary file into a KOML XML file
*
* @param binary the binary file to convert
* @param koml where to store the XML output
* @throws Exception if conversion fails
*/
public static void binaryToKOML(String binary, String koml) throws Exception {
Object o;
// can we use KOML?
checkKOML();
// read binary
o = readBinary(binary);
if (o == null)
throw new Exception("Failed to deserialize object from binary file '" + binary + "'!");
// save as KOML
KOML.write(koml, o);
}
/**
* converts a KOML file into a binary one
*
* @param koml the filename with the XML data
* @param binary the file to store the binary output in
* @throws Exception if conversion fails
*/
public static void komlToBinary(String koml, String binary) throws Exception {
Object o;
// can we use KOML?
checkKOML();
// read KOML
o = KOML.read(koml);
if (o == null)
throw new Exception("Failed to deserialize object from XML file '" + koml + "'!");
// write binary
writeBinary(binary, o);
}
/**
* changes the oldUID into newUID from the given file (binary/KOML) into the
* other one (binary/KOML). it basically does a replace in the XML, i.e. it
* looks for " uid='oldUID'" and replaces it with " uid='newUID'".
*
* @param oldUID the old UID to change
* @param newUID the new UID to use
* @param fromFile the original file with the old UID
* @param toFile the new file where to store the modified UID
* @throws Exception if conversion fails
*/
public static void changeUID(long oldUID, long newUID, String fromFile, String toFile) throws Exception {
String inputFile;
String tempFile;
File file;
String content;
String line;
BufferedReader reader;
BufferedWriter writer;
// input
if (!isKOML(fromFile)) {
inputFile = fromFile + ".koml";
binaryToKOML(fromFile, inputFile);
}
else {
inputFile = fromFile;
}
// load KOML
reader = new BufferedReader(new FileReader(inputFile));
content = "";
while ((line = reader.readLine()) != null) {
if (!content.equals(""))
content += "\n";
content += line;
}
reader.close();
// transform UID
content = content.replaceAll(" uid='" + Long.toString(oldUID) + "'", " uid='" + Long.toString(newUID) + "'");
// save to tempFile
tempFile = inputFile + ".temp";
writer = new BufferedWriter(new FileWriter(tempFile));
writer.write(content);
writer.flush();
writer.close();
// output
if (!isKOML(toFile)) {
komlToBinary(tempFile, toFile);
}
else {
writer = new BufferedWriter(new FileWriter(toFile));
writer.write(content);
writer.flush();
writer.close();
}
// remove tempFile
file = new File(tempFile);
file.delete();
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* exchanges an old UID for a new one. A file that doesn't end with ".koml"
* is considered to be binary.
* Takes four arguments: oldUID newUID oldFilename newFilename
*
* @param args the command line parameters
* @see KOML#FILE_EXTENSION
*/
public static void main(String[] args) throws Exception {
if (args.length != 4) {
System.out.println();
System.out.println("Usage: " + SerialUIDChanger.class.getName() + " <oldUID> <newUID> <oldFilename> <newFilename>");
System.out.println(" <oldFilename> and <newFilename> have to be different");
System.out.println();
}
else {
if (args[2].equals(args[3]))
throw new Exception("Filenames have to be different!");
changeUID( Long.parseLong(args[0]),
Long.parseLong(args[1]),
args[2],
args[3] );
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/xml/XMLBasicSerialization.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* XMLBasicSerialization.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core.xml;
import java.awt.Color;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Properties;
import java.util.Stack;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.Vector;
import javax.swing.DefaultListModel;
import org.w3c.dom.Element;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* This serializer contains some read/write methods for common classes that do
* not conform to the JavaBeans conventions. Currently supported are:
* <ul>
* <li>java.util.HashMap</li>
* <li>java.util.LinkedHashMap</li>
* <li>java.util.HashSet</li>
* <li>java.util.Hashtable</li>
* <li>java.util.LinkedList</li>
* <li>java.util.Properties</li>
* <li>java.util.Stack</li>
* <li>java.util.TreeMap</li>
* <li>java.util.TreeSet</li>
* <li>java.util.Vector</li>
* <li>javax.swing.DefaultListModel</li>
* <li>java.awt.Color</li>
* </ul>
*
* Weka classes:
* <ul>
* <li>weka.core.Matrix</li>
* <li>weka.core.matrix.Matrix</li>
* </ul>
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class XMLBasicSerialization extends XMLSerialization {
/** the value for mapping, e.g., Maps */
public final static String VAL_MAPPING = "mapping";
/** the value for a mapping-key, e.g., Maps */
public final static String VAL_KEY = "key";
/** the value for mapping-value, e.g., Maps */
public final static String VAL_VALUE = "value";
/** the matrix cells */
public final static String VAL_CELLS = "cells";
/**
* initializes the serialization
*
* @throws Exception if initialization fails
*/
public XMLBasicSerialization() throws Exception {
super();
}
/**
* internally generates a new XML document and also clears the IgnoreList and
* the mappings for the read/write methods
*
* @throws Exception if initializing fails
*/
@Override
@SuppressWarnings("deprecation")
public void clear() throws Exception {
super.clear();
// Java classes
m_CustomMethods.register(this, DefaultListModel.class, "DefaultListModel");
m_CustomMethods.register(this, HashMap.class, "Map");
m_CustomMethods.register(this, HashSet.class, "Collection");
m_CustomMethods.register(this, Hashtable.class, "Map");
m_CustomMethods.register(this, LinkedList.class, "Collection");
m_CustomMethods.register(this, Properties.class, "Map");
m_CustomMethods.register(this, Stack.class, "Collection");
m_CustomMethods.register(this, TreeMap.class, "Map");
m_CustomMethods.register(this, LinkedHashMap.class, "Map");
m_CustomMethods.register(this, TreeSet.class, "Collection");
m_CustomMethods.register(this, Vector.class, "Collection");
m_CustomMethods.register(this, Color.class, "Color");
// Weka classes
m_CustomMethods.register(this, weka.core.matrix.Matrix.class, "Matrix");
m_CustomMethods.register(this, weka.core.Matrix.class, "MatrixOld");
m_CustomMethods.register(this, weka.classifiers.CostMatrix.class,
"CostMatrix");
}
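/**
 * Illustrative usage sketch, not part of the original interruptible-weka
 * source: each registration in clear() maps a class to a method-name
 * suffix, e.g. "Map" resolves to writeMap()/readMap() and "Collection" to
 * writeCollection()/readCollection(). The sketch below assumes the
 * write(String, Object) and read(String) methods inherited from
 * XMLSerialization and a hypothetical file name "map.xml".
 *
 * @throws Exception if serialization or deserialization fails
 */
protected void mapRoundTripSketch() throws Exception {
HashMap<String, Integer> counts = new HashMap<String, Integer>();
counts.put("sunny", 5);
counts.put("rainy", 3);
// writing dispatches to writeMap() via the registration above
write("map.xml", counts);
// reading dispatches to readMap() and rebuilds the HashMap
HashMap<?, ?> restored = (HashMap<?, ?>) read("map.xml");
System.out.println(restored);
}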
/**
* adds the given Color to a DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the Object to describe in XML
* @param name the name of the object
* @return the node that was created
* @throws Exception if the DOM creation fails
* @see java.awt.Color
*/
public Element writeColor(Element parent, Object o, String name)
throws Exception {
Element node;
Color c;
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
c = (Color) o;
node = addElement(parent, name, o.getClass().getName(), false);
invokeWriteToXML(node, c.getRed(), "red");
invokeWriteToXML(node, c.getGreen(), "green");
invokeWriteToXML(node, c.getBlue(), "blue");
return node;
}
/**
* builds the Color object from the given DOM node.
*
* @param node the associated XML node
* @return the instance created from the XML description
* @throws Exception if instantiation fails
* @see java.awt.Color
*/
public Object readColor(Element node) throws Exception {
Vector<Element> children;
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
children = XMLDocument.getChildTags(node);
Element redchild = children.get(0);
Element greenchild = children.get(1);
Element bluechild = children.get(2);
Integer red = (Integer) readFromXML(redchild);
Integer green = (Integer) readFromXML(greenchild);
Integer blue = (Integer) readFromXML(bluechild);
return new Color(red, green, blue);
}
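// Illustrative note, not part of the original interruptible-weka source:
// writeColor()/readColor() store a java.awt.Color via three child objects
// named "red", "green" and "blue", so the serialized form looks roughly
// like the following sketch (element and attribute names are only
// approximated here; the exact layout is defined by XMLSerialization):
//
//   <object name="background" class="java.awt.Color">
//     <object name="red" class="java.lang.Integer">64</object>
//     <object name="green" class="java.lang.Integer">128</object>
//     <object name="blue" class="java.lang.Integer">255</object>
//   </object>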
/**
* adds the given DefaultListModel to a DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the Object to describe in XML
* @param name the name of the object
* @return the node that was created
* @throws Exception if the DOM creation fails
* @see javax.swing.DefaultListModel
*/
public Element writeDefaultListModel(Element parent, Object o, String name)
throws Exception {
Element node;
int i;
DefaultListModel model;
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
model = (DefaultListModel) o;
node = addElement(parent, name, o.getClass().getName(), false);
for (i = 0; i < model.getSize(); i++) {
invokeWriteToXML(node, model.get(i), Integer.toString(i));
}
return node;
}
/**
* builds the DefaultListModel from the given DOM node.
*
* @param node the associated XML node
* @return the instance created from the XML description
* @throws Exception if instantiation fails
* @see javax.swing.DefaultListModel
*/
public Object readDefaultListModel(Element node) throws Exception {
DefaultListModel model;
Vector<Element> children;
Element child;
int i;
int index;
int currIndex;
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
children = XMLDocument.getChildTags(node);
model = new DefaultListModel();
// determine highest index for size
index = children.size() - 1;
for (i = 0; i < children.size(); i++) {
child = children.get(i);
currIndex = Integer.parseInt(child.getAttribute(ATT_NAME));
if (currIndex > index) {
index = currIndex;
}
}
model.setSize(index + 1);
// set values
for (i = 0; i < children.size(); i++) {
child = children.get(i);
model.set(Integer.parseInt(child.getAttribute(ATT_NAME)),
invokeReadFromXML(child));
}
return model;
}
/**
* adds the given Collection to a DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the Object to describe in XML
* @param name the name of the object
* @return the node that was created
* @throws Exception if the DOM creation fails
* @see java.util.Collection
*/
public Element writeCollection(Element parent, Object o, String name)
throws Exception {
Element node;
Iterator<?> iter;
int i;
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
iter = ((Collection<?>) o).iterator();
node = addElement(parent, name, o.getClass().getName(), false);
i = 0;
while (iter.hasNext()) {
invokeWriteToXML(node, iter.next(), Integer.toString(i));
i++;
}
return node;
}
/**
* builds the Collection from the given DOM node.
*
* @param node the associated XML node
* @return the instance created from the XML description
* @throws Exception if instantiation fails
* @see java.util.Collection
*/
public Object readCollection(Element node) throws Exception {
Collection<Object> coll;
Vector<Object> v;
Vector<Element> children;
Element child;
int i;
int index;
int currIndex;
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
children = XMLDocument.getChildTags(node);
v = new Vector<Object>();
// determine highest index for size
index = children.size() - 1;
for (i = 0; i < children.size(); i++) {
child = children.get(i);
currIndex = Integer.parseInt(child.getAttribute(ATT_NAME));
if (currIndex > index) {
index = currIndex;
}
}
v.setSize(index + 1);
// put the children in the vector to sort them according to their index
for (i = 0; i < children.size(); i++) {
child = children.get(i);
v.set(Integer.parseInt(child.getAttribute(ATT_NAME)),
invokeReadFromXML(child));
}
// populate collection
coll =
Utils.cast(Class.forName(node.getAttribute(ATT_CLASS)).newInstance());
coll.addAll(v);
return coll;
}
/**
* adds the given Map to a DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the Object to describe in XML
* @param name the name of the object
* @return the node that was created
* @throws Exception if the DOM creation fails
* @see java.util.Map
*/
public Element writeMap(Element parent, Object o, String name)
throws Exception {
Map<?, ?> map;
Object key;
Element node;
Element child;
Iterator<?> iter;
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
map = (Map<?, ?>) o;
iter = map.keySet().iterator();
node = addElement(parent, name, o.getClass().getName(), false);
while (iter.hasNext()) {
key = iter.next();
child = addElement(node, VAL_MAPPING, Object.class.getName(), false);
invokeWriteToXML(child, key, VAL_KEY);
invokeWriteToXML(child, map.get(key), VAL_VALUE);
}
return node;
}
/**
* builds the Map from the given DOM node.
*
* @param node the associated XML node
* @return the instance created from the XML description
* @throws Exception if instantiation fails
* @see java.util.Map
*/
public Object readMap(Element node) throws Exception {
Map<Object, Object> map;
Object key;
Object value;
Vector<Element> children;
Vector<Element> cchildren;
Element child;
Element cchild;
int i;
int n;
String name;
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
map = Utils.cast(Class.forName(node.getAttribute(ATT_CLASS)).newInstance());
children = XMLDocument.getChildTags(node);
for (i = 0; i < children.size(); i++) {
child = children.get(i);
cchildren = XMLDocument.getChildTags(child);
key = null;
value = null;
for (n = 0; n < cchildren.size(); n++) {
cchild = cchildren.get(n);
name = cchild.getAttribute(ATT_NAME);
if (name.equals(VAL_KEY)) {
key = invokeReadFromXML(cchild);
} else if (name.equals(VAL_VALUE)) {
value = invokeReadFromXML(cchild);
} else {
System.out.println("WARNING: '" + name
+ "' is not a recognized name for maps!");
}
}
map.put(key, value);
}
return map;
}
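// Illustrative note, not part of the original interruptible-weka source:
// writeMap()/readMap() represent each map entry as a "mapping" child that
// holds a "key" and a "value" object (see VAL_MAPPING, VAL_KEY and
// VAL_VALUE above), roughly like this approximation:
//
//   <object name="counts" class="java.util.HashMap">
//     <object name="mapping" class="java.lang.Object">
//       <object name="key" class="java.lang.String">sunny</object>
//       <object name="value" class="java.lang.Integer">5</object>
//     </object>
//   </object>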
/**
* adds the given CostMatrix to a DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the Object to describe in XML
* @param name the name of the object
* @return the node that was created
* @throws Exception if the DOM creation fails
* @see weka.classifiers.CostMatrix
*/
public Element writeCostMatrix(Element parent, Object o, String name)
throws Exception {
weka.classifiers.CostMatrix matrix = (weka.classifiers.CostMatrix) o;
Element node;
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
node = addElement(parent, name, o.getClass().getName(), false);
Object[][] m = new Object[matrix.size()][matrix.size()];
for (int i = 0; i < matrix.size(); i++) {
for (int j = 0; j < matrix.size(); j++) {
m[i][j] = matrix.getCell(i, j);
}
}
invokeWriteToXML(node, m, VAL_CELLS);
return node;
}
/**
* builds the CostMatrix from the given DOM node.
*
* @param node the associated XML node
* @return the instance created from the XML description
* @throws Exception if instantiation fails
* @see weka.classifiers.CostMatrix
*/
public Object readCostMatrix(Element node) throws Exception {
weka.classifiers.CostMatrix matrix;
Vector<Element> children;
Element child;
int i;
String name;
Object o;
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
matrix = null;
children = XMLDocument.getChildTags(node);
for (i = 0; i < children.size(); i++) {
child = children.get(i);
name = child.getAttribute(ATT_NAME);
if (name.equals(VAL_CELLS)) {
o = invokeReadFromXML(child);
Object[][] m = (Object[][]) o;
matrix = new weka.classifiers.CostMatrix(m.length);
for (int j = 0; j < matrix.size(); j++) {
for (int k = 0; k < matrix.size(); k++) {
matrix.setCell(j, k, m[j][k]);
}
}
}
}
return matrix;
}
/**
* adds the given Matrix to a DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the Object to describe in XML
* @param name the name of the object
* @return the node that was created
* @throws Exception if the DOM creation fails
* @see weka.core.matrix.Matrix
*/
public Element writeMatrix(Element parent, Object o, String name)
throws Exception {
weka.core.matrix.Matrix matrix;
Element node;
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
matrix = (weka.core.matrix.Matrix) o;
node = addElement(parent, name, o.getClass().getName(), false);
invokeWriteToXML(node, matrix.getArray(), VAL_CELLS);
return node;
}
/**
* builds the Matrix from the given DOM node.
*
* @param node the associated XML node
* @return the instance created from the XML description
* @throws Exception if instantiation fails
* @see weka.core.matrix.Matrix
*/
public Object readMatrix(Element node) throws Exception {
weka.core.matrix.Matrix matrix;
Vector<Element> children;
Element child;
int i;
String name;
Object o;
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
matrix = null;
children = XMLDocument.getChildTags(node);
for (i = 0; i < children.size(); i++) {
child = children.get(i);
name = child.getAttribute(ATT_NAME);
if (name.equals(VAL_CELLS)) {
o = invokeReadFromXML(child);
matrix = new weka.core.matrix.Matrix((double[][]) o);
}
}
return matrix;
}
/**
* adds the given Matrix (old) to a DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the Object to describe in XML
* @param name the name of the object
* @return the node that was created
* @throws Exception if the DOM creation fails
* @see weka.core.Matrix
*/
@SuppressWarnings("deprecation")
public Element writeMatrixOld(Element parent, Object o, String name)
throws Exception {
weka.core.Matrix matrix;
Element node;
double[][] array;
int i;
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
matrix = (weka.core.Matrix) o;
node = addElement(parent, name, o.getClass().getName(), false);
array = new double[matrix.numRows()][];
for (i = 0; i < array.length; i++) {
array[i] = matrix.getRow(i);
}
invokeWriteToXML(node, array, VAL_CELLS);
return node;
}
/**
* builds the Matrix (old) from the given DOM node.
*
* @param node the associated XML node
* @return the instance created from the XML description
* @throws Exception if instantiation fails
* @see weka.core.Matrix
*/
@SuppressWarnings("deprecation")
public Object readMatrixOld(Element node) throws Exception {
weka.core.Matrix matrix;
weka.core.matrix.Matrix matrixNew;
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
matrixNew = (weka.core.matrix.Matrix) readMatrix(node);
matrix = new weka.core.Matrix(matrixNew.getArrayCopy());
return matrix;
}
/**
* adds the given CostMatrix (old) to a DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the Object to describe in XML
* @param name the name of the object
* @return the node that was created
* @throws Exception if the DOM creation fails
* @see weka.classifiers.CostMatrix
*/
public Element writeCostMatrixOld(Element parent, Object o, String name)
throws Exception {
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
return writeMatrixOld(parent, o, name);
}
/**
* builds the CostMatrix (old) from the given DOM node.
*
* @param node the associated XML node
* @return the instance created from the XML description
* @throws Exception if instantiation fails
* @see weka.classifiers.CostMatrix
*/
public Object readCostMatrixOld(Element node) throws Exception {
weka.classifiers.CostMatrix matrix;
weka.core.matrix.Matrix matrixNew;
StringWriter writer;
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
matrixNew = (weka.core.matrix.Matrix) readMatrix(node);
writer = new StringWriter();
matrixNew.write(writer);
matrix =
new weka.classifiers.CostMatrix(new StringReader(writer.toString()));
return matrix;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/xml/XMLDocument.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* XMLDocument.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core.xml;
import java.io.BufferedWriter;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileWriter;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
import java.io.Writer;
import java.util.Vector;
import javax.xml.namespace.QName;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
/**
* This class offers some methods for generating, reading and writing
* XML documents.<br>
* It can only handle UTF-8.
*
* @see #PI
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class XMLDocument
implements RevisionHandler {
/** the parsing instructions "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
* (may not show up in Javadoc due to tags!). */
public final static String PI = "<?xml version=\"1.0\" encoding=\"utf-8\"?>";
// DTD placeholders
/** the DocType definition. */
public final static String DTD_DOCTYPE = "DOCTYPE";
/** the Element definition. */
public final static String DTD_ELEMENT = "ELEMENT";
/** the AttList definition. */
public final static String DTD_ATTLIST = "ATTLIST";
/** the optional marker. */
public final static String DTD_OPTIONAL = "?";
/** the at least one marker. */
public final static String DTD_AT_LEAST_ONE = "+";
/** the zero or more marker. */
public final static String DTD_ZERO_OR_MORE = "*";
/** the option separator. */
public final static String DTD_SEPARATOR = "|";
/** the CDATA placeholder. */
public final static String DTD_CDATA = "CDATA";
/** the ANY placeholder. */
public final static String DTD_ANY = "ANY";
/** the #PCDATA placeholder. */
public final static String DTD_PCDATA = "#PCDATA";
/** the #IMPLIED placeholder. */
public final static String DTD_IMPLIED = "#IMPLIED";
/** the #REQUIRED placeholder. */
public final static String DTD_REQUIRED = "#REQUIRED";
// often used attributes
/** the "version" attribute. */
public final static String ATT_VERSION = "version";
/** the "name" attribute. */
public final static String ATT_NAME = "name";
// often used values
/** the value "yes". */
public final static String VAL_YES = "yes";
/** the value "no". */
public final static String VAL_NO = "no";
// members
/** the factory for DocumentBuilder. */
protected DocumentBuilderFactory m_Factory = null;
/** the instance of a DocumentBuilder. */
protected DocumentBuilder m_Builder = null;
/** whether to use a validating parser or not. */
protected boolean m_Validating = false;
/** the DOM document. */
protected Document m_Document = null;
/** the DOCTYPE node as String. */
protected String m_DocType = null;
/** the root node as String. */
protected String m_RootNode = null;
/** for XPath queries. */
protected XPath m_XPath = null;
/**
* initializes the factory with a non-validating parser.
*
* @throws Exception if the construction fails
*/
public XMLDocument() throws Exception {
m_Factory = DocumentBuilderFactory.newInstance();
m_XPath = XPathFactory.newInstance(XPathFactory.DEFAULT_OBJECT_MODEL_URI).newXPath();
setDocType(null);
setRootNode(null);
setValidating(false);
}
/**
* Creates a new instance of XMLDocument.
*
* @param xml the xml to parse (if "<?xml" is not found then it is considered a file)
* @throws Exception if the construction of the DocumentBuilder fails
* @see #setValidating(boolean)
*/
public XMLDocument(String xml) throws Exception {
this();
read(xml);
}
/**
* Creates a new instance of XMLDocument.
*
* @param file the XML file to parse
* @throws Exception if the construction of the DocumentBuilder fails
* @see #setValidating(boolean)
*/
public XMLDocument(File file) throws Exception {
this();
read(file);
}
/**
* Creates a new instance of XMLDocument.
*
* @param stream the XML stream to parse
* @throws Exception if the construction of the DocumentBuilder fails
* @see #setValidating(boolean)
*/
public XMLDocument(InputStream stream) throws Exception {
this();
read(stream);
}
/**
* Creates a new instance of XMLDocument.
*
* @param reader the XML reader to parse
* @throws Exception if the construction of the DocumentBuilder fails
* @see #setValidating(boolean)
*/
public XMLDocument(Reader reader) throws Exception {
this();
read(reader);
}
/**
* returns the DocumentBuilderFactory.
*
* @return the DocumentBuilderFactory
*/
public DocumentBuilderFactory getFactory() {
return m_Factory;
}
/**
* returns the DocumentBuilder.
*
* @return the DocumentBuilder
*/
public DocumentBuilder getBuilder() {
return m_Builder;
}
/**
* returns whether a validating parser is used.
*
* @return whether a validating parser is used
*/
public boolean getValidating() {
return m_Validating;
}
/**
* sets whether to use a validating parser or not.<br>
* Note: this does clear the current DOM document!
*
* @param validating whether to use a validating parser
* @throws Exception if the instantiating of the DocumentBuilder fails
*/
public void setValidating(boolean validating) throws Exception {
m_Validating = validating;
m_Factory.setValidating(validating);
m_Builder = m_Factory.newDocumentBuilder();
clear();
}
/**
* returns the parsed DOM document.
*
* @return the parsed DOM document
*/
public Document getDocument() {
return m_Document;
}
/**
* sets the DOM document to use.
*
* @param newDocument the DOM document to use
*/
public void setDocument(Document newDocument) {
m_Document = newDocument;
}
/**
* sets the DOCTYPE string to use in the XML output. Performs NO checking!
* If it is <code>null</code>, the DOCTYPE is omitted.
*
* @param docType the DOCTYPE definition to use in XML output
*/
public void setDocType(String docType) {
m_DocType = docType;
}
/**
* returns the current DOCTYPE, can be <code>null</code>.
*
* @return the current DOCTYPE definition, can be <code>null</code>
*/
public String getDocType() {
return m_DocType;
}
/**
* sets the root node to use in the XML output. Performs NO checking with
* DOCTYPE!
*
* @param rootNode the root node to use in the XML output
*/
public void setRootNode(String rootNode) {
if (rootNode == null)
m_RootNode = "root";
else
m_RootNode = rootNode;
}
/**
* returns the current root node.
*
* @return the current root node
*/
public String getRootNode() {
return m_RootNode;
}
/**
* sets up an empty DOM document, with the current DOCTYPE and root node.
*
* @see #setRootNode(String)
* @see #setDocType(String)
*/
public void clear() {
newDocument(getDocType(), getRootNode());
}
/**
* creates a new Document with the given information.
*
* @param docType the DOCTYPE definition (no checking happens!), can be null
* @param rootNode the name of the root node (must correspond to the one
* given in <code>docType</code>)
* @return returns the just created DOM document for convenience
*/
public Document newDocument(String docType, String rootNode) {
m_Document = getBuilder().newDocument();
m_Document.appendChild(m_Document.createElement(rootNode));
setDocType(docType);
return getDocument();
}
/**
* parses the given XML string (can be XML or a filename) and returns a
* DOM Document.
*
* @param xml the xml to parse (if "<?xml" is not found then it is considered a file)
* @return the parsed DOM document
* @throws Exception if something goes wrong with the parsing
*/
public Document read(String xml) throws Exception {
if (xml.toLowerCase().indexOf("<?xml") > -1)
return read(new ByteArrayInputStream(xml.getBytes()));
else
return read(new File(xml));
}
/**
* parses the given file and returns a DOM document.
*
* @param file the XML file to parse
* @return the parsed DOM document
* @throws Exception if something goes wrong with the parsing
*/
public Document read(File file) throws Exception {
m_Document = getBuilder().parse(file);
return getDocument();
}
/**
* parses the given stream and returns a DOM document.
*
* @param stream the XML stream to parse
* @return the parsed DOM document
* @throws Exception if something goes wrong with the parsing
*/
public Document read(InputStream stream) throws Exception {
m_Document = getBuilder().parse(stream);
return getDocument();
}
/**
* parses the given reader and returns a DOM document.
*
* @param reader the XML reader to parse
* @return the parsed DOM document
* @throws Exception if something goes wrong with the parsing
*/
public Document read(Reader reader) throws Exception {
m_Document = getBuilder().parse(new InputSource(reader));
return getDocument();
}
/**
* writes the current DOM document into the given file.
*
* @param file the filename to write to
* @throws Exception if something goes wrong with the parsing
*/
public void write(String file) throws Exception {
write(new File(file));
}
/**
* writes the current DOM document into the given file.
*
* @param file the filename to write to
* @throws Exception if something goes wrong with the parsing
*/
public void write(File file) throws Exception {
write(new BufferedWriter(new FileWriter(file)));
}
/**
* writes the current DOM document into the given stream.
*
* @param stream the filename to write to
* @throws Exception if something goes wrong with the parsing
*/
public void write(OutputStream stream) throws Exception {
String xml;
xml = toString();
stream.write(xml.getBytes(), 0, xml.length());
stream.flush();
}
/**
* writes the current DOM document into the given writer.
*
* @param writer the filename to write to
* @throws Exception if something goes wrong with the parsing
*/
public void write(Writer writer) throws Exception {
writer.write(toString());
writer.flush();
}
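/**
 * Illustrative usage sketch, not part of the original interruptible-weka
 * source: reads an existing XML file and writes the pretty-printed DOM
 * back out, using hypothetical file names.
 *
 * @throws Exception if reading or writing fails
 */
protected static void readWriteSketch() throws Exception {
XMLDocument doc = new XMLDocument();
// parse from a file (a string containing "<?xml" would be parsed directly)
doc.read(new File("in.xml"));
// write(String) serializes the DOM via toString()
doc.write("out.xml");
}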
/**
* returns all child tags (element nodes) of the given node; text nodes are
* skipped.
*
* @param parent the node to get the children from
* @return a vector containing all the child elements
*/
public static Vector<Element> getChildTags(Node parent) {
return getChildTags(parent, "");
}
/**
* returns all child tags (element nodes) of the given node; text nodes are
* skipped.
*
* @param parent the node to get the children from
* @param name the name of the tags to return, "" for all
* @return a vector containing all the child elements
*/
public static Vector<Element> getChildTags(Node parent, String name) {
Vector<Element> result;
int i;
NodeList list;
result = new Vector<Element>();
list = parent.getChildNodes();
for (i = 0; i < list.getLength(); i++) {
if (!(list.item(i) instanceof Element))
continue;
// only tags with a certain name?
if (name.length() != 0) {
if (!((Element) list.item(i)).getTagName().equals(name))
continue;
}
result.add((Element)list.item(i));
}
return result;
}
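/**
 * Illustrative usage sketch, not part of the original interruptible-weka
 * source: collects the "attribute" child tags of the document root of a
 * hypothetical "dataset.xml" and prints their "name" attributes.
 *
 * @throws Exception if parsing fails
 */
protected static void childTagsSketch() throws Exception {
XMLDocument doc = new XMLDocument(new File("dataset.xml"));
Vector<Element> tags =
getChildTags(doc.getDocument().getDocumentElement(), "attribute");
for (int i = 0; i < tags.size(); i++)
System.out.println(tags.get(i).getAttribute("name"));
}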
/**
* Returns the result of the XPath expression, converted to the specified type.
* Can return null if an error occurred.
*
* @param xpath the XPath expression to run on the document
* @param type the type of the result
* @return the result
*/
protected Object eval(String xpath, QName type) {
Object result;
try {
result = m_XPath.evaluate(xpath, m_Document, type);
}
catch (Exception e) {
e.printStackTrace();
result = null;
}
return result;
}
/**
* Returns the nodes that the given xpath expression will find in the
* document. Can return null if an error occurred.
*
* @param xpath the XPath expression to run on the document
* @return the nodelist
*/
public NodeList findNodes(String xpath) {
return (NodeList) eval(xpath, XPathConstants.NODESET);
}
/**
* Returns the node represented by the XPath expression.
* Can return null if an error occurred.
*
* @param xpath the XPath expression to run on the document
* @return the node
*/
public Node getNode(String xpath) {
return (Node) eval(xpath, XPathConstants.NODE);
}
/**
* Evaluates and returns the boolean result of the XPath expression.
*
* @param xpath the expression to evaluate
* @return the result of the evaluation, null in case of an error
*/
public Boolean evalBoolean(String xpath) {
return (Boolean) eval(xpath, XPathConstants.BOOLEAN);
}
/**
* Evaluates and returns the double result of the XPath expression.
*
* @param xpath the expression to evaluate
* @return the result of the evaluation, null in case of
* an error
*/
public Double evalDouble(String xpath) {
return (Double) eval(xpath, XPathConstants.NUMBER);
}
/**
* Evaluates and returns the string result of the XPath expression.
*
* @param xpath the expression to evaluate
* @return the result of the evaluation
*/
public String evalString(String xpath) {
return (String) eval(xpath, XPathConstants.STRING);
}
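/**
 * Illustrative usage sketch, not part of the original interruptible-weka
 * source: the XPath convenience methods above applied to a hypothetical
 * "dataset.xml" document.
 *
 * @throws Exception if parsing fails
 */
protected static void xpathSketch() throws Exception {
XMLDocument doc = new XMLDocument(new File("dataset.xml"));
// all nodes matching an expression
NodeList values = doc.findNodes("//value");
// numeric and string results of expressions
Double numInstances = doc.evalDouble("count(//instance)");
String relation = doc.evalString("/dataset/@name");
System.out.println(relation + ": " + numInstances + " instances, "
+ values.getLength() + " values");
}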
/**
* returns the text between the opening and closing tag of a node
* (performs a <code>trim()</code> on the result).
*
* @param node the node to get the text from
* @return the content of the given node
*/
public static String getContent(Element node) {
NodeList list;
Node item;
int i;
String result;
result = "";
list = node.getChildNodes();
for (i = 0; i < list.getLength(); i++) {
item = list.item(i);
if (item.getNodeType() == Node.TEXT_NODE)
result += item.getNodeValue();
}
return result.trim();
}
/**
* turns the given node into an XML stringbuffer, indented according to the depth.
*
* @param buf the stringbuffer so far
* @param parent the current node
* @param depth the current depth
* @return the new XML-stringbuffer
*/
protected StringBuffer toString(StringBuffer buf, Node parent, int depth) {
NodeList list;
Node node;
int i;
int n;
String indent;
NamedNodeMap atts;
// build indent
indent = "";
for (i = 0; i < depth; i++)
indent += " ";
if (parent.getNodeType() == Node.TEXT_NODE) {
if (!parent.getNodeValue().trim().equals(""))
buf.append(indent + parent.getNodeValue().trim() + "\n");
}
else
if (parent.getNodeType() == Node.COMMENT_NODE) {
buf.append(indent + "<!--" + parent.getNodeValue() + "-->\n");
}
else {
buf.append(indent + "<" + parent.getNodeName());
// attributes?
if (parent.hasAttributes()) {
atts = parent.getAttributes();
for (n = 0; n < atts.getLength(); n++) {
node = atts.item(n);
buf.append(" " + node.getNodeName() + "=\"" + node.getNodeValue() + "\"");
}
}
// children?
if (parent.hasChildNodes()) {
list = parent.getChildNodes();
// just a text node?
if ( (list.getLength() == 1) && (list.item(0).getNodeType() == Node.TEXT_NODE) ) {
buf.append(">");
buf.append(list.item(0).getNodeValue().trim());
buf.append("</" + parent.getNodeName() + ">\n");
}
else {
buf.append(">\n");
for (n = 0; n < list.getLength(); n++) {
node = list.item(n);
toString(buf, node, depth + 1);
}
buf.append(indent + "</" + parent.getNodeName() + ">\n");
}
}
else {
buf.append("/>\n");
}
}
return buf;
}
/**
* prints the current DOM document to standard out.
*/
public void print() {
System.out.println(toString());
}
/**
* returns the current DOM document as XML-string.
*
* @return the document as XML-string representation
*/
public String toString() {
String header;
header = PI + "\n\n";
if (getDocType() != null)
header += getDocType() + "\n\n";
return toString(new StringBuffer(header), getDocument().getDocumentElement(), 0).toString();
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* for testing only. Takes the name of an XML file as the first argument, reads
* that file, prints it to stdout and, if a second filename is given, writes the
* parsed document back out to that file.
*
* @param args the commandline arguments
* @throws Exception if something goes wrong
*/
public static void main(String[] args) throws Exception {
XMLDocument doc;
if (args.length > 0) {
doc = new XMLDocument();
// read
doc.read(args[0]);
// print to stdout
doc.print();
// output?
if (args.length > 1) {
doc.write(args[1]);
}
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/xml/XMLInstances.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* XMLInstances.java
* Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core.xml;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Properties;
import java.util.Vector;
import java.util.zip.GZIPInputStream;
import org.w3c.dom.Element;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.ProtectedProperties;
import weka.core.RevisionUtils;
import weka.core.SparseInstance;
import weka.core.Utils;
import weka.core.Version;
/**
* XML representation of the Instances class.
*
* @author fracpete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class XMLInstances extends XMLDocument implements Serializable {
/** for serialization */
private static final long serialVersionUID = 3626821327547416099L;
/** The filename extension that should be used for xrff files */
public static String FILE_EXTENSION = ".xrff";
// tags
/** the root element */
public final static String TAG_DATASET = "dataset";
/** the header element */
public final static String TAG_HEADER = "header";
/** the body element */
public final static String TAG_BODY = "body";
/** the notes element */
public final static String TAG_NOTES = "notes";
/** the attributes element */
public final static String TAG_ATTRIBUTES = "attributes";
/** the attribute element */
public final static String TAG_ATTRIBUTE = "attribute";
/** the labels element */
public final static String TAG_LABELS = "labels";
/** the label element */
public final static String TAG_LABEL = "label";
/** the meta-data element */
public final static String TAG_METADATA = "metadata";
/** the property element */
public final static String TAG_PROPERTY = "property";
/** the data element */
public final static String TAG_INSTANCES = "instances";
/** the instance element */
public final static String TAG_INSTANCE = "instance";
/** the value element */
public final static String TAG_VALUE = "value";
// attributes
/** the version attribute */
public final static String ATT_VERSION = "version";
/** the type attribute */
public final static String ATT_TYPE = "type";
/** the format attribute (for date attributes) */
public final static String ATT_FORMAT = "format";
/** the class attribute */
public final static String ATT_CLASS = "class";
/** the index attribute */
public final static String ATT_INDEX = "index";
/** the weight attribute */
public final static String ATT_WEIGHT = "weight";
/** the missing attribute */
public final static String ATT_MISSING = "missing";
// values
/** the value for numeric */
public final static String VAL_NUMERIC = "numeric";
/** the value for date */
public final static String VAL_DATE = "date";
/** the value for nominal */
public final static String VAL_NOMINAL = "nominal";
/** the value for string */
public final static String VAL_STRING = "string";
/** the value for relational */
public final static String VAL_RELATIONAL = "relational";
/** the value for normal */
public final static String VAL_NORMAL = "normal";
/** the value for sparse */
public final static String VAL_SPARSE = "sparse";
/** the DTD */
public final static String DOCTYPE = "<!" + DTD_DOCTYPE + " " + TAG_DATASET
+ "\n" + "[\n" + " <!" + DTD_ELEMENT + " " + TAG_DATASET + " ("
+ TAG_HEADER + "," + TAG_BODY + ")" + ">\n" + " <!" + DTD_ATTLIST + " "
+ TAG_DATASET + " " + ATT_NAME + " " + DTD_CDATA + " " + DTD_REQUIRED
+ ">\n" + " <!" + DTD_ATTLIST + " " + TAG_DATASET + " " + ATT_VERSION
+ " " + DTD_CDATA + " \"" + Version.VERSION + "\">\n" + "\n" + " <!"
+ DTD_ELEMENT + " " + TAG_HEADER + " (" + TAG_NOTES + DTD_OPTIONAL + ","
+ TAG_ATTRIBUTES + ")" + ">\n" + " <!" + DTD_ELEMENT + " " + TAG_BODY
+ " (" + TAG_INSTANCES + ")" + ">\n" + " <!" + DTD_ELEMENT + " "
+ TAG_NOTES + " " + DTD_ANY
+ "> <!-- comments, information, copyright, etc. -->\n" + "\n" + " <!"
+ DTD_ELEMENT + " " + TAG_ATTRIBUTES + " (" + TAG_ATTRIBUTE
+ DTD_AT_LEAST_ONE + ")" + ">\n" + " <!" + DTD_ELEMENT + " "
+ TAG_ATTRIBUTE + " (" + TAG_LABELS + DTD_OPTIONAL + "," + TAG_METADATA
+ DTD_OPTIONAL + "," + TAG_ATTRIBUTES + DTD_OPTIONAL + ")" + ">\n"
+ " <!" + DTD_ATTLIST + " " + TAG_ATTRIBUTE + " " + ATT_NAME + " "
+ DTD_CDATA + " " + DTD_REQUIRED + ">\n" + " <!" + DTD_ATTLIST + " "
+ TAG_ATTRIBUTE + " " + ATT_TYPE + " (" + VAL_NUMERIC + DTD_SEPARATOR
+ VAL_DATE + DTD_SEPARATOR + VAL_NOMINAL + DTD_SEPARATOR + VAL_STRING
+ DTD_SEPARATOR + VAL_RELATIONAL + ") " + DTD_REQUIRED + ">\n" + " <!"
+ DTD_ATTLIST + " " + TAG_ATTRIBUTE + " " + ATT_FORMAT + " " + DTD_CDATA
+ " " + DTD_IMPLIED + ">\n" + " <!" + DTD_ATTLIST + " " + TAG_ATTRIBUTE
+ " " + ATT_CLASS + " (" + VAL_YES + DTD_SEPARATOR + VAL_NO + ") \""
+ VAL_NO + "\"" + ">\n" + " <!" + DTD_ELEMENT + " " + TAG_LABELS + " ("
+ TAG_LABEL + DTD_ZERO_OR_MORE + ")"
+ "> <!-- only for type \"nominal\" -->\n" + " <!" + DTD_ELEMENT + " "
+ TAG_LABEL + " " + DTD_ANY + ">\n" + " <!" + DTD_ELEMENT + " "
+ TAG_METADATA + " (" + TAG_PROPERTY + DTD_ZERO_OR_MORE + ")" + ">\n"
+ " <!" + DTD_ELEMENT + " " + TAG_PROPERTY + " " + DTD_ANY + ">\n"
+ " <!" + DTD_ATTLIST + " " + TAG_PROPERTY + " " + ATT_NAME + " "
+ DTD_CDATA + " " + DTD_REQUIRED + ">\n" + "\n" + " <!" + DTD_ELEMENT
+ " " + TAG_INSTANCES + " (" + TAG_INSTANCE + DTD_ZERO_OR_MORE + ")"
+ ">\n" + " <!" + DTD_ELEMENT + " " + TAG_INSTANCE + " (" + TAG_VALUE
+ DTD_ZERO_OR_MORE + ")" + ">\n" + " <!" + DTD_ATTLIST + " "
+ TAG_INSTANCE + " " + ATT_TYPE + " (" + VAL_NORMAL + DTD_SEPARATOR
+ VAL_SPARSE + ") \"" + VAL_NORMAL + "\"" + ">\n" + " <!" + DTD_ATTLIST
+ " " + TAG_INSTANCE + " " + ATT_WEIGHT + " " + DTD_CDATA + " "
+ DTD_IMPLIED + ">\n" + " <!" + DTD_ELEMENT + " " + TAG_VALUE + " ("
+ DTD_PCDATA + DTD_SEPARATOR + TAG_INSTANCES + ")" + DTD_ZERO_OR_MORE
+ ">\n" + " <!" + DTD_ATTLIST + " " + TAG_VALUE + " " + ATT_INDEX + " "
+ DTD_CDATA + " " + DTD_IMPLIED
+ "> <!-- 1-based index (only used for instance format \"sparse\") -->\n"
+ " <!" + DTD_ATTLIST + " " + TAG_VALUE + " " + ATT_MISSING + " ("
+ VAL_YES + DTD_SEPARATOR + VAL_NO + ") \"" + VAL_NO + "\"" + ">\n" + "]\n"
+ ">";
/** the precision for numbers */
protected int m_Precision = 6;
/** the underlying Instances */
protected Instances m_Instances;
/**
* the default constructor
*
* @throws Exception if XML initialization fails
*/
public XMLInstances() throws Exception {
super();
m_Instances = null;
setDocType(DOCTYPE);
setRootNode(TAG_DATASET);
setValidating(true);
}
/**
* generates the XML structure based on the given data
*
* @param data the data to build the XML structure from
* @throws Exception if initialization/generation fails
*/
public XMLInstances(Instances data) throws Exception {
this();
setInstances(data);
}
/**
* generates the Instances directly from the reader containing the XML data.
*
* @param reader the reader for the XML data
* @throws Exception if something goes wrong
*/
public XMLInstances(Reader reader) throws Exception {
this();
setXML(reader);
}
/**
* adds the attribute to the XML structure
*
* @param parent the parent node to add the attribute node as child
* @param att the attribute to add
*/
protected void addAttribute(Element parent, Attribute att) {
Element node;
Element child;
Element property;
Element label;
String tmpStr;
Enumeration<?> enm;
int i;
node = m_Document.createElement(TAG_ATTRIBUTE);
parent.appendChild(node);
// XML attributes
// name
node.setAttribute(ATT_NAME, validContent(att.name()));
// type
switch (att.type()) {
case Attribute.NUMERIC:
node.setAttribute(ATT_TYPE, VAL_NUMERIC);
break;
case Attribute.DATE:
node.setAttribute(ATT_TYPE, VAL_DATE);
break;
case Attribute.NOMINAL:
node.setAttribute(ATT_TYPE, VAL_NOMINAL);
break;
case Attribute.STRING:
node.setAttribute(ATT_TYPE, VAL_STRING);
break;
case Attribute.RELATIONAL:
node.setAttribute(ATT_TYPE, VAL_RELATIONAL);
break;
default:
node.setAttribute(ATT_TYPE, "???");
}
// labels
if (att.isNominal()) {
child = m_Document.createElement(TAG_LABELS);
node.appendChild(child);
enm = att.enumerateValues();
while (enm.hasMoreElements()) {
tmpStr = enm.nextElement().toString();
label = m_Document.createElement(TAG_LABEL);
child.appendChild(label);
label.appendChild(m_Document.createTextNode(validContent(tmpStr)));
}
}
// format
if (att.isDate()) {
node.setAttribute(ATT_FORMAT, validContent(att.getDateFormat()));
}
// class
if (m_Instances.classIndex() > -1) {
if (att == m_Instances.classAttribute()) {
node.setAttribute(ATT_CLASS, VAL_YES);
}
}
// add meta-data
if ((att.getMetadata() != null) && (att.getMetadata().size() > 0)) {
child = m_Document.createElement(TAG_METADATA);
node.appendChild(child);
enm = att.getMetadata().propertyNames();
while (enm.hasMoreElements()) {
tmpStr = enm.nextElement().toString();
property = m_Document.createElement(TAG_PROPERTY);
child.appendChild(property);
property.setAttribute(ATT_NAME, validContent(tmpStr));
property.appendChild(m_Document.createTextNode(validContent(att
.getMetadata().getProperty(tmpStr, ""))));
}
}
// relational attribute?
if (att.isRelationValued()) {
child = m_Document.createElement(TAG_ATTRIBUTES);
node.appendChild(child);
for (i = 0; i < att.relation().numAttributes(); i++) {
addAttribute(child, att.relation().attribute(i));
}
}
}
/**
* turns all <, > and & into character entities and returns that
* string. Necessary for TextNodes.
*
* @param content string to convert
* @return the valid content string
*/
protected String validContent(String content) {
String result;
result = content;
// these five entities are recognized by every XML processor
// see http://www.xml.com/pub/a/2001/03/14/trxml10.html
result = result.replaceAll("&", "&amp;").replaceAll("\"", "&quot;")
.replaceAll("'", "&apos;").replaceAll("<", "&lt;")
.replaceAll(">", "&gt;");
// in addition, replace some other entities as well
result = result.replaceAll("\n", "&#10;").replaceAll("\r", "&#13;")
.replaceAll("\t", "&#9;");
return result;
}
/**
* adds the instance to the XML structure
*
* @param parent the parent node to add the instance node as child
* @param inst the instance to add
*/
protected void addInstance(Element parent, Instance inst) {
Element node;
Element value;
Element child;
boolean sparse;
int i;
int n;
int index;
node = m_Document.createElement(TAG_INSTANCE);
parent.appendChild(node);
// sparse?
sparse = (inst instanceof SparseInstance);
if (sparse) {
node.setAttribute(ATT_TYPE, VAL_SPARSE);
}
// weight
if (inst.weight() != 1.0) {
node.setAttribute(ATT_WEIGHT,
Utils.doubleToString(inst.weight(), m_Precision));
}
// values
for (i = 0; i < inst.numValues(); i++) {
index = inst.index(i);
value = m_Document.createElement(TAG_VALUE);
node.appendChild(value);
if (inst.isMissing(index)) {
value.setAttribute(ATT_MISSING, VAL_YES);
} else {
if (inst.attribute(index).isRelationValued()) {
child = m_Document.createElement(TAG_INSTANCES);
value.appendChild(child);
for (n = 0; n < inst.relationalValue(i).numInstances(); n++) {
addInstance(child, inst.relationalValue(i).instance(n));
}
} else {
if (inst.attribute(index).type() == Attribute.NUMERIC) {
value.appendChild(m_Document.createTextNode(Utils.doubleToString(
inst.value(index), m_Precision)));
} else {
value.appendChild(m_Document.createTextNode(validContent(inst
.stringValue(index))));
}
}
}
if (sparse) {
value.setAttribute(ATT_INDEX, "" + (index + 1));
}
}
}
/**
* generates the XML structure for the header
*/
protected void headerToXML() {
Element root;
Element node;
Element child;
int i;
root = m_Document.getDocumentElement();
root.setAttribute(ATT_NAME, validContent(m_Instances.relationName()));
root.setAttribute(ATT_VERSION, Version.VERSION);
// create "header" node
node = m_Document.createElement(TAG_HEADER);
root.appendChild(node);
// add all attributes
child = m_Document.createElement(TAG_ATTRIBUTES);
node.appendChild(child);
for (i = 0; i < m_Instances.numAttributes(); i++) {
addAttribute(child, m_Instances.attribute(i));
}
}
/**
* generates the XML structure from the rows
*/
protected void dataToXML() {
Element root;
Element node;
Element child;
int i;
root = m_Document.getDocumentElement();
// create "body" node
node = m_Document.createElement(TAG_BODY);
root.appendChild(node);
// add all instances
child = m_Document.createElement(TAG_INSTANCES);
node.appendChild(child);
for (i = 0; i < m_Instances.numInstances(); i++) {
addInstance(child, m_Instances.instance(i));
}
}
/**
* builds up the XML structure based on the given data
*
* @param data data to generate the XML from
*/
public void setInstances(Instances data) {
m_Instances = new Instances(data);
clear();
headerToXML();
dataToXML();
}
/**
* returns the current instances, either the ones that were set or the ones
* that were generated from the XML structure.
*
* @return the current instances
*/
public Instances getInstances() {
return m_Instances;
}
/**
* returns the metadata available underneath this node, if any; otherwise
* null
*
* @param parent the attribute node
* @return the metadata, or null if none found
* @throws Exception if generation fails
*/
protected ProtectedProperties createMetadata(Element parent) throws Exception {
ProtectedProperties result;
Properties props;
Vector<Element> list;
Element node;
Element metanode;
int i;
result = null;
// find metadata node directly underneath this attribute, but not in
// deeper nested attributes (e.g., within relational attributes)
metanode = null;
list = getChildTags(parent, TAG_METADATA);
if (list.size() > 0) {
metanode = list.get(0);
}
// generate properties
if (metanode != null) {
props = new Properties();
list = getChildTags(metanode, TAG_PROPERTY);
for (i = 0; i < list.size(); i++) {
node = list.get(i);
props.setProperty(node.getAttribute(ATT_NAME), getContent(node));
}
result = new ProtectedProperties(props);
}
return result;
}
/**
* returns the labels listed underneath this (nominal) attribute in an
* ArrayList
*
* @param parent the (nominal) attribute node
* @return the label vector
* @throws Exception if generation fails
*/
protected ArrayList<String> createLabels(Element parent) throws Exception {
ArrayList<String> result;
Vector<Element> list;
Element node;
Element labelsnode;
int i;
result = new ArrayList<String>();
// find labels node directly underneath this attribute, but not in
// deeper nested attributes (e.g., within relational attributes)
labelsnode = null;
list = getChildTags(parent, TAG_LABELS);
if (list.size() > 0) {
labelsnode = list.get(0);
}
// retrieve all labels
if (labelsnode != null) {
list = getChildTags(labelsnode, TAG_LABEL);
for (i = 0; i < list.size(); i++) {
node = list.get(i);
result.add(getContent(node));
}
}
return result;
}
/**
* creates an Attribute from the given XML node
*
* @param node the node with the setup
* @return the configured Attribute
* @throws Exception if generation fails, e.g., due to unknown attribute type
*/
protected Attribute createAttribute(Element node) throws Exception {
String typeStr;
String name;
int type;
Attribute result;
ArrayList<String> values;
ProtectedProperties metadata;
Vector<Element> list;
ArrayList<Attribute> atts;
result = null;
// name
name = node.getAttribute(ATT_NAME);
// type
typeStr = node.getAttribute(ATT_TYPE);
if (typeStr.equals(VAL_NUMERIC)) {
type = Attribute.NUMERIC;
} else if (typeStr.equals(VAL_DATE)) {
type = Attribute.DATE;
} else if (typeStr.equals(VAL_NOMINAL)) {
type = Attribute.NOMINAL;
} else if (typeStr.equals(VAL_STRING)) {
type = Attribute.STRING;
} else if (typeStr.equals(VAL_RELATIONAL)) {
type = Attribute.RELATIONAL;
} else {
throw new Exception("Attribute type '" + typeStr + "' is not supported!");
}
// metadata
metadata = createMetadata(node);
switch (type) {
case Attribute.NUMERIC:
if (metadata == null) {
result = new Attribute(name);
} else {
result = new Attribute(name, metadata);
}
break;
case Attribute.DATE:
if (metadata == null) {
result = new Attribute(name, node.getAttribute(ATT_FORMAT));
} else {
result = new Attribute(name, node.getAttribute(ATT_FORMAT), metadata);
}
break;
case Attribute.NOMINAL:
values = createLabels(node);
if (metadata == null) {
result = new Attribute(name, values);
} else {
result = new Attribute(name, values, metadata);
}
break;
case Attribute.STRING:
if (metadata == null) {
result = new Attribute(name, (ArrayList<String>) null);
} else {
result = new Attribute(name, (ArrayList<String>) null, metadata);
}
break;
case Attribute.RELATIONAL:
list = getChildTags(node, TAG_ATTRIBUTES);
node = list.get(0);
atts = createAttributes(node, new int[1]);
if (metadata == null) {
result = new Attribute(name, new Instances(name, atts, 0));
} else {
result = new Attribute(name, new Instances(name, atts, 0), metadata);
}
break;
}
return result;
}
/**
* returns a list of generated attributes
*
* @param parent the attributes node
* @param classIndex array of length 1 to return the class index, if any
* @return the vector with the generated attributes
* @throws Exception if generation fails, e.g., due to unknown attribute type
*/
protected ArrayList<Attribute> createAttributes(Element parent,
int[] classIndex) throws Exception {
Vector<Element> list;
ArrayList<Attribute> result;
int i;
Element node;
Attribute att;
result = new ArrayList<Attribute>();
classIndex[0] = -1;
list = getChildTags(parent, TAG_ATTRIBUTE);
for (i = 0; i < list.size(); i++) {
node = list.get(i);
att = createAttribute(node);
if (node.getAttribute(ATT_CLASS).equals(VAL_YES)) {
classIndex[0] = i;
}
result.add(att);
}
return result;
}
/**
* creates an Instance from the given XML node
*
* @param header the data this instance will belong to
* @param parent the instance node
* @return the configured Instance
* @throws Exception if generation fails, e.g., due to unknown attribute type
*/
protected Instance createInstance(Instances header, Element parent)
throws Exception {
Instance result;
Element node;
Element child;
boolean sparse;
int i;
int index;
Vector<Element> list;
Vector<Element> subList;
double[] values;
String content;
double weight;
Instances data;
result = null;
// sparse?
sparse = (parent.getAttribute(ATT_TYPE).equals(VAL_SPARSE));
values = new double[header.numAttributes()];
// weight
if (parent.getAttribute(ATT_WEIGHT).length() != 0) {
weight = Double.parseDouble(parent.getAttribute(ATT_WEIGHT));
} else {
weight = 1.0;
}
list = getChildTags(parent, TAG_VALUE);
for (i = 0; i < list.size(); i++) {
node = list.get(i);
// determine index
if (sparse) {
index = Integer.parseInt(node.getAttribute(ATT_INDEX)) - 1;
} else {
index = i;
}
// set value
if (node.getAttribute(ATT_MISSING).equals(VAL_YES)) {
values[index] = Utils.missingValue();
} else {
content = getContent(node);
switch (header.attribute(index).type()) {
case Attribute.NUMERIC:
values[index] = Double.parseDouble(content);
break;
case Attribute.DATE:
values[index] = header.attribute(index).parseDate(content);
break;
case Attribute.NOMINAL:
values[index] = header.attribute(index).indexOfValue(content);
break;
case Attribute.STRING:
values[index] = header.attribute(index).addStringValue(content);
break;
case Attribute.RELATIONAL:
subList = getChildTags(node, TAG_INSTANCES);
child = subList.get(0);
data = createInstances(header.attribute(index).relation(), child);
values[index] = header.attribute(index).addRelation(data);
break;
default:
throw new Exception("Attribute type "
+ header.attribute(index).type() + " is not supported!");
}
}
}
// create instance
if (sparse) {
result = new SparseInstance(weight, values);
} else {
result = new DenseInstance(weight, values);
}
return result;
}
/**
* creates Instances from the given XML node
*
* @param header the header of this data
* @param parent the instances node
* @return the generated Instances
* @throws Exception if generation fails, e.g., due to unknown attribute type
*/
protected Instances createInstances(Instances header, Element parent)
throws Exception {
Instances result;
Vector<Element> list;
int i;
result = new Instances(header, 0);
list = getChildTags(parent, TAG_INSTANCE);
for (i = 0; i < list.size(); i++) {
result.add(createInstance(result, list.get(i)));
}
return result;
}
/**
* generates the header from the XML document
*
* @return the generated header
* @throws Exception if generation fails
*/
protected Instances headerFromXML() throws Exception {
Instances result;
Element root;
Element node;
Vector<Element> list;
ArrayList<Attribute> atts;
Version version;
int[] classIndex;
root = m_Document.getDocumentElement();
// check version
version = new Version();
if (version.isOlder(root.getAttribute(ATT_VERSION))) {
System.out.println("WARNING: loading data of version "
+ root.getAttribute(ATT_VERSION) + " with version " + Version.VERSION);
}
// attributes
list = getChildTags(root, TAG_HEADER);
node = list.get(0);
list = getChildTags(node, TAG_ATTRIBUTES);
node = list.get(0);
classIndex = new int[1];
atts = createAttributes(node, classIndex);
// generate header
result = new Instances(root.getAttribute(ATT_NAME), atts, 0);
result.setClassIndex(classIndex[0]);
return result;
}
/**
* generates the complete dataset from the XML document
*
* @param header the header structure
* @return the complete dataset
* @throws Exception if generation fails
*/
protected Instances dataFromXML(Instances header) throws Exception {
Instances result;
Element node;
Vector<Element> list;
list = getChildTags(m_Document.getDocumentElement(), TAG_BODY);
node = list.get(0);
list = getChildTags(node, TAG_INSTANCES);
node = list.get(0);
result = createInstances(header, node);
return result;
}
/**
* reads the XML structure from the given reader
*
* @param reader the reader to get the XML from
* @throws Exception if reading or parsing the XML fails
*/
public void setXML(Reader reader) throws Exception {
read(reader);
// interpret XML structure
m_Instances = dataFromXML(headerFromXML());
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* takes an XML document as first argument and then outputs the Instances
* statistics
*
* @param args the commandline options
*/
public static void main(String[] args) {
try {
Reader r = null;
if (args.length != 1) {
throw (new Exception("Usage: XMLInstances <filename>"));
} else {
InputStream in = new FileInputStream(args[0]);
// compressed file?
if (args[0].endsWith(".gz")) {
in = new GZIPInputStream(in);
}
r = new BufferedReader(new InputStreamReader(in));
}
if (args[0].endsWith(Instances.FILE_EXTENSION)) {
XMLInstances i = new XMLInstances(new Instances(r));
System.out.println(i.toString());
} else {
Instances i = new XMLInstances(r).getInstances();
System.out.println(i.toSummaryString());
}
} catch (Exception ex) {
ex.printStackTrace();
System.err.println(ex.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/xml/XMLOptions.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* XMLOptions.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core.xml;
import java.io.File;
import java.io.InputStream;
import java.io.Reader;
import java.util.Vector;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* A class for transforming options listed in XML to a regular WEKA command line
* string.
* <p>
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class XMLOptions implements RevisionHandler {
/** tag for a single option. */
public final static String TAG_OPTION = "option";
/** tag for a list of options. */
public final static String TAG_OPTIONS = "options";
/** the name attribute. */
public final static String ATT_NAME = "name";
/** the type attribute. */
public final static String ATT_TYPE = "type";
/** the value attribute. */
public final static String ATT_VALUE = "value";
/** a value of the type attribute. */
public final static String VAL_TYPE_FLAG = "flag";
/** a value of the type attribute. */
public final static String VAL_TYPE_SINGLE = "single";
/** a value of the type attribute. */
public final static String VAL_TYPE_HYPHENS = "hyphens";
/** a value of the type attribute. */
public final static String VAL_TYPE_QUOTES = "quotes";
/** a value of the type attribute. */
public final static String VAL_TYPE_CLASSIFIER = "classifier";
/** a value of the type attribute. */
public final static String VAL_TYPE_OPTIONHANDLER = "optionhandler";
/** the root node. */
public final static String ROOT_NODE = TAG_OPTIONS;
/** the DTD for the XML file. */
public final static String DOCTYPE = "<!DOCTYPE " + ROOT_NODE + "\n" + "[\n"
+ " <!ELEMENT " + TAG_OPTIONS + " (" + TAG_OPTION + ")*>\n"
+ " <!ATTLIST " + TAG_OPTIONS + " " + ATT_TYPE + " CDATA \""
+ VAL_TYPE_OPTIONHANDLER + "\">\n" + " <!ATTLIST " + TAG_OPTIONS + " "
+ ATT_VALUE + " CDATA \"\">\n" + " <!ELEMENT " + TAG_OPTION
+ " (#PCDATA | " + TAG_OPTIONS + ")*>\n" + " <!ATTLIST " + TAG_OPTION
+ " " + ATT_NAME + " CDATA #REQUIRED>\n" + " <!ATTLIST " + TAG_OPTION
+ " " + ATT_TYPE + " (" + VAL_TYPE_FLAG + " | " + VAL_TYPE_SINGLE + " | "
+ VAL_TYPE_HYPHENS + " | " + VAL_TYPE_QUOTES + ") \"" + VAL_TYPE_SINGLE
+ "\">\n" + "]\n" + ">";
/** the XML document. */
protected XMLDocument m_XMLDocument = null;
/**
* Creates a new instance of XMLOptions.
*
* @throws Exception if the construction of the DocumentBuilder fails
* @see #setValidating(boolean)
*/
public XMLOptions() throws Exception {
m_XMLDocument = new XMLDocument();
m_XMLDocument.setRootNode(ROOT_NODE);
m_XMLDocument.setDocType(DOCTYPE);
setValidating(true);
}
/**
* Creates a new instance of XMLOptions.
*
* @param xml the xml to parse (if "<?xml" is not found then it is considered
* a file)
* @throws Exception if the construction of the DocumentBuilder fails
* @see #setValidating(boolean)
*/
public XMLOptions(String xml) throws Exception {
this();
getXMLDocument().read(xml);
}
/**
* Creates a new instance of XMLOptions.
*
* @param file the XML file to parse
* @throws Exception if the construction of the DocumentBuilder fails
* @see #setValidating(boolean)
*/
public XMLOptions(File file) throws Exception {
this();
getXMLDocument().read(file);
}
/**
* Creates a new instance of XMLOptions.
*
* @param stream the XML stream to parse
* @throws Exception if the construction of the DocumentBuilder fails
* @see #setValidating(boolean)
*/
public XMLOptions(InputStream stream) throws Exception {
this();
getXMLDocument().read(stream);
}
/**
* Creates a new instance of XMLOptions.
*
* @param reader the XML reader to parse
* @throws Exception if the construction of the DocumentBuilder fails
* @see #setValidating(boolean)
*/
public XMLOptions(Reader reader) throws Exception {
this();
getXMLDocument().read(reader);
}
/**
* returns whether a validating parser is used.
*
* @return whether a validating parser is used
*/
public boolean getValidating() {
return m_XMLDocument.getValidating();
}
/**
* sets whether to use a validating parser or not. <br>
* Note: this does clear the current DOM document!
*
* @param validating whether to use a validating parser
* @throws Exception if the instantiating of the DocumentBuilder fails
*/
public void setValidating(boolean validating) throws Exception {
m_XMLDocument.setValidating(validating);
}
/**
* returns the parsed DOM document.
*
* @return the parsed DOM document
*/
public Document getDocument() {
fixHyphens();
return m_XMLDocument.getDocument();
}
/**
* returns the handler of the XML document. the internal DOM document can be
* accessed via the <code>getDocument()</code> method.
*
* @return the object handling the XML document
* @see #getDocument()
*/
public XMLDocument getXMLDocument() {
fixHyphens();
return m_XMLDocument;
}
/**
* pushes any options with type VAL_TYPE_HYPHENS to the end, i.e., the "--"
* are really added at the end.
*
* @see #VAL_TYPE_HYPHENS
*/
protected void fixHyphens() {
NodeList list;
Vector<Element> hyphens;
int i;
Node node;
Node tmpNode;
boolean isLast;
// get all option tags
list = m_XMLDocument.findNodes("//" + TAG_OPTION);
// get all hyphen tags
hyphens = new Vector<Element>();
for (i = 0; i < list.getLength(); i++) {
if (((Element) list.item(i)).getAttribute(ATT_TYPE).equals(
VAL_TYPE_HYPHENS)) {
hyphens.add((Element) list.item(i));
}
}
// check all hyphen tags whether they are at the end, if not fix it
for (i = 0; i < hyphens.size(); i++) {
node = hyphens.get(i);
// at the end?
isLast = true;
tmpNode = node;
while (tmpNode.getNextSibling() != null) {
// normal tag?
if (tmpNode.getNextSibling().getNodeType() == Node.ELEMENT_NODE) {
isLast = false;
break;
}
tmpNode = tmpNode.getNextSibling();
}
// move
if (!isLast) {
tmpNode = node.getParentNode();
tmpNode.removeChild(node);
tmpNode.appendChild(node);
}
}
}
/**
* converts the given node into a command line representation and returns it.
*
* @param parent the node to convert to command line
* @return the new command line
*/
protected String toCommandLine(Element parent) {
Vector<String> result;
Vector<Element> list;
Vector<Element> subList;
NodeList subNodeList;
String[] params;
int i;
int n;
String tmpStr;
result = new Vector<String>();
// "options" tag
if (parent.getNodeName().equals(TAG_OPTIONS)) {
// children
list = XMLDocument.getChildTags(parent);
if (parent.getAttribute(ATT_TYPE).equals(VAL_TYPE_CLASSIFIER)) {
System.err.println("Type '" + VAL_TYPE_CLASSIFIER + "' is deprecated, "
+ "use '" + VAL_TYPE_OPTIONHANDLER + "' instead!");
parent.setAttribute(ATT_TYPE, VAL_TYPE_OPTIONHANDLER);
}
if (parent.getAttribute(ATT_TYPE).equals(VAL_TYPE_OPTIONHANDLER)) {
result.add(parent.getAttribute(ATT_VALUE));
// hyphens?
if ((list.size() > 0)
&& (parent.getParentNode() != null)
&& (parent.getParentNode() instanceof Element)
&& (((Element) parent.getParentNode()).getNodeName()
.equals(TAG_OPTION))
&& (((Element) parent.getParentNode()).getAttribute(ATT_TYPE)
.equals(VAL_TYPE_HYPHENS))) {
result.add("--");
}
}
// process children
for (i = 0; i < list.size(); i++) {
tmpStr = toCommandLine(list.get(i));
try {
params = Utils.splitOptions(tmpStr);
for (n = 0; n < params.length; n++) {
result.add(params[n]);
}
} catch (Exception e) {
System.err.println("Error splitting: " + tmpStr);
e.printStackTrace();
}
}
}
// "option" tag
else if (parent.getNodeName().equals(TAG_OPTION)) {
subList = XMLDocument.getChildTags(parent);
subNodeList = parent.getChildNodes();
result.add("-" + parent.getAttribute(ATT_NAME));
// single argument
if (parent.getAttribute(ATT_TYPE).equals(VAL_TYPE_SINGLE)) {
if ((subNodeList.getLength() > 0)
&& (subNodeList.item(0).getNodeValue().trim().length() > 0)) {
result.add(subNodeList.item(0).getNodeValue());
}
}
// compound argument surrounded by quotes
else if (parent.getAttribute(ATT_TYPE).equals(VAL_TYPE_QUOTES)) {
result.add(toCommandLine(subList.get(0)));
}
// classname + further options after "--"
else if (parent.getAttribute(ATT_TYPE).equals(VAL_TYPE_HYPHENS)) {
tmpStr = toCommandLine(subList.get(0));
try {
params = Utils.splitOptions(tmpStr);
for (n = 0; n < params.length; n++) {
result.add(params[n]);
}
} catch (Exception e) {
System.err.println("Error splitting: " + tmpStr);
e.printStackTrace();
}
}
}
// other tag
else {
System.err.println("Unsupported tag '" + parent.getNodeName()
+ "' - skipped!");
}
return Utils.joinOptions(result.toArray(new String[result.size()]));
}
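/*
 * Illustrative sketch (not part of the original source): how an option of type
 * "hyphens" is rendered by toCommandLine(Element) above; the class and option names
 * are made up. The nested <options> contributes its "value" first, and the "--" is
 * inserted because its parent <option> has type "hyphens":
 *
 *   <option name="W" type="hyphens">
 *     <options type="optionhandler" value="weka.classifiers.rules.ZeroR">
 *       <option name="D" type="flag"/>
 *     </options>
 *   </option>
 *
 * becomes roughly: -W weka.classifiers.rules.ZeroR -- -D
 */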
/**
* returns the given DOM document as command line.
*
* @return the document as command line
* @throws Exception if anything goes wrong initializing the parsing
*/
public String toCommandLine() throws Exception {
return toCommandLine(getDocument().getDocumentElement());
}
/**
* returns the current DOM document as string array.
*
* @return the document as string array
* @throws Exception if anything goes wrong initializing the parsing
*/
public String[] toArray() throws Exception {
return Utils.splitOptions(toCommandLine());
}
/**
* returns the object in a string representation (as indented XML output).
*
* @return the object in a string representation
*/
@Override
public String toString() {
return getXMLDocument().toString();
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* for testing only. prints the given XML file, the resulting commandline and
* the string array.
*
* @param args the commandline options.
* @throws Exception if something goes wrong
*/
public static void main(String[] args) throws Exception {
if (args.length > 0) {
System.out.println("\nXML:\n\n" + new XMLOptions(args[0]).toString());
System.out.println("\nCommandline:\n\n"
+ new XMLOptions(args[0]).toCommandLine());
System.out.println("\nString array:\n");
String[] options = new XMLOptions(args[0]).toArray();
for (String option : options) {
System.out.println(option);
}
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/xml/XMLSerialization.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* XMLSerialization.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core.xml;
import java.beans.BeanInfo;
import java.beans.Introspector;
import java.beans.PropertyDescriptor;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.io.Reader;
import java.io.Writer;
import java.lang.reflect.Array;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Hashtable;
import java.util.List;
import java.util.Vector;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.Version;
import weka.core.WekaPackageClassLoaderManager;
/**
* With this class, objects can be serialized to XML instead of into a binary
* format. It uses introspection (cf. beans) to retrieve the data from the given
* object, i.e., it can only access bean-conformant fields automatically.
* <p>
* The generic approach of writing data as XML can be overridden by adding custom
* methods for reading/writing in a derived class (cf. <code>m_Properties</code>, <code>m_CustomMethods</code>).<br>
* Custom read and write methods must have the same signature (and also be
* <code>public</code>!) as the <code>readFromXML</code> and
* <code>writeToXML</code> methods. Methods that apply to the naming rule
* <code>read + property name</code> are added automatically to the list of
* methods by the method
* <code>XMLSerializationMethodHandler.addMethods(...)</code>.
* <p>
* Other properties that do not conform to the bean set/get-methods have to be
* processed manually in a derived class (cf.
* <code>readPostProcess(Object)</code>, <code>writePostProcess(Object)</code>).
* <p>
* For a complete XML serialization/deserialization have a look at the
* <code>KOML</code> class.
* <p>
* If a stored class has a constructor that takes a String to initialize (e.g.
* String or Double) then the content of the tag will be used for the constructor,
* e.g. from
*
* <pre>
* <object name="name" class="String" primitive="no">Smith</object>
* </pre>
*
* "Smith" will be used to instantiate a String object as constructor argument.
* <p>
*
* @see KOML
* @see #fromXML(Document)
* @see #toXML(Object)
* @see #m_Properties
* @see #m_CustomMethods
* @see #readPostProcess(Object)
* @see #writePostProcess(Object)
* @see #readFromXML(Element)
* @see #writeToXML(Element, Object, String)
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class XMLSerialization implements RevisionHandler {
/** for debugging purposes only */
protected static boolean DEBUG = false;
/**
* the node that is currently being processed; in case of writing, the parent
* node (something might go wrong writing the new child), and in case of
* reading, the actual node being processed
*/
protected Element m_CurrentNode = null;
/** the tag for an object */
public final static String TAG_OBJECT = "object";
/** the version attribute */
public final static String ATT_VERSION = XMLDocument.ATT_VERSION;
/** the tag for the name */
public final static String ATT_NAME = XMLDocument.ATT_NAME;
/** the tag for the class */
public final static String ATT_CLASS = "class";
/** the tag whether primitive or not (yes/no) */
public final static String ATT_PRIMITIVE = "primitive";
/** the tag whether array or not (yes/no) */
public final static String ATT_ARRAY = "array";
/** the tag whether null or not (yes/no) */
public final static String ATT_NULL = "null";
/** the value "yes" for the primitive and array attribute */
public final static String VAL_YES = XMLDocument.VAL_YES;
/** the value "no" for the primitive and array attribute */
public final static String VAL_NO = XMLDocument.VAL_NO;
/** the value of the name for the root node */
public final static String VAL_ROOT = "__root__";
/** the root node of the XML document */
public final static String ROOT_NODE = TAG_OBJECT;
/**
* default value for attribute ATT_PRIMITIVE
*
* @see #ATT_PRIMITIVE
*/
public final static String ATT_PRIMITIVE_DEFAULT = VAL_NO;
/**
* default value for attribute ATT_ARRAY
*
* @see #ATT_ARRAY
*/
public final static String ATT_ARRAY_DEFAULT = VAL_NO;
/**
* default value for attribute ATT_NULL
*
* @see #ATT_NULL
*/
public final static String ATT_NULL_DEFAULT = VAL_NO;
/** the DOCTYPE for the serialization */
public final static String DOCTYPE = "<!" + XMLDocument.DTD_DOCTYPE + " "
+ ROOT_NODE + "\n" + "[\n" + " <!" + XMLDocument.DTD_ELEMENT + " "
+ TAG_OBJECT + " (" + XMLDocument.DTD_PCDATA + XMLDocument.DTD_SEPARATOR
+ TAG_OBJECT + ")" + XMLDocument.DTD_ZERO_OR_MORE + ">\n" + " <!"
+ XMLDocument.DTD_ATTLIST + " " + TAG_OBJECT + " " + ATT_NAME + " "
+ XMLDocument.DTD_CDATA + " " + XMLDocument.DTD_REQUIRED + ">\n" + " <!"
+ XMLDocument.DTD_ATTLIST + " " + TAG_OBJECT + " " + ATT_CLASS + " "
+ XMLDocument.DTD_CDATA + " " + XMLDocument.DTD_REQUIRED + ">\n" + " <!"
+ XMLDocument.DTD_ATTLIST + " " + TAG_OBJECT + " " + ATT_PRIMITIVE + " "
+ XMLDocument.DTD_CDATA + " \"" + ATT_PRIMITIVE_DEFAULT + "\">\n" + " <!"
+ XMLDocument.DTD_ATTLIST + " " + TAG_OBJECT + " " + ATT_ARRAY + " "
+ XMLDocument.DTD_CDATA + " \"" + ATT_ARRAY_DEFAULT
+ "\"> <!-- the dimensions of the array; no=0, yes=1 -->\n" + " <!"
+ XMLDocument.DTD_ATTLIST + " " + TAG_OBJECT + " " + ATT_NULL + " "
+ XMLDocument.DTD_CDATA + " \"" + ATT_NULL_DEFAULT + "\">\n" + " <!"
+ XMLDocument.DTD_ATTLIST + " " + TAG_OBJECT + " " + ATT_VERSION + " "
+ XMLDocument.DTD_CDATA + " \"" + Version.VERSION + "\">\n" + "]\n" + ">";
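/*
 * Illustrative sketch (not part of the original source): the shape of a document
 * matching the DOCTYPE above. The class and property names are made up; the
 * "primitive", "array" and "null" attributes are only written when they differ
 * from their defaults ("no"), and the version attribute holds the WEKA version:
 *
 *   <object name="__root__" class="some.pkg.SomeBean" version="x.y.z">
 *     <object name="threshold" class="double" primitive="yes">0.5</object>
 *     <object name="label" class="java.lang.String">hello</object>
 *   </object>
 */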
/**
* List of fully qualified property names to suppress any warning messages for
*/
public final static List<String> SUPPRESS_PROPERTY_WARNINGS =
new ArrayList<String>();
/** the XMLDocument that performs the transformation to and from XML */
protected XMLDocument m_Document = null;
/** for handling properties (ignored/allowed) */
protected PropertyHandler m_Properties = null;
/** for handling custom read/write methods */
protected XMLSerializationMethodHandler m_CustomMethods = null;
/**
* for overriding class names (Class <-> Classname (String))
*
* @see #overrideClassname(Object)
*/
protected Hashtable<Class<?>, String> m_ClassnameOverride = null;
/** true to suppress warnings about loading newer/older versions etc. */
protected boolean m_suppressWarnings;
/**
* initializes the serialization
*
* @throws Exception if initialization fails
*/
public XMLSerialization() throws Exception {
super();
clear();
}
/**
* Set whether to suppress warning messages or not
*
* @param suppress true to suppress warnings
*/
public void setSuppressWarnings(boolean suppress) {
m_suppressWarnings = suppress;
}
/**
* used for debugging purposes, i.e., only if DEBUG is set to true. Needs a
* newly generated Throwable instance to get the method/line from
*
* @param t a throwable instance, generated in the calling method
* @param msg a message to print
* @see #DEBUG
*/
protected void trace(Throwable t, String msg) {
if ((DEBUG) && (t.getStackTrace().length > 0)) {
System.out.println("trace: " + t.getStackTrace()[0] + ": " + msg);
}
}
/**
* generates internally a new XML document and clears also the IgnoreList and
* the mappings for the Read/Write-Methods
*
* @throws Exception if something goes wrong
*/
public void clear() throws Exception {
m_Document = new XMLDocument();
m_Document.setValidating(true);
m_Document.newDocument(DOCTYPE, ROOT_NODE);
m_Properties = new PropertyHandler();
m_CustomMethods = new XMLSerializationMethodHandler(this);
m_ClassnameOverride = new Hashtable<Class<?>, String>();
// java.io.File is sometimes represented as another class:
// - Win32: sun.awt.shell.Win32ShellFolder2
// - Linux: sun.awt.shell.DefaultShellFolder
// -> we set it to "java.io.File"
m_ClassnameOverride.put(java.io.File.class, java.io.File.class.getName());
setVersion(Version.VERSION);
m_CurrentNode = null;
}
/**
* sets the given version string in the XML document
*
* @param version the new version string
*/
private void setVersion(String version) {
Document doc;
doc = m_Document.getDocument();
doc.getDocumentElement().setAttribute(ATT_VERSION, version);
}
/**
* returns the WEKA version with which the serialized object was created
*
* @return the current version
* @see Version
*/
public String getVersion() {
Document doc;
String result;
doc = m_Document.getDocument();
result = doc.getDocumentElement().getAttribute(ATT_VERSION);
return result;
}
/**
* Checks the version in the current Document with the one of the current
* release. If the version differ, a warning is printed.
*/
private void checkVersion() {
if (m_suppressWarnings) {
return;
}
String versionStr;
Version version;
version = new Version();
versionStr = getVersion();
if (versionStr.equals("")) {
System.out.println("WARNING: has no version!");
} else if (version.isOlder(versionStr)) {
System.out.println("WARNING: loading a newer version (" + versionStr
+ " > " + Version.VERSION + ")!");
} else if (version.isNewer(versionStr)) {
System.out.println("NOTE: loading an older version (" + versionStr
+ " < " + Version.VERSION + ")!");
}
}
/**
* returns a hashtable with PropertyDescriptors that have "get" and "set"
* methods indexed by the property name.
*
* @see java.beans.PropertyDescriptor
* @param o the object to retrieve the descriptors from
* @return the PropertyDescriptors indexed by name of the property
* @throws Exception if the introspection fails
*/
protected Hashtable<String, PropertyDescriptor> getDescriptors(Object o)
throws Exception {
BeanInfo info;
PropertyDescriptor[] desc;
int i;
Hashtable<String, PropertyDescriptor> result;
result = new Hashtable<String, PropertyDescriptor>();
info = Introspector.getBeanInfo(o.getClass());
desc = info.getPropertyDescriptors();
for (i = 0; i < desc.length; i++) {
// get AND set method?
if ((desc[i].getReadMethod() != null)
&& (desc[i].getWriteMethod() != null)) {
// in ignore list, i.e. a general ignore without complete path?
if (m_Properties.isIgnored(desc[i].getDisplayName())) {
continue;
}
// in ignore list of the class?
if (m_Properties.isIgnored(o, desc[i].getDisplayName())) {
continue;
}
// not an allowed property
if (!m_Properties.isAllowed(o, desc[i].getDisplayName())) {
continue;
}
result.put(desc[i].getDisplayName(), desc[i]);
}
}
return result;
}
/**
* returns the path of the "name" attribute from the root down to this node
* (including it).
*
* @param node the node to get the path for
* @return the complete "name" path of this node
*/
protected String getPath(Element node) {
String result;
result = node.getAttribute(ATT_NAME);
while (node.getParentNode() != node.getOwnerDocument()) {
node = (Element) node.getParentNode();
result = node.getAttribute(ATT_NAME) + "." + result;
}
return result;
}
/**
* returns either <code>VAL_YES</code> or <code>VAL_NO</code> depending on the
* value of <code>b</code>
*
* @param b the boolean to turn into a string
* @return the value in string representation
*/
protected String booleanToString(boolean b) {
if (b) {
return VAL_YES;
} else {
return VAL_NO;
}
}
/**
* turns the given string into a boolean; if a positive number is given, then
* zero is considered FALSE and every other number TRUE; the empty string is
* also considered FALSE
*
* @param s the string to turn into a boolean
* @return the string as boolean
*/
protected boolean stringToBoolean(String s) {
if (s.equals("")) {
return false;
} else if (s.equals(VAL_YES)) {
return true;
} else if (s.equalsIgnoreCase("true")) {
return true;
} else if (s.replaceAll("[0-9]*", "").equals("")) {
return (Integer.parseInt(s) != 0);
} else {
return false;
}
}
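// Illustrative examples (not part of the original source) for the conversion above:
// stringToBoolean("yes") -> true, stringToBoolean("3") -> true,
// stringToBoolean("0") -> false, stringToBoolean("") -> false.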
/**
* appends a new node to the parent with the given parameters (a non-array)
*
* @param parent the parent of this node. if it is <code>null</code> the
* document root element is used
* @param name the name of the node
* @param classname the classname for this node
* @param primitive whether it is a primitive data type or not (i.e. an object)
* @return the generated node
*/
protected Element addElement(Element parent, String name, String classname,
boolean primitive) {
return addElement(parent, name, classname, primitive, 0);
}
/**
* appends a new node to the parent with the given parameters
*
* @param parent the parent of this node. if it is <code>null</code> the
* document root element is used
* @param name the name of the node
* @param classname the classname for this node
* @param primitive whether it is a primitive data type or not (i.e. an object)
* @param array the dimensions of the array (0 if not an array)
* @return the generated node
*/
protected Element addElement(Element parent, String name, String classname,
boolean primitive, int array) {
return addElement(parent, name, classname, primitive, array, false);
}
/**
* appends a new node to the parent with the given parameters
*
* @param parent the parent of this node. if it is <code>null</code> the
* document root element is used
* @param name the name of the node
* @param classname the classname for this node
* @param primitive whether it is a primitive data type or not (i.e. an object)
* @param array the dimensions of the array (0 if not an array)
* @param isnull whether it is null
* @return the generated node
*/
protected Element addElement(Element parent, String name, String classname,
boolean primitive, int array, boolean isnull) {
Element result;
if (parent == null) {
result = m_Document.getDocument().getDocumentElement();
} else {
result = (Element) parent.appendChild(m_Document.getDocument()
.createElement(TAG_OBJECT));
}
// attributes
// mandatory attributes:
result.setAttribute(ATT_NAME, name);
result.setAttribute(ATT_CLASS, classname);
// add following attributes only if necessary, i.e., different from default:
if (!booleanToString(primitive).equals(ATT_PRIMITIVE_DEFAULT)) {
result.setAttribute(ATT_PRIMITIVE, booleanToString(primitive));
}
// multi-dimensional array?
if (array > 1) {
result.setAttribute(ATT_ARRAY, Integer.toString(array));
}
// backwards compatible: 0 -> no array ("no"), 1 -> 1-dim. array ("yes")
else {
if (!booleanToString(array == 1).equals(ATT_ARRAY_DEFAULT)) {
result.setAttribute(ATT_ARRAY, booleanToString(array == 1));
}
}
if (!booleanToString(isnull).equals(ATT_NULL_DEFAULT)) {
result.setAttribute(ATT_NULL, booleanToString(isnull));
}
return result;
}
/**
* if the class of the given object (or one of its ancestors) is stored in the
* classname override hashtable, then the override name is returned otherwise
* the classname of the given object.
*
* @param o the object to check for overriding its classname
* @return if overridden then the classname stored in the hashtable, otherwise
* the classname of the given object
* @see #m_ClassnameOverride
*/
protected String overrideClassname(Object o) {
Enumeration<Class<?>> enm;
String result;
Class<?> currentCls;
result = o.getClass().getName();
// check overrides
enm = m_ClassnameOverride.keys();
while (enm.hasMoreElements()) {
currentCls = enm.nextElement();
if (currentCls.isInstance(o)) {
result = m_ClassnameOverride.get(currentCls);
break;
}
}
return result;
}
/**
* if the given classname is stored in the classname override hashtable, then
* the override name is returned otherwise the given classname. <b>Note:</b>
* in contrast to <code>overrideClassname(Object)</code>, this method only
* looks for exact name matches. The other method checks whether the class of
* the given object is a subclass of any of the stored overrides.
*
* @param classname the classname to check for overriding
* @return if overridden then the classname stored in the hashtable, otherwise
* the given classname
* @see #m_ClassnameOverride
* @see #overrideClassname(Object)
*/
protected String overrideClassname(String classname) {
Enumeration<Class<?>> enm;
String result;
Class<?> currentCls;
result = classname;
// check overrides
enm = m_ClassnameOverride.keys();
while (enm.hasMoreElements()) {
currentCls = enm.nextElement();
if (currentCls.getName().equals(classname)) {
result = m_ClassnameOverride.get(currentCls);
break;
}
}
return result;
}
/**
* returns a property descriptor if possible, otherwise <code>null</code>
*
* @param className the name of the class to get the descriptor for
* @param displayName the name of the property
* @return the descriptor if available, otherwise <code>null</code>
*/
protected PropertyDescriptor determineDescriptor(String className,
String displayName) {
PropertyDescriptor result;
result = null;
try {
// result = new PropertyDescriptor(displayName, Class.forName(className));
result = new PropertyDescriptor(displayName,
WekaPackageClassLoaderManager.forName(className));
} catch (Exception e) {
result = null;
}
return result;
}
/**
* adds the given primitive to the DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the primitive to describe in XML
* @param name the name of the primitive
* @return the node that was created
* @throws Exception if the DOM creation fails
*/
protected Element writeBooleanToXML(Element parent, boolean o, String name)
throws Exception {
Element node;
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
node = addElement(parent, name, Boolean.TYPE.getName(), true);
node.appendChild(node.getOwnerDocument().createTextNode(
new Boolean(o).toString()));
return node;
}
/**
* adds the given primitive to the DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the primitive to describe in XML
* @param name the name of the primitive
* @return the node that was created
* @throws Exception if the DOM creation fails
*/
protected Element writeByteToXML(Element parent, byte o, String name)
throws Exception {
Element node;
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
node = addElement(parent, name, Byte.TYPE.getName(), true);
node.appendChild(node.getOwnerDocument().createTextNode(
new Byte(o).toString()));
return node;
}
/**
* adds the given primitive to the DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the primitive to describe in XML
* @param name the name of the primitive
* @return the node that was created
* @throws Exception if the DOM creation fails
*/
protected Element writeCharToXML(Element parent, char o, String name)
throws Exception {
Element node;
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
node = addElement(parent, name, Character.TYPE.getName(), true);
node.appendChild(node.getOwnerDocument().createTextNode(
new Character(o).toString()));
return node;
}
/**
* adds the given primitive to the DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the primitive to describe in XML
* @param name the name of the primitive
* @return the node that was created
* @throws Exception if the DOM creation fails
*/
protected Element writeDoubleToXML(Element parent, double o, String name)
throws Exception {
Element node;
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
node = addElement(parent, name, Double.TYPE.getName(), true);
node.appendChild(node.getOwnerDocument().createTextNode(
new Double(o).toString()));
return node;
}
/**
* adds the given primitive to the DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the primitive to describe in XML
* @param name the name of the primitive
* @return the node that was created
* @throws Exception if the DOM creation fails
*/
protected Element writeFloatToXML(Element parent, float o, String name)
throws Exception {
Element node;
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
node = addElement(parent, name, Float.TYPE.getName(), true);
node.appendChild(node.getOwnerDocument().createTextNode(
new Float(o).toString()));
return node;
}
/**
* adds the given primitive to the DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the primitive to describe in XML
* @param name the name of the primitive
* @return the node that was created
* @throws Exception if the DOM creation fails
*/
protected Element writeIntToXML(Element parent, int o, String name)
throws Exception {
Element node;
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
node = addElement(parent, name, Integer.TYPE.getName(), true);
node.appendChild(node.getOwnerDocument().createTextNode(
new Integer(o).toString()));
return node;
}
/**
* adds the given primitive to the DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the primitive to describe in XML
* @param name the name of the primitive
* @return the node that was created
* @throws Exception if the DOM creation fails
*/
protected Element writeLongToXML(Element parent, long o, String name)
throws Exception {
Element node;
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
node = addElement(parent, name, Long.TYPE.getName(), true);
node.appendChild(node.getOwnerDocument().createTextNode(
new Long(o).toString()));
return node;
}
/**
* adds the given primitive to the DOM structure.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the primitive to describe in XML
* @param name the name of the primitive
* @return the node that was created
* @throws Exception if the DOM creation fails
*/
protected Element writeShortToXML(Element parent, short o, String name)
throws Exception {
Element node;
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
m_CurrentNode = parent;
node = addElement(parent, name, Short.TYPE.getName(), true);
node.appendChild(node.getOwnerDocument().createTextNode(
new Short(o).toString()));
return node;
}
/**
* checks whether the innermost class is a primitive class (handles
* multi-dimensional arrays)
*
* @param c the array class to inspect
* @return whether the array consists of primitive elements
*/
protected boolean isPrimitiveArray(Class<?> c) {
if (c.getComponentType().isArray()) {
return isPrimitiveArray(c.getComponentType());
} else {
return c.getComponentType().isPrimitive();
}
}
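// Illustrative examples (not part of the original source) for the check above:
// isPrimitiveArray(int[][].class) -> true, isPrimitiveArray(String[].class) -> false.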
/**
* adds the given Object to a DOM structure. (only public due to reflection).<br>
* <b>Note:</b> <code>overrideClassname(Object)</code> is not invoked in case
* of arrays, since the array class could be a superclass, whereas the
* elements of the array can be specialized subclasses. In case of an array
* the method <code>overrideClassname(String)</code> is invoked, which
* searches for an exact match of the classname in the override hashtable.
*
* @param parent the parent of this object, e.g. the class this object is a
* member of
* @param o the Object to describe in XML
* @param name the name of the object
* @return the node that was created
* @throws Exception if the DOM creation fails
* @see #overrideClassname(Object)
* @see #overrideClassname(String)
* @see #m_ClassnameOverride
*/
public Element writeToXML(Element parent, Object o, String name)
throws Exception {
String classname;
Element node;
Hashtable<String, PropertyDescriptor> memberlist;
Enumeration<String> enm;
Object member;
String memberName;
Method method;
PropertyDescriptor desc;
boolean primitive;
int array;
int i;
Object obj;
String tmpStr;
node = null;
// for debugging only
if (DEBUG) {
trace(new Throwable(), name);
}
// special handling of null-objects
if (o == null) {
node = addElement(parent, name, "" + null, false, 0, true);
return node;
}
// used for overriding the classname
obj = null;
// get information about object
array = 0;
if (o.getClass().isArray()) {
array = Utils.getArrayDimensions(o);
}
if (array > 0) {
classname = Utils.getArrayClass(o.getClass()).getName();
primitive = isPrimitiveArray(o.getClass());
} else {
// try to get property descriptor to determine real class
// (for primitives the getClass() method returns the corresponding
// Object-Class!)
desc = null;
if (parent != null) {
desc = determineDescriptor(parent.getAttribute(ATT_CLASS), name);
}
if (desc != null) {
primitive = desc.getPropertyType().isPrimitive();
} else {
primitive = o.getClass().isPrimitive();
}
// for primitives: retrieve primitive type, otherwise the object's real
// class. For non-primitives we can't use the descriptor, since that
// might only return an interface as class!
if (primitive) {
classname = desc.getPropertyType().getName();
} else {
obj = o;
classname = o.getClass().getName();
}
}
// fix class/primitive if parent is array of primitives, thanks to
// reflection the elements of the array are objects and not primitives!
if ((parent != null) && (!parent.getAttribute(ATT_ARRAY).equals(""))
&& (!parent.getAttribute(ATT_ARRAY).equals(VAL_NO))
&& (stringToBoolean(parent.getAttribute(ATT_PRIMITIVE)))) {
primitive = true;
classname = parent.getAttribute(ATT_CLASS);
obj = null;
}
// perhaps we need to override the classname
if (obj != null) {
classname = overrideClassname(obj); // for non-arrays
} else {
classname = overrideClassname(classname); // for arrays
}
// create node for current object
node = addElement(parent, name, classname, primitive, array);
// array? -> save as child with 'name="<index>"'
if (array > 0) {
for (i = 0; i < Array.getLength(o); i++) {
invokeWriteToXML(node, Array.get(o, i), Integer.toString(i));
}
}
// non-array
else {
// primitive? -> only toString()
if (primitive) {
node.appendChild(node.getOwnerDocument().createTextNode(o.toString()));
}
// object
else {
// process recursively members of this object
memberlist = getDescriptors(o);
// if no get/set methods -> we assume it has String-Constructor
if (memberlist.size() == 0) {
if (!o.toString().equals("")) {
tmpStr = o.toString();
// these five entities are recognized by every XML processor
// see http://www.xml.com/pub/a/2001/03/14/trxml10.html
tmpStr = tmpStr.replaceAll("&", "&").replaceAll("\"", """)
.replaceAll("'", "'").replaceAll("<", "<")
.replaceAll(">", ">");
// in addition, replace some other entities as well
tmpStr = tmpStr.replaceAll("\n", " ").replaceAll("\r", " ")
.replaceAll("\t", "	");
if (o instanceof java.io.File) {
// hack to force separators to be always saved as /
tmpStr = tmpStr.replace('\\', '/');
}
node.appendChild(node.getOwnerDocument().createTextNode(tmpStr));
}
} else {
enm = memberlist.keys();
while (enm.hasMoreElements()) {
memberName = enm.nextElement().toString();
// in ignore list?
if ((m_Properties.isIgnored(memberName))
|| (m_Properties.isIgnored(getPath(node) + "." + memberName))
|| (m_Properties.isIgnored(o, getPath(node) + "." + memberName))) {
continue;
}
// is it allowed?
if (!m_Properties.isAllowed(o, memberName)) {
continue;
}
desc = memberlist.get(memberName);
method = desc.getReadMethod();
member = method.invoke(o, (Object[]) null);
invokeWriteToXML(node, member, memberName);
}
}
}
}
return node;
}
/**
* either invokes a custom method to write a specific property/class or the
* standard method <code>writeToXML(Element,Object,String)</code>
*
* @param parent the parent XML node
* @param o the object's content will be added as children to the given parent
* node
* @param name the name of the object
* @return the node that was created
* @throws Exception if invocation or turning into XML fails
*/
protected Element invokeWriteToXML(Element parent, Object o, String name)
throws Exception {
Method method;
Class<?>[] methodClasses;
Object[] methodArgs;
boolean array;
Element node;
boolean useDefault;
node = null;
method = null;
useDefault = false;
m_CurrentNode = parent;
// default, if null
if (o == null) {
useDefault = true;
}
try {
if (!useDefault) {
array = o.getClass().isArray();
// display name?
if (m_CustomMethods.write().contains(name)) {
method = m_CustomMethods.write().get(o.getClass());
} else
// class?
if ((!array) && (m_CustomMethods.write().contains(o.getClass()))) {
method = m_CustomMethods.write().get(o.getClass());
} else {
method = null;
}
useDefault = (method == null);
}
// custom
if (!useDefault) {
methodClasses = new Class[3];
methodClasses[0] = Element.class;
methodClasses[1] = Object.class;
methodClasses[2] = String.class;
methodArgs = new Object[3];
methodArgs[0] = parent;
methodArgs[1] = o;
methodArgs[2] = name;
node = (Element) method.invoke(this, methodArgs);
}
// standard
else {
node = writeToXML(parent, o, name);
}
} catch (Exception e) {
if (DEBUG) {
e.printStackTrace();
}
if (m_CurrentNode != null) {
System.out.println("Happened near: " + getPath(m_CurrentNode));
// print it only once!
m_CurrentNode = null;
}
System.out.println("PROBLEM (write): " + name);
throw (Exception) e.fillInStackTrace();
}
return node;
}
/**
* enables derived classes to do some pre-processing on the object that is
* about to be serialized. Right now it only returns the object.
*
* @param o the object that is serialized into XML
* @return the possibly altered object
* @throws Exception if pre-processing fails
*/
protected Object writePreProcess(Object o) throws Exception {
return o;
}
/**
* enables derived classes to add other properties to the DOM tree, e.g. ones
* that do not apply to the get/set convention of beans. only implemented with
* empty method body.
*
* @param o the object that is serialized into XML
* @throws Exception if post-processing fails
*/
protected void writePostProcess(Object o) throws Exception {
}
/**
* extracts all accessible properties from the given object
*
* @param o the object to turn into an XML representation
* @return the generated DOM document
* @throws Exception if XML generation fails
*/
public XMLDocument toXML(Object o) throws Exception {
clear();
invokeWriteToXML(null, writePreProcess(o), VAL_ROOT);
writePostProcess(o);
return m_Document;
}
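/*
 * Illustrative usage sketch (not part of the original source), showing a round trip
 * through this class; the file name is made up:
 *
 *   XMLSerialization xs = new XMLSerialization();
 *   xs.write("model.xml", someObject);          // object -> XML file
 *   Object restored = xs.read("model.xml");     // XML file -> object
 */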
/**
* returns a descriptor for a given object by providing the name
*
* @param o the object to get the descriptor for
* @param name the display name of the descriptor
* @return the Descriptor, if found, otherwise <code>null</code>
* @throws Exception if introspection fails
*/
protected PropertyDescriptor getDescriptorByName(Object o, String name)
throws Exception {
PropertyDescriptor result;
PropertyDescriptor[] desc;
int i;
result = null;
desc = Introspector.getBeanInfo(o.getClass()).getPropertyDescriptors();
for (i = 0; i < desc.length; i++) {
if (desc[i].getDisplayName().equals(name)) {
result = desc[i];
break;
}
}
return result;
}
/**
* returns the associated class for the given name
*
* @param name the name of the class to return a Class object for
* @return the class if it could be retrieved
* @throws Exception if class retrieval fails
*/
protected Class<?> determineClass(String name) throws Exception {
Class<?> result;
if (name.equals(Boolean.TYPE.getName())) {
result = Boolean.TYPE;
} else if (name.equals(Byte.TYPE.getName())) {
result = Byte.TYPE;
} else if (name.equals(Character.TYPE.getName())) {
result = Character.TYPE;
} else if (name.equals(Double.TYPE.getName())) {
result = Double.TYPE;
} else if (name.equals(Float.TYPE.getName())) {
result = Float.TYPE;
} else if (name.equals(Integer.TYPE.getName())) {
result = Integer.TYPE;
} else if (name.equals(Long.TYPE.getName())) {
result = Long.TYPE;
} else if (name.equals(Short.TYPE.getName())) {
result = Short.TYPE;
} else {
// result = Class.forName(name);
result = WekaPackageClassLoaderManager.forName(name);
}
return result;
}
/**
* returns an Object representing the primitive described by the given node.
* Here we use a trick to return an object even though it's a primitive: by
* creating a primitive array with reflection of length 1, setting the
* primitive value as real object and then returning the "object" at position 0
* of the array.
*
* @param node the node to return the value as "primitive" object
* @return the primitive as "pseudo" object
* @throws Exception if the instantiation of the array fails or any of the
* String conversions fails
*/
protected Object getPrimitive(Element node) throws Exception {
Object result;
Object tmpResult;
Class<?> cls;
cls = determineClass(node.getAttribute(ATT_CLASS));
tmpResult = Array.newInstance(cls, 1);
if (cls == Boolean.TYPE) {
Array.set(tmpResult, 0, new Boolean(XMLDocument.getContent(node)));
} else if (cls == Byte.TYPE) {
Array.set(tmpResult, 0, new Byte(XMLDocument.getContent(node)));
} else if (cls == Character.TYPE) {
Array.set(tmpResult, 0, new Character(XMLDocument.getContent(node)
.charAt(0)));
} else if (cls == Double.TYPE) {
Array.set(tmpResult, 0, new Double(XMLDocument.getContent(node)));
} else if (cls == Float.TYPE) {
Array.set(tmpResult, 0, new Float(XMLDocument.getContent(node)));
} else if (cls == Integer.TYPE) {
Array.set(tmpResult, 0, new Integer(XMLDocument.getContent(node)));
} else if (cls == Long.TYPE) {
Array.set(tmpResult, 0, new Long(XMLDocument.getContent(node)));
} else if (cls == Short.TYPE) {
Array.set(tmpResult, 0, new Short(XMLDocument.getContent(node)));
} else {
throw new Exception("Cannot get primitive for class '" + cls.getName()
+ "'!");
}
result = Array.get(tmpResult, 0);
return result;
}
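/*
 * Illustrative sketch (not part of the original source) of the boxing trick used in
 * getPrimitive(Element) above, spelled out for an int:
 *
 *   Object tmp = Array.newInstance(Integer.TYPE, 1); // really an int[1]
 *   Array.set(tmp, 0, new Integer("42"));            // store the parsed value
 *   Object boxed = Array.get(tmp, 0);                // comes back as an Integer
 */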
/**
* builds the primitive from the given DOM node.
*
* @param node the associated XML node
* @return the primitive created from the XML description
* @throws Exception if instantiation fails
*/
public boolean readBooleanFromXML(Element node) throws Exception {
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
return ((Boolean) getPrimitive(node)).booleanValue();
}
/**
* builds the primitive from the given DOM node.
*
* @param node the associated XML node
* @return the primitive created from the XML description
* @throws Exception if instantiation fails
*/
public byte readByteFromXML(Element node) throws Exception {
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
return ((Byte) getPrimitive(node)).byteValue();
}
/**
* builds the primitive from the given DOM node.
*
* @param node the associated XML node
* @return the primitive created from the XML description
* @throws Exception if instantiation fails
*/
public char readCharFromXML(Element node) throws Exception {
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
return ((Character) getPrimitive(node)).charValue();
}
/**
* builds the primitive from the given DOM node.
*
* @param node the associated XML node
* @return the primitive created from the XML description
* @throws Exception if instantiation fails
*/
public double readDoubleFromXML(Element node) throws Exception {
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
return ((Double) getPrimitive(node)).doubleValue();
}
/**
* builds the primitive from the given DOM node.
*
* @param node the associated XML node
* @return the primitive created from the XML description
* @throws Exception if instantiation fails
*/
public float readFloatFromXML(Element node) throws Exception {
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
return ((Float) getPrimitive(node)).floatValue();
}
/**
* builds the primitive from the given DOM node.
*
* @param node the associated XML node
* @return the primitive created from the XML description
* @throws Exception if instantiation fails
*/
public int readIntFromXML(Element node) throws Exception {
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
return ((Integer) getPrimitive(node)).intValue();
}
/**
* builds the primitive from the given DOM node.
*
* @param node the associated XML node
* @return the primitive created from the XML description
* @throws Exception if instantiation fails
*/
public long readLongFromXML(Element node) throws Exception {
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
return ((Long) getPrimitive(node)).longValue();
}
/**
* builds the primitive from the given DOM node.
*
* @param node the associated XML node
* @return the primitive created from the XML description
* @throws Exception if instantiation fails
*/
public short readShortFromXML(Element node) throws Exception {
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
return ((Short) getPrimitive(node)).shortValue();
}
/**
* adds the specific node to the object via a set method
*
* @param o the object to set a property
* @param name the name of the object for which to set a property (only for
* information reasons)
* @param child the value of the property to add
* @return the provided object, but augmented by the child
* @throws Exception if something goes wrong
*/
public Object readFromXML(Object o, String name, Element child)
throws Exception {
Object result;
Hashtable<String, PropertyDescriptor> descriptors;
PropertyDescriptor descriptor;
String methodName;
Method method;
Object[] methodArgs;
Object tmpResult;
Class<?> paramClass;
result = o;
descriptors = getDescriptors(result);
methodName = child.getAttribute(ATT_NAME);
// in ignore list?
if (m_Properties.isIgnored(getPath(child))) {
return result;
}
// in ignore list of class?
if (m_Properties.isIgnored(result, getPath(child))) {
return result;
}
// is it allowed?
if (!m_Properties.isAllowed(result, methodName)) {
return result;
}
descriptor = descriptors.get(methodName);
// unknown property?
if (descriptor == null) {
if (!m_CustomMethods.read().contains(methodName)
&& !SUPPRESS_PROPERTY_WARNINGS.contains(name + "." + methodName)) {
System.out.println("WARNING: unknown property '" + name + "."
+ methodName + "'!");
}
return result;
}
method = descriptor.getWriteMethod();
methodArgs = new Object[1];
tmpResult = invokeReadFromXML(child);
paramClass = method.getParameterTypes()[0];
// array?
if (paramClass.isArray()) {
// no data?
if (Array.getLength(tmpResult) == 0) {
return result;
}
methodArgs[0] = tmpResult;
}
// non-array
else {
methodArgs[0] = tmpResult;
}
try {
method.invoke(result, methodArgs);
} catch (Exception ex) {
System.err.println("Warning: error invoking method: " + methodName + " ("
+ ex.getCause().getMessage() + ")");
}
return result;
}
/**
* returns an array with the dimensions of the array stored in XML
*
* @param node the node to determine the dimensions for
* @return the dimensions of the array
*/
protected int[] getArrayDimensions(Element node) {
Vector<Element> children;
Vector<Integer> tmpVector;
int[] tmp;
int[] result;
int i;
// have we reached the innermost dimension?
if (stringToBoolean(node.getAttribute(ATT_ARRAY))) {
children = XMLDocument.getChildTags(node);
} else {
children = null;
}
if (children != null) {
tmpVector = new Vector<Integer>();
if (children.size() > 0) {
// are children also arrays?
tmp = getArrayDimensions(children.get(0));
// further dimensions
if (tmp != null) {
for (i = tmp.length - 1; i >= 0; i--) {
tmpVector.add(new Integer(tmp[i]));
}
}
// add current dimension
tmpVector.add(0, new Integer(children.size()));
} else {
tmpVector.add(new Integer(0));
}
// generate result
result = new int[tmpVector.size()];
for (i = 0; i < result.length; i++) {
result[i] = tmpVector.get(tmpVector.size() - i - 1).intValue();
}
} else {
result = null;
}
return result;
}
/**
* builds the object from the given DOM node. (only public due to reflection)
*
* @param node the associated XML node
* @return the instance created from the XML description
* @throws Exception if instantiation fails
*/
public Object readFromXML(Element node) throws Exception {
String classname;
String name;
boolean primitive;
boolean array;
boolean isnull;
Class<?> cls;
Vector<Element> children;
Object result;
int i;
Constructor<?> constructor;
Class<?>[] methodClasses;
Object[] methodArgs;
Element child;
// for debugging only
if (DEBUG) {
trace(new Throwable(), node.getAttribute(ATT_NAME));
}
m_CurrentNode = node;
result = null;
name = node.getAttribute(ATT_NAME);
classname = node.getAttribute(ATT_CLASS);
primitive = stringToBoolean(node.getAttribute(ATT_PRIMITIVE));
array = stringToBoolean(node.getAttribute(ATT_ARRAY));
isnull = stringToBoolean(node.getAttribute(ATT_NULL));
// special handling of null
if (isnull) {
return result;
}
children = XMLDocument.getChildTags(node);
cls = determineClass(classname);
// array
if (array) {
result = Array.newInstance(cls, getArrayDimensions(node));
for (i = 0; i < children.size(); i++) {
child = children.get(i);
Array.set(result, Integer.parseInt(child.getAttribute(ATT_NAME)),
invokeReadFromXML(child));
}
}
// non-array
else {
// primitive/String-constructor
if (children.size() == 0) {
// primitive
if (primitive) {
result = getPrimitive(node);
}
// assumed String-constructor
else {
methodClasses = new Class[1];
methodClasses[0] = String.class;
methodArgs = new Object[1];
methodArgs[0] = XMLDocument.getContent(node);
try {
constructor = cls.getConstructor(methodClasses);
result = constructor.newInstance(methodArgs);
} catch (Exception e) {
// if it's not a class with String constructor, let's try standard
// constructor
try {
result = cls.newInstance();
} catch (Exception e2) {
// sorry, can't instantiate!
result = null;
System.out.println("ERROR: Can't instantiate '" + classname
+ "'!");
}
}
}
}
// normal get/set methods
else {
result = cls.newInstance();
for (i = 0; i < children.size(); i++) {
result = readFromXML(result, name, children.get(i));
}
}
}
return result;
}
/**
* either invokes a custom method to read a specific property/class or the
* standard method <code>readFromXML(Element)</code>
*
* @param node the associated XML node
* @return the instance created from the XML description
* @throws Exception if instantiation fails
*/
protected Object invokeReadFromXML(Element node) throws Exception {
Method method;
Class<?>[] methodClasses;
Object[] methodArgs;
boolean array;
boolean useDefault;
useDefault = false;
method = null;
m_CurrentNode = node;
try {
// special handling of null values
if (stringToBoolean(node.getAttribute(ATT_NULL))) {
useDefault = true;
}
if (!useDefault) {
array = stringToBoolean(node.getAttribute(ATT_ARRAY));
// display name?
if (m_CustomMethods.read().contains(node.getAttribute(ATT_NAME))) {
method = m_CustomMethods.read().get(node.getAttribute(ATT_NAME));
} else
// class name?
if ((!array)
&& (m_CustomMethods.read().contains(determineClass(node
.getAttribute(ATT_CLASS))))) {
method = m_CustomMethods.read().get(
determineClass(node.getAttribute(ATT_CLASS)));
} else {
method = null;
}
useDefault = (method == null);
}
// custom method
if (!useDefault) {
methodClasses = new Class[1];
methodClasses[0] = Element.class;
methodArgs = new Object[1];
methodArgs[0] = node;
return method.invoke(this, methodArgs);
}
// standard
else {
return readFromXML(node);
}
} catch (Exception e) {
if (DEBUG) {
e.printStackTrace();
}
if (m_CurrentNode != null) {
System.out.println("Happened near: " + getPath(m_CurrentNode));
// print it only once!
m_CurrentNode = null;
}
System.out.println("PROBLEM (read): " + node.getAttribute("name"));
throw (Exception) e.fillInStackTrace();
}
}
/**
* additional pre-processing can happen in derived classes before the actual
* reading from XML (working on the raw XML). right now it does nothing with
* the document.
*
* @param document the document to pre-process
* @return the processed object
* @throws Exception if pre-processing fails
*/
protected Document readPreProcess(Document document) throws Exception {
return document;
}
/**
* additional post-processing can happen in derived classes after reading from
* XML. right now it only returns the object as it is.
*
* @param o the object to perform some additional processing on
* @return the processed object
* @throws Exception if post-processing fails
*/
protected Object readPostProcess(Object o) throws Exception {
return o;
}
/**
* returns the given DOM document as an instance of the specified class
*
* @param document the parsed DOM document representing the object
* @return the XML as object
* @throws Exception if object instantiation fails
*/
public Object fromXML(Document document) throws Exception {
if (!document.getDocumentElement().getNodeName().equals(ROOT_NODE)) {
throw new Exception("Expected '" + ROOT_NODE
+ "' as root element, but found '"
+ document.getDocumentElement().getNodeName() + "'!");
}
m_Document.setDocument(readPreProcess(document));
checkVersion();
return readPostProcess(invokeReadFromXML(m_Document.getDocument()
.getDocumentElement()));
}
/**
* parses the given XML string (can be XML or a filename) and returns an
* Object generated from the representation
*
* @param xml the xml to parse (if "<?xml" is not found then it is considered
* a file)
* @return the generated instance
* @throws Exception if something goes wrong with the parsing
*/
public Object read(String xml) throws Exception {
return fromXML(m_Document.read(xml));
}
/**
* parses the given file and returns a DOM document
*
* @param file the XML file to parse
* @return the parsed DOM document
* @throws Exception if something goes wrong with the parsing
*/
public Object read(File file) throws Exception {
return fromXML(m_Document.read(file));
}
/**
* parses the given stream and returns a DOM document
*
* @param stream the XML stream to parse
* @return the parsed DOM document
* @throws Exception if something goes wrong with the parsing
*/
public Object read(InputStream stream) throws Exception {
return fromXML(m_Document.read(stream));
}
/**
* parses the given reader and returns a DOM document
*
* @param reader the XML reader to parse
* @return the parsed DOM document
* @throws Exception if something goes wrong with the parsing
*/
public Object read(Reader reader) throws Exception {
return fromXML(m_Document.read(reader));
}
/**
* writes the given object into the file
*
* @param file the filename to write to
* @param o the object to serialize as XML
* @throws Exception if something goes wrong with the serialization
*/
public void write(String file, Object o) throws Exception {
toXML(o).write(file);
}
/**
* writes the given object into the file
*
* @param file the filename to write to
* @param o the object to serialize as XML
* @throws Exception if something goes wrong with the serialization
*/
public void write(File file, Object o) throws Exception {
toXML(o).write(file);
}
/**
* writes the given object into the stream
*
* @param stream the stream to write to
* @param o the object to serialize as XML
* @throws Exception if something goes wrong with the serialization
*/
public void write(OutputStream stream, Object o) throws Exception {
toXML(o).write(stream);
}
/**
* writes the given object into the writer
*
* @param writer the writer to write to
* @param o the object to serialize as XML
* @throws Exception if something goes wrong with the serialization
*/
public void write(Writer writer, Object o) throws Exception {
toXML(o).write(writer);
}
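/*
 * Illustrative usage sketch (not part of the original class; the bean class
 * used is only an assumption for the example): round-tripping an object
 * through the read/write API above.
 *
 *   XMLSerialization xs = new XMLSerialization();
 *   xs.write("zeror.xml", new weka.classifiers.rules.ZeroR()); // object -> XML file
 *   Object restored = xs.read("zeror.xml");                    // XML file -> object
 */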
/**
* for testing only. if the first argument is a filename with ".xml" as
* extension it tries to generate an instance from the XML description and
* does a <code>toString()</code> of the generated object; otherwise the file
* is deserialized as a binary object and written back out both as XML and as
* a binary ".exp" file.
*/
public static void main(String[] args) throws Exception {
if (args.length > 0) {
// read xml and print
if (args[0].toLowerCase().endsWith(".xml")) {
System.out.println(new XMLSerialization().read(args[0]).toString());
}
// read binary and print generated XML
else {
// read
FileInputStream fi = new FileInputStream(args[0]);
ObjectInputStream oi = new ObjectInputStream(
new BufferedInputStream(fi));
Object o = oi.readObject();
oi.close();
// print to stdout
// new XMLSerialization().write(System.out, o);
new XMLSerialization().write(new BufferedOutputStream(
new FileOutputStream(args[0] + ".xml")), o);
// print to binary file
FileOutputStream fo = new FileOutputStream(args[0] + ".exp");
ObjectOutputStream oo = new ObjectOutputStream(
new BufferedOutputStream(fo));
oo.writeObject(o);
oo.close();
}
}
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/xml/XMLSerializationMethodHandler.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* XMLSerializationMethodHandler.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.core.xml;
import java.lang.reflect.Method;
import org.w3c.dom.Element;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
/**
* This class handles relationships between display names of properties (or
* classes) and Methods that are associated with them. It differentiates between
* read and write methods. It automatically stores public methods that have the
* same signature as the <code>readFromXML()</code> and
* <code>writeToXML()</code> methods in the <code>XMLSerialization</code> class.
*
* @see MethodHandler
* @see XMLSerialization
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class XMLSerializationMethodHandler implements RevisionHandler {
/** for storing read methods */
protected MethodHandler m_ReadMethods = null;
/** for storing write methods */
protected MethodHandler m_WriteMethods = null;
/** the object to retrieve the methods from */
protected Object owner = null;
/**
* initializes the method handling, executes also <code>clear()</code>, which
* adds initial methods automatically.
*
* @param owner the owner to retrieve the methods from
* @throws Exception if initialization fails
* @see #clear()
*/
public XMLSerializationMethodHandler(Object owner) throws Exception {
super();
this.owner = owner;
m_ReadMethods = new MethodHandler();
m_WriteMethods = new MethodHandler();
clear();
}
/**
* adds all methods that are like <code>template</code> to the method list
*
* @param handler the list to add fitting methods to
* @param template the signature to check the given methods against
* @param methods the methods to check
*/
protected void addMethods(MethodHandler handler, Method template,
Method[] methods) {
int i;
int n;
Method method;
boolean equal;
String name;
for (i = 0; i < methods.length; i++) {
method = methods[i];
// is it template?
if (template.equals(method)) {
continue;
}
// tests
// 1. return type
if (!template.getReturnType().equals(method.getReturnType())) {
continue;
}
// 2. signature
if (template.getParameterTypes().length != method.getParameterTypes().length) {
continue;
}
equal = true;
for (n = 0; n < template.getParameterTypes().length; n++) {
if (!template.getParameterTypes()[n]
.equals(method.getParameterTypes()[n])) {
equal = false;
break;
}
}
// add to list
if (equal) {
name = method.getName();
name = name.replaceAll("read|write", "");
name = name.substring(0, 1).toLowerCase() + name.substring(1);
handler.add(name, method);
}
}
}
/**
* automatically adds all fitting methods to the custom read/write lists; it
* excludes only the generic ones. It is automatically called in
* <code>clear()</code>. It only works with methods that follow the naming
* rule "read" + property name (same for "write").
*
* @throws Exception if retrieving of methods fails
* @see #clear()
*/
protected void addMethods() throws Exception {
Method method;
Class<?>[] params;
// read
params = new Class[1];
params[0] = Element.class;
method = owner.getClass().getMethod("readFromXML", params);
addMethods(m_ReadMethods, method, owner.getClass().getMethods());
// write
params = new Class[3];
params[0] = Element.class;
params[1] = Object.class;
params[2] = String.class;
method = owner.getClass().getMethod("writeToXML", params);
addMethods(m_WriteMethods, method, owner.getClass().getMethods());
}
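/*
 * Illustrative sketch (assumption, not original code): for addMethods() to
 * pick up a custom pair automatically, the methods in the owner class must
 * mirror the readFromXML(Element) and writeToXML(Element, Object, String)
 * templates and follow the "read"/"write" + property-name convention, e.g.
 *
 *   public Object readColor(Element node) throws Exception { ... }
 *   public Element writeColor(Element parent, Object o, String name) throws Exception { ... }
 *
 * Both are then stored under the display name "color".
 */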
/**
* returns the method with the given name that has the same signature as
* <code>readFromXML()</code> of the <code>XMLSerialization</code> class.
* simplifies the adding of custom methods.
*
* @param o the object to inspect
* @param name the name of the method to return
* @return either <code>null</code> if no method was found or a reference
* @see XMLSerialization#readFromXML(Element)
*/
public static Method findReadMethod(Object o, String name) {
Class<?>[] params;
Method result;
result = null;
params = new Class[1];
params[0] = Element.class;
try {
result = o.getClass().getMethod(name, params);
} catch (Exception e) {
result = null;
}
return result;
}
/**
* returns the method with the given name that has the same signature as
* <code>writeToXML()</code> of the <code>XMLSerialization</code> class.
* simplifies the adding of custom methods.
*
* @param o the object to inspect
* @param name the name of the method to return
* @return either <code>null</code> if no method was found or a reference
* @see XMLSerialization#writeToXML(Element, Object, String)
*/
public static Method findWriteMethod(Object o, String name) {
Class<?>[] params;
Method result;
result = null;
params = new Class[3];
params[0] = Element.class;
params[1] = Object.class;
params[2] = String.class;
try {
result = o.getClass().getMethod(name, params);
} catch (Exception e) {
result = null;
}
return result;
}
/**
* removes all current methods and re-adds the default methods via
* <code>addMethods()</code>
*
* @see #addMethods()
*/
public void clear() {
m_ReadMethods.clear();
m_WriteMethods.clear();
try {
addMethods();
} catch (Exception e) {
e.printStackTrace();
}
}
/**
* returns the handler for read methods
*
* @return the methodhandler for read methods
*/
public MethodHandler read() {
return m_ReadMethods;
}
/**
* returns the handler for write methods
*
* @return the methodhandler for write methods
*/
public MethodHandler write() {
return m_WriteMethods;
}
/**
* adds read and write methods for the given class, i.e., read&lt;name&gt;
* and write&lt;name&gt; ("name" is prefixed by read and write)
*
* @param handler the handler class that contains the read and write method
* @param cls the class to register the read and write method for
* @param name the suffix of the read and write method
*/
public void register(Object handler, Class<?> cls, String name) {
read().add(cls,
XMLSerializationMethodHandler.findReadMethod(handler, "read" + name));
write().add(cls,
XMLSerializationMethodHandler.findWriteMethod(handler, "write" + name));
}
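/*
 * Illustrative sketch (assumption, not original code): wiring up a handler
 * object that implements readColor(Element) and writeColor(Element, Object,
 * String) for java.awt.Color via register():
 *
 *   methodHandler.register(customHandler, java.awt.Color.class, "Color");
 *   // shorthand for:
 *   //   read().add(java.awt.Color.class, findReadMethod(customHandler, "readColor"));
 *   //   write().add(java.awt.Color.class, findWriteMethod(customHandler, "writeColor"));
 */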
/**
* returns the read and write method handlers as string
*
* @return the read/write method handlers as string
*/
@Override
public String toString() {
return "Read Methods:\n" + read() + "\n\n" + "Write Methods:\n" + write();
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/core/xml/XStream.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* XStream.java
* Copyright (C) 2008-2012 University of Waikato, Hamilton, New Zealand
*/
package weka.core.xml;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
import java.io.Writer;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
/**
* This class is a helper class for XML serialization using <a
* href="http://xstream.codehaus.org" target="_blank">XStream</a>. XStream does
* not need to be present at compile time, since all calls are made generically
* via reflection.
*
* @author Mark Hall (mhall{[at]}pentaho{[dot]}org)
* @version $Revision$
*/
public class XStream implements RevisionHandler {
/**
* indicates whether <a href="http://xstream.codehaus.org"
* target="_blank">XStream</a> is present
*/
protected static boolean m_Present = false;
/** the extension for XStream files (including '.') */
public final static String FILE_EXTENSION = ".xstream";
/** check for XStream statically (needs only to be done once) */
static {
checkForXStream();
}
/**
* checks whether the XStream is present in the class path
*/
private static void checkForXStream() {
try {
Class.forName("com.thoughtworks.xstream.XStream");
m_Present = true;
} catch (Exception e) {
m_Present = false;
}
}
/**
* returns whether XStream is present or not, i.e. whether the classes are in
* the classpath or not
*
* @return whether XStream is available
*/
public static boolean isPresent() {
return m_Present;
}
/**
* Serializes the supplied object to XML
*
* @param toSerialize the object to serialize
* @return the serialized object as an XML string
* @throws Exception if something goes wrong
*/
public static String serialize(Object toSerialize) throws Exception {
Class<?> xstreamClass;
java.lang.reflect.Constructor<?> constructor;
Object xstream;
Class<?>[] serializeArgsClasses = new Class[1];
Object[] serializeArgs = new Object[1];
java.lang.reflect.Method methodSerialize;
String result;
xstreamClass = Class.forName("com.thoughtworks.xstream.XStream");
constructor = xstreamClass.getConstructor();
xstream = constructor.newInstance();
serializeArgsClasses[0] = Object.class;
serializeArgs[0] = toSerialize;
methodSerialize = xstreamClass.getMethod("toXML", serializeArgsClasses);
// execute it
try {
result = (String) methodSerialize.invoke(xstream, serializeArgs);
} catch (Exception ex) {
result = null;
}
return result;
}
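/*
 * Illustrative usage sketch (not part of the original class): since all
 * XStream calls are made via reflection, callers should guard with
 * isPresent(); the serialized object here is only an assumption.
 *
 *   if (XStream.isPresent()) {
 *     String xml = XStream.serialize(new java.util.ArrayList<String>());
 *     Object copy = XStream.deSerialize(xml);
 *   }
 */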
/**
* writes the XML-serialized object to the given file
*
* @param filename the file to serialize the object to
* @param o the object to write to the file
* @return whether writing was successful or not
* @throws Exception if something goes wrong while writing to the file
*/
public static boolean write(String filename, Object o) throws Exception {
return write(new File(filename), o);
}
/**
* write the XML-serialized object to the given file
*
* @param file the file to serialize the object to
* @param o the object to write to the file
* @return whether writing was successful or not
* @throws Exception if something goes wrong while writing to the file
*/
public static boolean write(File file, Object o) throws Exception {
return write(new BufferedOutputStream(new FileOutputStream(file)), o);
}
/**
* writes the XML-serialized object to the given output stream
*
* @param stream the output stream
* @param o the object to write
* @return true if everything goes ok
*/
public static boolean write(OutputStream stream, Object o) throws Exception {
Class<?> xstreamClass;
java.lang.reflect.Constructor<?> constructor;
Object xstream;
Class<?>[] serializeArgsClasses = new Class[2];
Object[] serializeArgs = new Object[2];
java.lang.reflect.Method methodSerialize;
boolean result = false;
xstreamClass = Class.forName("com.thoughtworks.xstream.XStream");
constructor = xstreamClass.getConstructor();
xstream = constructor.newInstance();
serializeArgsClasses[0] = Object.class;
serializeArgsClasses[1] = OutputStream.class;
serializeArgs[0] = o;
serializeArgs[1] = stream;
methodSerialize = xstreamClass.getMethod("toXML", serializeArgsClasses);
// execute it
try {
methodSerialize.invoke(xstream, serializeArgs);
result = true;
} catch (Exception ex) {
result = false;
}
return result;
}
/**
* writes the XML-serialized object to the given Writer.
*
* @param writer the Writer
* @param toSerialize the object to write
* @return true if everything goes ok
* @throws Exception if something goes wrong
*/
public static boolean write(Writer writer, Object toSerialize)
throws Exception {
Class<?> xstreamClass;
java.lang.reflect.Constructor<?> constructor;
Object xstream;
Class<?>[] serializeArgsClasses = new Class[2];
Object[] serializeArgs = new Object[2];
java.lang.reflect.Method methodSerialize;
boolean result = false;
xstreamClass = Class.forName("com.thoughtworks.xstream.XStream");
constructor = xstreamClass.getConstructor();
xstream = constructor.newInstance();
serializeArgsClasses[0] = Object.class;
serializeArgsClasses[1] = Writer.class;
serializeArgs[0] = toSerialize;
serializeArgs[1] = writer;
methodSerialize = xstreamClass.getMethod("toXML", serializeArgsClasses);
// execute it
try {
methodSerialize.invoke(xstream, serializeArgs);
result = true;
} catch (Exception ex) {
result = false;
}
return result;
}
/**
* reads the XML-serialized object from the given file
*
* @param filename the file to deserialize the object from
* @return the deserialized object
* @throws Exception if something goes wrong while reading from the file
*/
public static Object read(String filename) throws Exception {
return read(new File(filename));
}
/**
* reads the XML-serialized object from the given file
*
* @param file the file to deserialize the object from
* @return the deserialized object
* @throws Exception if something goes wrong while reading from the file
*/
public static Object read(File file) throws Exception {
return read(new BufferedInputStream(new FileInputStream(file)));
}
/**
* reads the XML-serialized object from the given input stream
*
* @param stream the input stream
* @return the deserialized object
* @throws Exception if something goes wrong while reading from stream
*/
public static Object read(InputStream stream) throws Exception {
Class<?> xstreamClass;
java.lang.reflect.Constructor<?> constructor;
Object xstream;
Class<?>[] deSerializeArgsClasses = new Class[1];
Object[] deSerializeArgs = new Object[1];
java.lang.reflect.Method methodDeSerialize;
Object result;
xstreamClass = Class.forName("com.thoughtworks.xstream.XStream");
constructor = xstreamClass.getConstructor();
xstream = constructor.newInstance();
deSerializeArgsClasses[0] = InputStream.class;
deSerializeArgs[0] = stream;
methodDeSerialize = xstreamClass.getMethod("fromXML",
deSerializeArgsClasses);
// execute it
try {
result = methodDeSerialize.invoke(xstream, deSerializeArgs);
} catch (Exception ex) {
ex.printStackTrace();
result = null;
}
return result;
}
/**
* reads the XML-serialized object from the given Reader
*
* @param r the reader
* @return the deserialized object
* @throws Exception if something goes wrong while reading from stream
*/
public static Object read(Reader r) throws Exception {
Class<?> xstreamClass;
java.lang.reflect.Constructor<?> constructor;
Object xstream;
Class<?>[] deSerializeArgsClasses = new Class[1];
Object[] deSerializeArgs = new Object[1];
java.lang.reflect.Method methodDeSerialize;
Object result;
xstreamClass = Class.forName("com.thoughtworks.xstream.XStream");
constructor = xstreamClass.getConstructor();
xstream = constructor.newInstance();
deSerializeArgsClasses[0] = Reader.class;
deSerializeArgs[0] = r;
methodDeSerialize = xstreamClass.getMethod("fromXML",
deSerializeArgsClasses);
// execute it
try {
result = methodDeSerialize.invoke(xstream, deSerializeArgs);
} catch (Exception ex) {
ex.printStackTrace();
result = null;
}
return result;
}
/**
* Deserializes an object from the supplied XML string
*
* @param xmlString the XML to deserialize from
* @return the deserialized object
* @throws Exception if something goes wrong
*/
public static Object deSerialize(String xmlString) throws Exception {
Class<?> xstreamClass;
java.lang.reflect.Constructor<?> constructor;
Object xstream;
Class<?>[] deSerializeArgsClasses = new Class[1];
Object[] deSerializeArgs = new Object[1];
java.lang.reflect.Method methodDeSerialize;
Object result;
xstreamClass = Class.forName("com.thoughtworks.xstream.XStream");
constructor = xstreamClass.getConstructor();
xstream = constructor.newInstance();
deSerializeArgsClasses[0] = String.class;
deSerializeArgs[0] = xmlString;
methodDeSerialize = xstreamClass.getMethod("fromXML",
deSerializeArgsClasses);
// execute it
try {
result = methodDeSerialize.invoke(xstream, deSerializeArgs);
} catch (Exception ex) {
ex.printStackTrace();
result = null;
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/ClassificationGenerator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ClassificationGenerator.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Option;
import weka.core.Utils;
/**
* Abstract class for data generators for classifiers.
* <p/>
*
* @author Gabi Schmidberger (gabi@cs.waikato.ac.nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class ClassificationGenerator extends DataGenerator {
/** for serialization */
private static final long serialVersionUID = -5261662546673517844L;
/** Number of instances */
protected int m_NumExamples;
/**
* initializes with default values
*/
public ClassificationGenerator() {
super();
setNumExamples(defaultNumExamples());
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = enumToVector(super.listOptions());
result.addElement(new Option(
"\tThe number of examples to generate (default " + defaultNumExamples()
+ ")", "n", 1, "-n <num>"));
return result.elements();
}
/**
* Sets the options.
*
* @param options the options
* @throws Exception if invalid option
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
super.setOptions(options);
tmpStr = Utils.getOption('n', options);
if (tmpStr.length() != 0) {
setNumExamples(Integer.parseInt(tmpStr));
} else {
setNumExamples(defaultNumExamples());
}
}
/**
* Gets the current settings of the generator.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
Collections.addAll(result, super.getOptions());
result.add("-n");
result.add("" + getNumExamples());
return result.toArray(new String[result.size()]);
}
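/*
 * Illustrative sketch (assumption, not original code; "MyGenerator" is a
 * hypothetical concrete subclass): the -n option added above round-trips
 * through setOptions()/getOptions().
 *
 *   ClassificationGenerator gen = new MyGenerator();
 *   gen.setOptions(new String[]{"-n", "500"});
 *   // gen.getNumExamples() == 500, and getOptions() now contains "-n", "500"
 */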
/**
* returns the default number of examples
*
* @return the default number of examples
*/
protected int defaultNumExamples() {
return 100;
}
/**
* Sets the number of examples, given by option.
*
* @param numExamples the new number of examples
*/
public void setNumExamples(int numExamples) {
m_NumExamples = numExamples;
}
/**
* Gets the number of examples, given by option.
*
* @return the number of examples, given by option
*/
public int getNumExamples() {
return m_NumExamples;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numExamplesTipText() {
return "The number of examples to generate.";
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/ClusterDefinition.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ClusterDefinition.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators;
import java.io.Serializable;
import java.util.Enumeration;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.Utils;
/**
* Ancestor to all ClusterDefinitions, i.e., subclasses that handle their own
* parameters that the cluster generator only passes on.
*
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class ClusterDefinition implements Serializable, OptionHandler,
RevisionHandler {
/** for serialization */
private static final long serialVersionUID = -5950001207047429961L;
/** the parent of the cluster */
protected ClusterGenerator m_Parent;
/**
* initializes the cluster, without a parent cluster (necessary for GOE)
*/
public ClusterDefinition() {
this(null);
}
/**
* initializes the cluster
*
* @param parent the datagenerator this cluster belongs to
*/
public ClusterDefinition(ClusterGenerator parent) {
m_Parent = parent;
try {
setDefaults();
} catch (Exception e) {
e.printStackTrace();
}
}
/**
* sets the default values
*
* @throws Exception if setting of defaults fails
*/
protected abstract void setDefaults() throws Exception;
/**
* Returns a string describing this data generator.
*
* @return a description of the data generator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "Contains information about a certain cluster of a cluster generator.";
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options
*/
@Override
public abstract Enumeration<Option> listOptions();
/**
* Parses a list of options for this object.
* <p/>
*
* For list of valid options see class description.
* <p/>
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public abstract void setOptions(String[] options) throws Exception;
/**
* Gets the current settings of the cluster definition.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public abstract String[] getOptions();
/**
* returns the parent datagenerator this cluster belongs to
*
* @return the parent this cluster belongs to
*/
public ClusterGenerator getParent() {
return m_Parent;
}
/**
* sets the parent datagenerator this cluster belongs to
*
* @param parent the parent datagenerator
*/
public void setParent(ClusterGenerator parent) {
m_Parent = parent;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String parentTipText() {
return "The cluster generator this object belongs to.";
}
/**
* returns a string representation of the cluster
*
* @return the cluster definition as string
*/
@Override
public String toString() {
return this.getClass().getName() + ": " + Utils.joinOptions(getOptions());
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/ClusterGenerator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ClusterGenerator.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Option;
import weka.core.Range;
import weka.core.Utils;
/**
* Abstract class for cluster data generators.
* <p/>
*
* Example usage as the main of a datagenerator called RandomGenerator:
*
* <pre>
* public static void main(String[] args) {
* try {
* DataGenerator.makeData(new RandomGenerator(), args);
* } catch (Exception e) {
* e.printStackTrace();
* System.err.println(e.getMessage());
* }
* }
* </pre>
* <p/>
*
* @author Gabi Schmidberger (gabi@cs.waikato.ac.nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class ClusterGenerator extends DataGenerator {
/** for serialization */
private static final long serialVersionUID = 6131722618472046365L;
/** Number of attribute the dataset should have */
protected int m_NumAttributes;
/** class flag */
protected boolean m_ClassFlag = false;
/** Stores which columns are boolean (default numeric) */
protected Range m_booleanCols;
/** Stores which columns are nominal (default numeric) */
protected Range m_nominalCols;
/**
* initializes the generator
*/
public ClusterGenerator() {
super();
setNumAttributes(defaultNumAttributes());
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = enumToVector(super.listOptions());
result.addElement(new Option("\tThe number of attributes (default "
+ defaultNumAttributes() + ").", "a", 1, "-a <num>"));
result.addElement(new Option(
"\tClass Flag, if set, the cluster is listed in extra attribute.", "c",
0, "-c"));
result.addElement(new Option("\tThe indices for boolean attributes.", "b",
1, "-b <range>"));
result.addElement(new Option("\tThe indices for nominal attributes.", "m",
1, "-m <range>"));
return result.elements();
}
/**
* Sets the options.
*
* @param options the options
* @throws Exception if invalid option
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
super.setOptions(options);
tmpStr = Utils.getOption('a', options);
if (tmpStr.length() != 0) {
setNumAttributes(Integer.parseInt(tmpStr));
} else {
setNumAttributes(defaultNumAttributes());
}
setClassFlag(Utils.getFlag('c', options));
tmpStr = Utils.getOption('b', options);
setBooleanIndices(tmpStr);
m_booleanCols.setUpper(getNumAttributes() - 1);
tmpStr = Utils.getOption('m', options);
setNominalIndices(tmpStr);
m_nominalCols.setUpper(getNumAttributes() - 1);
// check indices
tmpStr = checkIndices();
if (tmpStr.length() > 0) {
throw new IllegalArgumentException(tmpStr);
}
}
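/*
 * Illustrative sketch (assumption, not original code; "MyClusterGenerator" is
 * a hypothetical concrete subclass): the -b/-m ranges parsed above are
 * 1-based, so the following marks attributes 1-2 as boolean and attribute 3
 * as nominal out of 5 generated attributes, with the cluster label added as
 * an extra class attribute.
 *
 *   ClusterGenerator gen = new MyClusterGenerator();
 *   gen.setOptions(new String[]{"-a", "5", "-b", "1-2", "-m", "3", "-c"});
 */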
/**
* Gets the current settings of the generator.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
Collections.addAll(result, super.getOptions());
result.add("-a");
result.add("" + getNumAttributes());
if (getClassFlag()) {
result.add("-c");
}
if (!getBooleanCols().toString().equalsIgnoreCase("empty")) {
result.add("-b");
result.add("" + getBooleanCols().getRanges());
}
if (!getNominalCols().toString().equalsIgnoreCase("empty")) {
result.add("-m");
result.add("" + getNominalCols().getRanges());
}
return result.toArray(new String[result.size()]);
}
/**
* returns the default number of attributes
*
* @return the default number of attributes
*/
protected int defaultNumAttributes() {
return 10;
}
/**
* Sets the number of attributes the dataset should have.
*
* @param numAttributes the new number of attributes
*/
public void setNumAttributes(int numAttributes) {
m_NumAttributes = numAttributes;
getBooleanCols().setUpper(getNumAttributes());
getNominalCols().setUpper(getNumAttributes());
}
/**
* Gets the number of attributes that should be produced.
*
* @return the number of attributes that should be produced
*/
public int getNumAttributes() {
return m_NumAttributes;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numAttributesTipText() {
return "The number of attributes the generated data will contain.";
}
/**
* Sets the class flag. If the class flag is set, the cluster is listed as
* class attribute in an extra attribute.
*
* @param classFlag the new class flag
*/
public void setClassFlag(boolean classFlag) {
m_ClassFlag = classFlag;
}
/**
* Gets the class flag.
*
* @return the class flag
*/
public boolean getClassFlag() {
return m_ClassFlag;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String classFlagTipText() {
return "If set to TRUE, lists the cluster as an extra attribute.";
}
/**
* Sets which attributes are boolean
*
* @param rangeList a string representing the list of attributes. Since the
* string will typically come from a user, attributes are indexed
* from 1. <br/>
* eg: first-3,5,6-last
* @throws IllegalArgumentException if an invalid range list is supplied
*/
public void setBooleanIndices(String rangeList) {
m_booleanCols.setRanges(rangeList);
}
/**
* Sets which attributes are boolean.
*
* @param value the range to use
*/
public void setBooleanCols(Range value) {
m_booleanCols.setRanges(value.getRanges());
}
/**
* returns the range of boolean attributes.
*
* @return the range of boolean attributes
*/
public Range getBooleanCols() {
if (m_booleanCols == null) {
m_booleanCols = new Range();
}
return m_booleanCols;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String booleanColsTipText() {
return "The range of attributes that are generated as boolean ones.";
}
/**
* Sets which attributes are nominal
*
* @param rangeList a string representing the list of attributes. Since the
* string will typically come from a user, attributes are indexed
* from 1. <br/>
* eg: first-3,5,6-last
* @throws IllegalArgumentException if an invalid range list is supplied
*/
public void setNominalIndices(String rangeList) {
m_nominalCols.setRanges(rangeList);
}
/**
* Sets which attributes are nominal.
*
* @param value the range to use
*/
public void setNominalCols(Range value) {
m_nominalCols.setRanges(value.getRanges());
}
/**
* returns the range of nominal attributes
*
* @return the range of nominal attributes
*/
public Range getNominalCols() {
if (m_nominalCols == null) {
m_nominalCols = new Range();
}
return m_nominalCols;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String nominalColsTipText() {
return "The range of attributes to generate as nominal ones.";
}
/**
* check if attribute types are not contradicting
*
* @return empty string if no problem, otherwise error message
*/
protected String checkIndices() {
for (int i = 0; i < getNumAttributes(); i++) {
if (m_booleanCols.isInRange(i) && m_nominalCols.isInRange(i)) {
return "Error in attribute type: Attribute " + i
+ " is set boolean and nominal.";
}
}
return "";
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/DataGenerator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* DataGenerator.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators;
import java.io.FileOutputStream;
import java.io.PrintWriter;
import java.io.Serializable;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Random;
import java.util.Vector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Randomizable;
import weka.core.RevisionHandler;
import weka.core.Utils;
/**
* Abstract superclass for data generators that generate data for classifiers
* and clusterers.
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class DataGenerator implements OptionHandler, Randomizable,
Serializable, RevisionHandler {
/** for serialization */
private static final long serialVersionUID = -3698585946221802578L;
/** Debugging mode */
protected boolean m_Debug = false;
/** The format for the generated dataset */
protected Instances m_DatasetFormat = null;
/** Relation name the dataset should have */
protected String m_RelationName = "";
/**
* Number of instances that should be produced for the dataset. This number is
* by default m_NumExamples, but can be reset by the generator
*/
protected int m_NumExamplesAct;
/** default output (stdout) */
protected transient PrintWriter m_DefaultOutput = new PrintWriter(
new java.io.OutputStreamWriter(System.out));
/** PrintWriter for outputting the generated data */
protected transient PrintWriter m_Output = m_DefaultOutput;
/** random number generator seed */
protected int m_Seed;
/** random number generator */
protected Random m_Random = null;
/** flag, that indicates whether the relationname is currently assembled */
protected boolean m_CreatingRelationName = false;
/**
* a black list for options not to be listed (for derived generators) in the
* makeOptionString method
*
* @see #makeOptionString(DataGenerator)
*/
protected static HashSet<String> m_OptionBlacklist;
static {
m_OptionBlacklist = new HashSet<String>();
}
/**
* initializes with default settings. <br/>
* Note: default values are set via a default&lt;name&gt; method. These
* default methods are also used in the listOptions method and in the
* setOptions method. Why? Derived generators can override the return value of
* these default methods, to avoid exceptions.
*/
public DataGenerator() {
clearBlacklist();
setNumExamplesAct(defaultNumExamplesAct());
setSeed(defaultSeed());
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = new Vector<Option>();
result.addElement(new Option("\tPrints this help.", "h", 1, "-h"));
result.addElement(new Option(
"\tThe name of the output file, otherwise the generated data is\n"
+ "\tprinted to stdout.", "o", 1, "-o <file>"));
result.addElement(new Option("\tThe name of the relation.", "r", 1,
"-r <name>"));
result.addElement(new Option("\tWhether to print debug information.", "d",
0, "-d"));
result.addElement(new Option("\tThe seed for random function (default "
+ defaultSeed() + ")", "S", 1, "-S"));
return result.elements();
}
/**
* Convenience method. Turns the given enumeration of options into a vector.
*/
public Vector<Option> enumToVector(Enumeration<Option> enu) {
Vector<Option> options = new Vector<Option>();
options.addAll(Collections.list(enu));
return options;
}
/**
* Parses a list of options for this object.
* <p/>
*
* For list of valid options see class description.
* <p/>
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
// remove unwanted options
options = removeBlacklist(options);
tmpStr = Utils.getOption('r', options);
if (tmpStr.length() != 0) {
setRelationName(Utils.unquote(tmpStr));
} else {
setRelationName("");
}
tmpStr = Utils.getOption('o', options);
if (tmpStr.length() != 0) {
setOutput(new PrintWriter(new FileOutputStream(tmpStr)));
} else if (getOutput() == null) {
throw new Exception("No Output defined!");
}
setDebug(Utils.getFlag('d', options));
tmpStr = Utils.getOption('S', options);
if (tmpStr.length() != 0) {
setSeed(Integer.parseInt(tmpStr));
} else {
setSeed(defaultSeed());
}
}
/**
* Gets the current settings of the data generator. Removing of
* blacklisted options has to be done in the derived class, that defines the
* blacklist-entry.
*
* @return an array of strings suitable for passing to setOptions
* @see #removeBlacklist(String[])
*/
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
// to avoid endless loop
if (!m_CreatingRelationName) {
result.add("-r");
result.add(Utils.quote(getRelationNameToUse()));
}
if (getDebug()) {
result.add("-d");
}
result.add("-S");
result.add("" + getSeed());
return result.toArray(new String[result.size()]);
}
/**
* Initializes the format for the dataset produced. Must be called before the
* generateExample or generateExamples methods are used. Also sets a default
* relation name in case the current relation name is empty.
*
* @return the format for the dataset
* @throws Exception if the generating of the format failed
* @see #defaultRelationName()
*/
public Instances defineDataFormat() throws Exception {
if (getRelationName().length() == 0) {
setRelationName(defaultRelationName());
}
return m_DatasetFormat;
}
/**
* Generates one example of the dataset.
*
* @return the generated example
* @throws Exception if the format of the dataset is not yet defined
* @throws Exception if the generator only works with generateExamples which
* means in non single mode
*/
public abstract Instance generateExample() throws Exception;
/**
* Generates all examples of the dataset.
*
* @return the generated dataset
* @throws Exception if the format of the dataset is not yet defined
* @throws Exception if the generator only works with generateExample, which
* means in single mode
*/
public abstract Instances generateExamples() throws Exception;
/**
* Generates a comment string that documents the data generator. By default
* this string is added at the beginning of the produced ARFF output, right
* after the options.
*
* @return string contains info about the generated rules
* @throws Exception if the generating of the documentation fails
*/
public abstract String generateStart() throws Exception;
/**
* Generates a comment string that documents the data generator. By default
* this string is added at the end of the produced ARFF output.
*
* @return string contains info about the generated rules
* @throws Exception if the generating of the documentation fails
*/
public abstract String generateFinished() throws Exception;
/**
* Returns whether single mode is set for the given data generator; the mode
* depends on the option settings and/or the generator type.
*
* @return single mode flag
* @throws Exception if mode is not set yet
*/
public abstract boolean getSingleModeFlag() throws Exception;
/**
* Sets the debug flag.
*
* @param debug the new debug flag
*/
public void setDebug(boolean debug) {
m_Debug = debug;
}
/**
* Gets the debug flag.
*
* @return the debug flag
*/
public boolean getDebug() {
return m_Debug;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String debugTipText() {
return "Whether the generator is run in debug mode or not.";
}
/**
* Sets the relation name the dataset should have.
*
* @param relationName the new relation name
*/
public void setRelationName(String relationName) {
m_RelationName = relationName;
}
/**
* returns a relation name based on the options
*
* @return a relation name based on the options
*/
protected String defaultRelationName() {
StringBuffer result;
String[] options;
String option;
int i;
m_CreatingRelationName = true;
result = new StringBuffer(this.getClass().getName());
options = getOptions();
for (i = 0; i < options.length; i++) {
option = options[i].trim();
if (i > 0) {
result.append("_");
}
result.append(option.replaceAll(" ", "_"));
}
m_CreatingRelationName = false;
return result.toString();
}
/**
* returns the relation name to use, i.e., in case the currently set relation
* name is empty, a generic one is returned. Must be used in
* defineDataFormat()
*
* @return the relation name
* @see #defaultRelationName()
* @see #defineDataFormat()
*/
protected String getRelationNameToUse() {
String result;
result = getRelationName();
if (result.length() == 0) {
result = defaultRelationName();
}
return result;
}
/**
* Gets the relation name the dataset should have.
*
* @return the relation name the dataset should have
*/
public String getRelationName() {
return m_RelationName;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String relationNameTipText() {
return "The relation name of the generated data (if empty, a generic one will be supplied).";
}
/**
* returns the default number of actual examples
*
* @return the default number of actual examples
*/
protected int defaultNumExamplesAct() {
return 0;
}
/**
* Sets the number of examples the dataset should have.
*
* @param numExamplesAct the new number of examples
*/
protected void setNumExamplesAct(int numExamplesAct) {
m_NumExamplesAct = numExamplesAct;
}
/**
* Gets the number of examples the dataset should have.
*
* @return the number of examples the dataset should have
*/
public int getNumExamplesAct() {
return m_NumExamplesAct;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
protected String numExamplesActTipText() {
return "The actual number of examples to generate.";
}
/**
* Sets the print writer.
*
* @param newOutput the new print writer
*/
public void setOutput(PrintWriter newOutput) {
m_Output = newOutput;
m_DefaultOutput = null;
}
/**
* Gets the print writer.
*
* @return print writer object
*/
public PrintWriter getOutput() {
return m_Output;
}
/**
* Gets the writer that is used for outputting to stdout. A workaround for the
* problem of closing stdout when closing the associated PrintWriter.
*
* @return writer object
*/
public PrintWriter defaultOutput() {
return m_DefaultOutput;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String outputTipText() {
return "The output writer to use for printing the generated data.";
}
/**
* Sets the format of the dataset that is to be generated.
*
* @param newFormat the new dataset format of the dataset
*/
public void setDatasetFormat(Instances newFormat) {
m_DatasetFormat = new Instances(newFormat, 0);
}
/**
* Gets the format of the dataset that is to be generated.
*
* @return the dataset format of the dataset
*/
public Instances getDatasetFormat() {
if (m_DatasetFormat != null) {
return new Instances(m_DatasetFormat, 0);
} else {
return null;
}
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String formatTipText() {
return "The data format to use.";
}
/**
* returns the default seed
*
* @return the default seed
*/
protected int defaultSeed() {
return 1;
}
/**
* Gets the random number seed.
*
* @return the random number seed.
*/
@Override
public int getSeed() {
return m_Seed;
}
/**
* Sets the random number seed.
*
* @param newSeed the new random number seed.
*/
@Override
public void setSeed(int newSeed) {
m_Seed = newSeed;
m_Random = new Random(newSeed);
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String seedTipText() {
return "The seed value for the random number generator.";
}
/**
* Gets the random generator.
*
* @return the random generator
*/
public Random getRandom() {
if (m_Random == null) {
m_Random = new Random(getSeed());
}
return m_Random;
}
/**
* Sets the random generator.
*
* @param newRandom is the random generator.
*/
public void setRandom(Random newRandom) {
m_Random = newRandom;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String randomTipText() {
return "The random number generator to use.";
}
/**
* Returns a string representing the dataset in the instance queue.
*
* @return the string representing the output data format
*/
protected String toStringFormat() {
if (m_DatasetFormat == null) {
return "";
}
return m_DatasetFormat.toString();
}
/**
* removes all entries from the options blacklist
*/
protected static void clearBlacklist() {
m_OptionBlacklist.clear();
}
/**
* adds the given option, e.g., for "-V" use "V", to the blacklist of options
* that are not to be output via the makeOptionString method
*
* @param option the option to exclude from listing
* @see #makeOptionString(DataGenerator)
*/
protected static void addToBlacklist(String option) {
m_OptionBlacklist.add(option);
}
/**
* checks, whether the given option is in the blacklist of options not to be
* output by makeOptionString
*
* @param option the option to check
* @return true if the option is on the blacklist
* @see #makeOptionString(DataGenerator)
*/
protected static boolean isOnBlacklist(String option) {
return m_OptionBlacklist.contains(option);
}
/**
* removes all the options from the options array that are blacklisted
*
* @param options the options array to remove the blacklisted options from
* @return the processed options array
*/
protected String[] removeBlacklist(String[] options) {
Hashtable<String, Option> pool;
Option option;
// retrieve options that are on blacklist
Enumeration<Option> enm = listOptions();
pool = new Hashtable<String, Option>();
while (enm.hasMoreElements()) {
option = enm.nextElement();
if (isOnBlacklist(option.name())) {
pool.put(option.name(), option);
}
}
// remove options
Enumeration<String> enm2 = pool.keys();
while (enm2.hasMoreElements()) {
option = pool.get(enm2.nextElement());
try {
if (option.numArguments() == 0) {
Utils.getFlag(option.name(), options);
} else {
Utils.getOption(option.name(), options);
}
} catch (Exception e) {
e.printStackTrace();
}
}
return options;
}
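/*
 * Illustrative sketch (assumption, not original code): a derived generator
 * that wants to hide an inherited option (say "-S") from its help output can
 * blacklist it once in a static initializer; removeBlacklist(String[]) above
 * then silently consumes it from incoming option arrays.
 *
 *   static {
 *     addToBlacklist("S");
 *   }
 */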
/**
* returns all the options in a string
*
* @param generator the DataGenerator to return all the options for
* @return the assembled option string
*/
protected static String makeOptionString(DataGenerator generator) {
StringBuffer result;
Enumeration<Option> enm;
Option option;
result = new StringBuffer();
result.append("\nData Generator options:\n\n");
enm = generator.listOptions();
while (enm.hasMoreElements()) {
option = enm.nextElement();
// skip option if on blacklist
if (isOnBlacklist(option.name())) {
continue;
}
result.append(option.synopsis() + "\n" + option.description() + "\n");
}
return result.toString();
}
/**
* Gets the prologue string.
*
* @return prologue
*/
public String getPrologue() throws Exception {
StringBuilder sb = new StringBuilder();
// output of options
sb.append("%\n");
sb.append("% Commandline\n");
sb.append("%\n");
sb.append("% " + getClass().getName() + " " + Utils.joinOptions(getOptions()) + "\n");
sb.append("%\n");
// comment at beginning of ARFF File
String commentAtStart = generateStart();
if (commentAtStart.length() > 0) {
sb.append("%\n");
sb.append("% Prologue\n");
sb.append("%\n");
sb.append(commentAtStart.trim() + "\n");
sb.append("%\n");
}
return sb.toString();
}
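/*
 * Illustrative sketch (assumption, not original code): with the newlines
 * appended above, the prologue printed ahead of the ARFF header looks
 * roughly like
 *
 *   %
 *   % Commandline
 *   %
 *   % weka.datagenerators.classifiers.classification.RDG1 -r RDG1 -S 1 -n 100 ...
 *   %
 */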
/**
* Gets the epilogue string.
*
* @return epilogue
*/
public String getEpilogue() throws Exception {
StringBuilder sb = new StringBuilder();
// comment at end of ARFF File
String commentAtEnd = generateFinished();
if (commentAtEnd.length() > 0) {
sb.append("%\n");
sb.append("% Epilogue\n");
sb.append("%\n");
sb.append(commentAtEnd.trim() + "\n");
sb.append("%\n");
}
return sb.toString();
}
/**
* Calls the data generator.
*
* @param generator one of the data generators
* @param options options of the data generator
* @throws Exception if there was an error in the option list
*/
public static void makeData(DataGenerator generator, String[] options)
throws Exception {
// help?
boolean printhelp = (Utils.getFlag('h', options));
// read options
if (!printhelp) {
try {
options = generator.removeBlacklist(options);
generator.setOptions(options);
// check for left-over options, but don't raise exception
Vector<String> unknown = new Vector<String>();
for (int i = 0; i < options.length; i++) {
if (options[i].length() != 0) {
unknown.add(options[i]);
}
}
if (unknown.size() > 0) {
System.out.print("Unknown options:");
for (int i = 0; i < unknown.size(); i++) {
System.out.print(" " + unknown.get(i));
}
System.out.println();
}
} catch (Exception e) {
e.printStackTrace();
printhelp = true;
}
}
if (printhelp) {
System.out.println(makeOptionString(generator));
return;
}
// define dataset format
// computes actual number of examples to be produced
generator.setDatasetFormat(generator.defineDataFormat());
// get print writer and print header
PrintWriter output = generator.getOutput();
output.println(generator.getPrologue());
// ask data generator which mode
boolean singleMode = generator.getSingleModeFlag();
// start data producer
if (singleMode) {
// output of dataset header
output.println(generator.toStringFormat());
for (int i = 0; i < generator.getNumExamplesAct(); i++) {
// over all examples to be produced
output.println(generator.generateExample());
}
} else { // generator produces all instances at once
Instances data = generator.generateExamples();
// output of dataset
for (int i = 0; i < data.numInstances(); i++) {
if (i % 1000 == 0) {
output.flush();
}
output.println(data.instance(i));
}
output.flush();
}
output.println(generator.getEpilogue());
output.flush();
if (generator.getOutput() != generator.defaultOutput()) {
output.close();
}
}
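/*
 * Illustrative sketch (assumption, not original code): a concrete generator
 * invoked from the command line ends up in makeData(), e.g.
 *
 *   java weka.datagenerators.classifiers.classification.RDG1 -r demo -o demo.arff -n 100
 *
 * which writes the prologue, the dataset (header plus instances) and the
 * epilogue to demo.arff.
 */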
/**
* runs the datagenerator instance with the given options.
*
* @param datagenerator the datagenerator to run
* @param options the commandline options
*/
public static void runDataGenerator(DataGenerator datagenerator,
String[] options) {
try {
DataGenerator.makeData(datagenerator, options);
} catch (Exception e) {
if ((e.getMessage() != null)
&& (e.getMessage().indexOf("Data Generator options") == -1)) {
e.printStackTrace();
} else {
System.err.println(e.getMessage());
}
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/RegressionGenerator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RegressionGenerator.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Option;
import weka.core.Utils;
/**
* Abstract class for data generators for regression classifiers.
* <p/>
*
* Example usage as the main of a datagenerator called RandomGenerator:
*
* <pre>
* public static void main(String[] args) {
* try {
* DataGenerator.makeData(new RandomGenerator(), args);
* } catch (Exception e) {
* e.printStackTrace();
* System.err.println(e.getMessage());
* }
* }
* </pre>
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public abstract class RegressionGenerator extends DataGenerator {
/** for serialization */
private static final long serialVersionUID = 3073254041275658221L;
/** Number of instances */
protected int m_NumExamples;
/**
* initializes the generator with default values
*/
public RegressionGenerator() {
super();
setNumExamples(defaultNumExamples());
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = enumToVector(super.listOptions());
result.addElement(new Option(
"\tThe number of examples to generate (default " + defaultNumExamples()
+ ")", "n", 1, "-n <num>"));
return result.elements();
}
/**
* Sets the options.
*
* @param options the options
* @throws Exception if invalid option
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
super.setOptions(options);
tmpStr = Utils.getOption('n', options);
if (tmpStr.length() != 0) {
setNumExamples(Integer.parseInt(tmpStr));
} else {
setNumExamples(defaultNumExamples());
}
}
/**
 * Gets the current settings of the data generator.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
Collections.addAll(result, super.getOptions());
result.add("-n");
result.add("" + getNumExamples());
return result.toArray(new String[result.size()]);
}
/**
* returns the default number of examples
*
* @return the default number of examples
*/
protected int defaultNumExamples() {
return 100;
}
/**
* Sets the number of examples, given by option.
*
* @param numExamples the new number of examples
*/
public void setNumExamples(int numExamples) {
m_NumExamples = numExamples;
}
/**
* Gets the number of examples, given by option.
*
* @return the number of examples, given by option
*/
public int getNumExamples() {
return m_NumExamples;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numExamplesTipText() {
return "The number of examples to generate.";
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/Test.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Test.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators;
import java.io.Serializable;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Class to represent a test. <br/>
* <br/>
 * The string representation of the test can be supplied in standard notation
 * or, for a subset of attribute types, in Prolog notation.<br/>
 *
 * The following are examples of all possible tests that can be represented by
 * this class, given in standard notation.<br/>
* <br/>
* Examples of tests for numeric attributes:<br/>
* B >= 2.333<br/>
* B < 4.56<br/>
* <br/>
 * Examples of tests for nominal attributes with more than 2 values:<br/>
* A = rain <br/>
* A != rain<br/>
* <br/>
* Examples of tests for nominal attribute with exactly 2 values:<br/>
* A = false <br/>
* A = true<br/>
* <br/>
* <br/>
* The Prolog notation is only supplied for numeric attributes and for nominal
* attributes that have the values "true" and "false".<br/>
* <br/>
 * The following are examples in the Prolog notation.<br/>
* <br/>
* Examples of tests for numeric attributes:<br/>
* The same as for standard notation above.<br/>
* <br/>
* Examples of tests for nominal attributes with values "true"and "false":<br/>
* A<br/>
* not(A)<br/>
* <br/>
* (Other nominal attributes are not supported by the Prolog notation.)<br/>
* <br/>
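 * A minimal usage sketch (the attribute name, split value and instance value
 * below are illustrative assumptions; an enclosing method declaring
 * "throws Exception" and the usual java.util and weka.core imports are
 * assumed):<br/>
 * <br/>
 * <pre>
 * ArrayList<Attribute> atts = new ArrayList<Attribute>();
 * atts.add(new Attribute("B"));                      // numeric attribute B
 * Instances data = new Instances("example", atts, 0);
 * Test t = new Test(0, 2.333, data);                 // represents: B < 2.333
 * Instance inst = new DenseInstance(1.0, new double[] { 1.5 });
 * inst.setDataset(data);
 * System.out.println(t.passesTest(inst));            // true, since 1.5 < 2.333
 * System.out.println(t.getNot().passesTest(inst));   // false (negated test)
 * </pre>
 * <br/>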
*
* @author Gabi Schmidberger (gabi@cs.waikato.ac.nz)
* @version $Revision$
**/
public class Test implements Serializable, RevisionHandler {
/** for serialization */
static final long serialVersionUID = -8890645875887157782L;
/** the attribute index */
int m_AttIndex;
/** the split */
double m_Split;
/** whether to negate the test */
boolean m_Not;
/** the dataset */
Instances m_Dataset;
/**
* Constructor
*
* @param i the attribute index
* @param s the split
* @param dataset the dataset
*/
public Test(int i, double s, Instances dataset) {
m_AttIndex = i;
m_Split = s;
m_Dataset = dataset;
m_Not = false;
}
/**
* Constructor
*
* @param i the attribute index
* @param s the split
* @param dataset the dataset
* @param n whether to negate the test
*/
public Test(int i, double s, Instances dataset, boolean n) {
m_AttIndex = i;
m_Split = s;
m_Dataset = dataset;
m_Not = n;
}
/**
* Negates the test.
*
 * @return a new copy of the test with the negation flag flipped
 */
public Test getNot() {
return new Test(m_AttIndex, m_Split, m_Dataset, !m_Not);
}
/**
* Determines whether an instance passes the test.
*
* @param inst the instance
* @return true if the instance satisfies the test, false otherwise
* @throws Exception if something goes wrong
*/
public boolean passesTest(Instance inst) throws Exception {
if (inst.isMissing(m_AttIndex)) {
return false; // missing values fail
}
boolean isNominal = inst.attribute(m_AttIndex).isNominal();
double attribVal = inst.value(m_AttIndex);
if (!m_Not) {
if (isNominal) {
if (((int) attribVal) != ((int) m_Split)) {
return false;
}
} else if (attribVal >= m_Split) {
return false;
}
} else {
if (isNominal) {
if (((int) attribVal) == ((int) m_Split)) {
return false;
}
} else if (attribVal < m_Split) {
return false;
}
}
return true;
}
/**
 * Returns a string representation of the test.
*
* @return a string representing the test
*/
@Override
public String toString() {
return (m_Dataset.attribute(m_AttIndex).name() + " " + testComparisonString());
}
/**
 * Returns a string representation of the test in Prolog notation.
*
* @return a string representing the test in Prolog notation
*/
public String toPrologString() {
Attribute att = m_Dataset.attribute(m_AttIndex);
StringBuffer str = new StringBuffer();
String attName = m_Dataset.attribute(m_AttIndex).name();
if (att.isNumeric()) {
str = str.append(attName + " ");
if (m_Not) {
str = str.append(">= " + Utils.doubleToString(m_Split, 3));
} else {
str = str.append("< " + Utils.doubleToString(m_Split, 3));
}
} else {
String value = att.value((int) m_Split);
if (value == "false") {
str = str.append("not(" + attName + ")");
} else {
str = str.append(attName);
}
}
return str.toString();
}
/**
* Gives a string representation of the test, starting from the comparison
* symbol.
*
* @return a string representing the test
*/
private String testComparisonString() {
Attribute att = m_Dataset.attribute(m_AttIndex);
if (att.isNumeric()) {
return ((m_Not ? ">= " : "< ") + Utils.doubleToString(m_Split, 3));
} else {
if (att.numValues() != 2) {
return ((m_Not ? "!= " : "= ") + att.value((int) m_Split));
} else {
return ("= " + (m_Not ? att.value((int) m_Split == 0 ? 1 : 0) : att
.value((int) m_Split)));
}
}
}
/**
* Compares the test with the test that is given as parameter.
*
* @param t the test the object is compared to
* @return true if the two Tests are equal
*/
public boolean equalTo(Test t) {
return (m_AttIndex == t.m_AttIndex && m_Split == t.m_Split && m_Not == t.m_Not);
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/classifiers/classification/Agrawal.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Agrawal.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators.classifiers.classification;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Tag;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.datagenerators.ClassificationGenerator;
/**
* <!-- globalinfo-start --> Generates a people database and is based on the
* paper by Agrawal et al.:<br/>
* R. Agrawal, T. Imielinski, A. Swami (1993). Database Mining: A Performance
* Perspective. IEEE Transactions on Knowledge and Data Engineering.
* 5(6):914-925. URL
* http://www.almaden.ibm.com/software/quest/Publications/ByDate.html.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- technical-bibtex-start --> BibTeX:
*
* <pre>
* @article{Agrawal1993,
* author = {R. Agrawal and T. Imielinski and A. Swami},
* journal = {IEEE Transactions on Knowledge and Data Engineering},
* note = {Special issue on Learning and Discovery in Knowledge-Based Databases},
* number = {6},
* pages = {914-925},
* title = {Database Mining: A Performance Perspective},
* volume = {5},
* year = {1993},
* URL = {http://www.almaden.ibm.com/software/quest/Publications/ByDate.html},
* PDF = {http://www.almaden.ibm.com/software/quest/Publications/papers/tkde93.pdf}
* }
* </pre>
* <p/>
* <!-- technical-bibtex-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
 * Whether to print debug information.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -n <num>
* The number of examples to generate (default 100)
* </pre>
*
* <pre>
* -F <num>
* The function to use for generating the data. (default 1)
* </pre>
*
* <pre>
* -B
* Whether to balance the class.
* </pre>
*
* <pre>
* -P <num>
* The perturbation factor. (default 0.05)
* </pre>
*
* <!-- options-end -->
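 *
 * A minimal programmatic sketch (the settings below are illustrative, not
 * defaults; an enclosing method declaring "throws Exception" is assumed):
 *
 * <pre>
 * Agrawal generator = new Agrawal();
 * generator.setFunction(new SelectedTag(Agrawal.FUNCTION_2, Agrawal.FUNCTION_TAGS));
 * generator.setBalanceClass(true);
 * generator.setPerturbationFraction(0.05);
 * generator.setDatasetFormat(generator.defineDataFormat());
 * for (int i = 0; i < generator.getNumExamplesAct(); i++) {
 *   System.out.println(generator.generateExample());
 * }
 * </pre>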
*
* @author Richard Kirkby (rkirkby at cs dot waikato dot ac dot nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class Agrawal extends ClassificationGenerator implements
TechnicalInformationHandler {
/** for serialization */
static final long serialVersionUID = 2254651939636143025L;
/**
* the interface for the class functions
*/
protected interface ClassFunction {
/**
* returns a class value based on the given inputs
*
* @param salary the salary
* @param commission the commission
* @param age the age
* @param elevel the education level
* @param car
* @param zipcode the zip code
* @param hvalue
* @param hyears
* @param loan
*/
public long determineClass(double salary, double commission, int age,
int elevel, int car, int zipcode, double hvalue, int hyears, double loan);
}
/**
 * built-in functions are based on the paper (page 924); they correspond to
 * functions pred20 through pred29 in the public C code
*/
protected static ClassFunction[] builtInFunctions = {
// function 1
new ClassFunction() {
@Override
public long determineClass(double salary, double commission, int age,
int elevel, int car, int zipcode, double hvalue, int hyears, double loan) {
if (age < 40 || 60 <= age) {
return 0;
} else {
return 1;
}
}
},
// function 2
new ClassFunction() {
@Override
public long determineClass(double salary, double commission, int age,
int elevel, int car, int zipcode, double hvalue, int hyears, double loan) {
if (age < 40) {
if (50000 <= salary && salary <= 100000) {
return 0;
} else {
return 1;
}
} else if (age < 60) {
if (75000 <= salary && salary <= 125000) {
return 0;
} else {
return 1;
}
} else // age >= 60
if (25000 <= salary && salary <= 75000) {
return 0;
} else {
return 1;
}
}
},
// function 3
new ClassFunction() {
@Override
public long determineClass(double salary, double commission, int age,
int elevel, int car, int zipcode, double hvalue, int hyears, double loan) {
if (age < 40) {
if (elevel == 0 || elevel == 1) {
return 0;
} else {
return 1;
}
} else if (age < 60) {
if (elevel == 1 || elevel == 2 || elevel == 3) {
return 0;
} else {
return 1;
}
} else // age >= 60
if (elevel == 2 || elevel == 3 || elevel == 4) {
return 0;
} else {
return 1;
}
}
},
// function 4
new ClassFunction() {
@Override
public long determineClass(double salary, double commission, int age,
int elevel, int car, int zipcode, double hvalue, int hyears, double loan) {
if (age < 40) {
if (elevel == 0 || elevel == 1) {
if (25000 <= salary && salary <= 75000) {
return 0;
} else {
return 1;
}
} else if (50000 <= salary && salary <= 100000) {
return 0;
} else {
return 1;
}
} else if (age < 60) {
if (elevel == 1 || elevel == 2 || elevel == 3) {
if (50000 <= salary && salary <= 100000) {
return 0;
} else {
return 1;
}
} else if (75000 <= salary && salary <= 125000) {
return 0;
} else {
return 1;
}
} else // age >= 60
if (elevel == 2 || elevel == 3 || elevel == 4) {
if (50000 <= salary && salary <= 100000) {
return 0;
} else {
return 1;
}
} else if (25000 <= salary && salary <= 75000) {
return 0;
} else {
return 1;
}
}
},
// function 5
new ClassFunction() {
@Override
public long determineClass(double salary, double commission, int age,
int elevel, int car, int zipcode, double hvalue, int hyears, double loan) {
if (age < 40) {
if (50000 <= salary && salary <= 100000) {
if (100000 <= loan && loan <= 300000) {
return 0;
} else {
return 1;
}
} else if (200000 <= loan && loan <= 400000) {
return 0;
} else {
return 1;
}
} else if (age < 60) {
if (75000 <= salary && salary <= 125000) {
if (200000 <= loan && loan <= 400000) {
return 0;
} else {
return 1;
}
} else if (300000 <= loan && loan <= 500000) {
return 0;
} else {
return 1;
}
} else // age >= 60
if (25000 <= salary && salary <= 75000) {
if (300000 <= loan && loan <= 500000) {
return 0;
} else {
return 1;
}
} else if (100000 <= loan && loan <= 300000) {
return 0;
} else {
return 1;
}
}
},
// function 6
new ClassFunction() {
@Override
public long determineClass(double salary, double commission, int age,
int elevel, int car, int zipcode, double hvalue, int hyears, double loan) {
double totalSalary = salary + commission;
if (age < 40) {
if (50000 <= totalSalary && totalSalary <= 100000) {
return 0;
} else {
return 1;
}
} else if (age < 60) {
if (75000 <= totalSalary && totalSalary <= 125000) {
return 0;
} else {
return 1;
}
} else // age >= 60
if (25000 <= totalSalary && totalSalary <= 75000) {
return 0;
} else {
return 1;
}
}
},
// function 7
new ClassFunction() {
@Override
public long determineClass(double salary, double commission, int age,
int elevel, int car, int zipcode, double hvalue, int hyears, double loan) {
double disposable = (2.0 * (salary + commission) / 3.0 - loan / 5.0 - 20000.0);
return disposable > 0 ? 0 : 1;
}
},
// function 8
new ClassFunction() {
@Override
public long determineClass(double salary, double commission, int age,
int elevel, int car, int zipcode, double hvalue, int hyears, double loan) {
double disposable = (2.0 * (salary + commission) / 3.0 - 5000.0
* elevel - 20000.0);
return disposable > 0 ? 0 : 1;
}
},
// function 9
new ClassFunction() {
@Override
public long determineClass(double salary, double commission, int age,
int elevel, int car, int zipcode, double hvalue, int hyears, double loan) {
double disposable = (2.0 * (salary + commission) / 3.0 - 5000.0
* elevel - loan / 5.0 - 10000.0);
return disposable > 0 ? 0 : 1;
}
},
// function 10
new ClassFunction() {
@Override
public long determineClass(double salary, double commission, int age,
int elevel, int car, int zipcode, double hvalue, int hyears, double loan) {
double equity = 0.0;
if (hyears >= 20) {
equity = hvalue * (hyears - 20.0) / 10.0;
}
double disposable = (2.0 * (salary + commission) / 3.0 - 5000.0
* elevel + equity / 5.0 - 10000.0);
return disposable > 0 ? 0 : 1;
}
} };
/** function 1 */
public final static int FUNCTION_1 = 1;
/** function 2 */
public final static int FUNCTION_2 = 2;
/** function 3 */
public final static int FUNCTION_3 = 3;
/** function 4 */
public final static int FUNCTION_4 = 4;
/** function 5 */
public final static int FUNCTION_5 = 5;
/** function 6 */
public final static int FUNCTION_6 = 6;
/** function 7 */
public final static int FUNCTION_7 = 7;
/** function 8 */
public final static int FUNCTION_8 = 8;
/** function 9 */
public final static int FUNCTION_9 = 9;
/** function 10 */
public final static int FUNCTION_10 = 10;
/** the function tags */
public static final Tag[] FUNCTION_TAGS = {
new Tag(FUNCTION_1, "Function 1"), new Tag(FUNCTION_2, "Function 2"),
new Tag(FUNCTION_3, "Function 3"), new Tag(FUNCTION_4, "Function 4"),
new Tag(FUNCTION_5, "Function 5"), new Tag(FUNCTION_6, "Function 6"),
new Tag(FUNCTION_7, "Function 7"), new Tag(FUNCTION_8, "Function 8"),
new Tag(FUNCTION_9, "Function 9"), new Tag(FUNCTION_10, "Function 10"), };
/** the function to use for generating the data */
protected int m_Function;
/** whether to balance the class */
protected boolean m_BalanceClass;
/** the perturbation fraction */
protected double m_PerturbationFraction;
/** used for balancing the class */
protected boolean m_nextClassShouldBeZero;
/** the last class label that was generated */
protected double m_lastLabel;
/**
* initializes the generator with default values
*/
public Agrawal() {
super();
setFunction(defaultFunction());
setBalanceClass(defaultBalanceClass());
setPerturbationFraction(defaultPerturbationFraction());
}
/**
* Returns a string describing this data generator.
*
* @return a description of the data generator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "Generates a people database and is based on the paper by Agrawal "
+ "et al.:\n" + getTechnicalInformation().toString();
}
/**
* Returns an instance of a TechnicalInformation object, containing detailed
* information about the technical background of this class, e.g., paper
* reference or book this class is based on.
*
* @return the technical information about this class
*/
@Override
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(Type.ARTICLE);
result.setValue(Field.AUTHOR, "R. Agrawal and T. Imielinski and A. Swami");
result.setValue(Field.YEAR, "1993");
result.setValue(Field.TITLE, "Database Mining: A Performance Perspective");
result.setValue(Field.JOURNAL,
"IEEE Transactions on Knowledge and Data Engineering");
result.setValue(Field.VOLUME, "5");
result.setValue(Field.NUMBER, "6");
result.setValue(Field.PAGES, "914-925");
result.setValue(Field.NOTE,
"Special issue on Learning and Discovery in Knowledge-Based Databases");
result.setValue(Field.URL,
"http://www.almaden.ibm.com/software/quest/Publications/ByDate.html");
result
.setValue(Field.PDF,
"http://www.almaden.ibm.com/software/quest/Publications/papers/tkde93.pdf");
return result;
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = enumToVector(super.listOptions());
result
.add(new Option(
"\tThe function to use for generating the data. (default "
+ defaultFunction().getSelectedTag().getID() + ")", "F", 1,
"-F <num>"));
result.add(new Option("\tWhether to balance the class.", "B", 0, "-B"));
result.add(new Option("\tThe perturbation factor. (default "
+ defaultPerturbationFraction() + ")", "P", 1, "-P <num>"));
return result.elements();
}
/**
* Parses a list of options for this object.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
 * Whether to print debug information.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -n <num>
* The number of examples to generate (default 100)
* </pre>
*
* <pre>
* -F <num>
* The function to use for generating the data. (default 1)
* </pre>
*
* <pre>
* -B
* Whether to balance the class.
* </pre>
*
* <pre>
* -P <num>
* The perturbation factor. (default 0.05)
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
super.setOptions(options);
tmpStr = Utils.getOption('F', options);
if (tmpStr.length() != 0) {
setFunction(new SelectedTag(Integer.parseInt(tmpStr), FUNCTION_TAGS));
} else {
setFunction(defaultFunction());
}
setBalanceClass(Utils.getFlag('B', options));
tmpStr = Utils.getOption('P', options);
if (tmpStr.length() != 0) {
setPerturbationFraction(Double.parseDouble(tmpStr));
} else {
setPerturbationFraction(defaultPerturbationFraction());
}
}
/**
* Gets the current settings of the datagenerator.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
Collections.addAll(result, super.getOptions());
result.add("-F");
result.add("" + m_Function);
if (getBalanceClass()) {
result.add("-B");
}
result.add("-P");
result.add("" + getPerturbationFraction());
return result.toArray(new String[result.size()]);
}
/**
* returns the default function
*
* @return the default function
*/
protected SelectedTag defaultFunction() {
return new SelectedTag(FUNCTION_1, FUNCTION_TAGS);
}
/**
* Gets the function for generating the data.
*
* @return the function.
* @see #FUNCTION_TAGS
*/
public SelectedTag getFunction() {
return new SelectedTag(m_Function, FUNCTION_TAGS);
}
/**
* Sets the function for generating the data.
*
* @param value the function.
* @see #FUNCTION_TAGS
*/
public void setFunction(SelectedTag value) {
if (value.getTags() == FUNCTION_TAGS) {
m_Function = value.getSelectedTag().getID();
}
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String functionTipText() {
return "The function to use for generating the data.";
}
/**
* returns the default for balancing the class
*
* @return the default for balancing the class
*/
protected boolean defaultBalanceClass() {
return false;
}
/**
* Gets whether the class is balanced.
*
* @return whether the class is balanced.
*/
public boolean getBalanceClass() {
return m_BalanceClass;
}
/**
* Sets whether the class is balanced.
*
* @param value whether to balance the class.
*/
public void setBalanceClass(boolean value) {
m_BalanceClass = value;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String balanceClassTipText() {
return "Whether to balance the class.";
}
/**
* returns the default perturbation fraction
*
* @return the default perturbation fraction
*/
protected double defaultPerturbationFraction() {
return 0.05;
}
/**
* Gets the perturbation fraction.
*
* @return the perturbation fraction.
*/
public double getPerturbationFraction() {
return m_PerturbationFraction;
}
/**
* Sets the perturbation fraction.
*
* @param value the perturbation fraction.
*/
public void setPerturbationFraction(double value) {
if ((value >= 0.0) && (value <= 1.0)) {
m_PerturbationFraction = value;
} else {
throw new IllegalArgumentException(
"Perturbation fraction must be in [0,1] (provided: " + value + ")!");
}
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String perturbationFractionTipText() {
return "The perturbation fraction: 0 <= fraction <= 1.";
}
/**
 * Returns whether single mode is set for the given data generator; the mode
 * depends on the option settings and/or the generator type.
*
* @return single mode flag
* @throws Exception if mode is not set yet
*/
@Override
public boolean getSingleModeFlag() throws Exception {
return true;
}
/**
* Initializes the format for the dataset produced. Must be called before the
* generateExample or generateExamples methods are used. Re-initializes the
* random number generator with the given seed.
*
* @return the format for the dataset
* @throws Exception if the generating of the format failed
* @see #getSeed()
*/
@Override
public Instances defineDataFormat() throws Exception {
ArrayList<Attribute> atts;
ArrayList<String> attValues;
int i;
m_Random = new Random(getSeed());
m_nextClassShouldBeZero = true;
m_lastLabel = Double.NaN;
// number of examples is the same as given per option
setNumExamplesAct(getNumExamples());
// set up attributes
atts = new ArrayList<Attribute>();
atts.add(new Attribute("salary"));
atts.add(new Attribute("commission"));
atts.add(new Attribute("age"));
attValues = new ArrayList<String>();
for (i = 0; i < 5; i++) {
attValues.add("" + i);
}
atts.add(new Attribute("elevel", attValues));
attValues = new ArrayList<String>();
for (i = 1; i <= 20; i++) {
attValues.add("" + i);
}
atts.add(new Attribute("car", attValues));
attValues = new ArrayList<String>();
for (i = 0; i < 9; i++) {
attValues.add("" + i);
}
atts.add(new Attribute("zipcode", attValues));
atts.add(new Attribute("hvalue"));
atts.add(new Attribute("hyears"));
atts.add(new Attribute("loan"));
attValues = new ArrayList<String>();
for (i = 0; i < 2; i++) {
attValues.add("" + i);
}
atts.add(new Attribute("group", attValues));
// dataset
m_DatasetFormat = new Instances(getRelationNameToUse(), atts, 0);
return m_DatasetFormat;
}
/**
* perturbs the given value
*
* @param val the value to perturb
* @param min the minimum
* @param max the maximum
* @return the perturbed value
*/
protected double perturbValue(double val, double min, double max) {
return perturbValue(val, max - min, min, max);
}
/**
* perturbs the given value
*
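 * The perturbed value is computed as val + range * 2 * (u - 0.5) *
 * perturbationFraction, where u is drawn uniformly from [0, 1]; the result is
 * then clamped to [min, max].
 *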
* @param val the value to perturb
* @param range the range for the perturbation
* @param min the minimum
* @param max the maximum
* @return the perturbed value
*/
protected double perturbValue(double val, double range, double min, double max) {
val += range * (2.0 * (getRandom().nextDouble() - 0.5))
* getPerturbationFraction();
if (val < min) {
val = min;
} else if (val > max) {
val = max;
}
return val;
}
/**
* Generates one example of the dataset.
*
* @return the generated example
* @throws Exception if the format of the dataset is not yet defined
* @throws Exception if the generator only works with generateExamples which
* means in non single mode
*/
@Override
public Instance generateExample() throws Exception {
Instance result;
double salary;
double commission;
double hvalue;
double loan;
int age;
int elevel;
int car;
int zipcode;
int hyears;
boolean desiredClassFound;
double[] atts;
Random random;
ClassFunction classFunction;
result = null;
random = getRandom();
if (m_DatasetFormat == null) {
throw new Exception("Dataset format not defined.");
}
salary = 0;
commission = 0;
hvalue = 0;
loan = 0;
age = 0;
elevel = 0;
car = 0;
zipcode = 0;
hyears = 0;
desiredClassFound = false;
classFunction = builtInFunctions[m_Function - 1];
while (!desiredClassFound) {
// generate attributes
salary = 20000.0 + 130000.0 * random.nextDouble();
commission = (salary >= 75000.0) ? 0 : (10000.0 + 65000.0 * random
.nextDouble());
age = 20 + random.nextInt(61);
elevel = random.nextInt(5);
car = 1 + random.nextInt(20);
zipcode = random.nextInt(9);
hvalue = (9.0 - zipcode) * 100000.0 * (0.5 + random.nextDouble());
hyears = 1 + random.nextInt(30);
loan = random.nextDouble() * 500000.0;
// determine class
m_lastLabel = classFunction.determineClass(salary, commission, age,
elevel, car, zipcode, hvalue, hyears, loan);
if (!getBalanceClass()) {
desiredClassFound = true;
} else {
// balance the classes
if ((m_nextClassShouldBeZero && (m_lastLabel == 0))
|| (!m_nextClassShouldBeZero && (m_lastLabel == 1))) {
desiredClassFound = true;
m_nextClassShouldBeZero = !m_nextClassShouldBeZero;
} // else keep searching
}
}
// perturb values
if (getPerturbationFraction() > 0.0) {
salary = perturbValue(salary, 20000, 150000);
if (commission > 0) {
commission = perturbValue(commission, 10000, 75000);
}
age = (int) Math.round(perturbValue(age, 20, 80));
hvalue = perturbValue(hvalue, (9.0 - zipcode) * 100000.0, 0, 135000);
hyears = (int) Math.round(perturbValue(hyears, 1, 30));
loan = perturbValue(loan, 0, 500000);
}
// create instance
atts = new double[m_DatasetFormat.numAttributes()];
atts[0] = salary;
atts[1] = commission;
atts[2] = age;
atts[3] = elevel;
atts[4] = car - 1;
atts[5] = zipcode;
atts[6] = hvalue;
atts[7] = hyears;
atts[8] = loan;
atts[9] = m_lastLabel;
result = new DenseInstance(1.0, atts);
result.setDataset(m_DatasetFormat);
return result;
}
/**
* Generates all examples of the dataset. Re-initializes the random number
* generator with the given seed, before generating instances.
*
* @return the generated dataset
* @throws Exception if the format of the dataset is not yet defined
* @throws Exception if the generator only works with generateExample, which
* means in single mode
* @see #getSeed()
*/
@Override
public Instances generateExamples() throws Exception {
Instances result;
int i;
result = new Instances(m_DatasetFormat, 0);
m_Random = new Random(getSeed());
for (i = 0; i < getNumExamplesAct(); i++) {
result.add(generateExample());
}
return result;
}
/**
 * Generates a comment string that documents the data generator. By default
 * this string is added at the beginning of the produced ARFF output, right
 * after the options.
 *
 * @return a string containing info about the generated rules
*/
@Override
public String generateStart() {
return "";
}
/**
 * Generates a comment string that documents the data generator. By default
 * this string is added at the end of the produced ARFF output.
 *
 * @return a string containing info about the generated rules
 * @throws Exception if generating the documentation fails
*/
@Override
public String generateFinished() throws Exception {
return "";
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for executing this class.
*
* @param args should contain arguments for the data producer:
*/
public static void main(String[] args) {
runDataGenerator(new Agrawal(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/classifiers/classification/BayesNet.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* BayesNet.java
* Copyright (C) 2005-2012,2015 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators.classifiers.classification;
import weka.classifiers.bayes.net.BayesNetGenerator;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.datagenerators.ClassificationGenerator;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
/**
<!-- globalinfo-start -->
* Generates random instances based on a Bayes network.
* <br><br>
<!-- globalinfo-end -->
*
<!-- options-start -->
* Valid options are: <p>
*
* <pre> -h
* Prints this help.</pre>
*
* <pre> -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.</pre>
*
* <pre> -r <name>
* The name of the relation.</pre>
*
* <pre> -d
 * Whether to print debug information.</pre>
*
* <pre> -S
* The seed for random function (default 1)</pre>
*
* <pre> -n <num>
* The number of examples to generate (default 100)</pre>
*
* <pre> -A <num>
* The number of arcs to use. (default 20)</pre>
*
* <pre> -N <num>
* The number of attributes to generate. (default 10)</pre>
*
* <pre> -C <num>
* The cardinality of the attributes and the class. (default 2)</pre>
*
<!-- options-end -->
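 *
 * A minimal programmatic sketch (the settings below are illustrative; this
 * generator only produces all instances at once, so generateExamples() is
 * used, and an enclosing method declaring "throws Exception" is assumed):
 *
 * <pre>
 * BayesNet generator = new BayesNet();
 * generator.setNumAttributes(10);
 * generator.setNumArcs(15);    // must lie between numAttributes - 1 and n * (n - 1) / 2
 * generator.setCardinality(2);
 * generator.setNumExamples(100);
 * generator.setDatasetFormat(generator.defineDataFormat());
 * Instances data = generator.generateExamples();
 * </pre>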
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
* @see BayesNetGenerator
*/
public class BayesNet extends ClassificationGenerator {
/** for serialization */
static final long serialVersionUID = -796118162379901512L;
/** the bayesian net generator, that produces the actual data */
protected BayesNetGenerator m_Generator;
/**
* initializes the generator
*/
public BayesNet() {
super();
setNumAttributes(defaultNumAttributes());
setNumArcs(defaultNumArcs());
setCardinality(defaultCardinality());
}
/**
* Returns a string describing this data generator.
*
* @return a description of the data generator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "Generates random instances based on a Bayes network.";
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = enumToVector(super.listOptions());
result.add(new Option("\tThe number of arcs to use. (default "
+ defaultNumArcs() + ")", "A", 1, "-A <num>"));
result.add(new Option("\tThe number of attributes to generate. (default "
+ defaultNumAttributes() + ")", "N", 1, "-N <num>"));
result.add(new Option(
"\tThe cardinality of the attributes and the class. (default "
+ defaultCardinality() + ")", "C", 1, "-C <num>"));
return result.elements();
}
/**
* Parses a list of options for this object.
* <p/>
*
<!-- options-start -->
* Valid options are: <p>
*
* <pre> -h
* Prints this help.</pre>
*
* <pre> -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.</pre>
*
* <pre> -r <name>
* The name of the relation.</pre>
*
* <pre> -d
 * Whether to print debug information.</pre>
*
* <pre> -S
* The seed for random function (default 1)</pre>
*
* <pre> -n <num>
* The number of examples to generate (default 100)</pre>
*
* <pre> -A <num>
* The number of arcs to use. (default 20)</pre>
*
* <pre> -N <num>
* The number of attributes to generate. (default 10)</pre>
*
* <pre> -C <num>
* The cardinality of the attributes and the class. (default 2)</pre>
*
<!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
Vector<String> list;
super.setOptions(options);
list = new Vector<String>();
list.add("-N");
tmpStr = Utils.getOption('N', options);
if (tmpStr.length() != 0) {
list.add(tmpStr);
} else {
list.add("" + defaultNumAttributes());
}
// handled via -n option
list.add("-M");
list.add("" + getNumExamples());
list.add("-S");
tmpStr = Utils.getOption('S', options);
if (tmpStr.length() != 0) {
list.add(tmpStr);
} else {
list.add("" + defaultSeed());
}
list.add("-A");
tmpStr = Utils.getOption('A', options);
if (tmpStr.length() != 0) {
list.add(tmpStr);
} else {
list.add("" + defaultNumArcs());
}
list.add("-C");
tmpStr = Utils.getOption('C', options);
if (tmpStr.length() != 0) {
list.add(tmpStr);
} else {
list.add("" + defaultCardinality());
}
setGeneratorOptions(list);
}
/**
* Gets the current settings of the datagenerator.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
String[] options = removeBlacklist(super.getOptions());
Collections.addAll(result, options);
// determine options from generator
options = getGenerator().getOptions();
result.add("-N");
result.add("" + getNumAttributes());
result.add("-S");
result.add("" + getSeed());
try {
result.add("-A");
result.add(Utils.getOption('A', options));
} catch (Exception e) {
e.printStackTrace();
}
try {
result.add("-C");
result.add(Utils.getOption('C', options));
} catch (Exception e) {
e.printStackTrace();
}
return result.toArray(new String[result.size()]);
}
/**
* sets the given options of the BayesNetGenerator
*
* @param generator the generator to set the options for
* @param options the options to set
*/
protected void setGeneratorOptions(BayesNetGenerator generator,
Vector<String> options) {
try {
generator.setOptions(options.toArray(new String[options.size()]));
} catch (Exception e) {
e.printStackTrace();
}
}
/**
* returns the actual datagenerator
*
* @return the actual datagenerator
*/
protected BayesNetGenerator getGenerator() {
if (m_Generator == null) {
m_Generator = new BayesNetGenerator();
}
return m_Generator;
}
/**
* sets the given options of the BayesNetGenerator
*
* @param options the options to set
*/
protected void setGeneratorOptions(Vector<String> options) {
setGeneratorOptions(getGenerator(), options);
}
/**
 * sets a specific option/value of the generator (the option must be given without the leading '-')
*
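 * For example (illustrative values), setGeneratorOption(generator, "A", "15")
 * has the same effect as passing "-A 15" to the underlying BayesNetGenerator.
 *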
* @param generator the generator to set the option for
* @param option the option to set
* @param value the new value for the option
*/
protected void setGeneratorOption(BayesNetGenerator generator, String option,
String value) {
String[] options;
Vector<String> list;
int i;
try {
// get options and remove specific option
options = generator.getOptions();
Utils.getOption(option, options);
// add option and set the new options
list = new Vector<String>();
for (i = 0; i < options.length; i++) {
if (options[i].length() != 0) {
list.add(options[i]);
}
}
list.add("-" + option);
list.add(value);
setGeneratorOptions(generator, list);
} catch (Exception e) {
e.printStackTrace();
}
}
/**
 * sets a specific option/value of the generator (the option must be given without the leading '-')
*
* @param option the option to set
* @param value the new value for the option
*/
protected void setGeneratorOption(String option, String value) {
setGeneratorOption(getGenerator(), option, value);
}
/**
* returns the default number of attributes
*
* @return the default number of attributes
*/
protected int defaultNumAttributes() {
return 10;
}
/**
* Sets the number of attributes the dataset should have.
*
* @param numAttributes the new number of attributes
*/
public void setNumAttributes(int numAttributes) {
setGeneratorOption("N", "" + numAttributes);
}
/**
* Gets the number of attributes that should be produced.
*
* @return the number of attributes that should be produced
*/
public int getNumAttributes() {
int result;
result = -1;
try {
result = Integer.parseInt(Utils.getOption('N', getGenerator()
.getOptions()));
} catch (Exception e) {
e.printStackTrace();
result = -1;
}
return result;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numAttributesTipText() {
return "The number of attributes the generated data will contain (including class attribute), ie the number of nodes in the bayesian net.";
}
/**
* returns the default cardinality
*
* @return the default cardinality
*/
protected int defaultCardinality() {
return 2;
}
/**
* Sets the cardinality of the attributes (incl class attribute)
*
* @param value the cardinality
*/
public void setCardinality(int value) {
setGeneratorOption("C", "" + value);
}
/**
* Gets the cardinality of the attributes (incl class attribute)
*
* @return the cardinality of the attributes
*/
public int getCardinality() {
int result;
result = -1;
try {
result = Integer.parseInt(Utils.getOption('C', getGenerator()
.getOptions()));
} catch (Exception e) {
e.printStackTrace();
result = -1;
}
return result;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String cardinalityTipText() {
return "The cardinality of the attributes, incl the class attribute.";
}
/**
* returns the default number of arcs
*
* @return the default number of arcs
*/
protected int defaultNumArcs() {
return 20;
}
/**
* Sets the number of arcs for the bayesian net
*
* @param value the number of arcs
*/
public void setNumArcs(int value) {
int nodes;
int minArcs;
int maxArcs;
nodes = getNumAttributes();
minArcs = nodes - 1;
maxArcs = nodes * (nodes - 1) / 2;
if (value > maxArcs) {
throw new IllegalArgumentException(
"Number of arcs should be at most nodes * (nodes - 1) / 2 = " + maxArcs
+ " instead of " + value + " (nodes = numAttributes)!");
} else if (value < minArcs) {
throw new IllegalArgumentException(
"Number of arcs should be at least (nodes - 1) = " + minArcs
+ " instead of " + value + " (nodes = numAttributes)!");
} else {
setGeneratorOption("A", "" + value);
}
}
/**
* Gets the number of arcs for the bayesian net
*
* @return the number of arcs
*/
public int getNumArcs() {
int result;
result = -1;
try {
result = Integer.parseInt(Utils.getOption('A', getGenerator()
.getOptions()));
} catch (Exception e) {
e.printStackTrace();
result = -1;
}
return result;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numArcsTipText() {
return "The number of arcs in the bayesian net, at most: n * (n - 1) / 2 and at least: (n - 1); with n = numAttributes";
}
/**
* Sets the number of examples, given by option.
*
* @param numExamples the new number of examples
*/
@Override
public void setNumExamples(int numExamples) {
super.setNumExamples(numExamples);
setGeneratorOption("M", "" + numExamples);
}
/**
* Gets the number of examples, given by option.
*
* @return the number of examples, given by option
*/
@Override
public int getNumExamples() {
int result;
result = -1;
try {
result = Integer.parseInt(Utils.getOption('M', getGenerator()
.getOptions()));
} catch (Exception e) {
e.printStackTrace();
result = -1;
}
return result;
}
/**
* Gets the random number seed.
*
* @return the random number seed.
*/
@Override
public int getSeed() {
int result;
result = -1;
try {
result = Integer.parseInt(Utils.getOption('S', getGenerator()
.getOptions()));
} catch (Exception e) {
e.printStackTrace();
result = -1;
}
return result;
}
/**
* Sets the random number seed.
*
* @param newSeed the new random number seed.
*/
@Override
public void setSeed(int newSeed) {
super.setSeed(newSeed);
setGeneratorOption("S", "" + newSeed);
}
/**
 * Returns whether single mode is set for the given data generator; the mode
 * depends on the option settings and/or the generator type.
*
* @return single mode flag
* @throws Exception if mode is not set yet
*/
@Override
public boolean getSingleModeFlag() throws Exception {
return false;
}
/**
* Initializes the format for the dataset produced. Must be called before the
* generateExample or generateExamples methods are used. Re-initializes the
* random number generator with the given seed.
*
* @return the format for the dataset
* @throws Exception if the generating of the format failed
* @see #getSeed()
*/
@Override
public Instances defineDataFormat() throws Exception {
BayesNetGenerator bng;
bng = new BayesNetGenerator();
bng.setOptions(getGenerator().getOptions());
setGeneratorOption(bng, "M", "1");
bng.generateRandomNetwork();
bng.generateInstances();
bng.m_Instances.renameAttribute(0, "class");
bng.m_Instances.setRelationName(getRelationNameToUse());
return bng.m_Instances;
}
/**
* Generates one example of the dataset.
*
* @return the generated example
* @throws Exception if the format of the dataset is not yet defined
* @throws Exception if the generator only works with generateExamples which
* means in non single mode
*/
@Override
public Instance generateExample() throws Exception {
throw new Exception("Cannot generate examples one-by-one!");
}
/**
* Generates all examples of the dataset. Re-initializes the random number
* generator with the given seed, before generating instances.
*
* @return the generated dataset
* @throws Exception if the format of the dataset is not yet defined
* @throws Exception if the generator only works with generateExample, which
* means in single mode
* @see #getSeed()
*/
@Override
public Instances generateExamples() throws Exception {
getGenerator().setOptions(getGenerator().getOptions());
getGenerator().generateRandomNetwork();
getGenerator().generateInstances();
getGenerator().m_Instances.renameAttribute(0, "class");
getGenerator().m_Instances.setRelationName(getRelationNameToUse());
return getGenerator().m_Instances;
}
/**
 * Generates a comment string that documents the data generator. By default
 * this string is added at the beginning of the produced ARFF output, right
 * after the options.
 *
 * @return a string containing info about the generated rules
*/
@Override
public String generateStart() {
return "";
}
/**
 * Generates a comment string that documents the data generator. By default
 * this string is added at the end of the produced ARFF output.
 *
 * @return a string containing info about the generated rules
 * @throws Exception if generating the documentation fails
*/
@Override
public String generateFinished() throws Exception {
return "";
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for executing this class.
*
* @param args should contain arguments for the data producer:
*/
public static void main(String[] args) {
runDataGenerator(new BayesNet(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/classifiers/classification/LED24.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* LED24.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators.classifiers.classification;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.datagenerators.ClassificationGenerator;
/**
* <!-- globalinfo-start --> This generator produces data for a display with 7
* LEDs. The original output consists of 10 concepts and 7 boolean attributes.
* Here, in addition to the 7 necessary boolean attributes, 17 other, irrelevant
* boolean attributes with random values are added to make it harder. By default
 * 10 percent of noise is added to the data.<br/>
* <br/>
* More information can be found here:<br/>
* L. Breiman J.H. Friedman R.A. Olshen, C.J. Stone (1984). Classification and
* Regression Trees. Belmont, California. URL
* http://www.ics.uci.edu/~mlearn/databases/led-display-creator/.
* <p/>
* <!-- globalinfo-end -->
*
* Link: <br/>
* <a
* href="http://www.ics.uci.edu/~mlearn/databases/led-display-creator/">http:/
* /www.ics.uci.edu/~mlearn/databases/led-display-creator/</a>
* <p/>
*
* <!-- technical-bibtex-start --> BibTeX:
*
* <pre>
* @inbook{Olshen1984,
* address = {Belmont, California},
* author = {L. Breiman J.H. Friedman R.A. Olshen and C.J. Stone},
* pages = {43-49},
* publisher = {Wadsworth International Group},
* title = {Classification and Regression Trees},
* year = {1984},
* ISBN = {0412048418},
* URL = {http://www.ics.uci.edu/\~mlearn/databases/led-display-creator/}
* }
* </pre>
* <p/>
* <!-- technical-bibtex-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
 * Whether to print debug information.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -n <num>
* The number of examples to generate (default 100)
* </pre>
*
* <pre>
* -N <num>
* The noise percentage. (default 10.0)
* </pre>
*
* <!-- options-end -->
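 *
 * A minimal programmatic sketch (the settings below are illustrative; an
 * enclosing method declaring "throws Exception" is assumed):
 *
 * <pre>
 * LED24 generator = new LED24();
 * generator.setNoisePercent(10.0);
 * generator.setNumExamples(500);
 * generator.setDatasetFormat(generator.defineDataFormat());
 * Instances data = generator.generateExamples();
 * </pre>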
*
* @author Richard Kirkby (rkirkby at cs dot waikato dot ac dot nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class LED24 extends ClassificationGenerator implements
TechnicalInformationHandler {
/** for serialization */
static final long serialVersionUID = -7880209100415868737L;
/** the noise rate */
protected double m_NoisePercent;
/** the 7-bit LEDs */
protected static final int m_originalInstances[][] = {
{ 1, 1, 1, 0, 1, 1, 1 }, { 0, 0, 1, 0, 0, 1, 0 }, { 1, 0, 1, 1, 1, 0, 1 },
{ 1, 0, 1, 1, 0, 1, 1 }, { 0, 1, 1, 1, 0, 1, 0 }, { 1, 1, 0, 1, 0, 1, 1 },
{ 1, 1, 0, 1, 1, 1, 1 }, { 1, 0, 1, 0, 0, 1, 0 }, { 1, 1, 1, 1, 1, 1, 1 },
{ 1, 1, 1, 1, 0, 1, 1 } };
/** used for generating the output, i.e., the additional noise attributes */
protected int m_numIrrelevantAttributes = 17;
/**
* initializes the generator with default values
*/
public LED24() {
super();
setNoisePercent(defaultNoisePercent());
}
/**
* Returns a string describing this data generator.
*
* @return a description of the data generator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "This generator produces data for a display with 7 LEDs. The original "
+ "output consists of 10 concepts and 7 boolean attributes. Here, in "
+ "addition to the 7 necessary boolean attributes, 17 other, irrelevant "
+ "boolean attributes with random values are added to make it harder. "
+ "By default 10 percent of noise are added to the data.\n"
+ "\n"
+ "More information can be found here:\n"
+ getTechnicalInformation().toString();
}
/**
* Returns an instance of a TechnicalInformation object, containing detailed
* information about the technical background of this class, e.g., paper
* reference or book this class is based on.
*
* @return the technical information about this class
*/
@Override
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(Type.INBOOK);
result.setValue(Field.AUTHOR,
"L. Breiman J.H. Friedman R.A. Olshen and C.J. Stone");
result.setValue(Field.YEAR, "1984");
result.setValue(Field.TITLE, "Classification and Regression Trees");
result.setValue(Field.PUBLISHER, "Wadsworth International Group");
result.setValue(Field.ADDRESS, "Belmont, California");
result.setValue(Field.PAGES, "43-49");
result.setValue(Field.ISBN, "0412048418");
result.setValue(Field.URL,
"http://www.ics.uci.edu/~mlearn/databases/led-display-creator/");
return result;
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = enumToVector(super.listOptions());
result.add(new Option("\tThe noise percentage. (default "
+ defaultNoisePercent() + ")", "N", 1, "-N <num>"));
return result.elements();
}
/**
* Parses a list of options for this object.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
 * Whether to print debug information.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -n <num>
* The number of examples to generate (default 100)
* </pre>
*
* <pre>
* -N <num>
* The noise percentage. (default 10.0)
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
super.setOptions(options);
tmpStr = Utils.getOption('N', options);
if (tmpStr.length() != 0) {
setNoisePercent(Double.parseDouble(tmpStr));
} else {
setNoisePercent(defaultNoisePercent());
}
}
/**
* Gets the current settings of the datagenerator.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result;
String[] options;
int i;
result = new Vector<String>();
options = super.getOptions();
for (i = 0; i < options.length; i++) {
result.add(options[i]);
}
result.add("-N");
result.add("" + getNoisePercent());
return result.toArray(new String[result.size()]);
}
/**
* returns the default noise percentage
*
* @return the default noise percentage
*/
protected double defaultNoisePercent() {
return 10;
}
/**
* Gets the noise percentage.
*
* @return the noise percentage.
*/
public double getNoisePercent() {
return m_NoisePercent;
}
/**
* Sets the noise percentage.
*
* @param value the noise percentage.
*/
public void setNoisePercent(double value) {
if ((value >= 0.0) && (value <= 100.0)) {
m_NoisePercent = value;
} else {
throw new IllegalArgumentException(
"Noise percent must be in [0,100] (provided: " + value + ")!");
}
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String noisePercentTipText() {
return "The noise percent: 0 <= perc <= 100.";
}
/**
 * Returns whether single mode is set for the given data generator; the mode
 * depends on the option settings and/or the generator type.
*
* @return single mode flag
* @throws Exception if mode is not set yet
*/
@Override
public boolean getSingleModeFlag() throws Exception {
return true;
}
/**
* Initializes the format for the dataset produced. Must be called before the
* generateExample or generateExamples methods are used. Re-initializes the
* random number generator with the given seed.
*
* @return the format for the dataset
* @throws Exception if the generating of the format failed
* @see #getSeed()
*/
@Override
public Instances defineDataFormat() throws Exception {
ArrayList<Attribute> atts;
ArrayList<String> attValues;
int i;
int n;
m_Random = new Random(getSeed());
// number of examples is the same as given per option
setNumExamplesAct(getNumExamples());
// set up attributes
atts = new ArrayList<Attribute>();
for (n = 1; n <= 24; n++) {
attValues = new ArrayList<String>();
for (i = 0; i < 2; i++) {
attValues.add("" + i);
}
atts.add(new Attribute("att" + n, attValues));
}
attValues = new ArrayList<String>();
for (i = 0; i < 10; i++) {
attValues.add("" + i);
}
atts.add(new Attribute("class", attValues));
// dataset
m_DatasetFormat = new Instances(getRelationNameToUse(), atts, 0);
return m_DatasetFormat;
}
/**
* Generates one example of the dataset.
*
* @return the generated example
* @throws Exception if the format of the dataset is not yet defined
* @throws Exception if the generator only works with generateExamples which
* means in non single mode
*/
@Override
public Instance generateExample() throws Exception {
Instance result;
double[] atts;
int i;
int selected;
Random random;
result = null;
random = getRandom();
if (m_DatasetFormat == null) {
throw new Exception("Dataset format not defined.");
}
atts = new double[m_DatasetFormat.numAttributes()];
selected = random.nextInt(10);
for (i = 0; i < 7; i++) {
if ((1 + (random.nextInt(100))) <= getNoisePercent()) {
atts[i] = m_originalInstances[selected][i] == 0 ? 1 : 0;
} else {
atts[i] = m_originalInstances[selected][i];
}
}
for (i = 0; i < m_numIrrelevantAttributes; i++) {
atts[i + 7] = random.nextInt(2);
}
atts[atts.length - 1] = selected;
// create instance
result = new DenseInstance(1.0, atts);
result.setDataset(m_DatasetFormat);
return result;
}
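  /*
   * Illustrative note (not part of the original generator): the check
   * "(1 + random.nextInt(100)) <= getNoisePercent()" above draws a value
   * uniformly from 1..100, so with the default noise percentage of 10.0 each
   * of the seven segment attributes is inverted with probability 10/100 = 0.1.
   * A minimal sketch of the same flip logic, assuming a java.util.Random r and
   * an original segment value taken from m_originalInstances:
   *
   *   boolean flip = (1 + r.nextInt(100)) <= 10.0; // true with probability 0.1
   *   double segment = flip ? (original == 0 ? 1 : 0) : original;
   */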
/**
* Generates all examples of the dataset. Re-initializes the random number
* generator with the given seed, before generating instances.
*
* @return the generated dataset
* @throws Exception if the format of the dataset is not yet defined
* @throws Exception if the generator only works with generateExample, which
* means in single mode
* @see #getSeed()
*/
@Override
public Instances generateExamples() throws Exception {
Instances result;
int i;
result = new Instances(m_DatasetFormat, 0);
m_Random = new Random(getSeed());
for (i = 0; i < getNumExamplesAct(); i++) {
result.add(generateExample());
}
return result;
}
/**
   * Generates a comment string that documents the data generator. By default
* this string is added at the beginning of the produced output as ARFF file
* type, next after the options.
*
* @return string contains info about the generated rules
*/
@Override
public String generateStart() {
return "";
}
/**
   * Generates a comment string that documents the data generator. By default
   * this string is added at the end of the produced output as ARFF file type.
*
* @return string contains info about the generated rules
   * @throws Exception if the generating of the documentation fails
*/
@Override
public String generateFinished() throws Exception {
return "";
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for executing this class.
*
* @param args should contain arguments for the data producer:
*/
public static void main(String[] args) {
runDataGenerator(new LED24(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/classifiers/classification/RDG1.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RDG1.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators.classifiers.classification;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.WekaEnumeration;
import weka.datagenerators.ClassificationGenerator;
import weka.datagenerators.Test;
/**
* <!-- globalinfo-start --> A data generator that produces data randomly by
* producing a decision list.<br/>
* The decision list consists of rules.<br/>
* Instances are generated randomly one by one. If decision list fails to
* classify the current instance, a new rule according to this current instance
* is generated and added to the decision list.<br/>
* <br/>
* The option -V switches on voting, which means that at the end of the
* generation all instances are reclassified to the class value that is
* supported by the most rules.<br/>
* <br/>
* This data generator can generate 'boolean' attributes (= nominal with the
* values {true, false}) and numeric attributes. The rules can be 'A' or 'NOT A'
* for boolean values and 'B < random_value' or 'B >= random_value' for
* numeric values.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
* Whether to print debug informations.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -n <num>
* The number of examples to generate (default 100)
* </pre>
*
* <pre>
* -a <num>
* The number of attributes (default 10).
* </pre>
*
* <pre>
* -c <num>
* The number of classes (default 2)
* </pre>
*
* <pre>
* -R <num>
* maximum size for rules (default 10)
* </pre>
*
* <pre>
* -M <num>
* minimum size for rules (default 1)
* </pre>
*
* <pre>
* -I <num>
* number of irrelevant attributes (default 0)
* </pre>
*
* <pre>
* -N
* number of numeric attributes (default 0)
* </pre>
*
* <pre>
* -V
* switch on voting (default is no voting)
* </pre>
*
* <!-- options-end -->
*
 * The following is an example of a generated dataset: <br/>
*
* <pre>
* %
* % weka.datagenerators.RDG1 -r expl -a 2 -c 3 -n 4 -N 1 -I 0 -M 2 -R 10 -S 2
* %
* relation expl
*
* attribute a0 {false,true}
* attribute a1 numeric
* attribute class {c0,c1,c2}
*
* data
*
* true,0.496823,c0
* false,0.743158,c1
* false,0.408285,c1
* false,0.993687,c2
* %
* % Number of attributes chosen as irrelevant = 0
* %
* % DECISIONLIST (number of rules = 3):
* % RULE 0: c0 := a1 < 0.986, a0
* % RULE 1: c1 := a1 < 0.95, not(a0)
* % RULE 2: c2 := not(a0), a1 >= 0.562
* </pre>
*
* @author Gabi Schmidberger (gabi@cs.waikato.ac.nz)
* @version $Revision$
*/
public class RDG1 extends ClassificationGenerator {
/** for serialization */
static final long serialVersionUID = 7751005204635320414L;
/**
   * Class representing the decision list.
*/
private class RuleList implements Serializable, RevisionHandler {
/** for serialization */
static final long serialVersionUID = 2830125413361938177L;
/** rule list */
private ArrayList<Test> m_RuleList = null;
/** class */
double m_ClassValue = 0.0;
/**
* returns the class value
*
* @return the class value
*/
public double getClassValue() {
return m_ClassValue;
}
/**
* sets the class value
*
     * @param newClassValue the new class value
*/
public void setClassValue(double newClassValue) {
m_ClassValue = newClassValue;
}
/**
* adds the given test to the list
*
* @param newTest the test to add
*/
private void addTest(Test newTest) {
if (m_RuleList == null) {
m_RuleList = new ArrayList<Test>();
}
m_RuleList.add(newTest);
}
/**
* classifies the given example
*
* @param example the instance to classify
* @return the classification
* @throws Exception if classification fails
*/
private double classifyInstance(Instance example) throws Exception {
boolean passedAllTests = true;
for (Enumeration<Test> e = new WekaEnumeration<Test>(m_RuleList); passedAllTests
&& e.hasMoreElements();) {
Test test = e.nextElement();
passedAllTests = test.passesTest(example);
}
if (passedAllTests) {
return m_ClassValue;
} else {
return -1.0;
}
}
/**
* returns a string representation of the rule list
*
* @return the rule list as string
*/
@Override
public String toString() {
StringBuffer str = new StringBuffer();
str = str.append(" c" + (int) m_ClassValue + " := ");
Enumeration<Test> e = new WekaEnumeration<Test>(m_RuleList);
if (e.hasMoreElements()) {
Test test = e.nextElement();
str = str.append(test.toPrologString());
}
while (e.hasMoreElements()) {
Test test = e.nextElement();
str = str.append(", " + test.toPrologString());
}
return str.toString();
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
} /* end class RuleList ***** */
  /** Number of attributes the dataset should have */
protected int m_NumAttributes;
/** Number of Classes the dataset should have */
protected int m_NumClasses;
/** maximum rule size */
private int m_MaxRuleSize;
/** minimum rule size */
private int m_MinRuleSize;
/** number of irrelevant attributes. */
private int m_NumIrrelevant;
  /** number of numeric attributes */
private int m_NumNumeric;
  /** flag that stores whether voting is desired */
private boolean m_VoteFlag = false;
/** decision list */
private ArrayList<RuleList> m_DecisionList = null;
/**
* array defines which attributes are irrelevant, with: true = attribute is
* irrelevant; false = attribute is not irrelevant
*/
boolean[] m_AttList_Irr;
/**
* initializes the generator with default values
*/
public RDG1() {
super();
setNumAttributes(defaultNumAttributes());
setNumClasses(defaultNumClasses());
setMaxRuleSize(defaultMaxRuleSize());
setMinRuleSize(defaultMinRuleSize());
setNumIrrelevant(defaultNumIrrelevant());
setNumNumeric(defaultNumNumeric());
}
/**
* Returns a string describing this data generator.
*
* @return a description of the data generator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "A data generator that produces data randomly by producing a decision list.\n"
+ "The decision list consists of rules.\n"
+ "Instances are generated randomly one by one. If decision list fails "
+ "to classify the current instance, a new rule according to this current "
+ "instance is generated and added to the decision list.\n\n"
+ "The option -V switches on voting, which means that at the end "
+ "of the generation all instances are "
+ "reclassified to the class value that is supported by the most rules.\n\n"
+ "This data generator can generate 'boolean' attributes (= nominal with "
+ "the values {true, false}) and numeric attributes. The rules can be "
+ "'A' or 'NOT A' for boolean values and 'B < random_value' or "
+ "'B >= random_value' for numeric values.";
}
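  /*
   * A minimal usage sketch (illustrative only, assuming the generator is
   * driven programmatically rather than via main()/runDataGenerator):
   *
   *   RDG1 gen = new RDG1();
   *   gen.setNumAttributes(5);   // same effect as -a 5
   *   gen.setNumClasses(3);      // same effect as -c 3
   *   gen.setVoteFlag(false);    // no -V, so single-mode generation stays possible
   *   gen.defineDataFormat();    // builds the header, resets the RNG and the (empty) decision list
   *   Instances data = gen.generateExamples();
   */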
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = enumToVector(super.listOptions());
result.addElement(new Option("\tThe number of attributes (default "
+ defaultNumAttributes() + ").", "a", 1, "-a <num>"));
result.addElement(new Option("\tThe number of classes (default "
+ defaultNumClasses() + ")", "c", 1, "-c <num>"));
result.addElement(new Option("\tmaximum size for rules (default "
+ defaultMaxRuleSize() + ") ", "R", 1, "-R <num>"));
result.addElement(new Option("\tminimum size for rules (default "
+ defaultMinRuleSize() + ") ", "M", 1, "-M <num>"));
result.addElement(new Option("\tnumber of irrelevant attributes (default "
+ defaultNumIrrelevant() + ")", "I", 1, "-I <num>"));
result.addElement(new Option("\tnumber of numeric attributes (default "
+ defaultNumNumeric() + ")", "N", 1, "-N"));
result.addElement(new Option("\tswitch on voting (default is no voting)",
"V", 1, "-V"));
return result.elements();
}
/**
* Parses a list of options for this object.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
* Whether to print debug informations.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -n <num>
* The number of examples to generate (default 100)
* </pre>
*
* <pre>
* -a <num>
* The number of attributes (default 10).
* </pre>
*
* <pre>
* -c <num>
* The number of classes (default 2)
* </pre>
*
* <pre>
* -R <num>
* maximum size for rules (default 10)
* </pre>
*
* <pre>
* -M <num>
* minimum size for rules (default 1)
* </pre>
*
* <pre>
* -I <num>
* number of irrelevant attributes (default 0)
* </pre>
*
* <pre>
* -N
* number of numeric attributes (default 0)
* </pre>
*
* <pre>
* -V
* switch on voting (default is no voting)
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
super.setOptions(options);
tmpStr = Utils.getOption('a', options);
if (tmpStr.length() != 0) {
setNumAttributes(Integer.parseInt(tmpStr));
} else {
setNumAttributes(defaultNumAttributes());
}
tmpStr = Utils.getOption('c', options);
if (tmpStr.length() != 0) {
setNumClasses(Integer.parseInt(tmpStr));
} else {
setNumClasses(defaultNumClasses());
}
tmpStr = Utils.getOption('R', options);
if (tmpStr.length() != 0) {
setMaxRuleSize(Integer.parseInt(tmpStr));
} else {
setMaxRuleSize(defaultMaxRuleSize());
}
tmpStr = Utils.getOption('M', options);
if (tmpStr.length() != 0) {
setMinRuleSize(Integer.parseInt(tmpStr));
} else {
setMinRuleSize(defaultMinRuleSize());
}
tmpStr = Utils.getOption('I', options);
if (tmpStr.length() != 0) {
setNumIrrelevant(Integer.parseInt(tmpStr));
} else {
setNumIrrelevant(defaultNumIrrelevant());
}
if ((getNumAttributes() - getNumIrrelevant()) < getMinRuleSize()) {
throw new Exception("Possible rule size is below minimal rule size.");
}
tmpStr = Utils.getOption('N', options);
if (tmpStr.length() != 0) {
setNumNumeric(Integer.parseInt(tmpStr));
} else {
setNumNumeric(defaultNumNumeric());
}
setVoteFlag(Utils.getFlag('V', options));
}
/**
* Gets the current settings of the datagenerator RDG1.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
Collections.addAll(result, super.getOptions());
result.add("-a");
result.add("" + getNumAttributes());
result.add("-c");
result.add("" + getNumClasses());
result.add("-N");
result.add("" + getNumNumeric());
result.add("-I");
result.add("" + getNumIrrelevant());
result.add("-M");
result.add("" + getMinRuleSize());
result.add("-R");
result.add("" + getMaxRuleSize());
if (getVoteFlag()) {
result.add("-V");
}
return result.toArray(new String[result.size()]);
}
/**
* returns the default number of attributes
*
* @return the default number of attributes
*/
protected int defaultNumAttributes() {
return 10;
}
/**
* Sets the number of attributes the dataset should have.
*
* @param numAttributes the new number of attributes
*/
public void setNumAttributes(int numAttributes) {
m_NumAttributes = numAttributes;
}
/**
* Gets the number of attributes that should be produced.
*
* @return the number of attributes that should be produced
*/
public int getNumAttributes() {
return m_NumAttributes;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numAttributesTipText() {
return "The number of attributes the generated data will contain.";
}
/**
* returns the default number of classes
*
* @return the default number of classes
*/
protected int defaultNumClasses() {
return 2;
}
/**
* Sets the number of classes the dataset should have.
*
* @param numClasses the new number of classes
*/
public void setNumClasses(int numClasses) {
m_NumClasses = numClasses;
}
/**
* Gets the number of classes the dataset should have.
*
* @return the number of classes the dataset should have
*/
public int getNumClasses() {
return m_NumClasses;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numClassesTipText() {
return "The number of classes to generate.";
}
/**
* returns the default max size of rules
*
* @return the default max size of rules
*/
protected int defaultMaxRuleSize() {
return 10;
}
/**
* Gets the maximum number of tests in rules.
*
* @return the maximum number of tests allowed in rules
*/
public int getMaxRuleSize() {
return m_MaxRuleSize;
}
/**
* Sets the maximum number of tests in rules.
*
* @param newMaxRuleSize new maximum number of tests allowed in rules.
*/
public void setMaxRuleSize(int newMaxRuleSize) {
m_MaxRuleSize = newMaxRuleSize;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String maxRuleSizeTipText() {
return "The maximum number of tests in rules.";
}
/**
* returns the default min size of rules
*
* @return the default min size of rules
*/
protected int defaultMinRuleSize() {
return 1;
}
/**
* Gets the minimum number of tests in rules.
*
* @return the minimum number of tests allowed in rules
*/
public int getMinRuleSize() {
return m_MinRuleSize;
}
/**
* Sets the minimum number of tests in rules.
*
   * @param newMinRuleSize new minimum number of tests in rules.
*/
public void setMinRuleSize(int newMinRuleSize) {
m_MinRuleSize = newMinRuleSize;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String minRuleSizeTipText() {
return "The minimum number of tests in rules.";
}
/**
* returns the default number of irrelevant attributes
*
* @return the default number of irrelevant attributes
*/
protected int defaultNumIrrelevant() {
return 0;
}
/**
* Gets the number of irrelevant attributes.
*
* @return the number of irrelevant attributes
*/
public int getNumIrrelevant() {
return m_NumIrrelevant;
}
/**
* Sets the number of irrelevant attributes.
*
* @param newNumIrrelevant the number of irrelevant attributes.
*/
public void setNumIrrelevant(int newNumIrrelevant) {
m_NumIrrelevant = newNumIrrelevant;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numIrrelevantTipText() {
return "The number of irrelevant attributes.";
}
/**
* returns the default number of numeric attributes
*
* @return the default number of numeric attributes
*/
protected int defaultNumNumeric() {
return 0;
}
/**
* Gets the number of numerical attributes.
*
* @return the number of numerical attributes.
*/
public int getNumNumeric() {
return m_NumNumeric;
}
/**
* Sets the number of numerical attributes.
*
* @param newNumNumeric the number of numerical attributes.
*/
public void setNumNumeric(int newNumNumeric) {
m_NumNumeric = newNumNumeric;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numNumericTipText() {
return "The number of numerical attributes.";
}
/**
* Gets the vote flag.
*
* @return voting flag.
*/
public boolean getVoteFlag() {
return m_VoteFlag;
}
/**
* Sets the vote flag.
*
* @param newVoteFlag boolean with the new setting of the vote flag.
*/
public void setVoteFlag(boolean newVoteFlag) {
m_VoteFlag = newVoteFlag;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String voteFlagTipText() {
return "Whether to use voting or not.";
}
/**
* Gets the single mode flag.
*
   * @return true if the method generateExample can be used.
*/
@Override
public boolean getSingleModeFlag() {
return (!getVoteFlag());
}
/**
* Gets the array that defines which of the attributes are seen to be
* irrelevant.
*
* @return the array that defines the irrelevant attributes
*/
public boolean[] getAttList_Irr() {
return m_AttList_Irr;
}
/**
* Sets the array that defines which of the attributes are seen to be
* irrelevant.
*
* @param newAttList_Irr array that defines the irrelevant attributes.
*/
public void setAttList_Irr(boolean[] newAttList_Irr) {
m_AttList_Irr = newAttList_Irr;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String attList_IrrTipText() {
return "The array with the indices of the irrelevant attributes.";
}
/**
* Initializes the format for the dataset produced.
*
* @return the output data format
   * @throws Exception if the data format could not be defined
*/
@Override
public Instances defineDataFormat() throws Exception {
Instances dataset;
Random random = new Random(getSeed());
setRandom(random);
m_DecisionList = new ArrayList<RuleList>();
// number of examples is the same as given per option
setNumExamplesAct(getNumExamples());
// define dataset
dataset = defineDataset(random);
return dataset;
}
/**
   * Generates an example of the dataset.
*
* @return the instance generated
* @throws Exception if format not defined or generating <br/>
* examples one by one is not possible, because voting is chosen
*/
@Override
public Instance generateExample() throws Exception {
Random random = getRandom();
Instances format = getDatasetFormat();
if (format == null) {
throw new Exception("Dataset format not defined.");
}
if (getVoteFlag()) {
throw new Exception("Examples cannot be generated one by one.");
}
// generate values for all attributes
format = generateExamples(1, random, format);
return format.lastInstance();
}
/**
   * Generates all examples of the dataset.
   *
   * @return the generated dataset
* @throws Exception if format not defined or generating <br/>
* examples one by one is not possible, because voting is chosen
*/
@Override
public Instances generateExamples() throws Exception {
Random random = getRandom();
Instances format = getDatasetFormat();
if (format == null) {
throw new Exception("Dataset format not defined.");
}
// generate values for all attributes
format = generateExamples(getNumExamplesAct(), random, format);
// vote all examples, and set new class value
if (getVoteFlag()) {
format = voteDataset(format);
}
return format;
}
/**
   * Generates all examples of the dataset.
*
* @param num the number of examples to generate
* @param random the random number generator to use
* @param format the dataset format
   * @return the generated dataset
* @throws Exception if format not defined or generating <br/>
* examples one by one is not possible, because voting is chosen
*/
public Instances generateExamples(int num, Random random, Instances format)
throws Exception {
if (format == null) {
throw new Exception("Dataset format not defined.");
}
// generate values for all attributes
for (int i = 0; i < num; i++) {
// over all examples to be produced
Instance example = generateExample(random, format);
// set class of example using decision list
boolean classDefined = classifyExample(example);
if (!classDefined) {
// set class with newly generated rule
example = updateDecisionList(random, example);
}
example.setDataset(format);
format.add(example);
}
return (format);
}
/**
   * Generates a new rule for the decision list and classifies the new example.
*
* @param random random number generator
* @param example example used to update decision list
* @return the classified example
* @throws Exception if dataset format not defined
*/
private Instance updateDecisionList(Random random, Instance example)
throws Exception {
ArrayList<Test> TestList;
Instances format = getDatasetFormat();
if (format == null) {
throw new Exception("Dataset format not defined.");
}
TestList = generateTestList(random, example);
int maxSize = getMaxRuleSize() < TestList.size() ? getMaxRuleSize()
: TestList.size();
int ruleSize = ((int) (random.nextDouble() * (maxSize - getMinRuleSize())))
+ getMinRuleSize();
RuleList newRule = new RuleList();
for (int i = 0; i < ruleSize; i++) {
int testIndex = (int) (random.nextDouble() * TestList.size());
Test test = TestList.get(testIndex);
newRule.addTest(test);
TestList.remove(testIndex);
}
double newClassValue = 0.0;
if (m_DecisionList.size() > 0) {
RuleList r = (m_DecisionList.get(m_DecisionList.size() - 1));
double oldClassValue = (r.getClassValue());
newClassValue = (double) ((int) oldClassValue + 1) % getNumClasses();
}
newRule.setClassValue(newClassValue);
m_DecisionList.add(newRule);
example = (Instance) example.copy();
example.setDataset(format);
example.setClassValue(newClassValue);
return example;
}
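  /*
   * Illustrative note (not part of the original code): the class value of a
   * newly added rule simply cycles through the classes, i.e.
   * newClassValue = (previousRuleClass + 1) % getNumClasses(). With three
   * classes the rules therefore receive the classes c0, c1, c2, c0, ... in
   * the order in which they are appended to the decision list.
   */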
/**
   * Generates the list of candidate tests for a new decision list rule,
   * derived from the given example.
*
* @param random random number generator
* @param example the instance to classify
* @return a list of tests
* @throws Exception if dataset format not defined
*/
private ArrayList<Test> generateTestList(Random random, Instance example)
throws Exception {
Instances format = getDatasetFormat();
if (format == null) {
throw new Exception("Dataset format not defined.");
}
int numTests = getNumAttributes() - getNumIrrelevant();
ArrayList<Test> TestList = new ArrayList<Test>(numTests);
boolean[] irrelevant = getAttList_Irr();
for (int i = 0; i < getNumAttributes(); i++) {
if (!irrelevant[i]) {
Test newTest = null;
Attribute att = example.attribute(i);
if (att.isNumeric()) {
double newSplit = random.nextDouble();
boolean newNot = newSplit < example.value(i);
newTest = new Test(i, newSplit, format, newNot);
} else {
newTest = new Test(i, example.value(i), format, false);
}
TestList.add(newTest);
}
}
return TestList;
}
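  /*
   * Illustrative note (not part of the original code): every candidate test is
   * apparently constructed so that the given example satisfies it. For a
   * numeric attribute a random split point in [0,1) is drawn and the "not"
   * flag is set exactly when the example's value lies above the split; for a
   * nominal (boolean) attribute the test fixes the value observed in the
   * example. Irrelevant attributes are skipped entirely.
   */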
/**
   * Generates an example with its class value set to missing and binds it to
   * the dataset.
*
* @param random random number generator
   * @param format dataset the example gets bound to
* @return the generated example
* @throws Exception if attribute type not supported
*/
private Instance generateExample(Random random, Instances format)
throws Exception {
double[] attributes;
Instance example;
attributes = new double[getNumAttributes() + 1];
for (int i = 0; i < getNumAttributes(); i++) {
double value = random.nextDouble();
if (format.attribute(i).isNumeric()) {
attributes[i] = value;
} else {
if (format.attribute(i).isNominal()) {
attributes[i] = (value > 0.5) ? 1.0 : 0.0;
} else {
throw new Exception("Attribute type is not supported.");
}
}
}
example = new DenseInstance(1.0, attributes);
example.setDataset(format);
example.setClassMissing();
return example;
}
/**
* Tries to classify an example.
*
* @param example the example to classify
* @return true if it could be classified
* @throws Exception if something goes wrong
*/
private boolean classifyExample(Instance example) throws Exception {
double classValue = -1.0;
for (Enumeration<RuleList> e = new WekaEnumeration<RuleList>(m_DecisionList); e
.hasMoreElements() && classValue < 0.0;) {
RuleList rl = e.nextElement();
classValue = rl.classifyInstance(example);
}
if (classValue >= 0.0) {
example.setClassValue(classValue);
return true;
} else {
return false;
}
}
/**
   * Classifies an example by maximum vote in the following way: for every rule
   * in the decision list it is evaluated whether the given instance satisfies
   * the rule and could receive the rule's class. Finally the class value that
   * receives the highest number of votes is assigned to the example.
*
* @param example example to be reclassified
* @return instance with new class value
* @throws Exception if classification fails
*/
private Instance votedReclassifyExample(Instance example) throws Exception {
int classVotes[] = new int[getNumClasses()];
for (int i = 0; i < classVotes.length; i++) {
classVotes[i] = 0;
}
for (Enumeration<RuleList> e = new WekaEnumeration<RuleList>(m_DecisionList); e
.hasMoreElements();) {
RuleList rl = e.nextElement();
int classValue = (int) rl.classifyInstance(example);
if (classValue >= 0) {
classVotes[classValue]++;
}
}
int maxVote = 0;
int vote = -1;
for (int i = 0; i < classVotes.length; i++) {
if (classVotes[i] > maxVote) {
maxVote = classVotes[i];
vote = i;
}
}
if (vote >= 0) {
example.setClassValue(vote);
} else {
throw new Exception("Error in instance classification.");
}
return example;
}
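  /*
   * Worked example (illustrative only): with getNumClasses() == 3 and vote
   * counts classVotes = {2, 5, 1}, the maximum is found at index 1, so the
   * example is reassigned the class value 1 (i.e. c1). If no rule in the
   * decision list matches at all, every count stays 0, vote remains -1 and the
   * method throws an Exception.
   */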
/**
* Returns a dataset header.
*
* @param random random number generator
* @return dataset header
* @throws Exception if something goes wrong
*/
private Instances defineDataset(Random random) throws Exception {
boolean[] attList_Irr;
int[] attList_Num;
ArrayList<Attribute> attributes = new ArrayList<Attribute>();
Attribute attribute;
ArrayList<String> nominalValues = new ArrayList<String>(2);
nominalValues.add("false");
nominalValues.add("true");
ArrayList<String> classValues = new ArrayList<String>(getNumClasses());
Instances dataset;
// set randomly those attributes that are irrelevant
attList_Irr = defineIrrelevant(random);
setAttList_Irr(attList_Irr);
// set randomly those attributes that are numeric
attList_Num = defineNumeric(random);
// define dataset
for (int i = 0; i < getNumAttributes(); i++) {
if (attList_Num[i] == Attribute.NUMERIC) {
attribute = new Attribute("a" + i);
} else {
attribute = new Attribute("a" + i, nominalValues);
}
attributes.add(attribute);
}
for (int i = 0; i < getNumClasses(); i++) {
classValues.add("c" + i);
}
attribute = new Attribute("class", classValues);
attributes.add(attribute);
dataset = new Instances(getRelationNameToUse(), attributes,
getNumExamplesAct());
dataset.setClassIndex(getNumAttributes());
// set dataset format of this class
Instances format = new Instances(dataset, 0);
setDatasetFormat(format);
return dataset;
}
/**
   * Randomly defines the attributes as irrelevant. The number of attributes to
   * be set as irrelevant is either set with a preceding call of
   * setNumIrrelevant() or is 0 by default.
*
* @param random the random number generator to use
   * @return list of boolean values with one value for each attribute, and each
   *         value set to true or false according to whether the corresponding
   *         attribute was defined as irrelevant or not
*/
private boolean[] defineIrrelevant(Random random) {
boolean[] irr = new boolean[getNumAttributes()];
// initialize
for (int i = 0; i < irr.length; i++) {
irr[i] = false;
}
// set randomly
int numIrr = 0;
for (int i = 0; (numIrr < getNumIrrelevant())
&& (i < getNumAttributes() * 5); i++) {
int maybeNext = (int) (random.nextDouble() * irr.length);
if (irr[maybeNext] == false) {
irr[maybeNext] = true;
numIrr++;
}
}
return irr;
}
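  /*
   * Illustrative note (not part of the original code): the marking loop above
   * tries at most getNumAttributes() * 5 random draws, so in unlucky cases
   * fewer than getNumIrrelevant() attributes may actually end up marked as
   * irrelevant.
   */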
/**
   * Randomly chooses the attributes that get the datatype numeric.
*
* @param random the random number generator to use
   * @return list of integer values, with one value for each attribute, and each
   *         value set to Attribute.NOMINAL or Attribute.NUMERIC
*/
private int[] defineNumeric(Random random) {
int[] num = new int[getNumAttributes()];
// initialize
for (int i = 0; i < num.length; i++) {
num[i] = Attribute.NOMINAL;
}
int numNum = 0;
for (int i = 0; (numNum < getNumNumeric()) && (i < getNumAttributes() * 5); i++) {
int maybeNext = (int) (random.nextDouble() * num.length);
if (num[maybeNext] != Attribute.NUMERIC) {
num[maybeNext] = Attribute.NUMERIC;
numNum++;
}
}
return num;
}
/**
   * Generates a comment string that documents the data generator. By default
* this string is added at the beginning of the produced output as ARFF file
* type, next after the options.
*
* @return string contains info about the generated rules
*/
@Override
public String generateStart() {
return "";
}
/**
   * Compiles documentation about the data generation. This is the number of
   * irrelevant attributes and the decision list with all rules. Considering
   * that the decision list might still be extended until the last instance is
   * generated, this method should be called at the end of the data generation
   * process.
*
* @return string with additional information about generated dataset
   * @throws Exception if no input structure has been defined
*/
@Override
public String generateFinished() throws Exception {
StringBuffer dLString = new StringBuffer();
// string for output at end of ARFF-File
boolean[] attList_Irr = getAttList_Irr();
Instances format = getDatasetFormat();
dLString.append("%\n% Number of attributes chosen as irrelevant = "
+ getNumIrrelevant() + "\n");
for (int i = 0; i < attList_Irr.length; i++) {
if (attList_Irr[i]) {
dLString.append("% " + format.attribute(i).name() + "\n");
}
}
dLString.append("%\n% DECISIONLIST (number of rules = "
+ m_DecisionList.size() + "):\n");
for (int i = 0; i < m_DecisionList.size(); i++) {
RuleList rl = m_DecisionList.get(i);
dLString.append("% RULE " + i + ": " + rl.toString() + "\n");
}
return dLString.toString();
}
/**
   * Resets the class values of all instances using voting. For each instance
   * the class value that satisfies the most rules is chosen as the new class
   * value.
*
* @param dataset the dataset to work on
* @return the changed instances
* @throws Exception if something goes wrong
*/
private Instances voteDataset(Instances dataset) throws Exception {
for (int i = 0; i < dataset.numInstances(); i++) {
Instance inst = dataset.firstInstance();
inst = votedReclassifyExample(inst);
dataset.add(inst);
dataset.delete(0);
}
return dataset;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class.
*
* @param args should contain arguments for the data producer:
*/
public static void main(String[] args) {
runDataGenerator(new RDG1(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/classifiers/classification/RandomRBF.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RandomRBF.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators.classifiers.classification;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.datagenerators.ClassificationGenerator;
/**
* <!-- globalinfo-start --> RandomRBF data is generated by first creating a
* random set of centers for each class. Each center is randomly assigned a
* weight, a central point per attribute, and a standard deviation. To generate
* new instances, a center is chosen at random taking the weights of each center
* into consideration. Attribute values are randomly generated and offset from
* the center, where the overall vector has been scaled so that its length
* equals a value sampled randomly from the Gaussian distribution of the center.
* The particular center chosen determines the class of the instance.<br/>
* RandomRBF data contains only numeric attributes as it is non-trivial to
* include nominal values.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
* Whether to print debug informations.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -n <num>
* The number of examples to generate (default 100)
* </pre>
*
* <pre>
* -a <num>
* The number of attributes (default 10).
* </pre>
*
* <pre>
* -c <num>
* The number of classes (default 2)
* </pre>
*
* <pre>
* -C <num>
* The number of centroids to use. (default 50)
* </pre>
*
* <!-- options-end -->
*
* @author Richard Kirkby (rkirkby at cs dot waikato dot ac dot nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class RandomRBF extends ClassificationGenerator {
/** for serialization */
static final long serialVersionUID = 6069033710635728720L;
  /** Number of attributes the dataset should have */
protected int m_NumAttributes;
/** Number of Classes the dataset should have */
protected int m_NumClasses;
/** the number of centroids to use for generation */
protected int m_NumCentroids;
/** the centroids */
protected double[][] m_centroids;
/** the classes of the centroids */
protected int[] m_centroidClasses;
/** the weights of the centroids */
protected double[] m_centroidWeights;
/** the stddevs of the centroids */
protected double[] m_centroidStdDevs;
/**
* initializes the generator with default values
*/
public RandomRBF() {
super();
setNumAttributes(defaultNumAttributes());
setNumClasses(defaultNumClasses());
setNumCentroids(defaultNumCentroids());
}
/**
* Returns a string describing this data generator.
*
* @return a description of the data generator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "RandomRBF data is generated by first creating a random set of "
+ "centers for each class. Each center is randomly assigned a weight, "
+ "a central point per attribute, and a standard deviation. To "
+ "generate new instances, a center is chosen at random taking the "
+ "weights of each center into consideration. Attribute values are "
+ "randomly generated and offset from the center, where the overall "
+ "vector has been scaled so that its length equals a value sampled "
+ "randomly from the Gaussian distribution of the center. The "
+ "particular center chosen determines the class of the instance.\n "
+ "RandomRBF data contains only numeric attributes as it is "
+ "non-trivial to include nominal values.";
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = enumToVector(super.listOptions());
result.addElement(new Option("\tThe number of attributes (default "
+ defaultNumAttributes() + ").", "a", 1, "-a <num>"));
result.addElement(new Option("\tThe number of classes (default "
+ defaultNumClasses() + ")", "c", 1, "-c <num>"));
result.add(new Option("\tThe number of centroids to use. (default "
+ defaultNumCentroids() + ")", "C", 1, "-C <num>"));
return result.elements();
}
/**
* Parses a list of options for this object.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
* Whether to print debug informations.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -n <num>
* The number of examples to generate (default 100)
* </pre>
*
* <pre>
* -a <num>
* The number of attributes (default 10).
* </pre>
*
* <pre>
* -c <num>
* The number of classes (default 2)
* </pre>
*
* <pre>
* -C <num>
* The number of centroids to use. (default 50)
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
super.setOptions(options);
tmpStr = Utils.getOption('a', options);
if (tmpStr.length() != 0) {
setNumAttributes(Integer.parseInt(tmpStr));
} else {
setNumAttributes(defaultNumAttributes());
}
tmpStr = Utils.getOption('c', options);
if (tmpStr.length() != 0) {
setNumClasses(Integer.parseInt(tmpStr));
} else {
setNumClasses(defaultNumClasses());
}
tmpStr = Utils.getOption('C', options);
if (tmpStr.length() != 0) {
setNumCentroids(Integer.parseInt(tmpStr));
} else {
setNumCentroids(defaultNumCentroids());
}
}
/**
* Gets the current settings of the datagenerator.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result;
String[] options;
int i;
result = new Vector<String>();
options = super.getOptions();
for (i = 0; i < options.length; i++) {
result.add(options[i]);
}
result.add("-a");
result.add("" + getNumAttributes());
result.add("-c");
result.add("" + getNumClasses());
result.add("-C");
result.add("" + getNumCentroids());
return result.toArray(new String[result.size()]);
}
/**
* returns the default number of attributes
*
* @return the default number of attributes
*/
protected int defaultNumAttributes() {
return 10;
}
/**
* Sets the number of attributes the dataset should have.
*
* @param numAttributes the new number of attributes
*/
public void setNumAttributes(int numAttributes) {
m_NumAttributes = numAttributes;
}
/**
* Gets the number of attributes that should be produced.
*
* @return the number of attributes that should be produced
*/
public int getNumAttributes() {
return m_NumAttributes;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numAttributesTipText() {
return "The number of attributes the generated data will contain.";
}
/**
* returns the default number of classes
*
* @return the default number of classes
*/
protected int defaultNumClasses() {
return 2;
}
/**
* Sets the number of classes the dataset should have.
*
* @param numClasses the new number of classes
*/
public void setNumClasses(int numClasses) {
m_NumClasses = numClasses;
}
/**
* Gets the number of classes the dataset should have.
*
* @return the number of classes the dataset should have
*/
public int getNumClasses() {
return m_NumClasses;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numClassesTipText() {
return "The number of classes to generate.";
}
/**
* returns the default number of centroids
*
* @return the default number of centroids
*/
protected int defaultNumCentroids() {
return 50;
}
/**
* Gets the number of centroids.
*
* @return the number of centroids.
*/
public int getNumCentroids() {
return m_NumCentroids;
}
/**
* Sets the number of centroids to use.
*
* @param value the number of centroids to use.
*/
public void setNumCentroids(int value) {
if (value > 0) {
m_NumCentroids = value;
} else {
System.out.println("At least 1 centroid is necessary (provided: " + value
+ ")!");
}
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numCentroidsTipText() {
return "The number of centroids to use.";
}
/**
   * Returns whether single mode is set for the given data generator; the mode
   * depends on the option settings and/or the generator type.
*
* @return single mode flag
* @throws Exception if mode is not set yet
*/
@Override
public boolean getSingleModeFlag() throws Exception {
return true;
}
/**
* returns a random index based on the given proportions
*
* @param proportionArray the proportions
* @param random the random number generator to use
* @return the random index
*/
protected int chooseRandomIndexBasedOnProportions(double[] proportionArray,
Random random) {
double probSum;
double val;
int index;
double sum;
probSum = Utils.sum(proportionArray);
val = random.nextDouble() * probSum;
index = 0;
sum = 0.0;
while ((sum <= val) && (index < proportionArray.length)) {
sum += proportionArray[index++];
}
return index - 1;
}
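  /*
   * Worked example (illustrative only) of the roulette-wheel selection above:
   * with proportionArray = {0.2, 0.5, 0.3} the proportions sum to 1.0. A draw
   * of val = 0.65 accumulates sum = 0.2 (index 1), then sum = 0.7 (index 2),
   * at which point the loop stops and index - 1 = 1 is returned, i.e. the
   * entry with weight 0.5. Larger weights are therefore picked proportionally
   * more often.
   */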
/**
* Initializes the format for the dataset produced. Must be called before the
* generateExample or generateExamples methods are used. Re-initializes the
* random number generator with the given seed.
*
* @return the format for the dataset
* @throws Exception if the generating of the format failed
* @see #getSeed()
*/
@Override
public Instances defineDataFormat() throws Exception {
int i;
int j;
ArrayList<Attribute> atts;
ArrayList<String> clsValues;
Random rand;
m_Random = new Random(getSeed());
rand = getRandom();
// number of examples is the same as given per option
setNumExamplesAct(getNumExamples());
// initialize centroids
m_centroids = new double[getNumCentroids()][getNumAttributes()];
m_centroidClasses = new int[getNumCentroids()];
m_centroidWeights = new double[getNumCentroids()];
m_centroidStdDevs = new double[getNumCentroids()];
for (i = 0; i < getNumCentroids(); i++) {
for (j = 0; j < getNumAttributes(); j++) {
m_centroids[i][j] = rand.nextDouble();
}
m_centroidClasses[i] = rand.nextInt(getNumClasses());
m_centroidWeights[i] = rand.nextDouble();
m_centroidStdDevs[i] = rand.nextDouble();
}
// initialize dataset format
atts = new ArrayList<Attribute>();
for (i = 0; i < getNumAttributes(); i++) {
atts.add(new Attribute("a" + i));
}
clsValues = new ArrayList<String>();
for (i = 0; i < getNumClasses(); i++) {
clsValues.add("c" + i);
}
atts.add(new Attribute("class", clsValues));
m_DatasetFormat = new Instances(getRelationNameToUse(), atts, 0);
return m_DatasetFormat;
}
/**
* Generates one example of the dataset.
*
* @return the generated example
* @throws Exception if the format of the dataset is not yet defined
* @throws Exception if the generator only works with generateExamples which
* means in non single mode
*/
@Override
public Instance generateExample() throws Exception {
Instance result;
int centroid;
double[] atts;
double magnitude;
double desiredMag;
double scale;
int i;
double label;
Random rand;
result = null;
rand = getRandom();
if (m_DatasetFormat == null) {
throw new Exception("Dataset format not defined.");
}
// generate class label based on class probs
centroid = chooseRandomIndexBasedOnProportions(m_centroidWeights, rand);
label = m_centroidClasses[centroid];
// generate attributes
atts = new double[getNumAttributes() + 1];
for (i = 0; i < getNumAttributes(); i++) {
atts[i] = (rand.nextDouble() * 2.0) - 1.0;
}
atts[atts.length - 1] = label;
magnitude = 0.0;
for (i = 0; i < getNumAttributes(); i++) {
magnitude += atts[i] * atts[i];
}
magnitude = Math.sqrt(magnitude);
desiredMag = rand.nextGaussian() * m_centroidStdDevs[centroid];
scale = desiredMag / magnitude;
for (i = 0; i < getNumAttributes(); i++) {
atts[i] *= scale;
atts[i] += m_centroids[centroid][i];
result = new DenseInstance(1.0, atts);
}
// dataset reference
result.setDataset(m_DatasetFormat);
return result;
}
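  /*
   * Illustrative note (not part of the original code): the loop above first
   * draws a random offset vector with components in [-1, 1), computes its
   * Euclidean length, and rescales it so that its length matches
   * desiredMag = nextGaussian() * stdDev of the chosen centroid (a negative
   * draw simply flips the direction). The rescaled offset is then added
   * componentwise to the centroid, so each instance lies in a random direction
   * at a Gaussian-distributed distance from its centroid.
   */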
/**
* Generates all examples of the dataset. Re-initializes the random number
* generator with the given seed, before generating instances.
*
* @return the generated dataset
* @throws Exception if the format of the dataset is not yet defined
* @throws Exception if the generator only works with generateExample, which
* means in single mode
* @see #getSeed()
*/
@Override
public Instances generateExamples() throws Exception {
Instances result;
int i;
result = new Instances(m_DatasetFormat, 0);
m_Random = new Random(getSeed());
for (i = 0; i < getNumExamplesAct(); i++) {
result.add(generateExample());
}
return result;
}
/**
   * Generates a comment string that documents the data generator. By default
* this string is added at the beginning of the produced output as ARFF file
* type, next after the options.
*
* @return string contains info about the generated rules
*/
@Override
public String generateStart() {
StringBuffer result;
int i;
result = new StringBuffer();
result.append("%\n");
result.append("% centroids:\n");
for (i = 0; i < getNumCentroids(); i++) {
result.append("% " + i + ".: " + Utils.arrayToString(m_centroids[i])
+ "\n");
}
result.append("%\n");
result.append("% centroidClasses: "
+ Utils.arrayToString(m_centroidClasses) + "\n");
result.append("%\n");
result.append("% centroidWeights: "
+ Utils.arrayToString(m_centroidWeights) + "\n");
result.append("%\n");
result.append("% centroidStdDevs: "
+ Utils.arrayToString(m_centroidStdDevs) + "\n");
result.append("%\n");
return result.toString();
}
/**
   * Generates a comment string that documents the data generator. By default
   * this string is added at the end of the produced output as ARFF file type.
*
* @return string contains info about the generated rules
   * @throws Exception if the generating of the documentation fails
*/
@Override
public String generateFinished() throws Exception {
return "";
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for executing this class.
*
* @param args should contain arguments for the data producer:
*/
public static void main(String[] args) {
runDataGenerator(new RandomRBF(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/classifiers/regression/Expression.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Expression.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators.classifiers.regression;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.filters.unsupervised.attribute.AddExpression;
/**
* <!-- globalinfo-start --> A data generator for generating y according to a
* given expression out of randomly generated x.<br/>
* E.g., the mexican hat can be generated like this:<br/>
* sin(abs(a1)) / abs(a1)<br/>
* In addition to this function, the amplitude can be changed and gaussian noise
* can be added.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
* Whether to print debug informations.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -n <num>
* The number of examples to generate (default 100)
* </pre>
*
* <pre>
* -A <num>
* The amplitude multiplier (default 1.0).
* </pre>
*
* <pre>
* -R <num>..<num>
* The range x is randomly drawn from (default -10.0..10.0).
* </pre>
*
* <pre>
* -N <num>
* The noise rate (default 0.0).
* </pre>
*
* <pre>
* -V <num>
* The noise variance (default 1.0).
* </pre>
*
* <pre>
* -E <expression>
* The expression to use for generating y out of x
* (default sin(abs(a1)) / abs(a1)).
* </pre>
*
* <!-- options-end -->
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
* @see AddExpression
* @see MexicanHat
*/
public class Expression extends MexicanHat {
/** for serialization */
static final long serialVersionUID = -4237047357682277211L;
/** the expression for computing y */
protected String m_Expression;
/** the filter for generating y out of x */
protected AddExpression m_Filter;
/** the input data structure for the filter */
protected Instances m_RawData;
/**
* initializes the generator
*/
public Expression() {
super();
setExpression(defaultExpression());
}
/**
* Returns a string describing this data generator.
*
* @return a description of the data generator suitable for displaying in the
* explorer/experimenter gui
*/
@Override
public String globalInfo() {
return "A data generator for generating y according to a given expression "
+ "out of randomly generated x.\n"
+ "E.g., the mexican hat can be generated like this:\n"
+ " sin(abs(a1)) / abs(a1)\n"
+ "In addition to this function, the amplitude can be changed and "
+ "gaussian noise can be added.";
}
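  /*
   * A minimal usage sketch (illustrative only, assuming the generator is
   * driven programmatically rather than via main()/runDataGenerator):
   *
   *   Expression gen = new Expression();
   *   gen.setExpression("sin(abs(a1)) / abs(a1)"); // the default mexican hat
   *   gen.defineDataFormat();   // also configures the internal AddExpression filter
   *   Instances data = gen.generateExamples();
   */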
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = enumToVector(super.listOptions());
result.addElement(new Option(
"\tThe expression to use for generating y out of x \n" + "\t(default "
+ defaultExpression() + ").", "E", 1, "-E <expression>"));
return result.elements();
}
/**
* Parses a list of options for this object.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
* Whether to print debug informations.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -n <num>
* The number of examples to generate (default 100)
* </pre>
*
* <pre>
* -A <num>
* The amplitude multiplier (default 1.0).
* </pre>
*
* <pre>
* -R <num>..<num>
* The range x is randomly drawn from (default -10.0..10.0).
* </pre>
*
* <pre>
* -N <num>
* The noise rate (default 0.0).
* </pre>
*
* <pre>
* -V <num>
* The noise variance (default 1.0).
* </pre>
*
* <pre>
* -E <expression>
* The expression to use for generating y out of x
* (default sin(abs(a1)) / abs(a1)).
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @exception Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
super.setOptions(options);
tmpStr = Utils.getOption('E', options);
if (tmpStr.length() != 0) {
setExpression(tmpStr);
} else {
setExpression(defaultExpression());
}
}
/**
   * Gets the current settings of the datagenerator Expression.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result;
String[] options;
result = new Vector<String>();
options = super.getOptions();
Collections.addAll(result, options);
result.add("-E");
result.add("" + getExpression());
return result.toArray(new String[result.size()]);
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
@Override
public String amplitudeTipText() {
return "The amplitude to multiply the y value with.";
}
/**
* returns the default expression
*
* @return the default expression
*/
protected String defaultExpression() {
return "sin(abs(a1)) / abs(a1)";
}
/**
* Gets the mathematical expression for generating y out of x
*
* @return the expression for computing y
*/
public String getExpression() {
return m_Expression;
}
/**
* Sets the mathematical expression to generate y out of x.
*
* @param value the expression for computing y
*/
public void setExpression(String value) {
if (value.length() != 0) {
m_Expression = value;
} else {
throw new IllegalArgumentException("An expression has to be provided!");
}
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String expressionTipText() {
return "The expression for generating y out of x.";
}
/**
* Returns whether single mode is set for the given data generator; the mode
* depends on the option settings and/or the generator type.
*
* @return single mode flag
* @throws Exception if mode is not set yet
*/
@Override
public boolean getSingleModeFlag() throws Exception {
return true;
}
/**
* Initializes the format for the dataset produced. Must be called before the
* generateExample or generateExamples methods are used. Re-initializes the
* random number generator with the given seed.
*
* @return the format for the dataset
* @throws Exception if the generating of the format failed
* @see #getSeed()
*/
@Override
public Instances defineDataFormat() throws Exception {
ArrayList<Attribute> atts;
// initialize input format
atts = new ArrayList<Attribute>();
atts.add(new Attribute("x"));
m_RawData = new Instances(getRelationNameToUse(), atts, 0);
m_Filter = new AddExpression();
m_Filter.setName("y");
m_Filter.setExpression(getExpression());
m_Filter.setInputFormat(m_RawData);
return super.defineDataFormat();
}
/**
* Generates one example of the dataset.
*
* @return the generated example
* @throws Exception if the format of the dataset is not yet defined
* @throws Exception if the generator only works with generateExamples which
* means in non single mode
*/
@Override
public Instance generateExample() throws Exception {
Instance result;
Random rand;
double x;
double y;
double[] atts;
Instance inst;
rand = getRandom();
if (m_DatasetFormat == null) {
throw new Exception("Dataset format not defined.");
}
// random x
x = rand.nextDouble();
// fit into range
x = x * (getMaxRange() - getMinRange()) + getMinRange();
// generate y
atts = new double[1];
atts[0] = x;
inst = new DenseInstance(1.0, atts);
inst.setDataset(m_RawData);
m_Filter.input(inst);
m_Filter.batchFinished();
inst = m_Filter.output();
// add gaussian noise: amplitude * N(0,1) * noiseRate * noiseVariance
y = inst.value(1) + getAmplitude() * m_NoiseRandom.nextGaussian()
* getNoiseRate() * getNoiseVariance();
// generate attributes
atts = new double[m_DatasetFormat.numAttributes()];
atts[0] = x;
atts[1] = y;
result = new DenseInstance(1.0, atts);
// dataset reference
result.setDataset(m_DatasetFormat);
return result;
}
/**
* Generates all examples of the dataset. Re-initializes the random number
* generator with the given seed, before generating instances.
*
* @return the generated dataset
* @throws Exception if the format of the dataset is not yet defined
* @throws Exception if the generator only works with generateExample, which
* means in single mode
* @see #getSeed()
*/
@Override
public Instances generateExamples() throws Exception {
Instances result;
int i;
result = new Instances(m_DatasetFormat, 0);
m_Random = new Random(getSeed());
for (i = 0; i < getNumExamplesAct(); i++) {
result.add(generateExample());
}
return result;
}
/**
* Generates a comment string that documents the data generator. By default
* this string is added at the beginning of the produced output as ARFF file
* type, next after the options.
*
* @return string contains info about the generated rules
*/
@Override
public String generateStart() {
return "";
}
/**
* Generates a comment string that documents the data generator. By default
* this string is added at the end of the produced output as ARFF file type.
*
* @return string contains info about the generated rules
* @throws Exception if the generating of the documentation fails
*/
@Override
public String generateFinished() throws Exception {
return "";
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class.
*
* @param args should contain the arguments for the data producer
*/
public static void main(String[] args) {
runDataGenerator(new Expression(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/classifiers
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/classifiers/regression/MexicanHat.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* MexicanHat.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators.classifiers.regression;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.datagenerators.RegressionGenerator;
/**
* <!-- globalinfo-start --> A data generator for the simple 'Mexican Hat'
* function:<br/>
* y = sin|x| / |x|<br/>
* In addition to this simple function, the amplitude can be changed and
* gaussian noise can be added.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
* Whether to print debug information.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -n <num>
* The number of examples to generate (default 100)
* </pre>
*
* <pre>
* -A <num>
* The amplitude multiplier (default 1.0).
* </pre>
*
* <pre>
* -R <num>..<num>
* The range x is randomly drawn from (default -10.0..10.0).
* </pre>
*
* <pre>
* -N <num>
* The noise rate (default 0.0).
* </pre>
*
* <pre>
* -V <num>
* The noise variance (default 1.0).
* </pre>
*
* <!-- options-end -->
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class MexicanHat extends RegressionGenerator {
/** for serialization */
static final long serialVersionUID = 4577016375261512975L;
/** the amplitude of y */
protected double m_Amplitude;
/** the lower boundary of the range, x is drawn from */
protected double m_MinRange;
/** the upper boundary of the range, x is drawn from */
protected double m_MaxRange;
/** the rate of the gaussian noise */
protected double m_NoiseRate;
/** the variance of the gaussian noise */
protected double m_NoiseVariance;
/** the random number generator for the noise */
protected Random m_NoiseRandom = null;
/**
* initializes the generator
*/
public MexicanHat() {
super();
setAmplitude(defaultAmplitude());
setMinRange(defaultMinRange());
setMaxRange(defaultMaxRange());
setNoiseRate(defaultNoiseRate());
setNoiseVariance(defaultNoiseVariance());
}
/**
* Returns a string describing this data generator.
*
* @return a description of the data generator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "A data generator for the simple 'Mexian Hat' function:\n"
+ " y = sin|x| / |x|\n"
+ "In addition to this simple function, the amplitude can be changed and "
+ "gaussian noise can be added.";
}
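/*
 * Worked example (illustration only): with the default amplitude of 1.0 and
 * x = pi/2, y = sin(pi/2) / (pi/2), which is roughly 0.6366; at x = 0 the
 * generateExample() method below returns the amplitude itself instead of
 * evaluating 0/0.
 */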
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = enumToVector(super.listOptions());
result.addElement(new Option("\tThe amplitude multiplier (default "
+ defaultAmplitude() + ").", "A", 1, "-A <num>"));
result.addElement(new Option(
"\tThe range x is randomly drawn from (default " + defaultMinRange()
+ ".." + defaultMaxRange() + ").", "R", 1, "-R <num>..<num>"));
result.addElement(new Option("\tThe noise rate (default "
+ defaultNoiseRate() + ").", "N", 1, "-N <num>"));
result.addElement(new Option("\tThe noise variance (default "
+ defaultNoiseVariance() + ").", "V", 1, "-V <num>"));
return result.elements();
}
/**
* Parses a list of options for this object.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
* Whether to print debug information.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -n <num>
* The number of examples to generate (default 100)
* </pre>
*
* <pre>
* -A <num>
* The amplitude multiplier (default 1.0).
* </pre>
*
* <pre>
* -R <num>..<num>
* The range x is randomly drawn from (default -10.0..10.0).
* </pre>
*
* <pre>
* -N <num>
* The noise rate (default 0.0).
* </pre>
*
* <pre>
* -V <num>
* The noise variance (default 1.0).
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @exception Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
super.setOptions(options);
tmpStr = Utils.getOption('A', options);
if (tmpStr.length() != 0) {
setAmplitude(Double.parseDouble(tmpStr));
} else {
setAmplitude(defaultAmplitude());
}
tmpStr = Utils.getOption('R', options);
if (tmpStr.length() != 0) {
setRange(tmpStr);
} else {
setRange(defaultMinRange() + ".." + defaultMaxRange());
}
tmpStr = Utils.getOption('N', options);
if (tmpStr.length() != 0) {
setNoiseRate(Double.parseDouble(tmpStr));
} else {
setNoiseRate(defaultNoiseRate());
}
tmpStr = Utils.getOption('V', options);
if (tmpStr.length() != 0) {
setNoiseVariance(Double.parseDouble(tmpStr));
} else {
setNoiseVariance(defaultNoiseVariance());
}
}
/**
* Gets the current settings of the data generator.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result;
String[] options;
result = new Vector<String>();
options = removeBlacklist(super.getOptions());
Collections.addAll(result, options);
result.add("-A");
result.add("" + getAmplitude());
result.add("-R");
result.add("" + getRange());
result.add("-N");
result.add("" + getNoiseRate());
result.add("-V");
result.add("" + getNoiseVariance());
return result.toArray(new String[result.size()]);
}
/**
* returns the default amplitude
*
* @return the default amplitude
*/
protected double defaultAmplitude() {
return 1.0;
}
/**
* Gets the amplitude multiplier.
*
* @return the amplitude multiplier
*/
public double getAmplitude() {
return m_Amplitude;
}
/**
* Sets the amplitude multiplier.
*
* @param value the amplitude multiplier
*/
public void setAmplitude(double value) {
m_Amplitude = value;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String amplitudeTipText() {
return "The amplitude of the mexican hat.";
}
/**
* Sets the upper and lower boundary for the range of x
*
* @param fromTo the string containing the upper and lower boundary for the
* range of x, separated by ..
*/
protected void setRange(String fromTo) {
int i = fromTo.indexOf("..");
String from = fromTo.substring(0, i);
setMinRange(Double.valueOf(from).doubleValue());
String to = fromTo.substring(i + 2, fromTo.length());
setMaxRange(Double.valueOf(to).doubleValue());
}
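// Minimal illustration: setRange("-10.0..10.0") sets the lower boundary to
// -10.0 and the upper boundary to 10.0, matching the -R <num>..<num> option.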
/**
* Gets the upper and lower boundary for the range of x
*
* @return the string containing the upper and lower boundary for the range of
* x, separated by ..
*/
protected String getRange() {
String fromTo = "" + Utils.doubleToString(getMinRange(), 2) + ".."
+ Utils.doubleToString(getMaxRange(), 2);
return fromTo;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
protected String rangeTipText() {
return "The upper and lower boundary for the range x is drawn from randomly.";
}
/**
* returns the default min range
*
* @return the default min range
*/
protected double defaultMinRange() {
return -10;
}
/**
* Sets the lower boundary for the range of x
*
* @param value the lower boundary
*/
public void setMinRange(double value) {
m_MinRange = value;
}
/**
* Gets the lower boundary for the range of x
*
* @return the lower boundary for the range of x
*/
public double getMinRange() {
return m_MinRange;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String minRangeTipText() {
return "The lower boundary for the range x is drawn from randomly.";
}
/**
* returns the default max range
*
* @return the default max range
*/
protected double defaultMaxRange() {
return 10;
}
/**
* Sets the upper boundary for the range of x
*
* @param value the upper boundary
*/
public void setMaxRange(double value) {
m_MaxRange = value;
}
/**
* Gets the upper boundary for the range of x
*
* @return the upper boundary for the range of x
*/
public double getMaxRange() {
return m_MaxRange;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String maxRangeTipText() {
return "The upper boundary for the range x is drawn from randomly.";
}
/**
* returns the default gaussian noise rate
*
* @return the default gaussian noise rate
*/
protected double defaultNoiseRate() {
return 0.0;
}
/**
* Gets the gaussian noise rate.
*
* @return the gaussian noise rate
*/
public double getNoiseRate() {
return m_NoiseRate;
}
/**
* Sets the gaussian noise rate.
*
* @param value the gaussian noise rate
*/
public void setNoiseRate(double value) {
m_NoiseRate = value;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String noiseRateTipText() {
return "The gaussian noise rate to use.";
}
/**
* returns the default variance of the noise rate
*
* @return the default variance of the noise rate
*/
protected double defaultNoiseVariance() {
return 1.0;
}
/**
* Gets the noise variance
*
* @return the noise variance
*/
public double getNoiseVariance() {
return m_NoiseVariance;
}
/**
* Sets the noise variance
*
* @param value the noise variance
*/
public void setNoiseVariance(double value) {
if (value > 0) {
m_NoiseVariance = value;
} else {
throw new IllegalArgumentException(
"Noise variance needs to be > 0 (provided: " + value + ")!");
}
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String noiseVarianceTipText() {
return "The noise variance to use.";
}
/**
* Returns whether single mode is set for the given data generator; the mode
* depends on the option settings and/or the generator type.
*
* @return single mode flag
* @throws Exception if mode is not set yet
*/
@Override
public boolean getSingleModeFlag() throws Exception {
return true;
}
/**
* Initializes the format for the dataset produced. Must be called before the
* generateExample or generateExamples methods are used. Re-initializes the
* random number generator with the given seed.
*
* @return the format for the dataset
* @throws Exception if the generating of the format failed
* @see #getSeed()
*/
@Override
public Instances defineDataFormat() throws Exception {
ArrayList<Attribute> atts;
m_Random = new Random(getSeed());
m_NoiseRandom = new Random(getSeed());
// number of examples is the same as given per option
setNumExamplesAct(getNumExamples());
// initialize dataset format
atts = new ArrayList<Attribute>();
atts.add(new Attribute("x"));
atts.add(new Attribute("y"));
m_DatasetFormat = new Instances(getRelationNameToUse(), atts, 0);
return m_DatasetFormat;
}
/**
* Generates one example of the dataset.
*
* @return the generated example
* @throws Exception if the format of the dataset is not yet defined
* @throws Exception if the generator only works with generateExamples which
* means in non single mode
*/
@Override
public Instance generateExample() throws Exception {
Instance result;
Random rand;
double x;
double y;
double[] atts;
result = null;
rand = getRandom();
if (m_DatasetFormat == null) {
throw new Exception("Dataset format not defined.");
}
// generate attributes
atts = new double[m_DatasetFormat.numAttributes()];
// random x
x = rand.nextDouble();
// fit into range
x = x * (getMaxRange() - getMinRange()) + getMinRange();
// generate y
if (Utils.eq(x, 0)) {
y = getAmplitude();
} else {
y = getAmplitude() * StrictMath.sin(StrictMath.abs(x))
/ StrictMath.abs(x);
}
// add gaussian noise: amplitude * N(0,1) * noiseRate * noiseVariance
y = y + getAmplitude() * m_NoiseRandom.nextGaussian() * getNoiseRate()
* getNoiseVariance();
atts[0] = x;
atts[1] = y;
result = new DenseInstance(1.0, atts);
// dataset reference
result.setDataset(m_DatasetFormat);
return result;
}
/**
* Generates all examples of the dataset. Re-initializes the random number
* generator with the given seed, before generating instances.
*
* @return the generated dataset
* @throws Exception if the format of the dataset is not yet defined
* @throws Exception if the generator only works with generateExample, which
* means in single mode
* @see #getSeed()
*/
@Override
public Instances generateExamples() throws Exception {
Instances result;
int i;
result = new Instances(m_DatasetFormat, 0);
m_Random = new Random(getSeed());
for (i = 0; i < getNumExamplesAct(); i++) {
result.add(generateExample());
}
return result;
}
/**
* Generates a comment string that documents the data generator. By default
* this string is added at the beginning of the produced output as ARFF file
* type, next after the options.
*
* @return string contains info about the generated rules
*/
@Override
public String generateStart() {
return "";
}
/**
* Generates a comment string that documents the data generator. By default
* this string is added at the end of the produced output as ARFF file type.
*
* @return string contains info about the generated rules
* @throws Exception if the generating of the documentation fails
*/
@Override
public String generateFinished() throws Exception {
return "";
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class.
*
* @param args should contain the arguments for the data producer
*/
public static void main(String[] args) {
runDataGenerator(new MexicanHat(), args);
}
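/*
 * Example command line (an illustrative sketch; all flags used here are
 * documented in the options block above):
 *   java weka.datagenerators.classifiers.regression.MexicanHat \
 *     -n 200 -A 2.0 -R 1.0..10.0 -N 0.1 -o mexicanhat.arff
 */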
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/clusterers/BIRCHCluster.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* BIRCHCluster.java
* Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators.clusterers;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Tag;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.WekaEnumeration;
import weka.datagenerators.ClusterGenerator;
/**
* <!-- globalinfo-start --> Cluster data generator designed for the BIRCH
* System<br/>
* <br/>
* Dataset is generated with instances in K clusters.<br/>
* Instances are 2-d data points.<br/>
* Each cluster is characterized by the number of data points in it, its radius
* and its center. The location of the cluster centers is determined by the
* pattern parameter. Three patterns are currently supported: grid, sine and
* random.<br/>
* <br/>
* For more information refer to:<br/>
* <br/>
* Tian Zhang, Raghu Ramakrishnan, Miron Livny: BIRCH: An Efficient Data
* Clustering Method for Very Large Databases. In: ACM SIGMOD International
* Conference on Management of Data, 103-114, 1996.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- technical-bibtex-start --> BibTeX:
*
* <pre>
* @inproceedings{Zhang1996,
* author = {Tian Zhang and Raghu Ramakrishnan and Miron Livny},
* booktitle = {ACM SIGMOD International Conference on Management of Data},
* pages = {103-114},
* publisher = {ACM Press},
* title = {BIRCH: An Efficient Data Clustering Method for Very Large Databases},
* year = {1996}
* }
* </pre>
* <p/>
* <!-- technical-bibtex-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
* Whether to print debug information.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -a <num>
* The number of attributes (default 10).
* </pre>
*
* <pre>
* -c
* Class Flag, if set, the cluster is listed in extra attribute.
* </pre>
*
* <pre>
* -b <range>
* The indices for boolean attributes.
* </pre>
*
* <pre>
* -m <range>
* The indices for nominal attributes.
* </pre>
*
* <pre>
* -k <num>
* The number of clusters (default 4)
* </pre>
*
* <pre>
* -G
* Set pattern to grid (default is random).
* This flag cannot be used at the same time as flag I.
* The pattern is random, if neither flag G nor flag I is set.
* </pre>
*
* <pre>
* -I
* Set pattern to sine (default is random).
* This flag cannot be used at the same time as flag G.
* The pattern is random, if neither flag G nor flag I is set.
* </pre>
*
* <pre>
* -N <num>..<num>
* The range of number of instances per cluster (default 1..50).
* Lower number must be between 0 and 2500,
* upper number must be between 50 and 2500.
* </pre>
*
* <pre>
* -R <num>..<num>
* The range of radius per cluster (default 0.1..1.4142135623730951).
* Lower number must be between 0 and SQRT(2),
* upper number must be between SQRT(2) and SQRT(32).
* </pre>
*
* <pre>
* -M <num>
* The distance multiplier (default 4.0).
* </pre>
*
* <pre>
* -C <num>
* The number of cycles (default 4).
* </pre>
*
* <pre>
* -O
* Flag for input order is ORDERED. If flag is not set then
* input order is RANDOMIZED. RANDOMIZED is currently not
* implemented, therefore the input order is always ORDERED.
* </pre>
*
* <pre>
* -P <num>
* The noise rate in percent (default 0.0).
* Can be between 0% and 30%. (Remark: The original
* algorithm only allows noise up to 10%.)
* </pre>
*
* <!-- options-end -->
*
* @author Gabi Schmidberger (gabi@cs.waikato.ac.nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class BIRCHCluster extends ClusterGenerator implements
TechnicalInformationHandler {
/** for serialization */
static final long serialVersionUID = -334820527230755027L;
/** Number of Clusters the dataset should have */
protected int m_NumClusters;
/** minimal number of instances per cluster (option N) */
private int m_MinInstNum;
/** maximal number of instances per cluster (option N) */
private int m_MaxInstNum;
/** minimum radius (option R) */
private double m_MinRadius;
/** maximum radius (option R) */
private double m_MaxRadius;
/** Constant set for choice of pattern. (option G) */
public static final int GRID = 0;
/** Constant set for choice of pattern. (option I) */
public static final int SINE = 1;
/** Constant set for choice of pattern. (default) */
public static final int RANDOM = 2;
/** the pattern tags */
public static final Tag[] TAGS_PATTERN = { new Tag(GRID, "Grid"),
new Tag(SINE, "Sine"), new Tag(RANDOM, "Random") };
/** pattern (changed with options G or S) */
private int m_Pattern;
/** distance multiplier (option M) */
private double m_DistMult;
/** number of cycles (option C) */
private int m_NumCycles;
/** Constant set for input order (option O) */
public static final int ORDERED = 0;
/** Constant set for input order (default) */
public static final int RANDOMIZED = 1;
/** the input order tags */
public static final Tag[] TAGS_INPUTORDER = { new Tag(ORDERED, "ordered"),
new Tag(RANDOMIZED, "randomized") };
/** input order (changed with option O) */
private int m_InputOrder;
/** noise rate in percent (option P, between 0 and 30) */
private double m_NoiseRate;
/** cluster list */
private ArrayList<Cluster> m_ClusterList;
// following are used for pattern is GRID
/** grid size */
private int m_GridSize;
/** grid width */
private double m_GridWidth;
/**
* class to represent cluster
*/
private class Cluster implements Serializable, RevisionHandler {
/** for serialization */
static final long serialVersionUID = -8336901069823498140L;
/** number of instances for this cluster */
private final int m_InstNum;
/**
* radius of the cluster; the variance is radius ** 2 / 2
*/
private final double m_Radius;
/** center of cluster = array of Double values */
private final double[] m_Center;
/**
* Constructor, used for pattern = RANDOM
*
* @param instNum the number of instances
* @param radius radius of the cluster
* @param random the random number generator to use
*/
private Cluster(int instNum, double radius, Random random) {
m_InstNum = instNum;
m_Radius = radius;
m_Center = new double[getNumAttributes()];
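// each coordinate of the center is drawn uniformly from [0, m_NumClusters)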
for (int i = 0; i < getNumAttributes(); i++) {
m_Center[i] = random.nextDouble() * m_NumClusters;
}
}
/**
* Constructor, used for pattern = GRID
*
* @param instNum the number of instances
* @param radius radius of the cluster
* @param gridVector vector for grid positions
* @param gridWidth factor for grid position
*/
// center is defined in the constructor of cluster
private Cluster(int instNum, double radius, int[] gridVector,
double gridWidth) {
m_InstNum = instNum;
m_Radius = radius;
m_Center = new double[getNumAttributes()];
for (int i = 0; i < getNumAttributes(); i++) {
m_Center[i] = (gridVector[i] + 1.0) * gridWidth;
}
}
/**
* returns the number of instances
*
* @return the number of instances
*/
private int getInstNum() {
return m_InstNum;
}
/**
* returns the standard deviation
*
* @return the standard deviation
*/
private double getStdDev() {
return (m_Radius / Math.pow(2.0, 0.5));
}
/**
* returns the centers
*
* @return the centers
*/
private double[] getCenter() {
return m_Center;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
} // end class Cluster
/**
* class to represent Vector for placement of the center in space
*/
private class GridVector implements Serializable, RevisionHandler {
/** for serialization */
static final long serialVersionUID = -1900309948991039522L;
/** array of integer */
private final int[] m_GridVector;
/**
* one higher than the highest possible integer value in any of the integers
* in the gridvector
*/
private final int m_Base;
/** size of vector */
private final int m_Size;
/**
* Constructor
*
* @param numDim number of dimensions = number of attributes
* @param base is one higher than the highest possible integer value in any
* of the integers in the gridvector
*/
private GridVector(int numDim, int base) {
m_Size = numDim;
m_Base = base;
m_GridVector = new int[numDim];
for (int i = 0; i < numDim; i++) {
m_GridVector[i] = 0;
}
}
/**
* returns the integer array
*
* @return the integer array
*/
private int[] getGridVector() {
return m_GridVector;
}
/**
* Overflow has occurred when integer is zero.
*
* @param digit the input integer
* @return true if digit is 0
*/
private boolean overflow(int digit) {
return (digit == 0);
}
/**
* Adds one to integer and sets to zero, if new value was equal m_Base.
*
* @param digit the input integer
* @return new integer object
*/
private int addOne(int digit) {
int value = digit + 1;
if (value >= m_Base) {
value = 0;
}
return value;
}
/**
* add 1 to vector
*/
private void addOne() {
m_GridVector[0] = addOne(m_GridVector[0]);
int i = 1;
while (overflow(m_GridVector[i - 1]) && i < m_Size) {
m_GridVector[i] = addOne(m_GridVector[i]);
i++;
}
}
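/*
 * Illustration: the grid vector behaves like a base-m_Base counter. With two
 * attributes and m_Base = 3, repeated calls to addOne() step through
 * (0,0) -> (1,0) -> (2,0) -> (0,1) -> (1,1) -> ...; each vector is later
 * turned into a cluster center by multiplying (component + 1) with the grid
 * width (see the GRID constructor of Cluster above).
 */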
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
} // end class GridVector
/**
* initializes the generator with default values
*/
public BIRCHCluster() {
super();
setNumClusters(defaultNumClusters());
setMinInstNum(defaultMinInstNum());
setMaxInstNum(defaultMaxInstNum());
setMinRadius(defaultMinRadius());
setMaxRadius(defaultMaxRadius());
setPattern(defaultPattern());
setDistMult(defaultDistMult());
setNumCycles(defaultNumCycles());
setInputOrder(defaultInputOrder());
setNoiseRate(defaultNoiseRate());
}
/**
* Returns a string describing this data generator.
*
* @return a description of the data generator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "Cluster data generator designed for the BIRCH System\n\n"
+ "Dataset is generated with instances in K clusters.\n"
+ "Instances are 2-d data points.\n"
+ "Each cluster is characterized by the number of data points in it"
+ "its radius and its center. The location of the cluster centers is"
+ "determined by the pattern parameter. Three patterns are currently"
+ "supported grid, sine and random.\n\n"
+ "For more information refer to:\n\n"
+ getTechnicalInformation().toString();
}
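/*
 * Example command line (an illustrative sketch; all flags used here are
 * documented in the options block above):
 *   java weka.datagenerators.clusterers.BIRCHCluster \
 *     -k 4 -a 2 -c -G -M 4.0 -N 10..20 -o birch.arff
 * This requests 4 clusters over 2 attributes, laid out on a grid, with the
 * cluster label written to an extra class attribute.
 */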
/**
* Returns an instance of a TechnicalInformation object, containing detailed
* information about the technical background of this class, e.g., paper
* reference or book this class is based on.
*
* @return the technical information about this class
*/
@Override
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(Type.INPROCEEDINGS);
result.setValue(Field.AUTHOR,
"Tian Zhang and Raghu Ramakrishnan and Miron Livny");
result.setValue(Field.TITLE,
"BIRCH: An Efficient Data Clustering Method for Very Large Databases");
result.setValue(Field.BOOKTITLE,
"ACM SIGMOD International Conference on Management of Data");
result.setValue(Field.YEAR, "1996");
result.setValue(Field.PAGES, "103-114");
result.setValue(Field.PUBLISHER, "ACM Press");
return result;
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = enumToVector(super.listOptions());
result.addElement(new Option("\tThe number of clusters (default "
+ defaultNumClusters() + ")", "k", 1, "-k <num>"));
result.addElement(new Option("\tSet pattern to grid (default is random).\n"
+ "\tThis flag cannot be used at the same time as flag I.\n"
+ "\tThe pattern is random, if neither flag G nor flag I is set.", "G",
0, "-G"));
result.addElement(new Option("\tSet pattern to sine (default is random).\n"
+ "\tThis flag cannot be used at the same time as flag I.\n"
+ "\tThe pattern is random, if neither flag G nor flag I is set.", "I",
0, "-I"));
result.addElement(new Option(
"\tThe range of number of instances per cluster (default "
+ defaultMinInstNum() + ".." + defaultMaxInstNum() + ").\n"
+ "\tLower number must be between 0 and 2500,\n"
+ "\tupper number must be between 50 and 2500.", "N", 1,
"-N <num>..<num>"));
result.addElement(new Option("\tThe range of radius per cluster (default "
+ defaultMinRadius() + ".." + defaultMaxRadius() + ").\n"
+ "\tLower number must be between 0 and SQRT(2), \n"
+ "\tupper number must be between SQRT(2) and SQRT(32).", "R", 1,
"-R <num>..<num>"));
result.addElement(new Option("\tThe distance multiplier (default "
+ defaultDistMult() + ").", "M", 1, "-M <num>"));
result.addElement(new Option("\tThe number of cycles (default "
+ defaultNumCycles() + ").", "C", 1, "-C <num>"));
result.addElement(new Option(
"\tFlag for input order is ORDERED. If flag is not set then \n"
+ "\tinput order is RANDOMIZED. RANDOMIZED is currently not \n"
+ "\timplemented, therefore is the input order always ORDERED.", "O",
0, "-O"));
result.addElement(new Option("\tThe noise rate in percent (default "
+ defaultNoiseRate() + ").\n"
+ "\tCan be between 0% and 30%. (Remark: The original \n"
+ "\talgorithm only allows noise up to 10%.)", "P", 1, "-P <num>"));
return result.elements();
}
/**
* Parses a list of options for this object.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
* Whether to print debug information.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -a <num>
* The number of attributes (default 10).
* </pre>
*
* <pre>
* -c
* Class Flag, if set, the cluster is listed in extra attribute.
* </pre>
*
* <pre>
* -b <range>
* The indices for boolean attributes.
* </pre>
*
* <pre>
* -m <range>
* The indices for nominal attributes.
* </pre>
*
* <pre>
* -k <num>
* The number of clusters (default 4)
* </pre>
*
* <pre>
* -G
* Set pattern to grid (default is random).
* This flag cannot be used at the same time as flag I.
* The pattern is random, if neither flag G nor flag I is set.
* </pre>
*
* <pre>
* -I
* Set pattern to sine (default is random).
* This flag cannot be used at the same time as flag G.
* The pattern is random, if neither flag G nor flag I is set.
* </pre>
*
* <pre>
* -N <num>..<num>
* The range of number of instances per cluster (default 1..50).
* Lower number must be between 0 and 2500,
* upper number must be between 50 and 2500.
* </pre>
*
* <pre>
* -R <num>..<num>
* The range of radius per cluster (default 0.1..1.4142135623730951).
* Lower number must be between 0 and SQRT(2),
* upper number must be between SQRT(2) and SQRT(32).
* </pre>
*
* <pre>
* -M <num>
* The distance multiplier (default 4.0).
* </pre>
*
* <pre>
* -C <num>
* The number of cycles (default 4).
* </pre>
*
* <pre>
* -O
* Flag for input order is ORDERED. If flag is not set then
* input order is RANDOMIZED. RANDOMIZED is currently not
* implemented, therefore the input order is always ORDERED.
* </pre>
*
* <pre>
* -P <num>
* The noise rate in percent (default 0.0).
* Can be between 0% and 30%. (Remark: The original
* algorithm only allows noise up to 10%.)
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
super.setOptions(options);
tmpStr = Utils.getOption('k', options);
if (tmpStr.length() != 0) {
setNumClusters(Integer.parseInt(tmpStr));
} else {
setNumClusters(defaultNumClusters());
}
tmpStr = Utils.getOption('N', options);
if (tmpStr.length() != 0) {
setInstNums(tmpStr);
} else {
setInstNums(defaultMinInstNum() + ".." + defaultMaxInstNum());
}
tmpStr = Utils.getOption('R', options);
if (tmpStr.length() != 0) {
setRadiuses(tmpStr);
} else {
setRadiuses(defaultMinRadius() + ".." + defaultMaxRadius());
}
boolean grid = Utils.getFlag('G', options);
boolean sine = Utils.getFlag('I', options);
if (grid && sine) {
throw new Exception("Flags -G and -I can only be set mutually exclusiv.");
}
setPattern(new SelectedTag(RANDOM, TAGS_PATTERN));
if (grid) {
setPattern(new SelectedTag(GRID, TAGS_PATTERN));
}
if (sine) {
setPattern(new SelectedTag(SINE, TAGS_PATTERN));
}
tmpStr = Utils.getOption('M', options);
if (tmpStr.length() != 0) {
if (!grid) {
throw new Exception("Option M can only be used with GRID pattern.");
}
setDistMult(Double.parseDouble(tmpStr));
} else {
setDistMult(defaultDistMult());
}
tmpStr = Utils.getOption('C', options);
if (tmpStr.length() != 0) {
if (!sine) {
throw new Exception("Option C can only be used with SINE pattern.");
}
setNumCycles(Integer.parseInt(tmpStr));
} else {
setNumCycles(defaultNumCycles());
}
if (Utils.getFlag('O', options)) {
setInputOrder(new SelectedTag(ORDERED, TAGS_INPUTORDER));
} else {
setInputOrder(defaultInputOrder());
}
tmpStr = Utils.getOption('P', options);
if (tmpStr.length() != 0) {
setNoiseRate(Double.parseDouble(tmpStr));
} else {
setNoiseRate(defaultNoiseRate());
}
}
/**
* Gets the current settings of the datagenerator BIRCHCluster.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result;
result = new Vector<String>();
Collections.addAll(result, super.getOptions());
result.add("-k");
result.add("" + getNumClusters());
result.add("-N");
result.add("" + getInstNums());
result.add("-R");
result.add("" + getRadiuses());
if (m_Pattern == GRID) {
result.add("-G");
result.add("-M");
result.add("" + getDistMult());
}
if (m_Pattern == SINE) {
result.add("-I");
result.add("-C");
result.add("" + getNumCycles());
}
if (getOrderedFlag()) {
result.add("-O");
}
result.add("-P");
result.add("" + getNoiseRate());
return result.toArray(new String[result.size()]);
}
/**
* returns the default number of clusters
*
* @return the default number of clusters
*/
protected int defaultNumClusters() {
return 4;
}
/**
* Sets the number of clusters the dataset should have.
*
* @param numClusters the new number of clusters
*/
public void setNumClusters(int numClusters) {
m_NumClusters = numClusters;
}
/**
* Gets the number of clusters the dataset should have.
*
* @return the number of clusters the dataset should have
*/
public int getNumClusters() {
return m_NumClusters;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numClustersTipText() {
return "The number of clusters to generate.";
}
/**
* Sets the upper and lower boundary for instances per cluster.
*
* @param fromTo the string containing the upper and lower boundary for
* instances per cluster separated by ..
*/
protected void setInstNums(String fromTo) {
int i = fromTo.indexOf("..");
String from = fromTo.substring(0, i);
setMinInstNum(Integer.parseInt(from));
String to = fromTo.substring(i + 2, fromTo.length());
setMaxInstNum(Integer.parseInt(to));
}
/**
* Gets the upper and lower boundary for instances per cluster.
*
* @return the string containing the upper and lower boundary for instances
* per cluster separated by ..
*/
protected String getInstNums() {
String fromTo = "" + getMinInstNum() + ".." + getMaxInstNum();
return fromTo;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
protected String instNumsTipText() {
return "The upper and lowet boundary for instances per cluster.";
}
/**
* returns the default min number of instances
*
* @return the default min number of instances
*/
protected int defaultMinInstNum() {
return 1;
}
/**
* Gets the lower boundary for instances per cluster.
*
* @return the lower boundary for instances per cluster
*/
public int getMinInstNum() {
return m_MinInstNum;
}
/**
* Sets the lower boundary for instances per cluster.
*
* @param newMinInstNum new lower boundary for instances per cluster
*/
public void setMinInstNum(int newMinInstNum) {
m_MinInstNum = newMinInstNum;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String minInstNumTipText() {
return "The lower boundary for instances per cluster.";
}
/**
* returns the default max number of instances
*
* @return the default max number of instances
*/
protected int defaultMaxInstNum() {
return 50;
}
/**
* Gets the upper boundary for instances per cluster.
*
* @return the upper boundary for instances per cluster
*/
public int getMaxInstNum() {
return m_MaxInstNum;
}
/**
* Sets the upper boundary for instances per cluster.
*
* @param newMaxInstNum new upper boundary for instances per cluster
*/
public void setMaxInstNum(int newMaxInstNum) {
m_MaxInstNum = newMaxInstNum;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String maxInstNumTipText() {
return "The upper boundary for instances per cluster.";
}
/**
* Sets the upper and lower boundary for the radius of the clusters.
*
* @param fromTo the string containing the upper and lower boundary for the
* radius of the clusters, separated by ..
*/
protected void setRadiuses(String fromTo) {
int i = fromTo.indexOf("..");
String from = fromTo.substring(0, i);
setMinRadius(Double.valueOf(from).doubleValue());
String to = fromTo.substring(i + 2, fromTo.length());
setMaxRadius(Double.valueOf(to).doubleValue());
}
/**
* Gets the upper and lower boundary for the radius of the clusters.
*
* @return the string containing the upper and lower boundary for the radius
* of the clusters, separated by ..
*/
protected String getRadiuses() {
String fromTo = "" + Utils.doubleToString(getMinRadius(), 2) + ".."
+ Utils.doubleToString(getMaxRadius(), 2);
return fromTo;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
protected String radiusesTipText() {
return "The upper and lower boundary for the radius of the clusters.";
}
/**
* returns the default min radius
*
* @return the default min radius
*/
protected double defaultMinRadius() {
return 0.1;
}
/**
* Gets the lower boundary for the radiuses of the clusters.
*
* @return the lower boundary for the radiuses of the clusters
*/
public double getMinRadius() {
return m_MinRadius;
}
/**
* Sets the lower boundary for the radiuses of the clusters.
*
* @param newMinRadius new lower boundary for the radiuses of the clusters
*/
public void setMinRadius(double newMinRadius) {
m_MinRadius = newMinRadius;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String minRadiusTipText() {
return "The lower boundary for the radius of the clusters.";
}
/**
* returns the default max radius
*
* @return the default max radius
*/
protected double defaultMaxRadius() {
return Math.sqrt(2.0);
}
/**
* Gets the upper boundary for the radiuses of the clusters.
*
* @return the upper boundary for the radiuses of the clusters
*/
public double getMaxRadius() {
return m_MaxRadius;
}
/**
* Sets the upper boundary for the radiuses of the clusters.
*
* @param newMaxRadius new upper boundary for the radiuses of the clusters
*/
public void setMaxRadius(double newMaxRadius) {
m_MaxRadius = newMaxRadius;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String maxRadiusTipText() {
return "The upper boundary for the radius of the clusters.";
}
/**
* returns the default pattern
*
* @return the default pattern
*/
protected SelectedTag defaultPattern() {
return new SelectedTag(RANDOM, TAGS_PATTERN);
}
/**
* Gets the pattern type.
*
* @return the current pattern type
*/
public SelectedTag getPattern() {
return new SelectedTag(m_Pattern, TAGS_PATTERN);
}
/**
* Sets the pattern type.
*
* @param value new pattern type
*/
public void setPattern(SelectedTag value) {
if (value.getTags() == TAGS_PATTERN) {
m_Pattern = value.getSelectedTag().getID();
}
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String patternTipText() {
return "The pattern for generating the data.";
}
/**
* returns the default distance multiplier
*
* @return the default distance multiplier
*/
protected double defaultDistMult() {
return 4.0;
}
/**
* Gets the distance multiplier.
*
* @return the distance multiplier
*/
public double getDistMult() {
return m_DistMult;
}
/**
* Sets the distance multiplier.
*
* @param newDistMult new distance multiplier
*/
public void setDistMult(double newDistMult) {
m_DistMult = newDistMult;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String distMultTipText() {
return "The distance multiplier (in combination with the 'Grid' pattern).";
}
/**
* returns the default number of cycles
*
* @return the default number of cycles
*/
protected int defaultNumCycles() {
return 4;
}
/**
* Gets the number of cycles.
*
* @return the number of cycles
*/
public int getNumCycles() {
return m_NumCycles;
}
/**
* Sets the number of cycles.
*
* @param newNumCycles new number of cycles
*/
public void setNumCycles(int newNumCycles) {
m_NumCycles = newNumCycles;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numCyclesTipText() {
return "The number of cycles to use (in combination with the 'Sine' pattern).";
}
/**
* returns the default input order
*
* @return the default input order
*/
protected SelectedTag defaultInputOrder() {
// TODO: ORDERED is the only input order currently implemented; normally
// the default would be RANDOMIZED
return new SelectedTag(ORDERED, TAGS_INPUTORDER);
}
/**
* Gets the input order.
*
* @return the current input order
*/
public SelectedTag getInputOrder() {
return new SelectedTag(m_InputOrder, TAGS_INPUTORDER);
}
/**
* Sets the input order.
*
* @param value new input order
*/
public void setInputOrder(SelectedTag value) {
if (value.getTags() == TAGS_INPUTORDER) {
m_InputOrder = value.getSelectedTag().getID();
}
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String inputOrderTipText() {
return "The input order to use.";
}
/**
* Gets the ordered flag (option O).
*
* @return true if ordered flag is set
*/
public boolean getOrderedFlag() {
return m_InputOrder == ORDERED;
}
/**
* returns the default noise rate
*
* @return the default noise rate
*/
protected double defaultNoiseRate() {
return 0.0;
}
/**
* Gets the percentage of noise set.
*
* @return the percentage of noise set
*/
public double getNoiseRate() {
return m_NoiseRate;
}
/**
* Sets the percentage of noise.
*
* @param newNoiseRate new percentage of noise
*/
public void setNoiseRate(double newNoiseRate) {
m_NoiseRate = newNoiseRate;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String noiseRateTipText() {
return "The noise rate to use.";
}
/**
* Gets the single mode flag.
*
* @return true if the method generateExample can be used
*/
@Override
public boolean getSingleModeFlag() {
return false;
}
/**
* Initializes the format for the dataset produced.
*
* @return the output data format
* @throws Exception data format could not be defined
*/
@Override
public Instances defineDataFormat() throws Exception {
Random random = new Random(getSeed());
setRandom(random);
Instances dataset;
ArrayList<Attribute> attributes = new ArrayList<Attribute>(3);
Attribute attribute;
boolean classFlag = getClassFlag();
ArrayList<String> classValues = null;
if (classFlag) {
classValues = new ArrayList<String>(m_NumClusters);
}
// define dataset
for (int i = 0; i < getNumAttributes(); i++) {
attribute = new Attribute("X" + i);
attributes.add(attribute);
}
if (classFlag) {
for (int i = 0; i < m_NumClusters; i++) {
classValues.add("c" + i);
}
attribute = new Attribute("class", classValues);
attributes.add(attribute);
}
dataset = new Instances(getRelationNameToUse(), attributes, 0);
if (classFlag) {
dataset.setClassIndex(getNumAttributes());
}
// set dataset format of this class
Instances format = new Instances(dataset, 0);
setDatasetFormat(format);
m_ClusterList = defineClusters(random);
// System.out.println("dataset" + dataset.numAttributes());
return dataset;
}
/**
* Generate an example of the dataset.
*
* @return the instance generated
* @throws Exception if the format is not defined or if generating
* examples one by one is not possible
*/
@Override
public Instance generateExample() throws Exception {
throw new Exception("Examples cannot be generated" + " one by one.");
}
/**
* Generate all examples of the dataset.
*
* @return the instance generated
* @throws Exception if format not defined
*/
@Override
public Instances generateExamples() throws Exception {
Random random = getRandom();
Instances data = getDatasetFormat();
if (data == null) {
throw new Exception("Dataset format not defined.");
}
// generate examples
if (getOrderedFlag()) {
data = generateExamples(random, data);
} else {
throw new Exception("RANDOMIZED is not yet implemented.");
}
return (data);
}
/**
* Generate all examples of the dataset.
*
* @param random the random number generator to use
* @param format the dataset format
* @return the instance generated
* @throws Exception if format not defined
*/
public Instances generateExamples(Random random, Instances format)
throws Exception {
Instance example = null;
if (format == null) {
throw new Exception("Dataset format not defined.");
}
// generate examples for one cluster after another
int cNum = 0;
for (Enumeration<Cluster> enm = new WekaEnumeration<Cluster>(m_ClusterList); enm
.hasMoreElements(); cNum++) {
Cluster cl = enm.nextElement();
double stdDev = cl.getStdDev();
int instNum = cl.getInstNum();
double[] center = cl.getCenter();
String cName = "c" + cNum;
for (int i = 0; i < instNum; i++) {
// generate example
example = generateInstance(format, random, stdDev, center, cName);
if (example != null) {
example.setDataset(format);
}
format.add(example);
}
}
return (format);
}
/**
* Generate an example of the dataset.
*
* @param format the dataset format
* @param randomG the random number generator
* @param stdDev the standard deviation to use
* @param center the centers
* @param cName the class value
* @return the instance generated
*/
private Instance generateInstance(Instances format, Random randomG,
double stdDev, double[] center, String cName) {
Instance example;
int numAtts = getNumAttributes();
if (getClassFlag()) {
numAtts++;
}
double[] data = new double[numAtts];
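// each attribute value below is sampled from a spherical gaussian
// N(center[i], stdDev^2) centered on the cluster center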
for (int i = 0; i < getNumAttributes(); i++) {
data[i] = randomG.nextGaussian() * stdDev + center[i];
}
if (getClassFlag()) {
data[format.classIndex()] = format.classAttribute().indexOfValue(cName);
}
example = new DenseInstance(1.0, data);
example.setDataset(format);
return example;
}
/**
* Defines the clusters
*
* @param random random number generator
* @return the cluster definitions
* @throws Exception if defining fails
*/
private ArrayList<Cluster> defineClusters(Random random) throws Exception {
if (m_Pattern == GRID) {
return defineClustersGRID(random);
} else {
return defineClustersRANDOM(random);
}
}
/**
* Defines the clusters if pattern is GRID
*
* @param random random number generator
* @return the defined clusters for GRID
* @throws Exception if something goes wrong
*/
private ArrayList<Cluster> defineClustersGRID(Random random) throws Exception {
ArrayList<Cluster> clusters = new ArrayList<Cluster>(m_NumClusters);
double diffInstNum = m_MaxInstNum - m_MinInstNum;
double minInstNum = m_MinInstNum;
double diffRadius = m_MaxRadius - m_MinRadius;
Cluster cluster;
// compute gridsize
double gs = Math.pow(m_NumClusters, 1.0 / getNumAttributes());
if (gs - (((int) gs)) > 0.0) {
m_GridSize = (int) (gs + 1.0);
} else {
m_GridSize = (int) gs;
}
// compute gridwidth
m_GridWidth = ((m_MaxRadius + m_MinRadius) / 2) * m_DistMult;
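// e.g. 4 clusters over 2 attributes give gs = 4^(1/2) = 2, so m_GridSize = 2;
// with the default radii 0.1..sqrt(2) and distance multiplier 4.0 the grid
// width is ((sqrt(2) + 0.1) / 2) * 4.0, roughly 3.03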
// System.out.println("GridSize= " + m_GridSize);
// System.out.println("GridWidth= " + m_GridWidth);
// initialize gridvector with zeros
GridVector gv = new GridVector(getNumAttributes(), m_GridSize);
for (int i = 0; i < m_NumClusters; i++) {
int instNum = (int) (random.nextDouble() * diffInstNum + minInstNum);
double radius = (random.nextDouble() * diffRadius) + m_MinRadius;
// center is defined in the constructor of cluster
cluster = new Cluster(instNum, radius, gv.getGridVector(), m_GridWidth);
clusters.add(cluster);
gv.addOne();
}
return clusters;
}
/**
* Defines the clusters if pattern is RANDOM
*
* @param random random number generator
* @return the cluster definitions
* @throws Exception if something goes wrong
*/
private ArrayList<Cluster> defineClustersRANDOM(Random random)
throws Exception {
ArrayList<Cluster> clusters = new ArrayList<Cluster>(m_NumClusters);
double diffInstNum = m_MaxInstNum - m_MinInstNum;
double minInstNum = m_MinInstNum;
double diffRadius = m_MaxRadius - m_MinRadius;
Cluster cluster;
for (int i = 0; i < m_NumClusters; i++) {
int instNum = (int) (random.nextDouble() * diffInstNum + minInstNum);
double radius = (random.nextDouble() * diffRadius) + m_MinRadius;
// center is defined in the constructor of cluster
cluster = new Cluster(instNum, radius, random);
clusters.add(cluster);
}
return clusters;
}
/**
* Compiles documentation about the data generation after the generation
* process
*
* @return string with additional information about generated dataset
* @throws Exception if no input structure has been defined
*/
@Override
public String generateFinished() throws Exception {
return "";
}
/**
* Compiles documentation about the data generation before the generation
* process
*
* @return string with additional information
*/
@Override
public String generateStart() {
StringBuffer docu = new StringBuffer();
int sumInst = 0;
int cNum = 0;
for (Enumeration<Cluster> enm = new WekaEnumeration<Cluster>(m_ClusterList); enm
.hasMoreElements(); cNum++) {
Cluster cl = enm.nextElement();
docu.append("%\n");
docu.append("% Cluster: c" + cNum + "\n");
docu.append("% ----------------------------------------------\n");
docu.append("% StandardDeviation: "
+ Utils.doubleToString(cl.getStdDev(), 2) + "\n");
docu.append("% Number of instances: " + cl.getInstNum() + "\n");
sumInst += cl.getInstNum();
double[] center = cl.getCenter();
docu.append("% ");
for (int i = 0; i < center.length - 1; i++) {
docu.append(Utils.doubleToString(center[i], 2) + ", ");
}
docu.append(Utils.doubleToString(center[center.length - 1], 2) + "\n");
}
docu.append("%\n% ----------------------------------------------\n");
docu.append("% Total number of instances: " + sumInst + "\n");
docu.append("% in " + cNum + " clusters\n");
docu.append("% Pattern chosen : ");
if (m_Pattern == GRID) {
docu.append("GRID, " + "distance multiplier = "
+ Utils.doubleToString(m_DistMult, 2) + "\n");
} else if (m_Pattern == SINE) {
docu.append("SINE\n");
} else {
docu.append("RANDOM\n");
}
return docu.toString();
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class.
*
* @param args should contain the arguments for the data producer
*/
public static void main(String[] args) {
runDataGenerator(new BIRCHCluster(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/clusterers/SubspaceCluster.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SubspaceCluster.java
* Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators.clusterers;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.Range;
import weka.core.RevisionUtils;
import weka.core.Tag;
import weka.core.Utils;
import weka.datagenerators.ClusterDefinition;
import weka.datagenerators.ClusterGenerator;
/**
* <!-- globalinfo-start --> A data generator that produces data points in
* hyperrectangular subspace clusters.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
* Whether to print debug informations.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -a <num>
* The number of attributes (default 1).
* </pre>
*
* <pre>
* -c
* Class Flag, if set, the cluster is listed in extra attribute.
* </pre>
*
* <pre>
* -b <range>
* The indices for boolean attributes.
* </pre>
*
* <pre>
* -m <range>
* The indices for nominal attributes.
* </pre>
*
* <pre>
* -P <num>
* The noise rate in percent (default 0.0).
* Can be between 0% and 30%. (Remark: The original
* algorithm only allows noise up to 10%.)
* </pre>
*
* <pre>
* -C <cluster-definition>
* A cluster definition of class 'SubspaceClusterDefinition'
* (definition needs to be quoted to be recognized as
* a single argument).
* </pre>
*
* <pre>
* Options specific to weka.datagenerators.clusterers.SubspaceClusterDefinition:
* </pre>
*
* <pre>
* -A <range>
* Generates randomly distributed instances in the cluster.
* </pre>
*
* <pre>
* -U <range>
* Generates uniformly distributed instances in the cluster.
* </pre>
*
* <pre>
* -G <range>
* Generates gaussian distributed instances in the cluster.
* </pre>
*
* <pre>
* -D <num>,<num>
* The attribute min/max (-A and -U) or mean/stddev (-G) for
* the cluster.
* </pre>
*
* <pre>
* -N <num>..<num>
* The range of number of instances per cluster (default 1..50).
* </pre>
*
* <pre>
* -I
* Uses integer instead of continuous values (default continuous).
* </pre>
*
* <!-- options-end -->
*
* @author Gabi Schmidberger (gabi@cs.waikato.ac.nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class SubspaceCluster extends ClusterGenerator {
/** for serialization */
static final long serialVersionUID = -3454999858505621128L;
/** noise rate in percent (option P, between 0 and 30) */
protected double m_NoiseRate;
/** cluster list */
protected ClusterDefinition[] m_Clusters;
/** if nominal, store number of values */
protected int[] m_numValues;
/** cluster type: uniform/random */
public static final int UNIFORM_RANDOM = 0;
/** cluster type: total uniform */
public static final int TOTAL_UNIFORM = 1;
/** cluster type: gaussian */
public static final int GAUSSIAN = 2;
/** the tags for the cluster types */
public static final Tag[] TAGS_CLUSTERTYPE = {
new Tag(UNIFORM_RANDOM, "uniform/random"),
new Tag(TOTAL_UNIFORM, "total uniform"), new Tag(GAUSSIAN, "gaussian") };
/** cluster subtype: continuous */
public static final int CONTINUOUS = 0;
/** cluster subtype: integer */
public static final int INTEGER = 1;
/** the tags for the cluster types */
public static final Tag[] TAGS_CLUSTERSUBTYPE = {
new Tag(CONTINUOUS, "continuous"), new Tag(INTEGER, "integer") };
/**
* initializes the generator, sets the number of clusters to 0, since user has
* to specify them explicitly
*/
public SubspaceCluster() {
super();
setNoiseRate(defaultNoiseRate());
}
/**
* Returns a string describing this data generator.
*
* @return a description of the data generator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "A data generator that produces data points in "
+ "hyperrectangular subspace clusters.";
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = enumToVector(super.listOptions());
result.addElement(new Option("\tThe noise rate in percent (default "
+ defaultNoiseRate() + ").\n"
+ "\tCan be between 0% and 30%. (Remark: The original \n"
+ "\talgorithm only allows noise up to 10%.)", "P", 1, "-P <num>"));
result.addElement(new Option("\tA cluster definition of class '"
+ SubspaceClusterDefinition.class.getName().replaceAll(".*\\.", "")
+ "'\n" + "\t(definition needs to be quoted to be recognized as \n"
+ "\ta single argument).", "C", 1, "-C <cluster-definition>"));
result.addElement(new Option("", "", 0, "\nOptions specific to "
+ SubspaceClusterDefinition.class.getName() + ":"));
result.addAll(enumToVector(new SubspaceClusterDefinition(this)
.listOptions()));
return result.elements();
}
/**
* Parses a list of options for this object.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -h
* Prints this help.
* </pre>
*
* <pre>
* -o <file>
* The name of the output file, otherwise the generated data is
* printed to stdout.
* </pre>
*
* <pre>
* -r <name>
* The name of the relation.
* </pre>
*
* <pre>
* -d
* Whether to print debug informations.
* </pre>
*
* <pre>
* -S
* The seed for random function (default 1)
* </pre>
*
* <pre>
* -a <num>
* The number of attributes (default 1).
* </pre>
*
* <pre>
* -c
* Class Flag, if set, the cluster is listed in extra attribute.
* </pre>
*
* <pre>
* -b <range>
* The indices for boolean attributes.
* </pre>
*
* <pre>
* -m <range>
* The indices for nominal attributes.
* </pre>
*
* <pre>
* -P <num>
* The noise rate in percent (default 0.0).
* Can be between 0% and 30%. (Remark: The original
* algorithm only allows noise up to 10%.)
* </pre>
*
* <pre>
* -C <cluster-definition>
* A cluster definition of class 'SubspaceClusterDefinition'
* (definition needs to be quoted to be recognized as
* a single argument).
* </pre>
*
* <pre>
* Options specific to weka.datagenerators.clusterers.SubspaceClusterDefinition:
* </pre>
*
* <pre>
* -A <range>
* Generates randomly distributed instances in the cluster.
* </pre>
*
* <pre>
* -U <range>
* Generates uniformly distributed instances in the cluster.
* </pre>
*
* <pre>
* -G <range>
* Generates gaussian distributed instances in the cluster.
* </pre>
*
* <pre>
* -D <num>,<num>
* The attribute min/max (-A and -U) or mean/stddev (-G) for
* the cluster.
* </pre>
*
* <pre>
* -N <num>..<num>
* The range of number of instances per cluster (default 1..50).
* </pre>
*
* <pre>
* -I
* Uses integer instead of continuous values (default continuous).
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
SubspaceClusterDefinition cl;
Vector<SubspaceClusterDefinition> list;
super.setOptions(options);
m_numValues = new int[getNumAttributes()];
// numValues might be changed by a cluster definition
// (only relevant for nominal data)
for (int i = 0; i < getNumAttributes(); i++) {
m_numValues[i] = 1;
}
tmpStr = Utils.getOption('P', options);
if (tmpStr.length() != 0) {
setNoiseRate(Double.parseDouble(tmpStr));
} else {
setNoiseRate(defaultNoiseRate());
}
// cluster definitions
list = new Vector<SubspaceClusterDefinition>();
do {
tmpStr = Utils.getOption('C', options);
if (tmpStr.length() != 0) {
cl = new SubspaceClusterDefinition(this);
cl.setOptions(Utils.splitOptions(tmpStr));
list.add(cl);
}
} while (tmpStr.length() != 0);
m_Clusters = list.toArray(new ClusterDefinition[list.size()]);
// in case no cluster definition was provided, make sure that there's at
// least one definition present -> see getClusters()
getClusters();
}
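// Usage sketch (the option values below are assumptions, not defaults): each
// -C argument has to be quoted so that the whole cluster definition reaches
// setOptions as a single token, e.g.
//
//   SubspaceCluster gen = new SubspaceCluster();
//   gen.setOptions(Utils.splitOptions(
//     "-a 2 -c -C \"-U 1,2 -D 0,5,0,5 -N 10..20\""));
//   // -> two attributes, class attribute added, one uniform cluster over
//   //    attributes 1-2 with ranges [0,5] and 10..20 instances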
/**
* Gets the current settings of the datagenerator.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
Collections.addAll(result, super.getOptions());
result.add("-P");
result.add("" + getNoiseRate());
for (int i = 0; i < getClusters().length; i++) {
result.add("-C");
result.add(Utils.joinOptions(getClusters()[i].getOptions()));
}
return result.toArray(new String[result.size()]);
}
/**
* returns the current cluster definitions, if necessary initializes them
*
* @return the current cluster definitions
*/
protected ClusterDefinition[] getClusters() {
if ((m_Clusters == null) || (m_Clusters.length == 0)) {
if (m_Clusters != null) {
System.out.println("NOTE: at least 1 cluster definition is necessary, "
+ "created default one.");
}
m_Clusters = new ClusterDefinition[] { new SubspaceClusterDefinition(this) };
}
return m_Clusters;
}
/**
* returns the default number of attributes
*
* @return the default number of attributes
*/
@Override
protected int defaultNumAttributes() {
return 1;
}
/**
* Sets the number of attributes the dataset should have.
*
* @param numAttributes the new number of attributes
*/
@Override
public void setNumAttributes(int numAttributes) {
super.setNumAttributes(numAttributes);
m_numValues = new int[getNumAttributes()];
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
@Override
public String numAttributesTipText() {
return "The number of attributes the generated data will contain (Note: they must be covered by the cluster definitions!)";
}
/**
* returns the default noise rate
*
* @return the default noise rate
*/
protected double defaultNoiseRate() {
return 0.0;
}
/**
* Gets the percentage of noise set.
*
* @return the percentage of noise set
*/
public double getNoiseRate() {
return m_NoiseRate;
}
/**
* Sets the percentage of noise set.
*
* @param newNoiseRate new percentage of noise
*/
public void setNoiseRate(double newNoiseRate) {
m_NoiseRate = newNoiseRate;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String noiseRateTipText() {
return "The noise rate to use.";
}
/**
* returns the currently set clusters
*
* @return the currently set clusters
*/
public ClusterDefinition[] getClusterDefinitions() {
return getClusters();
}
/**
* sets the clusters to use
*
* @param value the clusters to use
* @throws Exception if clusters are not the correct class
*/
public void setClusterDefinitions(ClusterDefinition[] value) throws Exception {
String indexStr;
indexStr = "";
m_Clusters = value;
for (int i = 0; i < getClusters().length; i++) {
if (!(getClusters()[i] instanceof SubspaceClusterDefinition)) {
if (indexStr.length() != 0) {
indexStr += ",";
}
indexStr += "" + (i + 1);
}
getClusters()[i].setParent(this);
getClusters()[i].setOptions(getClusters()[i].getOptions()); // for
// initializing!
}
// any wrong classes encountered?
if (indexStr.length() != 0) {
throw new Exception("These cluster definitions are not '"
+ SubspaceClusterDefinition.class.getName() + "': " + indexStr);
}
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String clusterDefinitionsTipText() {
return "The clusters to use.";
}
/**
* Checks, whether all attributes are covered by cluster definitions and
* returns TRUE in that case.
*
* @return whether all attributes are covered
*/
protected boolean checkCoverage() {
int i;
int n;
int[] count;
Range r;
String attrIndex;
SubspaceClusterDefinition cl;
// check whether all the attributes are covered
count = new int[getNumAttributes()];
for (i = 0; i < getNumAttributes(); i++) {
if (m_nominalCols.isInRange(i)) {
count[i]++;
}
if (m_booleanCols.isInRange(i)) {
count[i]++;
}
for (n = 0; n < getClusters().length; n++) {
cl = (SubspaceClusterDefinition) getClusters()[n];
r = new Range(cl.getAttrIndexRange());
r.setUpper(getNumAttributes());
if (r.isInRange(i)) {
count[i]++;
}
}
}
// list all indices that are not covered
attrIndex = "";
for (i = 0; i < count.length; i++) {
if (count[i] == 0) {
if (attrIndex.length() != 0) {
attrIndex += ",";
}
attrIndex += (i + 1);
}
}
if (attrIndex.length() != 0) {
throw new IllegalArgumentException(
"The following attributes are not covered by a cluster "
+ "definition: " + attrIndex + "\n");
}
return true;
}
/**
* Gets the single mode flag.
*
* @return true if method generateExample can be used.
*/
@Override
public boolean getSingleModeFlag() {
return false;
}
/**
* Initializes the format for the dataset produced.
*
* @return the output data format
* @throws Exception data format could not be defined
*/
@Override
public Instances defineDataFormat() throws Exception {
// initialize
setOptions(getOptions());
checkCoverage();
Random random = new Random(getSeed());
setRandom(random);
Instances dataset;
ArrayList<Attribute> attributes = new ArrayList<Attribute>(3);
Attribute attribute;
boolean classFlag = getClassFlag();
ArrayList<String> classValues = null;
if (classFlag) {
classValues = new ArrayList<String>(getClusters().length);
}
ArrayList<String> boolValues = new ArrayList<String>(2);
boolValues.add("false");
boolValues.add("true");
ArrayList<String> nomValues = null;
// define dataset
for (int i = 0; i < getNumAttributes(); i++) {
// define boolean attribute
if (m_booleanCols.isInRange(i)) {
attribute = new Attribute("B" + i, boolValues);
} else if (m_nominalCols.isInRange(i)) {
// define nominal attribute
nomValues = new ArrayList<String>(m_numValues[i]);
for (int j = 0; j < m_numValues[i]; j++) {
nomValues.add("value-" + j);
}
attribute = new Attribute("N" + i, nomValues);
} else {
// numerical attribute
attribute = new Attribute("X" + i);
}
attributes.add(attribute);
}
if (classFlag) {
for (int i = 0; i < getClusters().length; i++) {
classValues.add("c" + i);
}
attribute = new Attribute("class", classValues);
attributes.add(attribute);
}
dataset = new Instances(getRelationNameToUse(), attributes, 0);
if (classFlag) {
dataset.setClassIndex(m_NumAttributes);
}
// set dataset format of this class
Instances format = new Instances(dataset, 0);
setDatasetFormat(format);
for (int i = 0; i < getClusters().length; i++) {
SubspaceClusterDefinition cl = (SubspaceClusterDefinition) getClusters()[i];
cl.setNumInstances(random);
cl.setParent(this);
}
return dataset;
}
/**
* Returns true if attribute is boolean
*
* @param index of the attribute
* @return true if the attribute is boolean
*/
public boolean isBoolean(int index) {
return m_booleanCols.isInRange(index);
}
/**
* Returns true if attribute is nominal
*
* @param index of the attribute
* @return true if the attribute is nominal
*/
public boolean isNominal(int index) {
return m_nominalCols.isInRange(index);
}
/**
* returns array that stores the number of values for a nominal attribute.
*
* @return the array that stores the number of values for a nominal attribute
*/
public int[] getNumValues() {
return m_numValues;
}
/**
* Generate an example of the dataset.
*
* @return the instance generated
* @throws Exception if format not defined or generating <br/>
* examples one by one is not possible
*/
@Override
public Instance generateExample() throws Exception {
throw new Exception("Examples cannot be generated one by one.");
}
/**
* Generate all examples of the dataset.
*
* @return the instance generated
* @throws Exception if format not defined
*/
@Override
public Instances generateExamples() throws Exception {
Instances format = getDatasetFormat();
Instance example = null;
if (format == null) {
throw new Exception("Dataset format not defined.");
}
// generate examples for one cluster after another
for (int cNum = 0; cNum < getClusters().length; cNum++) {
SubspaceClusterDefinition cl = (SubspaceClusterDefinition) getClusters()[cNum];
// get the number of instances to create
int instNum = cl.getNumInstances();
// class value is c + cluster number
String cName = "c" + cNum;
switch (cl.getClusterType().getSelectedTag().getID()) {
case (UNIFORM_RANDOM):
for (int i = 0; i < instNum; i++) {
// generate example
example = generateExample(format, getRandom(), cl, cName);
if (example != null) {
format.add(example);
}
}
break;
case (TOTAL_UNIFORM):
// generate examples
if (!cl.isInteger()) {
generateUniformExamples(format, instNum, cl, cName);
} else {
generateUniformIntegerExamples(format, instNum, cl, cName);
}
break;
case (GAUSSIAN):
// generate examples
generateGaussianExamples(format, instNum, getRandom(), cl, cName);
break;
}
}
return format;
}
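// Minimal usage sketch (assumes the generator has been configured, e.g. via
// setOptions as sketched above, and that the calling method declares
// "throws Exception"): defineDataFormat() also stores the dataset format, so
// generateExamples() can be called directly afterwards:
//
//   SubspaceCluster gen = new SubspaceCluster();
//   gen.defineDataFormat();
//   Instances data = gen.generateExamples();
//   System.out.println(data.numInstances() + " instances generated");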
/**
* Generate an example of the dataset.
*
* @param format the dataset format
* @param randomG the random number generator to use
* @param cl the cluster definition
* @param cName the class value
* @return the generated instance
*/
private Instance generateExample(Instances format, Random randomG,
SubspaceClusterDefinition cl, String cName) {
boolean makeInteger = cl.isInteger();
int num = -1;
int numAtts = m_NumAttributes;
if (getClassFlag()) {
numAtts++;
}
double[] values = new double[numAtts];
boolean[] attributes = cl.getAttributes();
double[] minValue = cl.getMinValue();
double[] maxValue = cl.getMaxValue();
double value;
int clusterI = -1;
for (int i = 0; i < m_NumAttributes; i++) {
if (attributes[i]) {
clusterI++;
num++;
// boolean or nominal attribute
if (isBoolean(i) || isNominal(i)) {
if (minValue[clusterI] == maxValue[clusterI]) {
value = minValue[clusterI];
} else {
int numValues = (int) (maxValue[clusterI] - minValue[clusterI] + 1.0);
value = randomG.nextInt(numValues);
value += minValue[clusterI];
}
} else {
// numeric attribute
value = randomG.nextDouble() * (maxValue[num] - minValue[num])
+ minValue[num];
if (makeInteger) {
value = Math.round(value);
}
}
values[i] = value;
} else {
values[i] = Utils.missingValue();
}
}
if (getClassFlag()) {
values[format.classIndex()] = format.classAttribute().indexOfValue(cName);
}
DenseInstance example = new DenseInstance(1.0, values);
example.setDataset(format);
return example;
}
/**
* Generate examples for a uniform cluster dataset.
*
* @param format the dataset format
* @param numInstances the number of instances to generate
* @param cl the cluster definition
* @param cName the class value
*/
private void generateUniformExamples(Instances format, int numInstances,
SubspaceClusterDefinition cl, String cName) {
int numAtts = m_NumAttributes;
if (getClassFlag()) {
numAtts++;
}
boolean[] attributes = cl.getAttributes();
double[] minValue = cl.getMinValue();
double[] maxValue = cl.getMaxValue();
double[] diff = new double[minValue.length];
for (int i = 0; i < minValue.length; i++) {
diff[i] = (maxValue[i] - minValue[i]);
}
for (int j = 0; j < numInstances; j++) {
double[] values = new double[numAtts];
int num = -1;
for (int i = 0; i < m_NumAttributes; i++) {
if (attributes[i]) {
num++;
double value = minValue[num]
+ (diff[num] * ((double) j / (double) (numInstances - 1)));
values[i] = value;
} else {
values[i] = Utils.missingValue();
}
}
if (getClassFlag()) {
values[format.classIndex()] = format.classAttribute().indexOfValue(cName);
}
DenseInstance example = new DenseInstance(1.0, values);
example.setDataset(format);
format.add(example);
}
}
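// Worked sketch (numbers are illustrative only): the values above are spaced
// evenly over [min, max]; with min=0, max=10 and numInstances=5 the loop
// produces 0.0, 2.5, 5.0, 7.5, 10.0, i.e. for instance j:
//
//   double value = 0.0 + (10.0 - 0.0) * ((double) j / (double) (5 - 1));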
/**
* Generate examples for a uniform integer cluster dataset.
*
* @param format the dataset format
* @param numInstances the number of instances to generate
* @param cl the cluster definition
* @param cName the class value
*/
private void generateUniformIntegerExamples(Instances format,
int numInstances, SubspaceClusterDefinition cl, String cName) {
int numAtts = m_NumAttributes;
if (getClassFlag()) {
numAtts++;
}
double[] values = new double[numAtts];
boolean[] attributes = cl.getAttributes();
double[] minValue = cl.getMinValue();
double[] maxValue = cl.getMaxValue();
int[] minInt = new int[minValue.length];
int[] maxInt = new int[maxValue.length];
int[] intValue = new int[maxValue.length];
int[] numInt = new int[minValue.length];
int num = 1;
for (int i = 0; i < minValue.length; i++) {
minInt[i] = (int) Math.ceil(minValue[i]);
maxInt[i] = (int) Math.floor(maxValue[i]);
numInt[i] = (maxInt[i] - minInt[i] + 1);
num = num * numInt[i];
}
int numEach = numInstances / num;
int rest = numInstances - numEach * num;
// initialize with smallest values combination
for (int i = 0; i < m_NumAttributes; i++) {
if (attributes[i]) {
values[i] = minInt[i];
intValue[i] = minInt[i];
} else {
values[i] = Utils.missingValue();
}
}
if (getClassFlag()) {
values[format.classIndex()] = format.classAttribute().indexOfValue(cName);
}
DenseInstance example = new DenseInstance(1.0, values);
example.setDataset(format);
int added = 0;
int attr = 0;
// do while not added all
do {
// add all for one value combination
for (int k = 0; k < numEach; k++) {
format.add(example); // Instance will be copied here
added++;
}
if (rest > 0) {
format.add(example); // Instance will be copied here
added++;
rest--;
}
if (added >= numInstances) {
break;
}
// switch to the next value combination
boolean done = false;
do {
if (attributes[attr] && (intValue[attr] + 1 <= maxInt[attr])) {
intValue[attr]++;
done = true;
} else {
attr++;
}
} while (!done);
example.setValue(attr, intValue[attr]);
} while (added < numInstances);
}
/**
* Generate examples for a gaussian cluster dataset.
*
* @param format the dataset format
* @param numInstances the number of instances to generate
* @param random the random number generator
* @param cl the cluster definition
* @param cName the class value
*/
private void generateGaussianExamples(Instances format, int numInstances,
Random random, SubspaceClusterDefinition cl, String cName) {
boolean makeInteger = cl.isInteger();
int numAtts = m_NumAttributes;
if (getClassFlag()) {
numAtts++;
}
boolean[] attributes = cl.getAttributes();
double[] meanValue = cl.getMeanValue();
double[] stddevValue = cl.getStddevValue();
for (int j = 0; j < numInstances; j++) {
double[] values = new double[numAtts];
int num = -1;
for (int i = 0; i < m_NumAttributes; i++) {
if (attributes[i]) {
num++;
double value = meanValue[num]
+ (random.nextGaussian() * stddevValue[num]);
if (makeInteger) {
value = Math.round(value);
}
values[i] = value;
} else {
values[i] = Utils.missingValue();
}
}
if (getClassFlag()) {
values[format.classIndex()] = format.classAttribute().indexOfValue(cName);
}
DenseInstance example = new DenseInstance(1.0, values);
example.setDataset(format);
format.add(example);
}
}
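// Illustrative sketch (numbers are assumptions): each generated value is a
// plain Gaussian draw around the configured mean, i.e.
//
//   Random r = new Random(1);
//   double mean = 2.0, stddev = 0.5;
//   double value = mean + r.nextGaussian() * stddev;   // ~ N(2.0, 0.5^2)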
/**
* Compiles documentation about the data generation after the generation
* process
*
* @return string with additional information about generated dataset
* @throws Exception if no input structure has been defined
*/
@Override
public String generateFinished() throws Exception {
return "";
}
/**
* Compiles documentation about the data generation before the generation
* process
*
* @return string with additional information
*/
@Override
public String generateStart() {
StringBuffer docu = new StringBuffer();
int sumInst = 0;
for (int cNum = 0; cNum < getClusters().length; cNum++) {
SubspaceClusterDefinition cl = (SubspaceClusterDefinition) getClusters()[cNum];
docu.append("%\n");
docu.append("% Cluster: c" + cNum + " ");
switch (cl.getClusterType().getSelectedTag().getID()) {
case UNIFORM_RANDOM:
docu.append("Uniform Random");
break;
case TOTAL_UNIFORM:
docu.append("Total Random");
break;
case GAUSSIAN:
docu.append("Gaussian");
break;
}
if (cl.isInteger()) {
docu.append(" / INTEGER");
}
docu.append("\n% ----------------------------------------------\n");
docu.append("%" + cl.attributesToString());
docu.append("\n% Number of Instances: " + cl.getInstNums()
+ "\n");
docu.append("% Generated Number of Instances: " + cl.getNumInstances()
+ "\n");
sumInst += cl.getNumInstances();
}
docu.append("%\n% ----------------------------------------------\n");
docu.append("% Total Number of Instances: " + sumInst + "\n");
docu.append("% in " + getClusters().length
+ " Cluster(s)\n%");
return docu.toString();
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class.
*
* @param args should contain arguments for the data producer:
*/
public static void main(String[] args) {
runDataGenerator(new SubspaceCluster(), args);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/datagenerators/clusterers/SubspaceClusterDefinition.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SubspaceClusterDefinition.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.datagenerators.clusterers;
import java.util.Enumeration;
import java.util.Random;
import java.util.StringTokenizer;
import java.util.Vector;
import weka.core.Option;
import weka.core.Range;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Utils;
import weka.datagenerators.ClusterDefinition;
import weka.datagenerators.ClusterGenerator;
/**
* <!-- globalinfo-start --> A single cluster for the SubspaceCluster
* datagenerator
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -A <range>
* Generates randomly distributed instances in the cluster.
* </pre>
*
* <pre>
* -U <range>
* Generates uniformly distributed instances in the cluster.
* </pre>
*
* <pre>
* -G <range>
* Generates gaussian distributed instances in the cluster.
* </pre>
*
* <pre>
* -D <num>,<num>
* The attribute min/max (-A and -U) or mean/stddev (-G) for
* the cluster.
* </pre>
*
* <pre>
* -N <num>..<num>
* The range of number of instances per cluster (default 1..50).
* </pre>
*
* <pre>
* -I
* Uses integer instead of continuous values (default continuous).
* </pre>
*
* <!-- options-end -->
*
* @author Gabi Schmidberger (gabi@cs.waikato.ac.nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
* @see SubspaceCluster
*/
public class SubspaceClusterDefinition extends ClusterDefinition {
/** for serialization */
static final long serialVersionUID = 3135678125044007231L;
/** cluster type */
protected int m_clustertype;
/** cluster subtypes */
protected int m_clustersubtype;
/** number of attributes the cluster is defined for */
protected int m_numClusterAttributes;
/** number of instances for this cluster */
protected int m_numInstances;
/** minimal number of instances for this cluster */
protected int m_MinInstNum;
/** maximal number of instances for this cluster */
protected int m_MaxInstNum;
/** range of attributes */
protected Range m_AttrIndexRange;
/** attributes of this cluster */
protected boolean[] m_attributes;
/** global indices of the attributes of the cluster */
protected int[] m_attrIndices;
/** min or mean */
protected double[] m_valueA;
/** max or stddev */
protected double[] m_valueB;
/**
* initializes the cluster, without a parent cluster (necessary for GOE)
*/
public SubspaceClusterDefinition() {
super();
}
/**
* initializes the cluster with default values
*
* @param parent the datagenerator this cluster belongs to
*/
public SubspaceClusterDefinition(ClusterGenerator parent) {
super(parent);
}
/**
* sets the default values
*
* @throws Exception if setting of defaults fails
*/
@Override
protected void setDefaults() throws Exception {
setClusterType(defaultClusterType());
setClusterSubType(defaultClusterSubType());
setMinInstNum(defaultMinInstNum());
setMaxInstNum(defaultMaxInstNum());
setAttrIndexRange(defaultAttrIndexRange());
m_numClusterAttributes = 1;
setValuesList(defaultValuesList());
}
/**
* Returns a string describing this data generator.
*
* @return a description of the data generator suitable for displaying in the
* explorer/experimenter gui
*/
@Override
public String globalInfo() {
return "A single cluster for the SubspaceCluster datagenerator";
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = new Vector<Option>();
result.addElement(new Option(
"\tGenerates randomly distributed instances in the cluster.", "A", 1,
"-A <range>"));
result.addElement(new Option(
"\tGenerates uniformly distributed instances in the cluster.", "U", 1,
"-U <range>"));
result.addElement(new Option(
"\tGenerates gaussian distributed instances in the cluster.", "G", 1,
"-G <range>"));
result.addElement(new Option(
"\tThe attribute min/max (-A and -U) or mean/stddev (-G) for\n"
+ "\tthe cluster.", "D", 1, "-D <num>,<num>"));
result.addElement(new Option(
"\tThe range of number of instances per cluster (default "
+ defaultMinInstNum() + ".." + defaultMaxInstNum() + ").", "N", 1,
"-N <num>..<num>"));
result.addElement(new Option(
"\tUses integer instead of continuous values (default continuous).", "I",
0, "-I"));
return result.elements();
}
/**
* Parses a list of options for this object.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -A <range>
* Generates randomly distributed instances in the cluster.
* </pre>
*
* <pre>
* -U <range>
* Generates uniformly distributed instances in the cluster.
* </pre>
*
* <pre>
* -G <range>
* Generates gaussian distributed instances in the cluster.
* </pre>
*
* <pre>
* -D <num>,<num>
* The attribute min/max (-A and -U) or mean/stddev (-G) for
* the cluster.
* </pre>
*
* <pre>
* -N <num>..<num>
* The range of number of instances per cluster (default 1..50).
* </pre>
*
* <pre>
* -I
* Uses integer instead of continuous values (default continuous).
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
String fromToStr;
int typeCount;
typeCount = 0;
fromToStr = "";
tmpStr = Utils.getOption('A', options);
if (tmpStr.length() != 0) {
fromToStr = tmpStr;
setClusterType(new SelectedTag(SubspaceCluster.UNIFORM_RANDOM,
SubspaceCluster.TAGS_CLUSTERTYPE));
typeCount++;
}
tmpStr = Utils.getOption('U', options);
if (tmpStr.length() != 0) {
fromToStr = tmpStr;
setClusterType(new SelectedTag(SubspaceCluster.TOTAL_UNIFORM,
SubspaceCluster.TAGS_CLUSTERTYPE));
typeCount++;
}
tmpStr = Utils.getOption('G', options);
if (tmpStr.length() != 0) {
fromToStr = tmpStr;
setClusterType(new SelectedTag(SubspaceCluster.GAUSSIAN,
SubspaceCluster.TAGS_CLUSTERTYPE));
typeCount++;
}
// default is uniform/random
if (typeCount == 0) {
setClusterType(new SelectedTag(SubspaceCluster.UNIFORM_RANDOM,
SubspaceCluster.TAGS_CLUSTERTYPE));
} else if (typeCount > 1) {
throw new Exception("Only one cluster type can be specified!");
}
setAttrIndexRange(fromToStr);
tmpStr = Utils.getOption('D', options);
if (tmpStr.length() != 0) {
setValuesList(tmpStr);
} else {
setValuesList(defaultValuesList());
}
tmpStr = Utils.getOption('N', options);
if (tmpStr.length() != 0) {
setInstNums(tmpStr);
} else {
setInstNums(defaultMinInstNum() + ".." + defaultMaxInstNum());
}
if (Utils.getFlag('I', options)) {
setClusterSubType(new SelectedTag(SubspaceCluster.INTEGER,
SubspaceCluster.TAGS_CLUSTERSUBTYPE));
} else {
setClusterSubType(new SelectedTag(SubspaceCluster.CONTINUOUS,
SubspaceCluster.TAGS_CLUSTERSUBTYPE));
}
}
/**
* Gets the current settings of the datagenerator BIRCHCluster.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result;
result = new Vector<String>();
if (isRandom()) {
result.add("-A");
result.add("" + getAttrIndexRange());
} else if (isUniform()) {
result.add("-U");
result.add("" + getAttrIndexRange());
} else if (isGaussian()) {
result.add("-G");
result.add("" + getAttrIndexRange());
}
result.add("-D");
result.add("" + getValuesList());
result.add("-N");
result.add("" + getInstNums());
if (m_clustersubtype == SubspaceCluster.INTEGER) {
result.add("-I");
}
return result.toArray(new String[result.size()]);
}
/**
* Make a string from the attributes list.
*
* @return the attributes as string
*/
public String attributesToString() {
StringBuffer text = new StringBuffer();
int j = 0;
for (int i = 0; i < m_attributes.length; i++) {
if (m_attributes[i]) {
if (isGaussian()) {
text.append(" Attribute: " + i);
text.append(" Mean: " + m_valueA[j]);
text.append(" StdDev: " + m_valueB[j] + "\n%");
} else {
text.append(" Attribute: " + i);
text.append(" Range: " + m_valueA[j]);
text.append(" - " + m_valueB[j] + "\n%");
}
j++;
}
}
return text.toString();
}
/**
* Make a string from the cluster features.
*
* @return the cluster features as string
*/
@Override
public String toString() {
StringBuffer text = new StringBuffer();
text.append("attributes " + attributesToString() + "\n");
text.append("number of instances " + getInstNums());
return text.toString();
}
/**
* sets the parent datagenerator this cluster belongs to
*
* @param parent the parent datagenerator
*/
public void setParent(SubspaceCluster parent) {
super.setParent(parent);
m_AttrIndexRange.setUpper(getParent().getNumAttributes());
}
/**
* returns the default attribute index range
*
* @return the default attribute index range
*/
protected String defaultAttrIndexRange() {
return "1";
}
/**
* Sets which attributes of the dataset are used in this cluster definition.
*
* @param rangeList a string representing the list of attributes. Since the
* string will typically come from a user, attributes are indexed
* from 1. <br/>
* eg: first-3,5,6-last
*/
public void setAttrIndexRange(String rangeList) {
m_numClusterAttributes = 0;
if (m_AttrIndexRange == null) {
m_AttrIndexRange = new Range();
}
m_AttrIndexRange.setRanges(rangeList);
if (getParent() != null) {
m_AttrIndexRange.setUpper(getParent().getNumAttributes());
m_attributes = new boolean[getParent().getNumAttributes()];
for (int i = 0; i < m_attributes.length; i++) {
if (m_AttrIndexRange.isInRange(i)) {
m_numClusterAttributes++;
m_attributes[i] = true;
} else {
m_attributes[i] = false;
}
}
// store translation from attr in cluster to attr in whole dataset
m_attrIndices = new int[m_numClusterAttributes];
int clusterI = -1;
for (int i = 0; i < m_attributes.length; i++) {
if (m_AttrIndexRange.isInRange(i)) {
clusterI++;
m_attrIndices[clusterI] = i;
}
}
}
}
/**
* returns the attribute range(s).
*
* @return the attribute range(s).
*/
public String getAttrIndexRange() {
return m_AttrIndexRange.getRanges();
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String attrIndexRangeTipText() {
return "The attribute range(s).";
}
/** returns the flags indicating which dataset attributes this cluster covers */
public boolean[] getAttributes() {
return m_attributes;
}
/** returns the minimum value per cluster attribute (-A and -U) */
public double[] getMinValue() {
return m_valueA;
}
/** returns the maximum value per cluster attribute (-A and -U) */
public double[] getMaxValue() {
return m_valueB;
}
/** returns the mean per cluster attribute (-G) */
public double[] getMeanValue() {
return m_valueA;
}
/** returns the standard deviation per cluster attribute (-G) */
public double[] getStddevValue() {
return m_valueB;
}
/** returns the actual number of instances generated for this cluster */
public int getNumInstances() {
return m_numInstances;
}
/**
* returns the default cluster type
*
* @return the default cluster type
*/
protected SelectedTag defaultClusterType() {
return new SelectedTag(SubspaceCluster.UNIFORM_RANDOM,
SubspaceCluster.TAGS_CLUSTERTYPE);
}
/**
* Gets the cluster type.
*
* @return the cluster type
* @see SubspaceCluster#TAGS_CLUSTERTYPE
*/
public SelectedTag getClusterType() {
return new SelectedTag(m_clustertype, SubspaceCluster.TAGS_CLUSTERTYPE);
}
/**
* Sets the cluster type.
*
* @param value the new cluster type.
* @see SubspaceCluster#TAGS_CLUSTERTYPE
*/
public void setClusterType(SelectedTag value) {
if (value.getTags() == SubspaceCluster.TAGS_CLUSTERTYPE) {
m_clustertype = value.getSelectedTag().getID();
}
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String clusterTypeTipText() {
return "The type of cluster to use.";
}
/**
* returns the default cluster sub type
*
* @return the default cluster sub type
*/
protected SelectedTag defaultClusterSubType() {
return new SelectedTag(SubspaceCluster.CONTINUOUS,
SubspaceCluster.TAGS_CLUSTERSUBTYPE);
}
/**
* Gets the cluster sub type.
*
* @return the cluster sub type
* @see SubspaceCluster#TAGS_CLUSTERSUBTYPE
*/
public SelectedTag getClusterSubType() {
return new SelectedTag(m_clustersubtype,
SubspaceCluster.TAGS_CLUSTERSUBTYPE);
}
/**
* Sets the cluster sub type.
*
* @param value the new cluster sub type.
* @see SubspaceCluster#TAGS_CLUSTERSUBTYPE
*/
public void setClusterSubType(SelectedTag value) {
if (value.getTags() == SubspaceCluster.TAGS_CLUSTERSUBTYPE) {
m_clustersubtype = value.getSelectedTag().getID();
}
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String clusterSubTypeTipText() {
return "The sub-type of cluster to use.";
}
/**
* checks, whether cluster type is random
*
* @return true if cluster type is random
*/
public boolean isRandom() {
return (m_clustertype == SubspaceCluster.UNIFORM_RANDOM);
}
/**
* checks, whether cluster type is uniform
*
* @return true if cluster type is uniform
*/
public boolean isUniform() {
return (m_clustertype == SubspaceCluster.TOTAL_UNIFORM);
}
/**
* checks, whether cluster type is gaussian
*
* @return true if cluster type is gaussian
*/
public boolean isGaussian() {
return (m_clustertype == SubspaceCluster.GAUSSIAN);
}
/**
* checks, whether cluster sub type is continuous
*
* @return true if cluster sub type is continuous
*/
public boolean isContinuous() {
return (m_clustersubtype == SubspaceCluster.CONTINUOUS);
}
/**
* checks, whether cluster sub type is integer
*
* @return true if cluster sub type is integer
*/
public boolean isInteger() {
return (m_clustersubtype == SubspaceCluster.INTEGER);
}
/**
* Sets the upper and lower boundary for instances for this cluster.
*
* @param fromTo the string containing the upper and lower boundary for
* instances per cluster separated by ..
*/
protected void setInstNums(String fromTo) {
int i = fromTo.indexOf("..");
if (i == -1) {
i = fromTo.length();
}
String from = fromTo.substring(0, i);
m_MinInstNum = Integer.parseInt(from);
if (i < fromTo.length()) {
String to = fromTo.substring(i + 2, fromTo.length());
m_MaxInstNum = Integer.parseInt(to);
} else {
m_MaxInstNum = m_MinInstNum;
}
}
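// Illustrative sketch (values are assumptions): this method receives the
// value of the -N option, e.g.
//
//   setInstNums("5..20");   // m_MinInstNum = 5, m_MaxInstNum = 20
//   setInstNums("7");       // m_MinInstNum = 7, m_MaxInstNum = 7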
/**
* Get a string with the upper and lower boundary for the number of instances
* for this cluster.
*
* @return the string containing the upper and lower boundary for instances
* per cluster separated by ..
*/
protected String getInstNums() {
return m_MinInstNum + ".." + m_MaxInstNum;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
protected String instNumsTipText() {
return "The lower and upper boundary for the number of instances in this cluster.";
}
/**
* returns the default min number of instances
*
* @return the default min number of instances
*/
protected int defaultMinInstNum() {
return 1;
}
/**
* Gets the lower boundary for instances per cluster.
*
* @return the lower boundary for instances per cluster
*/
public int getMinInstNum() {
return m_MinInstNum;
}
/**
* Sets the lower boundary for instances per cluster.
*
* @param newMinInstNum new lower boundary for instances per cluster
*/
public void setMinInstNum(int newMinInstNum) {
m_MinInstNum = newMinInstNum;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String minInstNumTipText() {
return "The lower boundary for instances per cluster.";
}
/**
* returns the default max number of instances
*
* @return the default max number of instances
*/
protected int defaultMaxInstNum() {
return 50;
}
/**
* Gets the upper boundary for instances per cluster.
*
* @return the upper boundary for instances per cluster
*/
public int getMaxInstNum() {
return m_MaxInstNum;
}
/**
* Sets the upper boundary for instances per cluster.
*
* @param newMaxInstNum new upper boundary for instances per cluster
*/
public void setMaxInstNum(int newMaxInstNum) {
m_MaxInstNum = newMaxInstNum;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String maxInstNumTipText() {
return "The upper boundary for instances per cluster.";
}
/**
* Sets the real number of instances for this cluster.
*
* @param r random number generator
*/
public void setNumInstances(Random r) {
if (m_MaxInstNum > m_MinInstNum) {
m_numInstances = (int) (r.nextDouble() * (m_MaxInstNum - m_MinInstNum) + m_MinInstNum);
} else {
m_numInstances = m_MinInstNum;
}
}
/**
* returns the default values list
*
* @return the default values list
*/
protected String defaultValuesList() {
return "1,10";
}
/**
* Sets the ranges for each attribute.
*
* @param fromToList the comma-separated list of min/max (or mean/stddev)
* values, one pair per cluster attribute
* @throws Exception if values are not correct in number or value
*/
public void setValuesList(String fromToList) throws Exception {
m_valueA = new double[m_numClusterAttributes];
m_valueB = new double[m_numClusterAttributes];
setValuesList(fromToList, m_valueA, m_valueB, "D");
SubspaceCluster parent = (SubspaceCluster) getParent();
for (int i = 0; i < m_numClusterAttributes; i++) {
if ((!isGaussian()) && (m_valueA[i] > m_valueB[i])) {
throw new Exception("Min must be smaller than max.");
}
if (getParent() != null) {
// boolean values are only 0.0 and 1.0
if (parent.isBoolean(m_attrIndices[i])) {
parent.getNumValues()[m_attrIndices[i]] = 2;
if (((m_valueA[i] != 0.0) && (m_valueA[i] != 1.0))
|| ((m_valueB[i] != 0.0) && (m_valueB[i] != 1.0))) {
throw new Exception("Ranges for boolean must be 0 or 1 only.");
}
}
if (parent.isNominal(m_attrIndices[i])) {
// nominal values: attributes range might have to be enlarged
double rest = m_valueA[i] - Math.rint(m_valueA[i]);
if (rest != 0.0) {
throw new Exception(" Ranges for nominal must be integer");
}
rest = m_valueB[i] - Math.rint(m_valueB[i]);
if (rest != 0.0) {
throw new Exception("Ranges for nominal must be integer");
}
if (m_valueA[i] < 0.0) {
throw new Exception(
"Range for nominal must start with number 0.0 or higher");
}
if (m_valueB[i] + 1 > parent.getNumValues()[m_attrIndices[i]]) {
// add new values to attribute
// (actual format is not yet defined)
parent.getNumValues()[m_attrIndices[i]] = (int) m_valueB[i] + 1;
}
}
}
}
}
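// Illustrative sketch (values are assumptions): for a definition that covers
// two attributes, the -D list holds one pair per attribute, read in the order
// min,max (or mean,stddev for -G); "1,5,0,10" therefore yields
//
//   m_valueA = { 1.0, 0.0 };    // mins (or means)
//   m_valueB = { 5.0, 10.0 };   // maxs (or stddevs)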
/**
* returns the range for each attribute as string
*/
public String getValuesList() {
String result;
int i;
result = "";
if (m_valueA != null) {
for (i = 0; i < m_valueA.length; i++) {
if (i > 0) {
result += ",";
}
result += "" + m_valueA[i] + "," + m_valueB[i];
}
}
return result;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String valuesListTipText() {
return "The min (mean) and max (standard deviation) for each attribute as a comma-separated string.";
}
/**
* Sets the ranges for each attribute.
*
* @param fromToList the comma-separated list of min/max (or mean/stddev)
* values, one pair per cluster attribute
* @param first the "from's"
* @param second the "to's"
* @param optionLetter the option, from which the list came
* @throws Exception if values are not correct in number or value
*/
public void setValuesList(String fromToList, double[] first, double[] second,
String optionLetter) throws Exception {
StringTokenizer tok;
int index;
tok = new StringTokenizer(fromToList, ",");
if (tok.countTokens() != first.length + second.length) {
throw new Exception("Wrong number of values for option '-" + optionLetter
+ "'.");
}
index = 0;
while (tok.hasMoreTokens()) {
first[index] = Double.parseDouble(tok.nextToken());
second[index] = Double.parseDouble(tok.nextToken());
index++;
}
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/CheckEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CheckEstimator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.TestInstances;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;
/**
* Class for examining the capabilities and finding problems with estimators. If
* you implement an estimator using the WEKA libraries, you should run the checks
* on it to ensure robustness and correct operation. Passing all the tests of
* this object does not mean bugs in the estimator don't exist, but this will
* help find some common ones.
* <p/>
*
* Typical usage:
* <p/>
* <code>java weka.estimators.CheckEstimator -W estimator_name
* estimator_options </code>
* <p/>
*
* This class uses code from the CheckClassifier class. ATTENTION! Current
* estimators can only 1. split on a nominal class attribute, 2. build
* estimators for nominal and numeric attributes, and 3. build estimators
* independently of the class type. The functionality to test on other class
* and attribute types is largely left in the code.
*
* CheckEstimator reports on the following:
* <ul>
* <li>Estimator abilities
* <ul>
* <li>Possible command line options to the estimator</li>
* <li>Whether the estimator can predict nominal, numeric, string, date or
* relational class attributes. Warnings will be displayed if performance is
* worse than ZeroR</li>
* <li>Whether the estimator can be trained incrementally</li>
* <li>Whether the estimator can build estimates for numeric attributes</li>
* <li>Whether the estimator can handle nominal attributes</li>
* <li>Whether the estimator can handle string attributes</li>
* <li>Whether the estimator can handle date attributes</li>
* <li>Whether the estimator can handle relational attributes</li>
* <li>Whether the estimator build estimates for multi-instance data</li>
* <li>Whether the estimator can handle missing attribute values</li>
* <li>Whether the estimator can handle missing class values</li>
* <li>Whether a nominal estimator only handles 2 class problems</li>
* <li>Whether the estimator can handle instance weights</li>
* </ul>
* </li>
* <li>Correct functioning
* <ul>
* <li>Correct initialisation during addvalues (i.e. no result changes when
* addValues called repeatedly)</li>
* <li>Whether incremental training produces the same results as during
* non-incremental training (which may or may not be OK)</li>
* <li>Whether the estimator alters the data pased to it (number of instances,
* instance order, instance weights, etc)</li>
* </ul>
* </li>
* <li>Degenerate cases
* <ul>
* <li>building estimator with zero training instances</li>
* <li>all but one attribute attribute values missing</li>
* <li>all attribute attribute values missing</li>
* <li>all but one class values missing</li>
* <li>all class values missing</li>
* </ul>
* </li>
* </ul>
* Running CheckEstimator with the debug option set will output the training and
* test datasets for any failed tests.
* <p/>
*
* The <code>weka.estimators.AbstractEstimatorTest</code> uses this class to
* test all the estimators. Any changes here have to be checked in that
* abstract test class, too.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D
* Turn on debugging output.
* </pre>
*
* <pre>
* -S
* Silent mode - prints nothing to stdout.
* </pre>
*
* <pre>
* -N <num>
* The number of instances in the datasets (default 100).
* </pre>
*
* <pre>
* -W
* Full name of the estimator analysed.
* eg: weka.estimators.NormalEstimator
* </pre>
*
* <pre>
* Options specific to estimator weka.estimators.NormalEstimator:
* </pre>
*
* <pre>
* -D
* If set, estimator is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* Options after -- are passed to the designated estimator.
* <p/>
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
* @see TestInstances
*/
public class CheckEstimator implements OptionHandler, RevisionHandler {
/*
* Note about test methods: - methods return array of booleans - first index:
* success or not - second index: acceptable or not (e.g., Exception is OK) -
* in case the performance is worse than that of ZeroR both indices are true
*
* FracPete (fracpete at waikato dot ac dot nz)
*/
/**
* a class for postprocessing the test-data
*/
public class PostProcessor implements RevisionHandler {
/**
* Provides a hook for derived classes to further modify the data.
* Currently, the data is just passed through.
*
* @param data the data to process
* @return the processed data
*/
protected Instances process(Instances data) {
return data;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
/** The estimator to be examined */
protected Estimator m_Estimator = new weka.estimators.NormalEstimator(
0.000001);
/** The options to be passed to the base estimator. */
protected String[] m_EstimatorOptions;
/** The results of the analysis as a string */
protected String m_AnalysisResults;
/** Debugging mode, gives extra output if true */
protected boolean m_Debug = false;
/** Silent mode, for no output at all to stdout */
protected boolean m_Silent = false;
/** The number of instances in the datasets */
protected int m_NumInstances = 100;
/** for post-processing the data even further */
protected PostProcessor m_PostProcessor = null;
/** whether classpath problems occurred */
protected boolean m_ClasspathProblems = false;
/**
* class that contains info about the attribute types the estimator can
* estimate; estimators work on one attribute only
*/
public static class AttrTypes implements RevisionHandler {
boolean nominal = false;
boolean numeric = false;
boolean string = false;
boolean date = false;
boolean relational = false;
AttrTypes() {
}
AttrTypes(AttrTypes newTypes) {
nominal = newTypes.nominal;
numeric = newTypes.numeric;
string = newTypes.string;
date = newTypes.date;
relational = newTypes.relational;
}
AttrTypes(int type) {
if (type == Attribute.NOMINAL) {
nominal = true;
}
if (type == Attribute.NUMERIC) {
numeric = true;
}
if (type == Attribute.STRING) {
string = true;
}
if (type == Attribute.DATE) {
date = true;
}
if (type == Attribute.RELATIONAL) {
relational = true;
}
}
int getSetType() throws Exception {
int sum = 0;
int type = -1;
if (nominal) {
sum++;
type = Attribute.NOMINAL;
}
if (numeric) {
sum++;
type = Attribute.NUMERIC;
}
if (string) {
sum++;
type = Attribute.STRING;
}
if (date) {
sum++;
type = Attribute.DATE;
}
if (relational) {
sum++;
type = Attribute.RELATIONAL;
}
if (sum > 1) {
throw new Exception("Expected to have only one type set used wrongly.");
}
if (type < 0) {
throw new Exception("No type set.");
}
return type;
}
boolean oneIsSet() {
return (nominal || numeric || string || date || relational);
}
public Vector<Integer> getVectorOfAttrTypes() {
Vector<Integer> attrs = new Vector<Integer>();
if (nominal) {
attrs.add(new Integer(Attribute.NOMINAL));
}
if (numeric) {
attrs.add(new Integer(Attribute.NUMERIC));
}
if (string) {
attrs.add(new Integer(Attribute.STRING));
}
if (date) {
attrs.add(new Integer(Attribute.DATE));
}
if (relational) {
attrs.add(new Integer(Attribute.RELATIONAL));
}
return attrs;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
/**
* public class that contains info about the estimator type (incremental,
* weighted, supervised); estimators work on one attribute only
*/
public static class EstTypes implements RevisionHandler {
boolean incremental = false;
boolean weighted = false;
boolean supervised = false;
/**
* Constructor
*/
public EstTypes() {
}
/**
* Constructor
*/
public EstTypes(boolean i, boolean w, boolean s) {
incremental = i;
weighted = w;
supervised = s;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(4);
newVector
.addElement(new Option("\tTurn on debugging output.", "D", 0, "-D"));
newVector.addElement(new Option(
"\tSilent mode - prints nothing to stdout.", "S", 0, "-S"));
newVector.addElement(new Option(
"\tThe number of instances in the datasets (default 100).", "N", 1,
"-N <num>"));
newVector.addElement(new Option("\tFull name of the estimator analysed.\n"
+ "\teg: weka.estimators.NormalEstimator", "W", 1, "-W"));
if ((m_Estimator != null) && (m_Estimator instanceof OptionHandler)) {
newVector.addElement(new Option("", "", 0,
"\nOptions specific to estimator " + m_Estimator.getClass().getName()
+ ":"));
newVector.addAll(Collections.list(((OptionHandler) m_Estimator)
.listOptions()));
}
return newVector.elements();
}
/**
* Parses a given list of options.
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D
* Turn on debugging output.
* </pre>
*
* <pre>
* -S
* Silent mode - prints nothing to stdout.
* </pre>
*
* <pre>
* -N <num>
* The number of instances in the datasets (default 100).
* </pre>
*
* <pre>
* -W
* Full name of the estimator analysed.
* eg: weka.estimators.NormalEstimator
* </pre>
*
* <pre>
* Options specific to estimator weka.estimators.NormalEstimator:
* </pre>
*
* <pre>
* -D
* If set, estimator is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
setDebug(Utils.getFlag('D', options));
setSilent(Utils.getFlag('S', options));
tmpStr = Utils.getOption('N', options);
if (tmpStr.length() != 0) {
setNumInstances(Integer.parseInt(tmpStr));
} else {
setNumInstances(100);
}
tmpStr = Utils.getOption('W', options);
if (tmpStr.length() == 0) {
throw new Exception("A estimator must be specified with the -W option.");
}
setEstimator(Estimator.forName(tmpStr, Utils.partitionOptions(options)));
}
/**
* Gets the current settings of the CheckEstimator.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result = new Vector<String>();
if (getDebug()) {
result.add("-D");
}
if (getSilent()) {
result.add("-S");
}
result.add("-N");
result.add("" + getNumInstances());
if (getEstimator() != null) {
result.add("-W");
result.add(getEstimator().getClass().getName());
}
if ((m_Estimator != null) && (m_Estimator instanceof OptionHandler)) {
String[] options = ((OptionHandler) m_Estimator).getOptions();
if (options.length > 0) {
result.add("--");
Collections.addAll(result, options);
}
}
return result.toArray(new String[result.size()]);
}
/**
* sets the PostProcessor to use
*
* @param value the new PostProcessor
* @see #m_PostProcessor
*/
public void setPostProcessor(PostProcessor value) {
m_PostProcessor = value;
}
/**
* returns the current PostProcessor, can be null
*
* @return the current PostProcessor
*/
public PostProcessor getPostProcessor() {
return m_PostProcessor;
}
/**
* returns TRUE if the estimator returned a "not in classpath" Exception
*
* @return true if CLASSPATH problems occurred
*/
public boolean hasClasspathProblems() {
return m_ClasspathProblems;
}
/**
* Begin the tests, reporting results to System.out
*/
public void doTests() {
if (getEstimator() == null) {
println("\n=== No estimator set ===");
return;
}
println("\n=== Check on Estimator: " + getEstimator().getClass().getName()
+ " ===\n");
m_ClasspathProblems = false;
// Start tests with test for options
canTakeOptions();
// test what type of estimator it is
EstTypes estTypes = new EstTypes();
estTypes.incremental = incrementalEstimator()[0];
estTypes.weighted = weightedInstancesHandler()[0];
estTypes.supervised = supervisedEstimator()[0];
    // none of the estimators' functionality depends on the class type yet;
    // since this could change, the basic structure taken from CheckClassifier
    // is kept here
int classType = Attribute.NOMINAL;
AttrTypes attrTypes = testsPerClassType(classType, estTypes);
// only nominal class can be split up so far
canSplitUpClass(attrTypes, classType);
}
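  // Illustrative sketch (assumption, mirroring what main(String[]) does via options): the test
  // battery can also be started directly from code, e.g.
  //   CheckEstimator check = new CheckEstimator();
  //   check.setEstimator(new weka.estimators.NormalEstimator(0.01));
  //   check.setNumInstances(100);
  //   check.doTests();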
/**
* Set debugging mode
*
* @param debug true if debug output should be printed
*/
public void setDebug(boolean debug) {
m_Debug = debug;
// disable silent mode, if necessary
if (getDebug()) {
setSilent(false);
}
}
/**
* Get whether debugging is turned on
*
* @return true if debugging output is on
*/
public boolean getDebug() {
return m_Debug;
}
/**
   * Set silent mode, i.e., no output at all to stdout
*
* @param value whether silent mode is active or not
*/
public void setSilent(boolean value) {
m_Silent = value;
}
/**
* Get whether silent mode is turned on
*
* @return true if silent mode is on
*/
public boolean getSilent() {
return m_Silent;
}
/**
* Sets the number of instances to use in the datasets (some estimators might
* require more instances).
*
* @param value the number of instances to use
*/
public void setNumInstances(int value) {
m_NumInstances = value;
}
/**
* Gets the current number of instances to use for the datasets.
*
* @return the number of instances
*/
public int getNumInstances() {
return m_NumInstances;
}
/**
   * Set the estimator to be tested.
*
* @param newEstimator the Estimator to use.
*/
public void setEstimator(Estimator newEstimator) {
m_Estimator = newEstimator;
}
/**
   * Get the estimator being tested.
   *
   * @return the estimator being tested
*/
public Estimator getEstimator() {
return m_Estimator;
}
/**
* prints the given message to stdout, if not silent mode
*
* @param msg the text to print to stdout
*/
protected void print(Object msg) {
if (!getSilent()) {
System.out.print(msg);
}
}
/**
* prints the given message (+ LF) to stdout, if not silent mode
*
* @param msg the message to println to stdout
*/
protected void println(Object msg) {
print(msg + "\n");
}
/**
* prints a LF to stdout, if not silent mode
*/
protected void println() {
print("\n");
}
/**
* Run a battery of tests for a given class attribute type
*
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param estTypes the capabilities of the estimator (incremental, weighted,
   *          supervised, etc.)
* @return attribute types estimator can work with
*/
protected AttrTypes testsPerClassType(int classType, EstTypes estTypes) {
    // none of the estimators' estimation depends on the class type yet;
    // since this could change, the basic structure taken from CheckClassifier
    // is kept here
// test A: simple test - if can estimate
AttrTypes attrTypes = new AttrTypes();
AttrTypes at = new AttrTypes(Attribute.NOMINAL);
attrTypes.nominal = canEstimate(at, estTypes.supervised, classType)[0];
at = new AttrTypes(Attribute.NUMERIC);
attrTypes.numeric = canEstimate(at, estTypes.supervised, classType)[0];
attrTypes.string = false;
attrTypes.date = false;
attrTypes.relational = false;
// if (!multiInstance)
// PRel = canEstimate(false, false, false, false, true, classType)[0];
// else
// PRel = false;
// one of the attribute types succeeded
if (attrTypes.oneIsSet()) {
Vector<Integer> attributesSet = attrTypes.getVectorOfAttrTypes();
// make tests for each attribute
for (int i = 0; i < attributesSet.size(); i++) {
AttrTypes workAttrTypes = new AttrTypes(attributesSet.elementAt(i)
.intValue());
// test B: weights change estimate or not
if (estTypes.weighted) {
instanceWeights(workAttrTypes, classType);
}
if (classType == Attribute.NOMINAL) {
int numClasses = 4;
canHandleNClasses(workAttrTypes, numClasses);
}
// tests with class not the last attribute and the attribute not the
// first
// if (!multiInstance) {
int numAtt = 4;
canHandleClassAsNthAttribute(workAttrTypes, numAtt, 0, classType, 1);
// TODOTODOcanHandleAttrAsNthAttribute(workAttrTypes, numAtt, 2,
// classType);
// }
canHandleZeroTraining(workAttrTypes, classType);
boolean handleMissingAttributes = canHandleMissing(workAttrTypes,
classType, true, false, 20)[0];
if (handleMissingAttributes) {
canHandleMissing(workAttrTypes, classType, true, false, 100);
}
boolean handleMissingClass = canHandleMissing(workAttrTypes, classType,
false, true, 20)[0];
if (handleMissingClass) {
canHandleMissing(workAttrTypes, classType, false, true, 100);
}
correctBuildInitialisation(workAttrTypes, classType);
datasetIntegrity(workAttrTypes, classType, handleMissingAttributes,
handleMissingClass);
if (estTypes.incremental) {
incrementingEquality(workAttrTypes, classType);
}
}
}
return attrTypes;
}
/**
* Checks whether the scheme can take command line options.
*
* @return index 0 is true if the estimator can take options
*/
protected boolean[] canTakeOptions() {
boolean[] result = new boolean[2];
print("options...");
if (m_Estimator instanceof OptionHandler) {
println("yes");
if (m_Debug) {
println("\n=== Full report ===");
Enumeration<Option> enu = ((OptionHandler) m_Estimator).listOptions();
while (enu.hasMoreElements()) {
Option option = enu.nextElement();
print(option.synopsis() + "\n" + option.description() + "\n");
}
println("\n");
}
result[0] = true;
} else {
println("no");
result[0] = false;
}
return result;
}
/**
* Checks whether the scheme can build models incrementally.
*
* @return index 0 is true if the estimator can train incrementally
*/
protected boolean[] incrementalEstimator() {
boolean[] result = new boolean[2];
print("incremental estimator...");
if (m_Estimator instanceof IncrementalEstimator) {
println("yes");
result[0] = true;
} else {
println("no");
result[0] = false;
}
return result;
}
/**
* Checks whether the scheme says it can handle instance weights.
*
   * @return index 0 is true if the estimator handles instance weights
*/
protected boolean[] weightedInstancesHandler() {
boolean[] result = new boolean[2];
print("weighted instances estimator...");
if (m_Estimator instanceof WeightedInstancesHandler) {
println("yes");
result[0] = true;
} else {
println("no");
result[0] = false;
}
return result;
}
/**
* Checks whether the estimator is supervised.
*
   * @return index 0 is true if the estimator is supervised
*/
protected boolean[] supervisedEstimator() {
boolean[] result = new boolean[2];
result[0] = false;
return result;
}
/**
* Checks basic estimation of one attribute of the scheme, for simple
* non-troublesome datasets.
*
   * @param attrTypes the types the estimator can work with
   * @param supervised whether the estimator is supervised (currently ignored)
   * @param classType the class type (NOMINAL, NUMERIC, etc.)
* @return index 0 is true if the test was passed, index 1 is true if test was
* acceptable
*/
protected boolean[] canEstimate(AttrTypes attrTypes, boolean supervised,
int classType) {
// supervised is ignored, no supervised estimators used yet
print("basic estimation");
printAttributeSummary(attrTypes, classType);
print("...");
ArrayList<String> accepts = new ArrayList<String>();
accepts.add("nominal");
accepts.add("numeric");
accepts.add("string");
accepts.add("date");
accepts.add("relational");
accepts.add("not in classpath");
int numTrain = getNumInstances(), numTest = getNumInstances(), numClasses = 2, missingLevel = 0;
boolean attributeMissing = false, classMissing = false;
int numAtts = 1, attrIndex = 0;
return runBasicTest(attrTypes, numAtts, attrIndex, classType, missingLevel,
attributeMissing, classMissing, numTrain, numTest, numClasses, accepts);
}
/**
   * Checks whether the estimator can be built per class value, for the
   * attribute types the estimator supports, using simple non-troublesome
   * datasets.
*
* @param attrTypes the types the estimator can work with
* @param classType the class type (NOMINAL, NUMERIC, etc.)
*/
protected void canSplitUpClass(AttrTypes attrTypes, int classType) {
if (attrTypes.nominal) {
canSplitUpClass(Attribute.NOMINAL, classType);
}
if (attrTypes.numeric) {
canSplitUpClass(Attribute.NUMERIC, classType);
}
}
/**
   * Checks whether the estimator can be built per class value for one
   * attribute type, using simple non-troublesome datasets.
*
   * @param attrType the attribute type used for the test
* @param classType the class type (NOMINAL, NUMERIC, etc.)
* @return index 0 is true if the test was passed, index 1 is true if test was
* acceptable
*/
protected boolean[] canSplitUpClass(int attrType, int classType) {
boolean[] result = new boolean[2];
ArrayList<String> accepts = new ArrayList<String>();
accepts.add("not in classpath");
// supervised is ignored, no supervised estimators used yet
print("split per class type ");
printAttributeSummary(attrType, Attribute.NOMINAL);
print("...");
int numTrain = getNumInstances(), numTest = getNumInstances(), numClasses = 2;
int numAtts = 3, attrIndex = 0, classIndex = 1;
Instances train = null;
Vector<Double> test;
Estimator estimator = null;
boolean built = false;
try {
AttrTypes at = new AttrTypes(attrType);
train = makeTestDataset(42, numTrain, numAtts, at, numClasses, classType,
classIndex);
// prepare training data set and test value list
test = makeTestValueList(24, numTest, train, attrIndex, attrType);
estimator = Estimator.makeCopies(getEstimator(), 1)[0];
} catch (Exception ex) {
ex.printStackTrace();
throw new Error("Error setting up for tests: " + ex.getMessage());
}
try {
estimator.addValues(train, attrIndex, classType, classIndex);
built = true;
testWithTestValues(estimator, test);
println("yes");
result[0] = true;
} catch (Exception ex) {
boolean acceptable = false;
String msg;
if (ex.getMessage() == null) {
msg = "";
} else {
msg = ex.getMessage().toLowerCase();
}
if (msg.indexOf("not in classpath") > -1) {
m_ClasspathProblems = true;
}
for (int i = 0; i < accepts.size(); i++) {
if (msg.indexOf(accepts.get(i)) >= 0) {
acceptable = true;
}
}
println("no" + (acceptable ? " (OK error message)" : ""));
result[1] = acceptable;
if (m_Debug) {
println("\n=== Full Report ===");
print("Problem during");
if (built) {
print(" testing");
} else {
print(" training");
}
println(": " + ex.getMessage() + "\n");
if (!acceptable) {
if (accepts.size() > 0) {
print("Error message doesn't mention ");
for (int i = 0; i < accepts.size(); i++) {
if (i != 0) {
print(" or ");
}
print('"' + accepts.get(i) + '"');
}
}
println("here are the datasets:\n");
println("=== Train Dataset ===\n" + train.toString() + "\n");
println("=== Test Dataset ===\n" + test.toString() + "\n\n");
}
}
}
return result;
}
/**
* Checks whether nominal schemes can handle more than two classes. If a
* scheme is only designed for two-class problems it should throw an
* appropriate exception for multi-class problems.
*
   * @param attrTypes attribute types the estimator accepts
* @param numClasses the number of classes to test
* @return index 0 is true if the test was passed, index 1 is true if test was
* acceptable
*/
protected boolean[] canHandleNClasses(AttrTypes attrTypes, int numClasses) {
print("more than two class problems");
printAttributeSummary(attrTypes, Attribute.NOMINAL);
print("...");
ArrayList<String> accepts = new ArrayList<String>();
accepts.add("number");
accepts.add("class");
int numTrain = getNumInstances(), numTest = getNumInstances(), missingLevel = 0;
boolean attributeMissing = false, classMissing = false;
int numAttr = 1, attrIndex = 0;
return runBasicTest(attrTypes, numAttr, attrIndex, Attribute.NOMINAL,
missingLevel, attributeMissing, classMissing, numTrain, numTest,
numClasses, accepts);
}
/**
* Checks whether the scheme can handle class attributes as Nth attribute.
*
* @param attrTypes the attribute types the estimator accepts
   * @param numAtts the number of attributes
* @param attrIndex the index of the attribute
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @param classIndex the index of the class attribute (0-based, -1 means last
* attribute)
* @return index 0 is true if the test was passed, index 1 is true if test was
* acceptable
* @see TestInstances#CLASS_IS_LAST
*/
protected boolean[] canHandleClassAsNthAttribute(AttrTypes attrTypes,
int numAtts, int attrIndex, int classType, int classIndex) {
if (classIndex == TestInstances.CLASS_IS_LAST) {
print("class attribute as last attribute");
} else {
print("class attribute as " + (classIndex + 1) + ". attribute");
}
printAttributeSummary(attrTypes, classType);
print("...");
ArrayList<String> accepts = new ArrayList<String>();
int numTrain = getNumInstances(), numTest = getNumInstances(), numClasses = 2, missingLevel = 0;
boolean attributeMissing = false, classMissing = false;
return runBasicTest(attrTypes, numAtts, attrIndex, classType, classIndex,
missingLevel, attributeMissing, classMissing, numTrain, numTest,
numClasses, accepts);
}
/**
* Checks whether the scheme can handle zero training instances.
*
* @param attrTypes attribute types that can be estimated
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @return index 0 is true if the test was passed, index 1 is true if test was
* acceptable
*/
protected boolean[] canHandleZeroTraining(AttrTypes attrTypes, int classType) {
print("handle zero training instances");
printAttributeSummary(attrTypes, classType);
print("...");
ArrayList<String> accepts = new ArrayList<String>();
accepts.add("train");
accepts.add("value");
int numTrain = 0, numTest = getNumInstances(), numClasses = 2, missingLevel = 0;
boolean attributeMissing = false, classMissing = false;
int numAtts = 1;
int attrIndex = 0;
return runBasicTest(attrTypes, numAtts, attrIndex, classType, missingLevel,
attributeMissing, classMissing, numTrain, numTest, numClasses, accepts);
}
/**
* Checks whether the scheme correctly initialises models when buildEstimator
* is called. This test calls buildEstimator with one training dataset and
* records performance on a test set. buildEstimator is then called on a
* training set with different structure, and then again with the original
* training set. The performance on the test set is compared with the original
* results and any performance difference noted as incorrect build
* initialisation.
*
* @param attrTypes attribute types that can be estimated
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @return index 0 is true if the test was passed, index 1 is true if the
* scheme performs worse than ZeroR, but without error (index 0 is
* false)
*/
protected boolean[] correctBuildInitialisation(AttrTypes attrTypes,
int classType) {
boolean[] result = new boolean[2];
print("correct initialisation during buildEstimator");
printAttributeSummary(attrTypes, classType);
print("...");
int numTrain = getNumInstances();
getNumInstances();
int numClasses = 2, missingLevel = 0;
boolean attributeMissing = false, classMissing = false;
Instances train1 = null;
Instances train2 = null;
Estimator estimator = null;
Estimator estimator1 = null;
boolean built = false;
int stage = 0;
int attrIndex1 = 1;
int attrIndex2 = 2;
try {
// Make two sets of train/test splits with different
// numbers of attributes
train1 = makeTestDataset(42, numTrain, 2, attrTypes, numClasses,
classType);
train2 = makeTestDataset(84, numTrain, 3, attrTypes, numClasses,
classType);
if (missingLevel > 0) {
addMissing(train1, missingLevel, attributeMissing, classMissing,
attrIndex1);
addMissing(train2, missingLevel, attributeMissing, classMissing,
attrIndex2);
}
estimator = Estimator.makeCopies(getEstimator(), 1)[0];
} catch (Exception ex) {
throw new Error("Error setting up for tests: " + ex.getMessage());
}
try {
// TESTING??
stage = 0;
estimator.addValues(train1, attrIndex1);
built = true;
estimator1 = Estimator.makeCopies(getEstimator(), 1)[0];
stage = 1;
built = false;
estimator.addValues(train2, attrIndex2);
built = true;
stage = 2;
built = false;
estimator.addValues(train1, attrIndex1);
built = true;
stage = 3;
if (!estimator.equals(estimator1)) {
if (m_Debug) {
println("\n=== Full report ===\n" + "\nFirst build estimator\n"
+ estimator.toString() + "\n\n");
println("\nSecond build estimator\n" + estimator.toString() + "\n\n");
}
throw new Exception("Results differ between buildEstimator calls");
}
println("yes");
result[0] = true;
} catch (Exception ex) {
      String msg = (ex.getMessage() == null) ? "" : ex.getMessage().toLowerCase();
if (msg.indexOf("worse than zeror") >= 0) {
println("warning: performs worse than ZeroR");
result[0] = true;
result[1] = true;
} else {
println("no");
result[0] = false;
}
if (m_Debug) {
println("\n=== Full Report ===");
print("Problem during");
if (built) {
print(" testing");
} else {
print(" training");
}
switch (stage) {
case 0:
print(" of dataset 1");
break;
case 1:
print(" of dataset 2");
break;
case 2:
print(" of dataset 1 (2nd build)");
break;
case 3:
print(", comparing results from builds of dataset 1");
break;
}
println(": " + ex.getMessage() + "\n");
println("here are the datasets:\n");
println("=== Train1 Dataset ===\n" + train1.toString() + "\n");
println("=== Train2 Dataset ===\n" + train2.toString() + "\n");
}
}
return result;
}
/**
* Checks basic missing value handling of the scheme. If the missing values
* cause an exception to be thrown by the scheme, this will be recorded.
*
* @param attrTypes attribute types that can be estimated
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @param attributeMissing true if the missing values may be in the attributes
* @param classMissing true if the missing values may be in the class
* @param missingLevel the percentage of missing values
* @return index 0 is true if the test was passed, index 1 is true if test was
* acceptable
*/
protected boolean[] canHandleMissing(AttrTypes attrTypes, int classType,
boolean attributeMissing, boolean classMissing, int missingLevel) {
if (missingLevel == 100) {
print("100% ");
}
print("missing");
if (attributeMissing) {
print(" attribute");
if (classMissing) {
print(" and");
}
}
if (classMissing) {
print(" class");
}
print(" values");
printAttributeSummary(attrTypes, classType);
print("...");
ArrayList<String> accepts = new ArrayList<String>();
accepts.add("missing");
accepts.add("value");
accepts.add("train");
int numTrain = getNumInstances(), numTest = getNumInstances(), numClasses = 2;
int numAtts = 1, attrIndex = 0;
return runBasicTest(attrTypes, numAtts, attrIndex, classType, missingLevel,
attributeMissing, classMissing, numTrain, numTest, numClasses, accepts);
}
/**
* Checks whether an incremental scheme produces the same model when trained
* incrementally as when batch trained. The model itself cannot be compared,
* so we compare the evaluation on test data for both models. It is possible
* to get a false positive on this test (likelihood depends on the estimator).
*
* @param attrTypes attribute types that can be estimated
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @return index 0 is true if the test was passed
*/
protected boolean[] incrementingEquality(AttrTypes attrTypes, int classType) {
print("incremental training produces the same results"
+ " as batch training");
printAttributeSummary(attrTypes, classType);
print("...");
int numTrain = getNumInstances(), numTest = getNumInstances(), numClasses = 2, missingLevel = 0;
boolean attributeMissing = false, classMissing = false;
boolean[] result = new boolean[2];
Instances train = null;
Estimator[] estimators = null;
boolean built = false;
int attrIndex = 0;
Vector<Double> test;
try {
train = makeTestDataset(42, numTrain, 1, attrTypes, numClasses, classType);
// prepare training data set and test value list
test = makeTestValueList(24, numTest, train, attrIndex,
attrTypes.getSetType());
if (missingLevel > 0) {
addMissing(train, missingLevel, attributeMissing, classMissing,
attrIndex);
}
estimators = Estimator.makeCopies(getEstimator(), 2);
estimators[0].addValues(train, attrIndex);
} catch (Exception ex) {
throw new Error("Error setting up for tests: " + ex.getMessage());
}
try {
for (int i = 0; i < train.numInstances(); i++) {
((IncrementalEstimator) estimators[1]).addValue(train.instance(i)
.value(attrIndex), 1.0);
}
built = true;
if (!estimators[0].equals(estimators[1])) {
println("no");
result[0] = false;
if (m_Debug) {
println("\n=== Full Report ===");
println("Results differ between batch and "
+ "incrementally built models.\n"
+ "Depending on the estimator, this may be OK");
println("Here are the results:\n");
println("batch built results\n" + estimators[0].toString());
println("incrementally built results\n" + estimators[1].toString());
println("Here are the datasets:\n");
println("=== Train Dataset ===\n" + train.toString() + "\n");
println("=== Test Dataset ===\n" + test.toString() + "\n\n");
}
} else {
println("yes");
result[0] = true;
}
} catch (Exception ex) {
result[0] = false;
print("Problem during");
if (built) {
print(" testing");
} else {
print(" training");
}
println(": " + ex.getMessage() + "\n");
}
return result;
}
/**
* Checks whether the estimator can handle instance weights. This test
* compares the estimator performance on two datasets that are identical
* except for the training weights. If the results change, then the estimator
* must be using the weights. It may be possible to get a false positive from
* this test if the weight changes aren't significant enough to induce a
* change in estimator performance (but the weights are chosen to minimize the
* likelihood of this).
*
* @param attrTypes attribute types that can be estimated
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @return index 0 true if the test was passed
*/
protected boolean[] instanceWeights(AttrTypes attrTypes, int classType) {
print("estimator uses instance weights");
printAttributeSummary(attrTypes, classType);
print("...");
int numTrain = 2 * getNumInstances(), numTest = getNumInstances(), numClasses = 2, missingLevel = 0;
boolean attributeMissing = false, classMissing = false;
boolean[] result = new boolean[2];
Instances train = null;
Vector<Double> test = null;
Estimator[] estimators = null;
Vector<Double> resultProbsO = null;
Vector<Double> resultProbsW = null;
boolean built = false;
boolean evalFail = false;
int attrIndex = 0;
try {
train = makeTestDataset(42, numTrain, 1, attrTypes, numClasses, classType);
// prepare training data set and test value list
test = makeTestValueList(24, numTest, train, attrIndex,
attrTypes.getSetType());
if (missingLevel > 0) {
addMissing(train, missingLevel, attributeMissing, classMissing,
attrIndex);
}
estimators = Estimator.makeCopies(getEstimator(), 2);
estimators[0].addValues(train, attrIndex);
resultProbsO = testWithTestValues(estimators[0], test);
} catch (Exception ex) {
throw new Error("Error setting up for tests: " + ex.getMessage());
}
try {
// Now modify instance weights and re-built
for (int i = 0; i < train.numInstances(); i++) {
train.instance(i).setWeight(0);
}
Random random = new Random(1);
for (int i = 0; i < train.numInstances() / 2; i++) {
int inst = random.nextInt(train.numInstances());
int weight = random.nextInt(10) + 1;
train.instance(inst).setWeight(weight);
}
estimators[1].addValues(train, attrIndex);
resultProbsW = testWithTestValues(estimators[1], test);
built = true;
if (resultProbsO.equals(resultProbsW)) {
// println("no");
evalFail = true;
throw new Exception("evalFail");
}
println("yes");
result[0] = true;
} catch (Exception ex) {
println("no");
result[0] = false;
if (m_Debug) {
println("\n=== Full Report ===");
if (evalFail) {
println("Results don't differ between non-weighted and "
+ "weighted instance models.");
println("Here are the results:\n");
println(probsToString(resultProbsO));
} else {
print("Problem during");
if (built) {
print(" testing");
} else {
print(" training");
}
println(": " + ex.getMessage() + "\n");
}
println("Here are the datasets:\n");
println("=== Train Dataset ===\n" + train.toString() + "\n");
println("=== Train Weights ===\n");
for (int i = 0; i < train.numInstances(); i++) {
println(" " + (i + 1) + " " + train.instance(i).weight());
}
println("=== Test Dataset ===\n" + test.toString() + "\n\n");
println("(test weights all 1.0\n");
}
}
return result;
}
/**
* Checks whether the scheme alters the training dataset during training. If
* the scheme needs to modify the training data it should take a copy of the
* training data. Currently checks for changes to header structure, number of
* instances, order of instances, instance weights.
*
* @param attrTypes attribute types that can be estimated
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @param attributeMissing true if we know the estimator can handle (at least)
* moderate missing attribute values
* @param classMissing true if we know the estimator can handle (at least)
* moderate missing class values
* @return index 0 is true if the test was passed
*/
protected boolean[] datasetIntegrity(AttrTypes attrTypes, int classType,
boolean attributeMissing, boolean classMissing) {
Estimator estimator = null;
print("estimator doesn't alter original datasets");
printAttributeSummary(attrTypes, classType);
print("...");
int numTrain = getNumInstances();
getNumInstances();
int numClasses = 2, missingLevel = 100;
boolean[] result = new boolean[2];
Instances train = null;
boolean built = false;
try {
train = makeTestDataset(42, numTrain, 1, attrTypes, numClasses, classType);
int attrIndex = 0;
if (missingLevel > 0) {
addMissing(train, missingLevel, attributeMissing, classMissing,
attrIndex);
}
estimator = Estimator.makeCopies(getEstimator(), 1)[0];
} catch (Exception ex) {
throw new Error("Error setting up for tests: " + ex.getMessage());
}
try {
Instances trainCopy = new Instances(train);
int attrIndex = 0;
estimator.addValues(trainCopy, attrIndex);
compareDatasets(train, trainCopy);
built = true;
println("yes");
result[0] = true;
} catch (Exception ex) {
println("no");
result[0] = false;
if (m_Debug) {
println("\n=== Full Report ===");
print("Problem during");
if (built) {
print(" testing");
} else {
print(" training");
}
println(": " + ex.getMessage() + "\n");
println("Here are the datasets:\n");
println("=== Train Dataset ===\n" + train.toString() + "\n");
}
}
return result;
}
/**
   * Runs a test on the datasets with the given characteristics.
*
* @param attrTypes attribute types that can be estimated
* @param numAtts number of attributes
* @param attrIndex attribute index
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @param missingLevel the percentage of missing values
* @param attributeMissing true if the missing values may be in the attributes
* @param classMissing true if the missing values may be in the class
* @param numTrain the number of instances in the training set
   * @param numTest the number of instances in the test set
   * @param numClasses the number of classes
   * @param accepts the acceptable strings in an exception
* @return index 0 is true if the test was passed, index 1 is true if test was
* acceptable
*/
protected boolean[] runBasicTest(AttrTypes attrTypes, int numAtts,
int attrIndex, int classType, int missingLevel, boolean attributeMissing,
boolean classMissing, int numTrain, int numTest, int numClasses,
ArrayList<String> accepts) {
return runBasicTest(attrTypes, numAtts, attrIndex, classType,
TestInstances.CLASS_IS_LAST, missingLevel, attributeMissing,
classMissing, numTrain, numTest, numClasses, accepts);
}
/**
   * Runs a test on the datasets with the given characteristics.
*
* @param attrTypes attribute types that can be estimated
   * @param numAtts number of attributes
   * @param attrIndex attribute index
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
* @param classIndex the attribute index of the class
* @param missingLevel the percentage of missing values
* @param attributeMissing true if the missing values may be in the attributes
* @param classMissing true if the missing values may be in the class
* @param numTrain the number of instances in the training set
   * @param numTest the number of instances in the test set
   * @param numClasses the number of classes
   * @param accepts the acceptable strings in an exception
* @return index 0 is true if the test was passed, index 1 is true if test was
* acceptable
*/
protected boolean[] runBasicTest(AttrTypes attrTypes, int numAtts,
int attrIndex, int classType, int classIndex, int missingLevel,
boolean attributeMissing, boolean classMissing, int numTrain, int numTest,
int numClasses, ArrayList<String> accepts) {
boolean[] result = new boolean[2];
Instances train = null;
Vector<Double> test = null;
Estimator estimator = null;
boolean built = false;
try {
train = makeTestDataset(42, numTrain, numAtts, attrTypes, numClasses,
classType, classIndex);
// prepare training data set and test value list
if (numTrain > 0) {
test = makeTestValueList(24, numTest, train, attrIndex,
attrTypes.getSetType());
} else {
double min = -10.0;
double max = 8.0;
test = makeTestValueList(24, numTest, min, max, attrTypes.getSetType());
}
if (missingLevel > 0) {
addMissing(train, missingLevel, attributeMissing, classMissing,
attrIndex);
}
estimator = Estimator.makeCopies(getEstimator(), 1)[0];
} catch (Exception ex) {
ex.printStackTrace();
throw new Error("Error setting up for tests: " + ex.getMessage());
}
try {
estimator.addValues(train, attrIndex);
built = true;
testWithTestValues(estimator, test);
println("yes");
result[0] = true;
} catch (Exception ex) {
boolean acceptable = false;
String msg;
if (ex.getMessage() == null) {
msg = "";
} else {
msg = ex.getMessage().toLowerCase();
}
if (msg.indexOf("not in classpath") > -1) {
m_ClasspathProblems = true;
}
for (int i = 0; i < accepts.size(); i++) {
if (msg.indexOf(accepts.get(i)) >= 0) {
acceptable = true;
}
}
println("no" + (acceptable ? " (OK error message)" : ""));
result[1] = acceptable;
if (m_Debug) {
println("\n=== Full Report ===");
print("Problem during");
if (built) {
print(" testing");
} else {
print(" training");
}
println(": " + ex.getMessage() + "\n");
if (!acceptable) {
if (accepts.size() > 0) {
print("Error message doesn't mention ");
for (int i = 0; i < accepts.size(); i++) {
if (i != 0) {
print(" or ");
}
print('"' + accepts.get(i) + '"');
}
}
println("here are the datasets:\n");
println("=== Train Dataset ===\n" + train.toString() + "\n");
println("=== Test Dataset ===\n" + test.toString() + "\n\n");
}
}
}
return result;
}
/**
* Compare two datasets to see if they differ.
*
* @param data1 one set of instances
* @param data2 the other set of instances
* @throws Exception if the datasets differ
*/
protected void compareDatasets(Instances data1, Instances data2)
throws Exception {
if (!data2.equalHeaders(data1)) {
throw new Exception("header has been modified\n"
+ data2.equalHeadersMsg(data1));
}
if (!(data2.numInstances() == data1.numInstances())) {
throw new Exception("number of instances has changed");
}
for (int i = 0; i < data2.numInstances(); i++) {
Instance orig = data1.instance(i);
Instance copy = data2.instance(i);
for (int j = 0; j < orig.numAttributes(); j++) {
if (orig.isMissing(j)) {
if (!copy.isMissing(j)) {
throw new Exception("instances have changed");
}
} else if (orig.value(j) != copy.value(j)) {
throw new Exception("instances have changed");
}
if (orig.weight() != copy.weight()) {
throw new Exception("instance weights have changed");
}
}
}
}
/**
* Add missing values to a dataset.
*
* @param data the instances to add missing values to
* @param level the level of missing values to add (if positive, this is the
* probability that a value will be set to missing, if negative all
* but one value will be set to missing (not yet implemented))
* @param attributeMissing if true, attributes will be modified
* @param classMissing if true, the class attribute will be modified
* @param attrIndex index of the attribute
*/
protected void addMissing(Instances data, int level,
boolean attributeMissing, boolean classMissing, int attrIndex) {
int classIndex = data.classIndex();
Random random = new Random(1);
for (int i = 0; i < data.numInstances(); i++) {
Instance current = data.instance(i);
for (int j = 0; j < data.numAttributes(); j++) {
if (((j == classIndex) && classMissing)
|| ((j == attrIndex) && attributeMissing)) {
if (random.nextInt(100) < level) {
current.setMissing(j);
}
}
}
}
}
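  // Hedged usage note (not original documentation): with a positive level, each value of the
  // selected attribute (and/or the class) is set to missing with probability level/100, e.g.
  //   addMissing(train, 20, true, false, 0); // roughly 20% of attribute 0 becomes missing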
/**
* Make a simple set of instances, which can later be modified for use in
* specific tests.
*
* @param seed the random number seed
* @param numInstances the number of instances to generate
* @param numAttr the number of attributes
* @param attrTypes the attribute types
* @param numClasses the number of classes (if nominal class)
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @return the test dataset
* @throws Exception if the dataset couldn't be generated
* @see #process(Instances)
*/
protected Instances makeTestDataset(int seed, int numInstances, int numAttr,
AttrTypes attrTypes, int numClasses, int classType) throws Exception {
return makeTestDataset(seed, numInstances, numAttr, attrTypes, numClasses,
classType, TestInstances.CLASS_IS_LAST);
}
/**
* Make a simple set of instances with variable position of the class
* attribute, which can later be modified for use in specific tests.
*
* @param seed the random number seed
* @param numInstances the number of instances to generate
* @param numAttr the number of attributes to generate
   * @param attrTypes the attribute types that are accepted
* @param numClasses the number of classes (if nominal class)
* @param classType the class type (NUMERIC, NOMINAL, etc.)
* @param classIndex the index of the class (0-based, -1 as last)
* @return the test dataset
* @throws Exception if the dataset couldn't be generated
* @see TestInstances#CLASS_IS_LAST
* @see #process(Instances)
*/
protected Instances makeTestDataset(int seed, int numInstances, int numAttr,
AttrTypes attrTypes, int numClasses, int classType, int classIndex)
throws Exception {
TestInstances dataset = new TestInstances();
dataset.setSeed(seed);
dataset.setNumInstances(numInstances);
dataset.setNumNominal(attrTypes.nominal ? numAttr : 0);
dataset.setNumNumeric(attrTypes.numeric ? numAttr : 0);
dataset.setNumString(attrTypes.string ? numAttr : 0);
dataset.setNumDate(attrTypes.date ? numAttr : 0);
dataset.setNumRelational(attrTypes.relational ? numAttr : 0);
dataset.setNumClasses(numClasses);
dataset.setClassType(classType);
dataset.setClassIndex(classIndex);
return process(dataset.generate());
}
/**
   * Make a simple list of test values for one attribute, drawn from the value
   * range of that attribute in the given dataset.
*
* @param seed the random number seed
* @param numValues the number of values to generate
* @param data the dataset to make test examples for
* @param attrIndex index of the attribute
   * @param attrType the attribute type (NUMERIC, NOMINAL, etc.)
   * @return the list of generated test values
   * @throws Exception if the value list couldn't be generated
* @see #process(Instances)
*/
protected Vector<Double> makeTestValueList(int seed, int numValues,
Instances data, int attrIndex, int attrType) throws Exception {
// get min max
double[] minMax = getMinimumMaximum(data, attrIndex);
double minValue = minMax[0];
double maxValue = minMax[1];
// make value list and put into a VECTOR
double range = maxValue - minValue;
Vector<Double> values = new Vector<Double>(numValues);
Random random = new Random(seed);
if (attrType == Attribute.NOMINAL) {
for (int i = 0; i < numValues; i++) {
Double v = new Double(random.nextInt((int) range)
+ (int) minValue);
values.add(v);
}
}
if (attrType == Attribute.NUMERIC) {
for (int i = 0; i < numValues; i++) {
Double v = new Double(random.nextDouble() * range + minValue);
values.add(v);
}
}
return values;
}
/**
   * Make a simple list of test values for one attribute, drawn uniformly from
   * the given minimum-maximum range.
*
* @param seed the random number seed
* @param numValues the number of values to generate
* @param minValue the minimal data value
* @param maxValue the maximal data value
   * @param attrType the attribute type (NUMERIC, NOMINAL, etc.)
   * @return the list of generated test values
   * @throws Exception if the value list couldn't be generated
* @see #process(Instances)
*/
protected Vector<Double> makeTestValueList(int seed, int numValues,
double minValue, double maxValue, int attrType) throws Exception {
// make value list and put into a VECTOR
double range = maxValue - minValue;
Vector<Double> values = new Vector<Double>(numValues);
Random random = new Random(seed);
if (attrType == Attribute.NOMINAL) {
for (int i = 0; i < numValues; i++) {
Double v = new Double(random.nextInt((int) range)
+ (int) minValue);
values.add(v);
}
}
if (attrType == Attribute.NUMERIC) {
for (int i = 0; i < numValues; i++) {
Double v = new Double(random.nextDouble() * range + minValue);
values.add(v);
}
}
return values;
}
/**
* Test with test values.
*
* @param est estimator to be tested
   * @param test vector with test values
   * @return vector with the estimated probabilities for the test values
   */
protected Vector<Double> testWithTestValues(Estimator est, Vector<Double> test) {
Vector<Double> results = new Vector<Double>();
for (int i = 0; i < test.size(); i++) {
double testValue = (test.elementAt(i)).doubleValue();
double prob = est.getProbability(testValue);
Double p = new Double(prob);
results.add(p);
}
return results;
}
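  // Illustrative sketch (assumption; would live inside an instance method and may throw
  // Exception): combining the helpers above to score a copy of the estimator on fresh values:
  //   Instances train = makeTestDataset(42, 100, 1, new AttrTypes(Attribute.NUMERIC), 2,
  //       Attribute.NOMINAL);
  //   Vector<Double> testVals = makeTestValueList(24, 100, train, 0, Attribute.NUMERIC);
  //   Estimator est = Estimator.makeCopies(getEstimator(), 1)[0];
  //   est.addValues(train, 0);
  //   Vector<Double> probs = testWithTestValues(est, testVals);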
/**
   * Gets the minimum and maximum of the values of the given attribute of the
   * given data set
   *
   * @param inst the instances
   * @param attrIndex the index of the attribute to find min and max for
* @return the array with the minimum value on index 0 and the max on index 1
*/
protected double[] getMinimumMaximum(Instances inst, int attrIndex) {
double[] minMax = new double[2];
try {
getMinMax(inst, attrIndex, minMax);
} catch (Exception ex) {
ex.printStackTrace();
System.out.println(ex.getMessage());
}
return minMax;
// double minValue = minMax[0];
// double maxValue = minMax[1];
}
/**
   * Find the minimum and the maximum of the attribute and return them in the
   * last parameter.
*
* @param inst instances used to build the estimator
* @param attrIndex index of the attribute
* @param minMax the array to return minimum and maximum in
* @return number of not missing values
* @exception Exception if parameter minMax wasn't initialized properly
*/
public static int getMinMax(Instances inst, int attrIndex, double[] minMax)
throws Exception {
double min = Double.NaN;
double max = Double.NaN;
Instance instance = null;
int numNotMissing = 0;
if ((minMax == null) || (minMax.length < 2)) {
throw new Exception("Error in Program, privat method getMinMax");
}
Enumeration<Instance> enumInst = inst.enumerateInstances();
if (enumInst.hasMoreElements()) {
do {
instance = enumInst.nextElement();
} while (instance.isMissing(attrIndex) && (enumInst.hasMoreElements()));
// add values if not missing
if (!instance.isMissing(attrIndex)) {
numNotMissing++;
min = instance.value(attrIndex);
max = instance.value(attrIndex);
}
while (enumInst.hasMoreElements()) {
instance = enumInst.nextElement();
if (!instance.isMissing(attrIndex)) {
numNotMissing++;
if (instance.value(attrIndex) < min) {
min = (instance.value(attrIndex));
} else {
if (instance.value(attrIndex) > max) {
max = (instance.value(attrIndex));
}
}
}
}
}
minMax[0] = min;
minMax[1] = max;
return numNotMissing;
}
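  // Hedged usage sketch: getMinMax(...) fills a caller-supplied two-element array, e.g.
  //   double[] minMax = new double[2];
  //   int numNotMissing = CheckEstimator.getMinMax(data, 0, minMax);
  //   // minMax[0] holds the smallest non-missing value of attribute 0, minMax[1] the largest;
  //   // both stay NaN if every value of that attribute is missing.
  // Here "data" stands for any weka.core.Instances object.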
/**
* Print the probabilities after testing
*
* @param probs vector with probability values
* @return string with probability values printed
*/
private String probsToString(Vector<Double> probs) {
StringBuffer txt = new StringBuffer(" ");
for (int i = 0; i < probs.size(); i++) {
txt.append("" + (probs.elementAt(i)).doubleValue() + " ");
}
return txt.toString();
}
/**
* Provides a hook for derived classes to further modify the data.
*
* @param data the data to process
* @return the processed data
* @see #m_PostProcessor
*/
protected Instances process(Instances data) {
if (getPostProcessor() == null) {
return data;
} else {
return getPostProcessor().process(data);
}
}
/**
* Print out a short summary string for the dataset characteristics
*
* @param attrTypes the attribute types used (NUMERIC, NOMINAL, etc.)
* @param classType the class type (NUMERIC, NOMINAL, etc.)
*/
protected void printAttributeSummary(AttrTypes attrTypes, int classType) {
String str = "";
if (attrTypes.numeric) {
str += " numeric";
}
if (attrTypes.nominal) {
if (str.length() > 0) {
str += " &";
}
str += " nominal";
}
if (attrTypes.string) {
if (str.length() > 0) {
str += " &";
}
str += " string";
}
if (attrTypes.date) {
if (str.length() > 0) {
str += " &";
}
str += " date";
}
if (attrTypes.relational) {
if (str.length() > 0) {
str += " &";
}
str += " relational";
}
str += " attributes)";
switch (classType) {
case Attribute.NUMERIC:
str = " (numeric class," + str;
break;
case Attribute.NOMINAL:
str = " (nominal class," + str;
break;
case Attribute.STRING:
str = " (string class," + str;
break;
case Attribute.DATE:
str = " (date class," + str;
break;
case Attribute.RELATIONAL:
str = " (relational class," + str;
break;
}
print(str);
}
/**
* Print out a short summary string for the dataset characteristics
*
* @param attrType the attribute type (NUMERIC, NOMINAL, etc.)
* @param classType the class type (NUMERIC, NOMINAL, etc.)
*/
protected void printAttributeSummary(int attrType, int classType) {
String str = "";
switch (attrType) {
case Attribute.NUMERIC:
str = " numeric" + str;
break;
case Attribute.NOMINAL:
str = " nominal" + str;
break;
case Attribute.STRING:
str = " string" + str;
break;
case Attribute.DATE:
str = " date" + str;
break;
case Attribute.RELATIONAL:
str = " relational" + str;
break;
}
str += " attribute(s))";
switch (classType) {
case Attribute.NUMERIC:
str = " (numeric class," + str;
break;
case Attribute.NOMINAL:
str = " (nominal class," + str;
break;
case Attribute.STRING:
str = " (string class," + str;
break;
case Attribute.DATE:
str = " (date class," + str;
break;
case Attribute.RELATIONAL:
str = " (relational class," + str;
break;
}
print(str);
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Test method for this class
*
* @param args the commandline parameters
*/
public static void main(String[] args) {
try {
CheckEstimator check = new CheckEstimator();
try {
check.setOptions(args);
Utils.checkForRemainingOptions(args);
} catch (Exception ex) {
String result = ex.getMessage() + "\n\n"
+ check.getClass().getName().replaceAll(".*\\.", "")
+ " Options:\n\n";
Enumeration<Option> enu = check.listOptions();
while (enu.hasMoreElements()) {
Option option = enu.nextElement();
result += option.synopsis() + "\n" + option.description() + "\n";
}
throw new Exception(result);
}
check.doTests();
} catch (Exception ex) {
System.err.println(ex.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/ConditionalEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ConditionalEstimator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import weka.core.RevisionHandler;
/**
* Interface for conditional probability estimators. Example code: <p>
*
* <code> <pre>
* NNConditionalEstimator newEst = new NNConditionalEstimator();
*
* // Create 50 random points and add them
* Random r = new Random(seed);
* for(int i = 0; i < 50; i++) {
* int x = Math.abs(r.nextInt() % 100);
* int y = Math.abs(r.nextInt() % 100);
* System.out.println("# " + x + " " + y);
* newEst.addValue(x, y, 1);
* }
*
* // Pick a random conditional value
* int cond = Math.abs(r.nextInt() % 100);
* System.out.println("## Conditional = " + cond);
*
* // Print the probabilities conditional on that value
* Estimator result = newEst.getEstimator(cond);
* for(int i = 0; i <= 100; i+= 5) {
* System.out.println(" " + i + " " + result.getProbability(i));
* }
* </pre> </code>
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public interface ConditionalEstimator extends RevisionHandler {
/**
* Add a new data value to the current estimator.
*
* @param data the new data value
* @param given the new value that data is conditional upon
* @param weight the weight assigned to the data value
*/
void addValue(double data, double given, double weight);
/**
* Get a probability estimator for a value
*
* @param given the new value that data is conditional upon
* @return the estimator for the supplied value given the condition
*/
Estimator getEstimator(double given);
/**
* Get a probability for a value conditional on another value
*
* @param data the value to estimate the probability of
* @param given the new value that data is conditional upon
* @return the estimator for the supplied value given the condition
*/
double getProbability(double data, double given);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/DDConditionalEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* DDConditionalEstimator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import weka.core.RevisionUtils;
/**
* Conditional probability estimator for a discrete domain conditional upon
* a discrete domain.
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class DDConditionalEstimator implements ConditionalEstimator {
/** Hold the sub-estimators */
private DiscreteEstimator [] m_Estimators;
/**
* Constructor
*
* @param numSymbols the number of possible symbols (remember to include 0)
* @param numCondSymbols the number of conditioning symbols
   * @param laplace if true, sub-estimators will use Laplace smoothing
*/
public DDConditionalEstimator(int numSymbols, int numCondSymbols,
boolean laplace) {
m_Estimators = new DiscreteEstimator [numCondSymbols];
for(int i = 0; i < numCondSymbols; i++) {
m_Estimators[i] = new DiscreteEstimator(numSymbols, laplace);
}
}
/**
* Add a new data value to the current estimator.
*
* @param data the new data value
* @param given the new value that data is conditional upon
* @param weight the weight assigned to the data value
*/
public void addValue(double data, double given, double weight) {
m_Estimators[(int)given].addValue(data, weight);
}
/**
* Get a probability estimator for a value
*
* @param given the new value that data is conditional upon
* @return the estimator for the supplied value given the condition
*/
public Estimator getEstimator(double given) {
return m_Estimators[(int)given];
}
/**
* Get a probability estimate for a value
*
* @param data the value to estimate the probability of
* @param given the new value that data is conditional upon
* @return the estimated probability of the supplied value
*/
public double getProbability(double data, double given) {
return getEstimator(given).getProbability(data);
}
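  // Illustrative usage sketch (assumption, mirroring the main() method below): estimating
  // P(data | given) over two small symbolic domains:
  //   DDConditionalEstimator est = new DDConditionalEstimator(3, 2, true);
  //   est.addValue(0, 1, 1.0);              // observe symbol 0 under condition 1
  //   double p = est.getProbability(0, 1);  // probability of symbol 0 given condition 1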
/** Display a representation of this estimator */
public String toString() {
String result = "DD Conditional Estimator. "
+ m_Estimators.length + " sub-estimators:\n";
for(int i = 0; i < m_Estimators.length; i++) {
result += "Sub-estimator " + i + ": " + m_Estimators[i];
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class.
*
* @param argv should contain a sequence of pairs of integers which
* will be treated as symbolic.
*/
public static void main(String [] argv) {
try {
      if (argv.length < 2) {
        System.out.println("Please specify a sequence of pairs of integers.");
return;
}
int currentA = Integer.parseInt(argv[0]);
int maxA = currentA;
int currentB = Integer.parseInt(argv[1]);
int maxB = currentB;
for(int i = 2; i < argv.length - 1; i += 2) {
currentA = Integer.parseInt(argv[i]);
currentB = Integer.parseInt(argv[i + 1]);
if (currentA > maxA) {
maxA = currentA;
}
if (currentB > maxB) {
maxB = currentB;
}
}
DDConditionalEstimator newEst = new DDConditionalEstimator(maxA + 1,
maxB + 1,
true);
for(int i = 0; i < argv.length - 1; i += 2) {
currentA = Integer.parseInt(argv[i]);
currentB = Integer.parseInt(argv[i + 1]);
System.out.println(newEst);
System.out.println("Prediction for " + currentA + '|' + currentB
+ " = "
+ newEst.getProbability(currentA, currentB));
newEst.addValue(currentA, currentB, 1);
}
} catch (Exception e) {
System.out.println(e.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/DKConditionalEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* DKConditionalEstimator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import weka.core.RevisionUtils;
/**
* Conditional probability estimator for a discrete domain conditional upon
* a numeric domain.
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class DKConditionalEstimator implements ConditionalEstimator {
/** Hold the sub-estimators */
private KernelEstimator [] m_Estimators;
/** Hold the weights for each of the sub-estimators */
private DiscreteEstimator m_Weights;
/**
* Constructor
*
* @param numSymbols the number of symbols
* @param precision the precision to which numeric values are given. For
* example, if the precision is stated to be 0.1, the values in the
* interval (0.25,0.35] are all treated as 0.3.
*/
public DKConditionalEstimator(int numSymbols, double precision) {
m_Estimators = new KernelEstimator [numSymbols];
for(int i = 0; i < numSymbols; i++) {
m_Estimators[i] = new KernelEstimator(precision);
}
m_Weights = new DiscreteEstimator(numSymbols, true);
}
/**
* Add a new data value to the current estimator.
*
* @param data the new data value
* @param given the new value that data is conditional upon
* @param weight the weight assigned to the data value
*/
public void addValue(double data, double given, double weight) {
m_Estimators[(int)data].addValue(given, weight);
m_Weights.addValue((int)data, weight);
}
/**
* Get a probability estimator for a value
*
* @param given the new value that data is conditional upon
* @return the estimator for the supplied value given the condition
*/
public Estimator getEstimator(double given) {
Estimator result = new DiscreteEstimator(m_Estimators.length,false);
for(int i = 0; i < m_Estimators.length; i++) {
//System.out.println("Val " + i
// + " Weight:" + m_Weights.getProbability(i)
// +" EstProb(" + given + ")="
// + m_Estimators[i].getProbability(given));
result.addValue(i, m_Weights.getProbability(i)
* m_Estimators[i].getProbability(given));
}
return result;
}
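  // Descriptive note (assumption, not original documentation): the estimator returned above is a
  // Bayes-style mixture; each symbol i is scored by the prior m_Weights.getProbability(i) times
  // the kernel likelihood m_Estimators[i].getProbability(given), and the DiscreteEstimator
  // normalises those scores when probabilities are queried, e.g.
  //   DKConditionalEstimator est = new DKConditionalEstimator(2, 0.1);
  //   est.addValue(0, 1.5, 1.0);
  //   est.addValue(1, 3.0, 1.0);
  //   double p = est.getProbability(0, 1.6); // P(symbol 0 | numeric value 1.6)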
/**
* Get a probability estimate for a value
*
* @param data the value to estimate the probability of
* @param given the new value that data is conditional upon
* @return the estimated probability of the supplied value
*/
public double getProbability(double data, double given) {
return getEstimator(given).getProbability(data);
}
/**
* Display a representation of this estimator
*/
public String toString() {
String result = "DK Conditional Estimator. "
+ m_Estimators.length + " sub-estimators:\n";
for(int i = 0; i < m_Estimators.length; i++) {
result += "Sub-estimator " + i + ": " + m_Estimators[i];
}
result += "Weights of each estimator given by " + m_Weights;
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class.
*
* @param argv should contain a sequence of pairs of integers which
* will be treated as pairs of symbolic, numeric.
*/
public static void main(String [] argv) {
try {
      if (argv.length < 2) {
        System.out.println("Please specify a sequence of pairs of integers.");
return;
}
int currentA = Integer.parseInt(argv[0]);
int maxA = currentA;
int currentB = Integer.parseInt(argv[1]);
int maxB = currentB;
for(int i = 2; i < argv.length - 1; i += 2) {
currentA = Integer.parseInt(argv[i]);
currentB = Integer.parseInt(argv[i + 1]);
if (currentA > maxA) {
maxA = currentA;
}
if (currentB > maxB) {
maxB = currentB;
}
}
DKConditionalEstimator newEst = new DKConditionalEstimator(maxA + 1,
1);
for(int i = 0; i < argv.length - 1; i += 2) {
currentA = Integer.parseInt(argv[i]);
currentB = Integer.parseInt(argv[i + 1]);
System.out.println(newEst);
System.out.println("Prediction for " + currentA + '|' + currentB
+ " = "
+ newEst.getProbability(currentA, currentB));
newEst.addValue(currentA, currentB, 1);
}
} catch (Exception e) {
System.out.println(e.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/DNConditionalEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* DNConditionalEstimator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import weka.core.RevisionUtils;
/**
* Conditional probability estimator for a discrete domain conditional upon
* a numeric domain.
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class DNConditionalEstimator implements ConditionalEstimator {
/** Hold the sub-estimators */
private NormalEstimator [] m_Estimators;
/** Hold the weights for each of the sub-estimators */
private DiscreteEstimator m_Weights;
/**
* Constructor
*
* @param numSymbols the number of symbols
* @param precision the precision to which numeric values are given. For
* example, if the precision is stated to be 0.1, the values in the
* interval (0.25,0.35] are all treated as 0.3.
*/
public DNConditionalEstimator(int numSymbols, double precision) {
m_Estimators = new NormalEstimator [numSymbols];
for(int i = 0; i < numSymbols; i++) {
m_Estimators[i] = new NormalEstimator(precision);
}
m_Weights = new DiscreteEstimator(numSymbols, true);
}
/**
* Add a new data value to the current estimator.
*
* @param data the new data value
* @param given the new value that data is conditional upon
* @param weight the weight assigned to the data value
*/
public void addValue(double data, double given, double weight) {
m_Estimators[(int)data].addValue(given, weight);
m_Weights.addValue((int)data, weight);
}
/**
* Get a probability estimator for a value
*
* @param given the new value that data is conditional upon
* @return the estimator for the supplied value given the condition
*/
public Estimator getEstimator(double given) {
Estimator result = new DiscreteEstimator(m_Estimators.length,false);
for(int i = 0; i < m_Estimators.length; i++) {
result.addValue(i,m_Weights.getProbability(i)
*m_Estimators[i].getProbability(given));
}
return result;
}
/**
* Get a probability estimate for a value
*
* @param data the value to estimate the probability of
* @param given the new value that data is conditional upon
* @return the estimated probability of the supplied value
*/
public double getProbability(double data, double given) {
return getEstimator(given).getProbability(data);
}
/** Display a representation of this estimator */
public String toString() {
String result = "DN Conditional Estimator. "
+ m_Estimators.length + " sub-estimators:\n";
for(int i = 0; i < m_Estimators.length; i++) {
result += "Sub-estimator " + i + ": " + m_Estimators[i];
}
result += "Weights of each estimator given by " + m_Weights;
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class.
*
* @param argv should contain a sequence of pairs of integers which
* will be treated as pairs of symbolic, numeric.
*/
public static void main(String [] argv) {
try {
if (argv.length == 0) {
System.out.println("Please specify a set of instances.");
return;
}
int currentA = Integer.parseInt(argv[0]);
int maxA = currentA;
int currentB = Integer.parseInt(argv[1]);
int maxB = currentB;
for(int i = 2; i < argv.length - 1; i += 2) {
currentA = Integer.parseInt(argv[i]);
currentB = Integer.parseInt(argv[i + 1]);
if (currentA > maxA) {
maxA = currentA;
}
if (currentB > maxB) {
maxB = currentB;
}
}
DNConditionalEstimator newEst = new DNConditionalEstimator(maxA + 1,
1);
for(int i = 0; i < argv.length - 1; i += 2) {
currentA = Integer.parseInt(argv[i]);
currentB = Integer.parseInt(argv[i + 1]);
System.out.println(newEst);
System.out.println("Prediction for " + currentA + '|' + currentB
+ " = "
+ newEst.getProbability(currentA, currentB));
newEst.addValue(currentA, currentB, 1);
}
} catch (Exception e) {
System.out.println(e.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/DiscreteEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* DiscreteEstimator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Aggregateable;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Simple symbolic probability estimator based on symbol counts.
*
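 * <p>
 * A minimal usage sketch (the symbol count and values below are illustrative
 * only): <code> <pre>
 * // three symbols, Laplace-initialised counts
 * DiscreteEstimator est = new DiscreteEstimator(3, true);
 * est.addValue(0, 1);
 * est.addValue(2, 1);
 * double p = est.getProbability(0);   // (1 + 1) / (3 + 2) = 0.4
 * </pre> </code>
 *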
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class DiscreteEstimator extends Estimator implements
IncrementalEstimator, Aggregateable<DiscreteEstimator> {
/** for serialization */
private static final long serialVersionUID = -5526486742612434779L;
/** Hold the counts */
private final double[] m_Counts;
/** Hold the sum of counts */
private double m_SumOfCounts;
/** Initialization for counts */
private double m_FPrior;
/**
* Constructor
*
* @param numSymbols the number of possible symbols (remember to include 0)
* @param laplace if true, counts will be initialised to 1
*/
public DiscreteEstimator(int numSymbols, boolean laplace) {
m_Counts = new double[numSymbols];
m_SumOfCounts = 0;
if (laplace) {
m_FPrior = 1;
for (int i = 0; i < numSymbols; i++) {
m_Counts[i] = 1;
}
m_SumOfCounts = numSymbols;
}
}
/**
* Constructor
*
* @param nSymbols the number of possible symbols (remember to include 0)
* @param fPrior value with which counts will be initialised
*/
public DiscreteEstimator(int nSymbols, double fPrior) {
m_Counts = new double[nSymbols];
m_FPrior = fPrior;
for (int iSymbol = 0; iSymbol < nSymbols; iSymbol++) {
m_Counts[iSymbol] = fPrior;
}
m_SumOfCounts = fPrior * nSymbols;
}
/**
* Add a new data value to the current estimator.
*
* @param data the new data value
* @param weight the weight assigned to the data value
*/
@Override
public void addValue(double data, double weight) {
m_Counts[(int) data] += weight;
m_SumOfCounts += weight;
}
/**
* Get a probability estimate for a value
*
* @param data the value to estimate the probability of
* @return the estimated probability of the supplied value
*/
@Override
public double getProbability(double data) {
if (m_SumOfCounts == 0) {
return 0;
}
return m_Counts[(int) data] / m_SumOfCounts;
}
/**
* Gets the number of symbols this estimator operates with
*
* @return the number of estimator symbols
*/
public int getNumSymbols() {
return (m_Counts == null) ? 0 : m_Counts.length;
}
/**
* Get the count for a value
*
* @param data the value to get the count of
* @return the count of the supplied value
*/
public double getCount(double data) {
if (m_SumOfCounts == 0) {
return 0;
}
return m_Counts[(int) data];
}
/**
* Get the sum of all the counts
*
* @return the total sum of counts
*/
public double getSumOfCounts() {
return m_SumOfCounts;
}
/**
* Display a representation of this estimator
*/
@Override
public String toString() {
StringBuffer result = new StringBuffer("Discrete Estimator. Counts = ");
if (m_SumOfCounts > 1) {
for (int i = 0; i < m_Counts.length; i++) {
result.append(" ").append(Utils.doubleToString(m_Counts[i], 2));
}
result.append(" (Total = ").append(
Utils.doubleToString(m_SumOfCounts, 2));
result.append(")\n");
} else {
for (int i = 0; i < m_Counts.length; i++) {
result.append(" ").append(m_Counts[i]);
}
result.append(" (Total = ").append(m_SumOfCounts).append(")\n");
}
return result.toString();
}
/**
* Returns default capabilities of the classifier.
*
* @return the capabilities of this classifier
*/
@Override
public Capabilities getCapabilities() {
Capabilities result = super.getCapabilities();
result.disableAll();
// class
if (!m_noClass) {
result.enable(Capability.NOMINAL_CLASS);
result.enable(Capability.MISSING_CLASS_VALUES);
} else {
result.enable(Capability.NO_CLASS);
}
// attributes
result.enable(Capability.NUMERIC_ATTRIBUTES);
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
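  /**
   * Aggregates the counts of another DiscreteEstimator of the same size into
   * this one, subtracting the prior counts of the aggregated estimator.
   *
   * @param toAggregate the estimator whose counts are to be aggregated
   * @return this estimator, updated with the aggregated counts
   * @throws Exception if the estimator to aggregate has a different number of
   *           symbols
   */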
@Override
public DiscreteEstimator aggregate(DiscreteEstimator toAggregate)
throws Exception {
if (toAggregate.m_Counts.length != m_Counts.length) {
throw new Exception("DiscreteEstimator to aggregate has a different "
+ "number of symbols");
}
m_SumOfCounts += toAggregate.m_SumOfCounts;
for (int i = 0; i < m_Counts.length; i++) {
m_Counts[i] += (toAggregate.m_Counts[i] - toAggregate.m_FPrior);
}
m_SumOfCounts -= (toAggregate.m_FPrior * m_Counts.length);
return this;
}
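  /**
   * Signals the end of aggregation. Nothing needs to be done for this
   * estimator.
   */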
@Override
public void finalizeAggregation() throws Exception {
// nothing to do
}
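  /**
   * Simple self-test for aggregation: builds one estimator on 100 random
   * values and two estimators on the two halves, then aggregates the halves
   * and prints the resulting estimates for comparison.
   */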
protected static void testAggregation() {
DiscreteEstimator df = new DiscreteEstimator(5, true);
DiscreteEstimator one = new DiscreteEstimator(5, true);
DiscreteEstimator two = new DiscreteEstimator(5, true);
java.util.Random r = new java.util.Random(1);
for (int i = 0; i < 100; i++) {
int z = r.nextInt(5);
df.addValue(z, 1);
if (i < 50) {
one.addValue(z, 1);
} else {
two.addValue(z, 1);
}
}
try {
System.out.println("\n\nFull\n");
System.out.println(df.toString());
System.out.println("Prob (0): " + df.getProbability(0));
System.out.println("\nOne\n" + one.toString());
System.out.println("Prob (0): " + one.getProbability(0));
System.out.println("\nTwo\n" + two.toString());
System.out.println("Prob (0): " + two.getProbability(0));
one = one.aggregate(two);
System.out.println("\nAggregated\n");
System.out.println(one.toString());
System.out.println("Prob (0): " + one.getProbability(0));
} catch (Exception ex) {
ex.printStackTrace();
}
}
/**
* Main method for testing this class.
*
* @param argv should contain a sequence of integers which will be treated as
* symbolic.
*/
public static void main(String[] argv) {
try {
if (argv.length == 0) {
System.out.println("Please specify a set of instances.");
return;
}
int current = Integer.parseInt(argv[0]);
int max = current;
for (int i = 1; i < argv.length; i++) {
current = Integer.parseInt(argv[i]);
if (current > max) {
max = current;
}
}
DiscreteEstimator newEst = new DiscreteEstimator(max + 1, true);
for (int i = 0; i < argv.length; i++) {
current = Integer.parseInt(argv[i]);
System.out.println(newEst);
System.out.println("Prediction for " + current + " = "
+ newEst.getProbability(current));
newEst.addValue(current, 1);
}
DiscreteEstimator.testAggregation();
} catch (Exception e) {
System.out.println(e.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/Estimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Estimator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.Serializable;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Capabilities;
import weka.core.CapabilitiesHandler;
import weka.core.CapabilitiesIgnorer;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.SerializedObject;
import weka.core.Utils;
/**
*
* Abstract class for all estimators.
*
* Example code for a nonincremental estimator <code> <pre>
* // create a histogram for estimation
* EqualWidthEstimator est = new EqualWidthEstimator();
* est.addValues(instances, attrIndex);
* </pre> </code>
*
*
* Example code for an incremental estimator (incremental estimators must
* implement interface IncrementalEstimator) <code> <pre>
* // Create a discrete estimator that takes values 0 to 9
* DiscreteEstimator newEst = new DiscreteEstimator(10, true);
*
* // Create 50 random integers first predicting the probability of the
* // value, then adding the value to the estimator
* Random r = new Random(seed);
* for(int i = 0; i < 50; i++) {
* current = Math.abs(r.nextInt() % 10);
* System.out.println(newEst);
* System.out.println("Prediction for " + current
* + " = " + newEst.getProbability(current));
* newEst.addValue(current, 1);
* }
* </pre> </code>
*
*
* Example code for a main method for an estimator.
* <p>
* <code> <pre>
* public static void main(String [] argv) {
*
* try {
* LoglikeliEstimator est = new LoglikeliEstimator();
* Estimator.buildEstimator((Estimator) est, argv, false);
* System.out.println(est.toString());
* } catch (Exception ex) {
* ex.printStackTrace();
* System.out.println(ex.getMessage());
* }
* }
* </pre> </code>
*
*
* @author Gabi Schmidberger (gabi@cs.waikato.ac.nz)
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public abstract class Estimator implements Cloneable, Serializable,
OptionHandler, CapabilitiesHandler,
CapabilitiesIgnorer, RevisionHandler {
/** for serialization */
static final long serialVersionUID = -5902411487362274342L;
/** Debugging mode */
private boolean m_Debug = false;
/**
* The class value index is > -1 if subset is taken with specific class value
* only
*/
protected double m_classValueIndex = -1.0;
/** set if class is not important */
protected boolean m_noClass = true;
/**
* Class to support a building process of an estimator.
*/
private static class Builder implements Serializable, RevisionHandler {
/** for serialization */
private static final long serialVersionUID = -5810927990193597303L;
/** instances of the builder */
Instances m_instances = null;
/** attribute index of the builder */
int m_attrIndex = -1;
/** class index of the builder, only relevant if class value index is set */
int m_classIndex = -1;
/** class value index of the builder */
int m_classValueIndex = -1;
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
/** Whether capabilities should not be checked */
protected boolean m_DoNotCheckCapabilities = false;
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String doNotCheckCapabilitiesTipText() {
return "If set, estimator capabilities are not checked before estimator is built"
+ " (Use with caution to reduce runtime).";
}
/**
* Set whether not to check capabilities.
*
* @param doNotCheckCapabilities true if capabilities are not to be checked.
*/
public void setDoNotCheckCapabilities(boolean doNotCheckCapabilities) {
m_DoNotCheckCapabilities = doNotCheckCapabilities;
}
/**
* Get whether capabilities checking is turned off.
*
* @return true if capabilities checking is turned off.
*/
public boolean getDoNotCheckCapabilities() {
return m_DoNotCheckCapabilities;
}
/**
* Add a new data value to the current estimator.
*
* @param data the new data value
* @param weight the weight assigned to the data value
*/
public void addValue(double data, double weight) {
try {
throw new Exception("Method to add single value is not implemented!\n"
+ "Estimator should implement IncrementalEstimator.");
} catch (Exception ex) {
ex.printStackTrace();
System.out.println(ex.getMessage());
}
}
/**
* Initialize the estimator with a new dataset. Finds min and max first.
*
* @param data the dataset used to build this estimator
* @param attrIndex attribute the estimator is for
* @exception Exception if building of estimator goes wrong
*/
public void addValues(Instances data, int attrIndex) throws Exception {
// can estimator handle the data?
getCapabilities().testWithFail(data);
double[] minMax = new double[2];
try {
EstimatorUtils.getMinMax(data, attrIndex, minMax);
} catch (Exception ex) {
ex.printStackTrace();
System.out.println(ex.getMessage());
}
double min = minMax[0];
double max = minMax[1];
// factor is 1.0, data set has not been reduced
addValues(data, attrIndex, min, max, 1.0);
}
/**
* Initialize the estimator with all values of one attribute of a dataset.
 * Some estimators might ignore the min and max values.
*
* @param data the dataset used to build this estimator
* @param attrIndex attribute the estimator is for
* @param min minimal border of range
* @param max maximal border of range
 * @param factor the factor by which the number of instances has been reduced
* @exception Exception if building of estimator goes wrong
*/
public void addValues(Instances data, int attrIndex, double min, double max,
double factor) throws Exception {
// no handling of factor, would have to be overridden
// no handling of min and max, would have to be overridden
int numInst = data.numInstances();
for (int i = 1; i < numInst; i++) {
addValue(data.instance(i).value(attrIndex), 1.0);
}
}
/**
 * Initialize the estimator using only the instances of one class, using the
 * values of one attribute only.
*
* @param data the dataset used to build this estimator
* @param attrIndex attribute the estimator is for
* @param classIndex index of the class attribute
* @param classValue the class value
* @exception Exception if building of estimator goes wrong
*/
public void addValues(Instances data, int attrIndex, int classIndex,
int classValue) throws Exception {
// can estimator handle the data?
m_noClass = false;
getCapabilities().testWithFail(data);
// find the minimal and the maximal value
double[] minMax = new double[2];
try {
EstimatorUtils.getMinMax(data, attrIndex, minMax);
} catch (Exception ex) {
ex.printStackTrace();
System.out.println(ex.getMessage());
}
double min = minMax[0];
double max = minMax[1];
// extract the instances with the given class value
Instances workData = new Instances(data, 0);
double factor = getInstancesFromClass(data, attrIndex, classIndex,
classValue, workData);
// if no data return
if (workData.numInstances() == 0) {
return;
}
addValues(data, attrIndex, min, max, factor);
}
/**
 * Initialize the estimator using only the instances of one class, using the
 * values of one attribute only.
*
* @param data the dataset used to build this estimator
* @param attrIndex attribute the estimator is for
* @param classIndex index of the class attribute
* @param classValue the class value
* @param min minimal value of this attribute
* @param max maximal value of this attribute
* @exception Exception if building of estimator goes wrong
*/
public void addValues(Instances data, int attrIndex, int classIndex,
int classValue, double min, double max) throws Exception {
// extract the instances with the given class value
Instances workData = new Instances(data, 0);
double factor = getInstancesFromClass(data, attrIndex, classIndex,
classValue, workData);
// if no data return
if (workData.numInstances() == 0) {
return;
}
addValues(data, attrIndex, min, max, factor);
}
/**
 * Adds all instances with a non-missing value for the given attribute and the
 * given class value to the supplied working dataset.
 *
 * @param data dataset to select the instances from
 * @param attrIndex index of the relevant attribute
 * @param classIndex index of the class attribute
 * @param classValue the relevant class value
 * @param workData dataset that receives the selected instances
 * @return the fraction of non-missing instances that have the given class value
*/
private double getInstancesFromClass(Instances data, int attrIndex,
int classIndex, double classValue, Instances workData) {
// DBO.pln("getInstancesFromClass classValue"+classValue+" workData"+data.numInstances());
int num = 0;
int numClassValue = 0;
for (int i = 0; i < data.numInstances(); i++) {
if (!data.instance(i).isMissing(attrIndex)) {
num++;
if (data.instance(i).value(classIndex) == classValue) {
workData.add(data.instance(i));
numClassValue++;
}
}
}
Double alphaFactor = new Double((double) numClassValue / (double) num);
return alphaFactor;
}
/**
* Get a probability estimate for a value.
*
* @param data the value to estimate the probability of
* @return the estimated probability of the supplied value
*/
public abstract double getProbability(double data);
/**
* Build an estimator using the options. The data is given in the options.
*
* @param est the estimator used
* @param options the list of options
* @param isIncremental true if estimator is incremental
* @exception Exception if something goes wrong or the user requests help on
* command options
*/
public static void buildEstimator(Estimator est, String[] options,
boolean isIncremental) throws Exception {
// DBO.pln("buildEstimator");
// read all options
Builder build = new Builder();
try {
setGeneralOptions(build, est, options);
if (est instanceof OptionHandler) {
((OptionHandler) est).setOptions(options);
}
Utils.checkForRemainingOptions(options);
buildEstimator(est, build.m_instances, build.m_attrIndex,
build.m_classIndex, build.m_classValueIndex, isIncremental);
} catch (Exception ex) {
ex.printStackTrace();
System.out.println(ex.getMessage());
String specificOptions = "";
// Output the error and also the valid options
if (est instanceof OptionHandler) {
specificOptions += "\nEstimator options:\n\n";
Enumeration<Option> enumOptions = ((OptionHandler) est).listOptions();
while (enumOptions.hasMoreElements()) {
Option option = enumOptions.nextElement();
specificOptions += option.synopsis() + '\n' + option.description()
+ "\n";
}
}
String genericOptions = "\nGeneral options:\n\n"
+ "-h\n"
+ "\tGet help on available options.\n"
+ "-i <file>\n"
+ "\tThe name of the file containing input instances.\n"
+ "\tIf not supplied then instances will be read from stdin.\n"
+ "-a <attribute index>\n"
+ "\tThe number of the attribute the probability distribution\n"
+ "\testimation is done for.\n"
+ "\t\"first\" and \"last\" are also valid entries.\n"
+ "\tIf not supplied then no class is assigned.\n"
+ "-c <class index>\n"
+ "\tIf class value index is set, this attribute is taken as class.\n"
+ "\t\"first\" and \"last\" are also valid entries.\n"
+ "\tIf not supplied then last is default.\n"
+ "-v <class value index>\n"
+ "\tIf value is different to -1, select instances of this class value.\n"
+ "\t\"first\" and \"last\" are also valid entries.\n"
+ "\tIf not supplied then all instances are taken.\n";
throw new Exception('\n' + ex.getMessage() + specificOptions
+ genericOptions);
}
}
public static void buildEstimator(Estimator est, Instances instances,
int attrIndex, int classIndex, int classValueIndex, boolean isIncremental)
throws Exception {
// DBO.pln("buildEstimator 2 " + classValueIndex);
// non-incremental estimator add all instances at once
if (!isIncremental) {
if (classValueIndex == -1) {
// DBO.pln("before addValues -- Estimator");
est.addValues(instances, attrIndex);
} else {
// DBO.pln("before addValues with classvalue -- Estimator");
est.addValues(instances, attrIndex, classIndex, classValueIndex);
}
} else {
// incremental estimator, read one value at a time
Enumeration<Instance> enumInsts = (instances).enumerateInstances();
while (enumInsts.hasMoreElements()) {
Instance instance = enumInsts.nextElement();
((IncrementalEstimator) est).addValue(instance.value(attrIndex),
instance.weight());
}
}
}
/**
* Parses and sets the general options
*
* @param build contains the data used
* @param est the estimator used
* @param options the options from the command line
*/
private static void setGeneralOptions(Builder build, Estimator est,
String[] options) throws Exception {
Reader input = null;
// help request option
boolean helpRequest = Utils.getFlag('h', options);
if (helpRequest) {
throw new Exception("Help requested.\n");
}
// instances used
String infileName = Utils.getOption('i', options);
if (infileName.length() != 0) {
input = new BufferedReader(new FileReader(infileName));
} else {
input = new BufferedReader(new InputStreamReader(System.in));
}
build.m_instances = new Instances(input);
// attribute index
String attrIndex = Utils.getOption('a', options);
if (attrIndex.length() != 0) {
if (attrIndex.equals("first")) {
build.m_attrIndex = 0;
} else if (attrIndex.equals("last")) {
build.m_attrIndex = build.m_instances.numAttributes() - 1;
} else {
int index = Integer.parseInt(attrIndex) - 1;
if ((index < 0) || (index >= build.m_instances.numAttributes())) {
throw new IllegalArgumentException(
"Option a: attribute index out of range.");
}
build.m_attrIndex = index;
}
} else {
// default is the first attribute
build.m_attrIndex = 0;
}
// class index, if not given is set to last attribute
String classIndex = Utils.getOption('c', options);
if (classIndex.length() == 0) {
classIndex = "last";
}
if (classIndex.length() != 0) {
if (classIndex.equals("first")) {
build.m_classIndex = 0;
} else if (classIndex.equals("last")) {
build.m_classIndex = build.m_instances.numAttributes() - 1;
} else {
int cl = Integer.parseInt(classIndex);
if (cl == -1) {
build.m_classIndex = build.m_instances.numAttributes() - 1;
} else {
build.m_classIndex = cl - 1;
}
}
}
// class value index, if not given is set to -1
String classValueIndex = Utils.getOption('v', options);
if (classValueIndex.length() != 0) {
if (classValueIndex.equals("first")) {
build.m_classValueIndex = 0;
} else if (classValueIndex.equals("last")) {
build.m_classValueIndex = build.m_instances.numAttributes() - 1;
} else {
int cl = Integer.parseInt(classValueIndex);
if (cl == -1) {
build.m_classValueIndex = -1;
} else {
build.m_classValueIndex = cl - 1;
}
}
}
build.m_instances.setClassIndex(build.m_classIndex);
}
/**
* Creates a deep copy of the given estimator using serialization.
*
* @param model the estimator to copy
* @return a deep copy of the estimator
* @exception Exception if an error occurs
*/
public static Estimator clone(Estimator model) throws Exception {
return makeCopy(model);
}
/**
* Creates a deep copy of the given estimator using serialization.
*
* @param model the estimator to copy
* @return a deep copy of the estimator
* @exception Exception if an error occurs
*/
public static Estimator makeCopy(Estimator model) throws Exception {
return (Estimator) new SerializedObject(model).getObject();
}
/**
* Creates a given number of deep copies of the given estimator using
* serialization.
*
* @param model the estimator to copy
* @param num the number of estimator copies to create.
* @return an array of estimators.
* @exception Exception if an error occurs
*/
public static Estimator[] makeCopies(Estimator model, int num)
throws Exception {
if (model == null) {
throw new Exception("No model estimator set");
}
Estimator[] estimators = new Estimator[num];
SerializedObject so = new SerializedObject(model);
for (int i = 0; i < estimators.length; i++) {
estimators[i] = (Estimator) so.getObject();
}
return estimators;
}
/**
* Tests whether the current estimation object is equal to another estimation
* object
*
* @param obj the object to compare against
* @return true if the two objects are equal
*/
@Override
public boolean equals(Object obj) {
if ((obj == null) || !(obj.getClass().equals(this.getClass()))) {
return false;
}
Estimator cmp = (Estimator) obj;
if (m_Debug != cmp.m_Debug) {
return false;
}
if (m_classValueIndex != cmp.m_classValueIndex) {
return false;
}
if (m_noClass != cmp.m_noClass) {
return false;
}
return true;
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(1);
newVector.addElement(new Option(
"\tIf set, estimator is run in debug mode and\n"
+ "\tmay output additional info to the console", "D", 0, "-D"));
return newVector.elements();
}
/**
* Parses a given list of options. Valid options are:
* <p>
*
* -D <br>
* If set, estimator is run in debug mode and may output additional info to
* the console.
* <p>
*
* @param options the list of options as an array of strings
* @exception Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
setDebug(Utils.getFlag('D', options));
}
/**
* Gets the current settings of the Estimator.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
String[] options;
if (getDebug()) {
options = new String[1];
options[0] = "-D";
} else {
options = new String[0];
}
return options;
}
/**
 * Creates a new instance of an estimator given its class name and (optional)
 * arguments to pass to its setOptions method. If the estimator implements
 * OptionHandler and the options parameter is non-null, the estimator will
 * have its options set.
 *
 * @param name the fully qualified class name of the estimator
* @param options an array of options suitable for passing to setOptions. May
* be null.
* @return the newly created estimator, ready for use.
* @exception Exception if the estimator name is invalid, or the options
* supplied are not acceptable to the estimator
*/
public static Estimator forName(String name, String[] options)
throws Exception {
return (Estimator) Utils.forName(Estimator.class, name, options);
}
/**
* Set debugging mode.
*
* @param debug true if debug output should be printed
*/
public void setDebug(boolean debug) {
m_Debug = debug;
}
/**
* Get whether debugging is turned on.
*
* @return true if debugging output is on
*/
public boolean getDebug() {
return m_Debug;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String debugTipText() {
return "If set to true, estimator may output additional info to "
+ "the console.";
}
/**
* Returns the Capabilities of this Estimator. Derived estimators have to
* override this method to enable capabilities.
*
* @return the capabilities of this object
* @see Capabilities
*/
@Override
public Capabilities getCapabilities() {
Capabilities result = new Capabilities(this);
result.enableAll();
/*
* // class if (!m_noClass) { result.enable(Capability.NOMINAL_CLASS);
* result.enable(Capability.MISSING_CLASS_VALUES); } else {
* result.enable(Capability.NO_CLASS); }
*/
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Test if the estimator can handle the data.
*
* @param data the dataset the estimator takes an attribute from
* @param attrIndex the index of the attribute
* @see Capabilities
*/
public void testCapabilities(Instances data, int attrIndex) throws Exception {
getCapabilities().testWithFail(data);
getCapabilities().testWithFail(data.attribute(attrIndex));
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/EstimatorUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* EstimatorUtils.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import java.io.FileOutputStream;
import java.io.PrintWriter;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
/**
* Contains static utility functions for Estimators.
* <p>
*
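 * Typical usage, assuming <code>data</code> is an already loaded
 * <code>Instances</code> object and <code>attrIndex</code> a valid attribute
 * index (exception handling omitted): <code> <pre>
 * double[] minMax = new double[2];
 * int numPresent = EstimatorUtils.getMinMax(data, attrIndex, minMax);
 * double min = minMax[0];
 * double max = minMax[1];
 * </pre> </code>
 *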
* @author Gabi Schmidberger (gabi@cs.waikato.ac.nz)
* @version $Revision$
*/
public class EstimatorUtils implements RevisionHandler {
/**
* Find the minimum distance between values
*
 * @param inst the instances, sorted on the relevant attribute
 * @param attrIndex index of the attribute the instances are sorted on
* @return the minimal distance
*/
public static double findMinDistance(Instances inst, int attrIndex) {
double min = Double.MAX_VALUE;
int numInst = inst.numInstances();
double diff;
if (numInst < 2) {
return min;
}
int begin = -1;
Instance instance = null;
do {
begin++;
if (begin < numInst) {
instance = inst.instance(begin);
}
} while (begin < numInst && instance.isMissing(attrIndex));
double secondValue = inst.instance(begin).value(attrIndex);
for (int i = begin; i < numInst && !inst.instance(i).isMissing(attrIndex); i++) {
double firstValue = secondValue;
secondValue = inst.instance(i).value(attrIndex);
if (secondValue != firstValue) {
diff = secondValue - firstValue;
if (diff < min && diff > 0.0) {
min = diff;
}
}
}
return min;
}
/**
 * Find the minimum and the maximum of the attribute and return them in the
 * last parameter.
*
* @param inst instances used to build the estimator
* @param attrIndex index of the attribute
* @param minMax the array to return minimum and maximum in
* @return number of not missing values
* @exception Exception if parameter minMax wasn't initialized properly
*/
public static int getMinMax(Instances inst, int attrIndex, double[] minMax)
throws Exception {
double min = Double.NaN;
double max = Double.NaN;
Instance instance = null;
int numNotMissing = 0;
if ((minMax == null) || (minMax.length < 2)) {
throw new Exception("Error in Program, privat method getMinMax");
}
Enumeration<Instance> enumInst = inst.enumerateInstances();
if (enumInst.hasMoreElements()) {
do {
instance = enumInst.nextElement();
} while (instance.isMissing(attrIndex) && (enumInst.hasMoreElements()));
// add values if not missing
if (!instance.isMissing(attrIndex)) {
numNotMissing++;
min = instance.value(attrIndex);
max = instance.value(attrIndex);
}
while (enumInst.hasMoreElements()) {
instance = enumInst.nextElement();
if (!instance.isMissing(attrIndex)) {
numNotMissing++;
if (instance.value(attrIndex) < min) {
min = (instance.value(attrIndex));
} else {
if (instance.value(attrIndex) > max) {
max = (instance.value(attrIndex));
}
}
}
}
}
minMax[0] = min;
minMax[1] = max;
return numNotMissing;
}
/**
 * Adds all instances with a non-missing value for the given attribute and the
 * given class value to the supplied working dataset.
 *
 * @param data dataset to select the instances from
 * @param attrIndex index of the relevant attribute
 * @param classIndex index of the class attribute
 * @param classValue the relevant class value
 * @param workData dataset that receives the selected instances
 * @return a vector containing the working dataset and the fraction of
 *         non-missing instances that have the given class value
*/
public static Vector<Object> getInstancesFromClass(Instances data,
int attrIndex, int classIndex, double classValue, Instances workData) {
// Oops.pln("getInstancesFromClass classValue"+classValue+" workData"+data.numInstances());
Vector<Object> dataPlusInfo = new Vector<Object>(0);
int num = 0;
int numClassValue = 0;
// workData = new Instances(data, 0);
for (int i = 0; i < data.numInstances(); i++) {
if (!data.instance(i).isMissing(attrIndex)) {
num++;
if (data.instance(i).value(classIndex) == classValue) {
workData.add(data.instance(i));
numClassValue++;
}
}
}
Double alphaFactor = new Double((double) numClassValue / (double) num);
dataPlusInfo.add(workData);
dataPlusInfo.add(alphaFactor);
return dataPlusInfo;
}
/**
 * Returns a dataset that contains all instances of a certain class value.
*
* @param data dataset to select the instances from
* @param classIndex index of the class attribute
* @param classValue the class value
* @return a dataset with only instances of one class value
*/
public static Instances getInstancesFromClass(Instances data, int classIndex,
double classValue) {
Instances workData = new Instances(data, 0);
for (int i = 0; i < data.numInstances(); i++) {
if (data.instance(i).value(classIndex) == classValue) {
workData.add(data.instance(i));
}
}
return workData;
}
/**
 * Outputs n points of a density curve to a file. The filename is parameter f +
 * ".curv".
 *
 * @param f string to build the filename from
 * @param est the estimator providing the density values
 * @param min the minimum value of the output range
 * @param max the maximum value of the output range
 * @param numPoints the number of points to output
* @throws Exception if something goes wrong
*/
public static void writeCurve(String f, Estimator est, double min,
double max, int numPoints) throws Exception {
PrintWriter output = null;
StringBuffer text = new StringBuffer("");
if (f.length() != 0) {
      // build the output filename with extension .curv
String name = f + ".curv";
output = new PrintWriter(new FileOutputStream(name));
} else {
return;
}
double diff = (max - min) / (numPoints - 1.0);
try {
text.append("" + min + " " + est.getProbability(min) + " \n");
for (double value = min + diff; value < max; value += diff) {
text.append("" + value + " " + est.getProbability(value) + " \n");
}
text.append("" + max + " " + est.getProbability(max) + " \n");
} catch (Exception ex) {
ex.printStackTrace();
System.out.println(ex.getMessage());
}
output.println(text.toString());
// close output
if (output != null) {
output.close();
}
}
/**
 * Outputs n points of a density curve, weighted by a class probability, to a
 * file. The filename is parameter f + ".curv".
 *
 * @param f string to build the filename from
 * @param est the estimator providing the density values
 * @param classEst the estimator providing the class probability
 * @param classIndex the class value whose probability is used as a weight
 * @param min the minimum value of the output range
 * @param max the maximum value of the output range
 * @param numPoints the number of points to output
* @throws Exception if something goes wrong
*/
public static void writeCurve(String f, Estimator est, Estimator classEst,
double classIndex, double min, double max, int numPoints) throws Exception {
PrintWriter output = null;
StringBuffer text = new StringBuffer("");
if (f.length() != 0) {
      // build the output filename with extension .curv
String name = f + ".curv";
output = new PrintWriter(new FileOutputStream(name));
} else {
return;
}
double diff = (max - min) / (numPoints - 1.0);
try {
text.append("" + min + " " + est.getProbability(min)
* classEst.getProbability(classIndex) + " \n");
for (double value = min + diff; value < max; value += diff) {
text.append("" + value + " " + est.getProbability(value)
* classEst.getProbability(classIndex) + " \n");
}
text.append("" + max + " " + est.getProbability(max)
* classEst.getProbability(classIndex) + " \n");
} catch (Exception ex) {
ex.printStackTrace();
System.out.println(ex.getMessage());
}
output.println(text.toString());
// close output
if (output != null) {
output.close();
}
}
/**
 * Returns a dataset that contains all instances of a certain value for the
* given attribute.
*
* @param data dataset to select the instances from
* @param index the index of the attribute
* @param v the value
* @return a subdataset with only instances of one value for the attribute
*/
public static Instances getInstancesFromValue(Instances data, int index,
double v) {
Instances workData = new Instances(data, 0);
for (int i = 0; i < data.numInstances(); i++) {
if (data.instance(i).value(index) == v) {
workData.add(data.instance(i));
}
}
return workData;
}
/**
* Returns a string representing the cutpoints
*/
public static String cutpointsToString(double[] cutPoints,
boolean[] cutAndLeft) {
StringBuffer text = new StringBuffer("");
if (cutPoints == null) {
text.append("\n# no cutpoints found - attribute \n");
} else {
text.append("\n#* " + cutPoints.length + " cutpoint(s) -\n");
for (int i = 0; i < cutPoints.length; i++) {
text.append("# " + cutPoints[i] + " ");
text.append("" + cutAndLeft[i] + "\n");
}
text.append("# end\n");
}
return text.toString();
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/IncrementalEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* IncrementalEstimator.java
* Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
/**
 * Interface for incremental probability estimators.<p>
*
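 * A minimal usage sketch with one of the implementing estimators (precision
 * and values are illustrative only): <code> <pre>
 * IncrementalEstimator est = new KernelEstimator(0.01);
 * est.addValue(1.2, 1);   // add observations one at a time
 * est.addValue(3.4, 2);   // an observation with weight 2
 * </pre> </code>
 *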
* @author Gabi Schmidberger (gabi@cs.waikato.ac.nz)
* @version $Revision$
*/
public interface IncrementalEstimator {
/**
* Add one value to the current estimator.
*
* @param data the new data value
* @param weight the weight assigned to the data value
*/
void addValue(double data, double weight);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/KDConditionalEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* KDConditionalEstimator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import weka.core.RevisionUtils;
/**
* Conditional probability estimator for a numeric domain conditional upon
* a discrete domain (utilises separate kernel estimators for each discrete
* conditioning value).
*
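 * <p>
 * A minimal usage sketch (the number of conditioning symbols, precision and
 * values below are illustrative only): <code> <pre>
 * // numeric data conditioned on two symbols, precision 0.1
 * KDConditionalEstimator est = new KDConditionalEstimator(2, 0.1);
 * est.addValue(4.2, 0, 1);   // value 4.2 observed under conditioning symbol 0
 * est.addValue(5.0, 1, 1);
 * double p = est.getProbability(4.5, 0);
 * </pre> </code>
 *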
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class KDConditionalEstimator implements ConditionalEstimator {
/** Hold the sub-estimators */
private KernelEstimator [] m_Estimators;
/**
* Constructor
*
* @param numCondSymbols the number of conditioning symbols
* @param precision the precision to which numeric values are given. For
* example, if the precision is stated to be 0.1, the values in the
* interval (0.25,0.35] are all treated as 0.3.
*/
public KDConditionalEstimator(int numCondSymbols, double precision) {
m_Estimators = new KernelEstimator [numCondSymbols];
for(int i = 0; i < numCondSymbols; i++) {
m_Estimators[i] = new KernelEstimator(precision);
}
}
/**
* Add a new data value to the current estimator.
*
* @param data the new data value
* @param given the new value that data is conditional upon
* @param weight the weight assigned to the data value
*/
public void addValue(double data, double given, double weight) {
m_Estimators[(int)given].addValue(data, weight);
}
/**
* Get a probability estimator for a value
*
* @param given the new value that data is conditional upon
* @return the estimator for the supplied value given the condition
*/
public Estimator getEstimator(double given) {
return m_Estimators[(int)given];
}
/**
* Get a probability estimate for a value
*
* @param data the value to estimate the probability of
* @param given the new value that data is conditional upon
* @return the estimated probability of the supplied value
*/
public double getProbability(double data, double given) {
return getEstimator(given).getProbability(data);
}
/** Display a representation of this estimator */
public String toString() {
String result = "KD Conditional Estimator. "
+ m_Estimators.length + " sub-estimators:\n";
for(int i = 0; i < m_Estimators.length; i++) {
result += "Sub-estimator " + i + ": " + m_Estimators[i];
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class.
*
* @param argv should contain a sequence of pairs of integers which
* will be treated as numeric, symbolic.
*/
public static void main(String [] argv) {
try {
if (argv.length == 0) {
System.out.println("Please specify a set of instances.");
return;
}
int currentA = Integer.parseInt(argv[0]);
int maxA = currentA;
int currentB = Integer.parseInt(argv[1]);
int maxB = currentB;
for(int i = 2; i < argv.length - 1; i += 2) {
currentA = Integer.parseInt(argv[i]);
currentB = Integer.parseInt(argv[i + 1]);
if (currentA > maxA) {
maxA = currentA;
}
if (currentB > maxB) {
maxB = currentB;
}
}
KDConditionalEstimator newEst = new KDConditionalEstimator(maxB + 1,
1);
for(int i = 0; i < argv.length - 1; i += 2) {
currentA = Integer.parseInt(argv[i]);
currentB = Integer.parseInt(argv[i + 1]);
System.out.println(newEst);
System.out.println("Prediction for " + currentA + '|' + currentB
+ " = "
+ newEst.getProbability(currentA, currentB));
newEst.addValue(currentA, currentB, 1);
}
} catch (Exception e) {
System.out.println(e.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/KKConditionalEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* KKConditionalEstimator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import java.util.Random;
import weka.core.RevisionUtils;
import weka.core.Statistics;
import weka.core.Utils;
/**
* Conditional probability estimator for a numeric domain conditional upon
* a numeric domain.
*
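 * <p>
 * A minimal usage sketch (precision and values are illustrative only):
 * <code> <pre>
 * KKConditionalEstimator est = new KKConditionalEstimator(0.1);
 * est.addValue(10, 20, 1);   // data value 10 observed with conditioning value 20
 * est.addValue(12, 22, 1);
 * double p = est.getProbability(11, 21);
 * </pre> </code>
 *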
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class KKConditionalEstimator implements ConditionalEstimator {
/** Vector containing all of the values seen */
private double [] m_Values;
/** Vector containing all of the conditioning values seen */
private double [] m_CondValues;
/** Vector containing the associated weights */
private double [] m_Weights;
/**
* Number of values stored in m_Weights, m_CondValues, and m_Values so far
*/
private int m_NumValues;
/** The sum of the weights so far */
private double m_SumOfWeights;
/** Current standard dev */
private double m_StandardDev;
/** Whether we can optimise the kernel summation */
private boolean m_AllWeightsOne;
/** The numeric precision */
private double m_Precision;
/**
 * Execute a binary search to locate the pair of values nearest to the
 * given pair
 *
 * @param key the conditioning value to locate
 * @param secondaryKey the data value to locate
 * @return the index of the nearest pair of values
*/
private int findNearestPair(double key, double secondaryKey) {
int low = 0;
int high = m_NumValues;
int middle = 0;
while (low < high) {
middle = (low + high) / 2;
double current = m_CondValues[middle];
if (current == key) {
double secondary = m_Values[middle];
if (secondary == secondaryKey) {
return middle;
}
if (secondary > secondaryKey) {
high = middle;
} else if (secondary < secondaryKey) {
low = middle+1;
}
}
if (current > key) {
high = middle;
} else if (current < key) {
low = middle+1;
}
}
return low;
}
/**
* Round a data value using the defined precision for this estimator
*
* @param data the value to round
* @return the rounded data value
*/
private double round(double data) {
return Math.rint(data / m_Precision) * m_Precision;
}
/**
* Constructor
*
* @param precision the precision to which numeric values are given. For
* example, if the precision is stated to be 0.1, the values in the
* interval (0.25,0.35] are all treated as 0.3.
*/
public KKConditionalEstimator(double precision) {
m_CondValues = new double [50];
m_Values = new double [50];
m_Weights = new double [50];
m_NumValues = 0;
m_SumOfWeights = 0;
m_StandardDev = 0;
m_AllWeightsOne = true;
m_Precision = precision;
}
/**
* Add a new data value to the current estimator.
*
* @param data the new data value
* @param given the new value that data is conditional upon
* @param weight the weight assigned to the data value
*/
public void addValue(double data, double given, double weight) {
data = round(data);
given = round(given);
int insertIndex = findNearestPair(given, data);
if ((m_NumValues <= insertIndex)
|| (m_CondValues[insertIndex] != given)
|| (m_Values[insertIndex] != data)) {
if (m_NumValues < m_Values.length) {
int left = m_NumValues - insertIndex;
System.arraycopy(m_Values, insertIndex,
m_Values, insertIndex + 1, left);
System.arraycopy(m_CondValues, insertIndex,
m_CondValues, insertIndex + 1, left);
System.arraycopy(m_Weights, insertIndex,
m_Weights, insertIndex + 1, left);
m_Values[insertIndex] = data;
m_CondValues[insertIndex] = given;
m_Weights[insertIndex] = weight;
m_NumValues++;
} else {
double [] newValues = new double [m_Values.length*2];
double [] newCondValues = new double [m_Values.length*2];
double [] newWeights = new double [m_Values.length*2];
int left = m_NumValues - insertIndex;
System.arraycopy(m_Values, 0, newValues, 0, insertIndex);
System.arraycopy(m_CondValues, 0, newCondValues, 0, insertIndex);
System.arraycopy(m_Weights, 0, newWeights, 0, insertIndex);
newValues[insertIndex] = data;
newCondValues[insertIndex] = given;
newWeights[insertIndex] = weight;
System.arraycopy(m_Values, insertIndex,
newValues, insertIndex+1, left);
System.arraycopy(m_CondValues, insertIndex,
newCondValues, insertIndex+1, left);
System.arraycopy(m_Weights, insertIndex,
newWeights, insertIndex+1, left);
m_NumValues++;
m_Values = newValues;
m_CondValues = newCondValues;
m_Weights = newWeights;
}
if (weight != 1) {
m_AllWeightsOne = false;
}
} else {
m_Weights[insertIndex] += weight;
m_AllWeightsOne = false;
}
m_SumOfWeights += weight;
double range = m_CondValues[m_NumValues-1] - m_CondValues[0];
m_StandardDev = Math.max(range / Math.sqrt(m_SumOfWeights),
// allow at most 3 sds within one interval
m_Precision / (2 * 3));
}
/**
* Get a probability estimator for a value
*
* @param given the new value that data is conditional upon
* @return the estimator for the supplied value given the condition
*/
public Estimator getEstimator(double given) {
Estimator result = new KernelEstimator(m_Precision);
if (m_NumValues == 0) {
return result;
}
double delta = 0, currentProb = 0;
double zLower, zUpper;
for(int i = 0; i < m_NumValues; i++) {
delta = m_CondValues[i] - given;
zLower = (delta - (m_Precision / 2)) / m_StandardDev;
zUpper = (delta + (m_Precision / 2)) / m_StandardDev;
currentProb = (Statistics.normalProbability(zUpper)
- Statistics.normalProbability(zLower));
result.addValue(m_Values[i], currentProb * m_Weights[i]);
}
return result;
}
/**
* Get a probability estimate for a value
*
* @param data the value to estimate the probability of
* @param given the new value that data is conditional upon
* @return the estimated probability of the supplied value
*/
public double getProbability(double data, double given) {
return getEstimator(given).getProbability(data);
}
/**
* Display a representation of this estimator
*/
public String toString() {
String result = "KK Conditional Estimator. "
+ m_NumValues + " Normal Kernels:\n"
+ "StandardDev = " + Utils.doubleToString(m_StandardDev,4,2)
+ " \nMeans =";
for(int i = 0; i < m_NumValues; i++) {
result += " (" + m_Values[i] + ", " + m_CondValues[i] + ")";
if (!m_AllWeightsOne) {
result += "w=" + m_Weights[i];
}
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class. Creates some random points
* in the range 0 - 100,
* and prints out a distribution conditional on some value
*
* @param argv should contain: seed conditional_value numpoints
*/
public static void main(String [] argv) {
try {
int seed = 42;
if (argv.length > 0) {
seed = Integer.parseInt(argv[0]);
}
KKConditionalEstimator newEst = new KKConditionalEstimator(0.1);
// Create 100 random points and add them
Random r = new Random(seed);
int numPoints = 50;
if (argv.length > 2) {
numPoints = Integer.parseInt(argv[2]);
}
for(int i = 0; i < numPoints; i++) {
int x = Math.abs(r.nextInt()%100);
int y = Math.abs(r.nextInt()%100);
System.out.println("# " + x + " " + y);
newEst.addValue(x, y, 1);
}
// System.out.println(newEst);
int cond;
if (argv.length > 1) {
cond = Integer.parseInt(argv[1]);
} else {
cond = Math.abs(r.nextInt()%100);
}
System.out.println("## Conditional = " + cond);
Estimator result = newEst.getEstimator(cond);
for(int i = 0; i <= 100; i+= 5) {
System.out.println(" " + i + " " + result.getProbability(i));
}
} catch (Exception e) {
System.out.println(e.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/KernelEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* KernelEstimator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Aggregateable;
import weka.core.RevisionUtils;
import weka.core.Statistics;
import weka.core.Utils;
/**
* Simple kernel density estimator. Uses one gaussian kernel per observed data
* value.
*
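 * <p>
 * A minimal usage sketch (precision and values are illustrative only):
 * <code> <pre>
 * KernelEstimator est = new KernelEstimator(0.01);
 * est.addValue(1.0, 1);
 * est.addValue(2.0, 1);
 * double p = est.getProbability(1.5);
 * </pre> </code>
 *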
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class KernelEstimator extends Estimator implements IncrementalEstimator,
Aggregateable<KernelEstimator> {
/** for serialization */
private static final long serialVersionUID = 3646923563367683925L;
/** Vector containing all of the values seen */
private double[] m_Values;
/** Vector containing the associated weights */
private double[] m_Weights;
/** Number of values stored in m_Weights and m_Values so far */
private int m_NumValues;
/** The sum of the weights so far */
private double m_SumOfWeights;
/** The standard deviation */
private double m_StandardDev;
/** The precision of data values */
private double m_Precision;
/** Whether we can optimise the kernel summation */
private boolean m_AllWeightsOne;
/** Maximum percentage error permitted in probability calculations */
private static double MAX_ERROR = 0.01;
/**
* Execute a binary search to locate the nearest data value
*
 * @param key the data value to locate
* @return the index of the nearest data value
*/
private int findNearestValue(double key) {
int low = 0;
int high = m_NumValues;
int middle = 0;
while (low < high) {
middle = (low + high) / 2;
double current = m_Values[middle];
if (current == key) {
return middle;
}
if (current > key) {
high = middle;
} else if (current < key) {
low = middle + 1;
}
}
return low;
}
/**
* Round a data value using the defined precision for this estimator
*
* @param data the value to round
* @return the rounded data value
*/
private double round(double data) {
return Math.rint(data / m_Precision) * m_Precision;
}
// ===============
// Public methods.
// ===============
/**
* Constructor that takes a precision argument.
*
* @param precision the precision to which numeric values are given. For
* example, if the precision is stated to be 0.1, the values in the
* interval (0.25,0.35] are all treated as 0.3.
*/
public KernelEstimator(double precision) {
m_Values = new double[50];
m_Weights = new double[50];
m_NumValues = 0;
m_SumOfWeights = 0;
m_AllWeightsOne = true;
m_Precision = precision;
// precision cannot be zero
if (m_Precision < Utils.SMALL)
m_Precision = Utils.SMALL;
// m_StandardDev = 1e10 * m_Precision; // Set the standard deviation
// initially very wide
m_StandardDev = m_Precision / (2 * 3);
}
/**
* Add a new data value to the current estimator.
*
* @param data the new data value
* @param weight the weight assigned to the data value
*/
@Override
public void addValue(double data, double weight) {
if (weight == 0) {
return;
}
data = round(data);
int insertIndex = findNearestValue(data);
if ((m_NumValues <= insertIndex) || (m_Values[insertIndex] != data)) {
if (m_NumValues < m_Values.length) {
int left = m_NumValues - insertIndex;
        System.arraycopy(m_Values, insertIndex, m_Values, insertIndex + 1, left);
        System.arraycopy(m_Weights, insertIndex, m_Weights, insertIndex + 1, left);
m_Values[insertIndex] = data;
m_Weights[insertIndex] = weight;
m_NumValues++;
} else {
double[] newValues = new double[m_Values.length * 2];
double[] newWeights = new double[m_Values.length * 2];
int left = m_NumValues - insertIndex;
System.arraycopy(m_Values, 0, newValues, 0, insertIndex);
System.arraycopy(m_Weights, 0, newWeights, 0, insertIndex);
newValues[insertIndex] = data;
newWeights[insertIndex] = weight;
System.arraycopy(m_Values, insertIndex, newValues, insertIndex + 1,
left);
System.arraycopy(m_Weights, insertIndex, newWeights, insertIndex + 1,
left);
m_NumValues++;
m_Values = newValues;
m_Weights = newWeights;
}
if (weight != 1) {
m_AllWeightsOne = false;
}
} else {
m_Weights[insertIndex] += weight;
m_AllWeightsOne = false;
}
m_SumOfWeights += weight;
double range = m_Values[m_NumValues - 1] - m_Values[0];
if (range > 0) {
m_StandardDev = Math.max(range / Math.sqrt(m_SumOfWeights),
// allow at most 3 sds within one interval
m_Precision / (2 * 3));
}
}
/**
* Get a probability estimate for a value.
*
* @param data the value to estimate the probability of
* @return the estimated probability of the supplied value
*/
@Override
public double getProbability(double data) {
double delta = 0, sum = 0, currentProb = 0;
double zLower = 0, zUpper = 0;
if (m_NumValues == 0) {
zLower = (data - (m_Precision / 2)) / m_StandardDev;
zUpper = (data + (m_Precision / 2)) / m_StandardDev;
return (Statistics.normalProbability(zUpper) - Statistics
.normalProbability(zLower));
}
double weightSum = 0;
int start = findNearestValue(data);
for (int i = start; i < m_NumValues; i++) {
delta = m_Values[i] - data;
zLower = (delta - (m_Precision / 2)) / m_StandardDev;
zUpper = (delta + (m_Precision / 2)) / m_StandardDev;
currentProb = (Statistics.normalProbability(zUpper) - Statistics
.normalProbability(zLower));
sum += currentProb * m_Weights[i];
/*
* System.out.print("zL" + (i + 1) + ": " + zLower + " ");
* System.out.print("zU" + (i + 1) + ": " + zUpper + " ");
* System.out.print("P" + (i + 1) + ": " + currentProb + " ");
* System.out.println("total: " + (currentProb * m_Weights[i]) + " ");
*/
weightSum += m_Weights[i];
if (currentProb * (m_SumOfWeights - weightSum) < sum * MAX_ERROR) {
break;
}
}
for (int i = start - 1; i >= 0; i--) {
delta = m_Values[i] - data;
zLower = (delta - (m_Precision / 2)) / m_StandardDev;
zUpper = (delta + (m_Precision / 2)) / m_StandardDev;
currentProb = (Statistics.normalProbability(zUpper) - Statistics
.normalProbability(zLower));
sum += currentProb * m_Weights[i];
weightSum += m_Weights[i];
if (currentProb * (m_SumOfWeights - weightSum) < sum * MAX_ERROR) {
break;
}
}
return sum / m_SumOfWeights;
}
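  /*
   * Illustrative sketch (hypothetical helper, not part of the original class):
   * for a non-empty estimator, getProbability() evaluates a weighted mixture
   * of Gaussian kernels, one per stored value, each integrated over an interval
   * of width m_Precision around the query point. The same computation without
   * the early-termination optimisation based on MAX_ERROR:
   */
  private double getProbabilityNaive(double data) {
    double sum = 0;
    for (int i = 0; i < m_NumValues; i++) {
      double delta = m_Values[i] - data;
      double zLower = (delta - (m_Precision / 2)) / m_StandardDev;
      double zUpper = (delta + (m_Precision / 2)) / m_StandardDev;
      // contribution of the kernel centred at m_Values[i], weighted by m_Weights[i]
      sum += m_Weights[i]
        * (Statistics.normalProbability(zUpper) - Statistics.normalProbability(zLower));
    }
    return (m_SumOfWeights > 0) ? sum / m_SumOfWeights : 0;
  }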
/** Display a representation of this estimator */
@Override
public String toString() {
String result = m_NumValues + " Normal Kernels. \nStandardDev = "
+ Utils.doubleToString(m_StandardDev, 6, 4) + " Precision = "
+ m_Precision;
if (m_NumValues == 0) {
result += " \nMean = 0";
} else {
result += " \nMeans =";
for (int i = 0; i < m_NumValues; i++) {
result += " " + m_Values[i];
}
if (!m_AllWeightsOne) {
result += "\nWeights = ";
for (int i = 0; i < m_NumValues; i++) {
result += " " + m_Weights[i];
}
}
}
return result + "\n";
}
/**
* Return the number of kernels in this kernel estimator
*
* @return the number of kernels
*/
public int getNumKernels() {
return m_NumValues;
}
/**
* Return the means of the kernels.
*
* @return the means of the kernels
*/
public double[] getMeans() {
return m_Values;
}
/**
* Return the weights of the kernels.
*
* @return the weights of the kernels
*/
public double[] getWeights() {
return m_Weights;
}
/**
* Return the precision of this kernel estimator.
*
* @return the precision
*/
public double getPrecision() {
return m_Precision;
}
/**
* Return the standard deviation of this kernel estimator.
*
* @return the standard deviation
*/
public double getStdDev() {
return m_StandardDev;
}
/**
* Returns default capabilities of the classifier.
*
* @return the capabilities of this classifier
*/
@Override
public Capabilities getCapabilities() {
Capabilities result = super.getCapabilities();
result.disableAll();
// class
if (!m_noClass) {
result.enable(Capability.NOMINAL_CLASS);
result.enable(Capability.MISSING_CLASS_VALUES);
} else {
result.enable(Capability.NO_CLASS);
}
// attributes
result.enable(Capability.NUMERIC_ATTRIBUTES);
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
@Override
public KernelEstimator aggregate(KernelEstimator toAggregate)
throws Exception {
for (int i = 0; i < toAggregate.m_NumValues; i++) {
addValue(toAggregate.m_Values[i], toAggregate.m_Weights[i]);
}
return this;
}
@Override
public void finalizeAggregation() throws Exception {
// nothing to do
}
public static void testAggregation() {
KernelEstimator ke = new KernelEstimator(0.01);
KernelEstimator one = new KernelEstimator(0.01);
KernelEstimator two = new KernelEstimator(0.01);
java.util.Random r = new java.util.Random(1);
for (int i = 0; i < 100; i++) {
double z = r.nextDouble();
ke.addValue(z, 1);
if (i < 50) {
one.addValue(z, 1);
} else {
two.addValue(z, 1);
}
}
try {
System.out.println("\n\nFull\n");
System.out.println(ke.toString());
System.out.println("Prob (0): " + ke.getProbability(0));
System.out.println("\nOne\n" + one.toString());
System.out.println("Prob (0): " + one.getProbability(0));
System.out.println("\nTwo\n" + two.toString());
System.out.println("Prob (0): " + two.getProbability(0));
one = one.aggregate(two);
System.out.println("Aggregated\n");
System.out.println(one.toString());
System.out.println("Prob (0): " + one.getProbability(0));
} catch (Exception ex) {
ex.printStackTrace();
}
}
/**
* Main method for testing this class.
*
* @param argv should contain a sequence of numeric values
*/
public static void main(String[] argv) {
try {
if (argv.length < 2) {
System.out.println("Please specify a set of instances.");
return;
}
KernelEstimator newEst = new KernelEstimator(0.01);
for (int i = 0; i < argv.length - 3; i += 2) {
newEst.addValue(Double.valueOf(argv[i]).doubleValue(),
Double.valueOf(argv[i + 1]).doubleValue());
}
System.out.println(newEst);
double start = Double.valueOf(argv[argv.length - 2]).doubleValue();
double finish = Double.valueOf(argv[argv.length - 1]).doubleValue();
for (double current = start; current < finish; current += (finish - start) / 50) {
System.out.println("Data: " + current + " "
+ newEst.getProbability(current));
}
KernelEstimator.testAggregation();
} catch (Exception e) {
System.out.println(e.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/MahalanobisEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* MahalanobisEstimator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.matrix.Matrix;
/**
* Simple probability estimator that places a single normal distribution
* over the observed values.
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class MahalanobisEstimator extends Estimator implements IncrementalEstimator {
/** for serialization */
private static final long serialVersionUID = 8950225468990043868L;
/** The inverse of the covariance matrix */
private Matrix m_CovarianceInverse;
/** The determinant of the covariance matrix */
private double m_Determinant;
/**
* The difference between the conditioning value and the conditioning mean
*/
private double m_ConstDelta;
/** The mean of the values */
private double m_ValueMean;
/** 2 * PI */
private static double TWO_PI = 2 * Math.PI;
/**
* Returns value for normal kernel
*
* @param x the argument to the kernel function
* @return the value for a normal kernel
*/
private double normalKernel(double x) {
Matrix thisPoint = new Matrix(1, 2);
thisPoint.set(0, 0, x);
thisPoint.set(0, 1, m_ConstDelta);
return Math.exp(-thisPoint.times(m_CovarianceInverse).
times(thisPoint.transpose()).get(0, 0)
/ 2) / (Math.sqrt(TWO_PI) * m_Determinant);
}
/**
* Constructor
*
   * @param covariance the 2x2 covariance matrix of the value and the conditioning value
   * @param constDelta the difference between the conditioning value and the conditioning mean
   * @param valueMean the mean of the values
*/
public MahalanobisEstimator(Matrix covariance, double constDelta,
double valueMean) {
m_CovarianceInverse = null;
if ((covariance.getRowDimension() == 2) && (covariance.getColumnDimension() == 2)) {
double a = covariance.get(0, 0);
double b = covariance.get(0, 1);
double c = covariance.get(1, 0);
double d = covariance.get(1, 1);
if (a == 0) {
a = c; c = 0;
double temp = b;
b = d; d = temp;
}
if (a == 0) {
return;
}
double denom = d - c * b / a;
if (denom == 0) {
return;
}
m_Determinant = covariance.get(0, 0) * covariance.get(1, 1)
- covariance.get(1, 0) * covariance.get(0, 1);
m_CovarianceInverse = new Matrix(2, 2);
m_CovarianceInverse.set(0, 0, 1.0 / a + b * c / a / a / denom);
m_CovarianceInverse.set(0, 1, -b / a / denom);
m_CovarianceInverse.set(1, 0, -c / a / denom);
m_CovarianceInverse.set(1, 1, 1.0 / denom);
m_ConstDelta = constDelta;
m_ValueMean = valueMean;
}
}
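  /*
   * Illustrative sketch (hypothetical helper, not part of the original class):
   * the constructor above inverts the 2x2 covariance matrix [[a, b], [c, d]]
   * in closed form. For reference, the textbook inverse of a plain 2x2 matrix
   * is (1 / (a*d - b*c)) * [[d, -b], [-c, a]], written out below.
   */
  private static Matrix invert2x2(Matrix m) {
    double a = m.get(0, 0), b = m.get(0, 1);
    double c = m.get(1, 0), d = m.get(1, 1);
    double det = a * d - b * c; // assumed to be non-zero
    Matrix inverse = new Matrix(2, 2);
    inverse.set(0, 0, d / det);
    inverse.set(0, 1, -b / det);
    inverse.set(1, 0, -c / det);
    inverse.set(1, 1, a / det);
    return inverse;
  }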
/**
* Add a new data value to the current estimator. Does nothing because the
* data is provided in the constructor.
*
* @param data the new data value
* @param weight the weight assigned to the data value
*/
public void addValue(double data, double weight) {
}
/**
* Get a probability estimate for a value
*
* @param data the value to estimate the probability of
* @return the estimated probability of the supplied value
*/
public double getProbability(double data) {
double delta = data - m_ValueMean;
if (m_CovarianceInverse == null) {
return 0;
}
return normalKernel(delta);
}
/** Display a representation of this estimator */
public String toString() {
if (m_CovarianceInverse == null) {
return "No covariance inverse\n";
}
return "Mahalanovis Distribution. Mean = "
+ Utils.doubleToString(m_ValueMean, 4, 2)
+ " ConditionalOffset = "
+ Utils.doubleToString(m_ConstDelta, 4, 2) + "\n"
+ "Covariance Matrix: Determinant = " + m_Determinant
+ " Inverse:\n" + m_CovarianceInverse;
}
/**
* Returns default capabilities of the classifier.
*
* @return the capabilities of this classifier
*/
public Capabilities getCapabilities() {
Capabilities result = super.getCapabilities();
result.disableAll();
// class
if (!m_noClass) {
result.enable(Capability.NOMINAL_CLASS);
result.enable(Capability.MISSING_CLASS_VALUES);
} else {
result.enable(Capability.NO_CLASS);
}
// attributes
result.enable(Capability.NUMERIC_ATTRIBUTES);
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class.
*
* @param argv should contain a sequence of numeric values
*/
public static void main(String [] argv) {
try {
double delta = 0.5;
double xmean = 0;
double lower = 0;
double upper = 10;
Matrix covariance = new Matrix(2, 2);
covariance.set(0, 0, 2);
covariance.set(0, 1, -3);
covariance.set(1, 0, -4);
covariance.set(1, 1, 5);
if (argv.length > 0) {
covariance.set(0, 0, Double.valueOf(argv[0]).doubleValue());
}
if (argv.length > 1) {
covariance.set(0, 1, Double.valueOf(argv[1]).doubleValue());
}
if (argv.length > 2) {
covariance.set(1, 0, Double.valueOf(argv[2]).doubleValue());
}
if (argv.length > 3) {
covariance.set(1, 1, Double.valueOf(argv[3]).doubleValue());
}
if (argv.length > 4) {
delta = Double.valueOf(argv[4]).doubleValue();
}
if (argv.length > 5) {
xmean = Double.valueOf(argv[5]).doubleValue();
}
MahalanobisEstimator newEst = new MahalanobisEstimator(covariance,
delta, xmean);
if (argv.length > 6) {
lower = Double.valueOf(argv[6]).doubleValue();
if (argv.length > 7) {
upper = Double.valueOf(argv[7]).doubleValue();
}
double increment = (upper - lower) / 50;
for(double current = lower; current <= upper; current+= increment)
System.out.println(current + " " + newEst.getProbability(current));
} else {
System.out.println("Covariance Matrix\n" + covariance);
System.out.println(newEst);
}
} catch (Exception e) {
System.out.println(e.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/MultivariateEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* MultivariateEstimator.java
* Copyright (C) 2013 University of Waikato
*/
package weka.estimators;
/**
* Interface to Multivariate Distribution Estimation
*
 * @author Uday Kamath, PhD candidate, George Mason University
* @version $Revision$
*/
public interface MultivariateEstimator {
/**
   * Fits the density estimator to the given observations.
   *
   * @param value the observations, one row per instance
   * @param weight the weight of each observation
*/
void estimate(double[][] value, double[] weight);
/**
* Returns the natural logarithm of the density estimate at the given point.
*
* @param value the value at which to evaluate
* @return the natural logarithm of the density estimate at the given value
*/
double logDensity(double[] value);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/MultivariateGaussianEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
 * MultivariateGaussianEstimator.java
* Copyright (C) 2013 University of Waikato
*/
package weka.estimators;
import no.uib.cipr.matrix.*;
import no.uib.cipr.matrix.Matrix;
import weka.core.Utils;
import java.io.Serializable;
/**
 * Implementation of maximum likelihood estimation for a multivariate normal
 * distribution.
*
* @author Uday Kamath, PhD, George Mason University
* @author Eibe Frank, University of Waikato
* @version $Revision$
*
*/
public class MultivariateGaussianEstimator implements MultivariateEstimator, Serializable {
/** Mean vector */
protected DenseVector mean;
/** Inverse of covariance matrix */
protected UpperSPDDenseMatrix covarianceInverse;
/** Factor to make density integrate to one (log of this factor) */
protected double lnconstant;
/** Ridge parameter to add to diagonal of covariance matrix */
protected double m_Ridge = 1e-6;
/**
   * The natural logarithm of 2*pi: log(2*pi).
*/
public static final double Log2PI = Math.log(2 * Math.PI);
/**
* Returns string summarizing the estimator.
*/
public String toString() {
StringBuffer sb = new StringBuffer();
sb.append("Natural logarithm of normalizing factor: " + lnconstant + "\n\n");
sb.append("Mean vector:\n\n" + mean + "\n");
sb.append("Inverse of covariance matrix:\n\n" + covarianceInverse + "\n");
return sb.toString();
}
/**
* Returns the mean vector.
*/
public double[] getMean() {
return mean.getData();
}
/**
* Returns the log of the density value for the given vector.
*
* @param valuePassed input vector
* @return log density based on given distribution
*/
@Override
public double logDensity(double[] valuePassed) {
// calculate mean subtractions
Vector x = new DenseVector(valuePassed);
return lnconstant - 0.5 * x.dot(covarianceInverse.mult(x.add(-1.0, mean), new DenseVector(x.size())));
}
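  /*
   * Illustrative sketch (hypothetical helper, not part of the original class):
   * logDensity() above evaluates
   * lnconstant - 0.5 * (x - mean)^T * covarianceInverse * (x - mean).
   * The quadratic form written out with explicit loops:
   */
  private double squaredMahalanobisDistance(double[] valuePassed) {
    int n = valuePassed.length;
    double[] centered = new double[n];
    for (int i = 0; i < n; i++) {
      centered[i] = valuePassed[i] - mean.get(i); // subtract the mean vector
    }
    double sum = 0;
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < n; j++) {
        sum += centered[i] * covarianceInverse.get(i, j) * centered[j];
      }
    }
    return sum; // logDensity(x) equals lnconstant - 0.5 * this value
  }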
/**
* Generates the estimator based on the given observations and weight vector.
* Equal weights are assumed if the weight vector is null.
*/
@Override
public void estimate(double[][] observations, double[] weights) {
if (weights == null) {
weights = new double[observations.length];
for (int i = 0; i < weights.length; i++) {
weights[i] = 1.0;
}
}
DenseVector weightVector = new DenseVector(weights);
weightVector = weightVector.scale(1.0 / weightVector.norm(Vector.Norm.One));
mean = weightedMean(observations, weightVector);
Matrix cov = weightedCovariance(observations, weightVector, mean);
// Compute inverse of covariance matrix
DenseCholesky chol = new DenseCholesky(observations[0].length, true).factor((UpperSPDDenseMatrix)cov);
covarianceInverse = new UpperSPDDenseMatrix(chol.solve(Matrices.identity(observations[0].length)));
double logDeterminant = 0;
for (int i = 0; i < observations[0].length; i++) {
logDeterminant += Math.log(chol.getU().get(i, i));
}
logDeterminant *= 2;
lnconstant = -(Log2PI * observations[0].length + logDeterminant) * 0.5;
}
/**
   * Generates a pooled estimator for linear discriminant analysis based on the given groups of
   * observations and weight vectors. The pooled covariance matrix is the weighted mean
   * of the per-group covariance matrices. The pooled mean vector is the mean vector for all observations.
   *
   * @param observations the groups of observations, one group per class
   * @param weights the corresponding groups of observation weights
   * @return the per-group mean vectors
*/
public double[][] estimatePooled(double[][][] observations, double[][] weights) {
// Establish number of attributes and number of classes
int m = -1;
int c = observations.length;
for (int i = 0; i < observations.length; i++) {
if (observations[i].length > 0) {
m = observations[i][0].length;
}
}
if (m == -1) {
throw new IllegalArgumentException("Cannot compute pooled estimates with no data.");
}
// Compute per-group covariance matrices and mean vectors
Matrix[] groupCovariance = new Matrix[c];
DenseVector[] groupMean = new DenseVector[c];
double[] groupWeights = new double[c];
for (int i = 0; i < groupCovariance.length; i++) {
if (observations[i].length > 0) {
DenseVector weightVector = new DenseVector(weights[i]);
weightVector = weightVector.scale(1.0 / weightVector.norm(Vector.Norm.One));
groupMean[i] = weightedMean(observations[i], weightVector);
groupCovariance[i] = weightedCovariance(observations[i], weightVector, groupMean[i]);
groupWeights[i] = Utils.sum(weights[i]);
}
}
Utils.normalize(groupWeights);
// Pool covariance matrices and means
double[][] means = new double[c][];
Matrix cov = new UpperSPDDenseMatrix(m);
mean = new DenseVector(groupMean[0].size());
for (int i = 0; i < c; i++) {
if (observations[i].length > 0) {
cov = cov.add(groupWeights[i], groupCovariance[i]);
mean = (DenseVector) mean.add(groupWeights[i], groupMean[i]);
means[i] = groupMean[i].getData();
}
}
// Compute inverse of covariance matrix
DenseCholesky chol = new DenseCholesky(m, true).factor((UpperSPDDenseMatrix)cov);
covarianceInverse = new UpperSPDDenseMatrix(chol.solve(Matrices.identity(m)));
double logDeterminant = 0;
for (int i = 0; i < m; i++) {
logDeterminant += Math.log(chol.getU().get(i, i));
}
logDeterminant *= 2;
lnconstant = -(Log2PI * m + logDeterminant) * 0.5;
return means;
}
/**
* Computes the mean vector
* @param matrix the data (assumed to contain at least one row)
* @param weights the observation weights, normalized to sum to 1.
* @return the weighted mean
*/
private DenseVector weightedMean(double[][] matrix, DenseVector weights) {
return (DenseVector)new DenseMatrix(matrix).transMult(weights, new DenseVector(matrix[0].length));
}
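  /*
   * Illustrative sketch (hypothetical helper, not part of the original class):
   * weightedMean() computes mean_j = sum_i weights_i * matrix[i][j] as a
   * matrix-vector product; the equivalent explicit loops are:
   */
  private static double[] weightedMeanNaive(double[][] matrix, double[] normalizedWeights) {
    double[] result = new double[matrix[0].length];
    for (int i = 0; i < matrix.length; i++) {
      for (int j = 0; j < result.length; j++) {
        result[j] += normalizedWeights[i] * matrix[i][j];
      }
    }
    return result;
  }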
/**
* Computes the estimate of the covariance matrix.
*
* @param matrix A multi-dimensional array containing the matrix values (assumed to contain at least one row).
* @param weights The observation weights, normalized to sum to 1.
* @param mean The values' mean vector.
* @return The covariance matrix, including the ridge.
*/
private UpperSPDDenseMatrix weightedCovariance(double[][] matrix, DenseVector weights, Vector mean) {
int rows = matrix.length;
int cols = matrix[0].length;
if (mean.size() != cols) {
throw new IllegalArgumentException("Length of the mean vector must match matrix.");
}
// Create matrix with centered transposed data, weighted appropriately
DenseMatrix transposed = new DenseMatrix(cols, rows);
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
transposed.set(j, i, Math.sqrt(weights.get(i)) * (matrix[i][j] - mean.get(j)));
}
}
UpperSPDDenseMatrix covT = (UpperSPDDenseMatrix) new UpperSPDDenseMatrix(cols).rank1(transposed);
for (int i = 0; i < cols; i++) {
covT.add(i, i, m_Ridge);
}
return covT;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String ridgeTipText() {
return "The value of the ridge parameter.";
}
/**
* Get the value of Ridge.
*
* @return Value of Ridge.
*/
public double getRidge() {
return m_Ridge;
}
/**
* Set the value of Ridge.
*
* @param newRidge Value to assign to Ridge.
*/
public void setRidge(double newRidge) {
m_Ridge = newRidge;
}
/**
* Main method for testing this class.
* @param args command-line parameters
*/
public static void main(String[] args) {
double[][] dataset1 = new double[4][1];
dataset1[0][0] = 0.49;
dataset1[1][0] = 0.46;
dataset1[2][0] = 0.51;
dataset1[3][0] = 0.55;
MultivariateEstimator mv1 = new MultivariateGaussianEstimator();
mv1.estimate(dataset1, new double[]{0.7, 0.2, 0.05, 0.05});
System.err.println(mv1);
double integral1 = 0;
int numVals = 1000;
for (int i = 0; i < numVals; i++) {
double[] point = new double[1];
point[0] = (i + 0.5) * (1.0 / numVals);
double logdens = mv1.logDensity(point);
if (!Double.isNaN(logdens)) {
integral1 += Math.exp(logdens) * (1.0 / numVals);
}
}
System.err.println("Approximate integral: " + integral1);
double[][] dataset = new double[4][3];
dataset[0][0] = 0.49;
dataset[0][1] = 0.51;
dataset[0][2] = 0.53;
dataset[1][0] = 0.46;
dataset[1][1] = 0.47;
dataset[1][2] = 0.52;
dataset[2][0] = 0.51;
dataset[2][1] = 0.49;
dataset[2][2] = 0.47;
dataset[3][0] = 0.55;
dataset[3][1] = 0.52;
dataset[3][2] = 0.54;
MultivariateEstimator mv = new MultivariateGaussianEstimator();
mv.estimate(dataset, new double[]{2, 0.2, 0.05, 0.05});
System.err.println(mv);
double integral = 0;
int numVals2 = 200;
for (int i = 0; i < numVals2; i++) {
for (int j = 0; j < numVals2; j++) {
for (int k = 0; k < numVals2; k++) {
double[] point = new double[3];
point[0] = (i + 0.5) * (1.0 / numVals2);
point[1] = (j + 0.5) * (1.0 / numVals2);
point[2] = (k + 0.5) * (1.0 / numVals2);
double logdens = mv.logDensity(point);
if (!Double.isNaN(logdens)) {
integral += Math.exp(logdens) / (numVals2 * numVals2 * numVals2);
}
}
}
}
System.err.println("Approximate integral: " + integral);
double[][] dataset3 = new double[5][3];
dataset3[0][0] = 0.49;
dataset3[0][1] = 0.51;
dataset3[0][2] = 0.53;
dataset3[4][0] = 0.49;
dataset3[4][1] = 0.51;
dataset3[4][2] = 0.53;
dataset3[1][0] = 0.46;
dataset3[1][1] = 0.47;
dataset3[1][2] = 0.52;
dataset3[2][0] = 0.51;
dataset3[2][1] = 0.49;
dataset3[2][2] = 0.47;
dataset3[3][0] = 0.55;
dataset3[3][1] = 0.52;
dataset3[3][2] = 0.54;
MultivariateEstimator mv3 = new MultivariateGaussianEstimator();
mv3.estimate(dataset3, new double[]{1, 0.2, 0.05, 0.05, 1});
System.err.println(mv3);
double integral3 = 0;
int numVals3 = 200;
for (int i = 0; i < numVals3; i++) {
for (int j = 0; j < numVals3; j++) {
for (int k = 0; k < numVals3; k++) {
double[] point = new double[3];
point[0] = (i + 0.5) * (1.0 / numVals3);
point[1] = (j + 0.5) * (1.0 / numVals3);
point[2] = (k + 0.5) * (1.0 / numVals3);
          double logdens = mv3.logDensity(point);
if (!Double.isNaN(logdens)) {
integral3 += Math.exp(logdens) / (numVals3 * numVals3 * numVals3);
}
}
}
}
System.err.println("Approximate integral: " + integral3);
double[][][] dataset4 = new double[2][][];
dataset4[0] = new double[2][3];
dataset4[1] = new double[3][3];
dataset4[0][0][0] = 0.49;
dataset4[0][0][1] = 0.51;
dataset4[0][0][2] = 0.53;
dataset4[0][1][0] = 0.49;
dataset4[0][1][1] = 0.51;
dataset4[0][1][2] = 0.53;
dataset4[1][0][0] = 0.46;
dataset4[1][0][1] = 0.47;
dataset4[1][0][2] = 0.52;
dataset4[1][1][0] = 0.51;
dataset4[1][1][1] = 0.49;
dataset4[1][1][2] = 0.47;
dataset4[1][2][0] = 0.55;
dataset4[1][2][1] = 0.52;
dataset4[1][2][2] = 0.54;
double[][] weights = new double[2][];
weights[0] = new double[] {1, 3};
weights[1] = new double[] {2, 1, 1};
MultivariateGaussianEstimator mv4 = new MultivariateGaussianEstimator();
mv4.estimatePooled(dataset4, weights);
System.err.println(mv4);
double integral4 = 0;
int numVals4 = 200;
for (int i = 0; i < numVals4; i++) {
for (int j = 0; j < numVals4; j++) {
for (int k = 0; k < numVals4; k++) {
double[] point = new double[3];
point[0] = (i + 0.5) * (1.0 / numVals4);
point[1] = (j + 0.5) * (1.0 / numVals4);
point[2] = (k + 0.5) * (1.0 / numVals4);
          double logdens = mv4.logDensity(point);
if (!Double.isNaN(logdens)) {
integral4 += Math.exp(logdens) / (numVals4 * numVals4 * numVals4);
}
}
}
}
System.err.println("Approximate integral: " + integral4);
double[][][] dataset5 = new double[2][][];
dataset5[0] = new double[4][3];
dataset5[1] = new double[4][3];
dataset5[0][0][0] = 0.49;
dataset5[0][0][1] = 0.51;
dataset5[0][0][2] = 0.53;
dataset5[0][1][0] = 0.49;
dataset5[0][1][1] = 0.51;
dataset5[0][1][2] = 0.53;
dataset5[0][2][0] = 0.49;
dataset5[0][2][1] = 0.51;
dataset5[0][2][2] = 0.53;
dataset5[0][3][0] = 0.49;
dataset5[0][3][1] = 0.51;
dataset5[0][3][2] = 0.53;
dataset5[1][0][0] = 0.46;
dataset5[1][0][1] = 0.47;
dataset5[1][0][2] = 0.52;
dataset5[1][1][0] = 0.46;
dataset5[1][1][1] = 0.47;
dataset5[1][1][2] = 0.52;
dataset5[1][2][0] = 0.51;
dataset5[1][2][1] = 0.49;
dataset5[1][2][2] = 0.47;
dataset5[1][3][0] = 0.55;
dataset5[1][3][1] = 0.52;
dataset5[1][3][2] = 0.54;
double[][] weights2 = new double[2][];
weights2[0] = new double[] {1, 1, 1, 1};
weights2[1] = new double[] {1, 1, 1, 1};
MultivariateGaussianEstimator mv5 = new MultivariateGaussianEstimator();
mv5.estimatePooled(dataset5, weights2);
System.err.println(mv5);
double integral5 = 0;
int numVals5 = 200;
for (int i = 0; i < numVals5; i++) {
for (int j = 0; j < numVals5; j++) {
for (int k = 0; k < numVals5; k++) {
double[] point = new double[3];
point[0] = (i + 0.5) * (1.0 / numVals5);
point[1] = (j + 0.5) * (1.0 / numVals5);
point[2] = (k + 0.5) * (1.0 / numVals5);
          double logdens = mv5.logDensity(point);
if (!Double.isNaN(logdens)) {
integral5 += Math.exp(logdens) / (numVals5 * numVals5 * numVals5);
}
}
}
}
System.err.println("Approximate integral: " + integral5);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/NDConditionalEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NDConditionalEstimator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import weka.core.RevisionUtils;
/**
* Conditional probability estimator for a numeric domain conditional upon
* a discrete domain (utilises separate normal estimators for each discrete
* conditioning value).
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class NDConditionalEstimator implements ConditionalEstimator {
/** Hold the sub-estimators */
private NormalEstimator [] m_Estimators;
/**
* Constructor
*
* @param numCondSymbols the number of conditioning symbols
* @param precision the precision to which numeric values are given. For
* example, if the precision is stated to be 0.1, the values in the
* interval (0.25,0.35] are all treated as 0.3.
*/
public NDConditionalEstimator(int numCondSymbols, double precision) {
m_Estimators = new NormalEstimator [numCondSymbols];
for(int i = 0; i < numCondSymbols; i++) {
m_Estimators[i] = new NormalEstimator(precision);
}
}
/**
* Add a new data value to the current estimator.
*
* @param data the new data value
* @param given the new value that data is conditional upon
* @param weight the weight assigned to the data value
*/
public void addValue(double data, double given, double weight) {
m_Estimators[(int)given].addValue(data, weight);
}
/**
* Get a probability estimator for a value
*
* @param given the new value that data is conditional upon
* @return the estimator for the supplied value given the condition
*/
public Estimator getEstimator(double given) {
return m_Estimators[(int)given];
}
/**
* Get a probability estimate for a value
*
* @param data the value to estimate the probability of
* @param given the new value that data is conditional upon
* @return the estimated probability of the supplied value
*/
public double getProbability(double data, double given) {
return getEstimator(given).getProbability(data);
}
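  /*
   * Illustrative sketch (hypothetical usage, not part of the original class):
   * one NormalEstimator is kept per discrete conditioning symbol, so
   * getProbability(data, given) simply delegates to the estimator for that
   * symbol.
   */
  private static double exampleConditionalProbability() {
    NDConditionalEstimator est = new NDConditionalEstimator(2, 0.1);
    est.addValue(1.0, 0, 1); // value 1.0 observed under conditioning symbol 0
    est.addValue(5.0, 1, 1); // value 5.0 observed under conditioning symbol 1
    return est.getProbability(1.0, 0); // uses the normal estimator for symbol 0
  }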
/**
* Display a representation of this estimator
*/
public String toString() {
String result = "ND Conditional Estimator. "
+ m_Estimators.length + " sub-estimators:\n";
for(int i = 0; i < m_Estimators.length; i++) {
result += "Sub-estimator " + i + ": " + m_Estimators[i];
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class.
*
* @param argv should contain a sequence of pairs of integers which
* will be treated as numeric, symbolic.
*/
public static void main(String [] argv) {
try {
if (argv.length == 0) {
System.out.println("Please specify a set of instances.");
return;
}
int currentA = Integer.parseInt(argv[0]);
int maxA = currentA;
int currentB = Integer.parseInt(argv[1]);
int maxB = currentB;
for(int i = 2; i < argv.length - 1; i += 2) {
currentA = Integer.parseInt(argv[i]);
currentB = Integer.parseInt(argv[i + 1]);
if (currentA > maxA) {
maxA = currentA;
}
if (currentB > maxB) {
maxB = currentB;
}
}
NDConditionalEstimator newEst = new NDConditionalEstimator(maxB + 1,
1);
for(int i = 0; i < argv.length - 1; i += 2) {
currentA = Integer.parseInt(argv[i]);
currentB = Integer.parseInt(argv[i + 1]);
System.out.println(newEst);
System.out.println("Prediction for " + currentA + '|' + currentB
+ " = "
+ newEst.getProbability(currentA, currentB));
newEst.addValue(currentA, currentB, 1);
}
} catch (Exception e) {
System.out.println(e.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/NNConditionalEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NNConditionalEstimator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import java.util.Random;
import java.util.Vector;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.matrix.Matrix;
/**
* Conditional probability estimator for a numeric domain conditional upon a
* numeric domain (using Mahalanobis distance).
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class NNConditionalEstimator implements ConditionalEstimator {
/** Vector containing all of the values seen */
private final Vector<Double> m_Values = new Vector<Double>();
/** Vector containing all of the conditioning values seen */
private final Vector<Double> m_CondValues = new Vector<Double>();
/** Vector containing the associated weights */
private final Vector<Double> m_Weights = new Vector<Double>();
/** The sum of the weights so far */
private double m_SumOfWeights;
/** Current Conditional mean */
private double m_CondMean;
/** Current Values mean */
private double m_ValueMean;
/** Current covariance matrix */
private Matrix m_Covariance;
// ===============
// Private methods
// ===============
/**
* Execute a binary search to locate the nearest data value
*
   * @param key the conditioning value to locate
   * @param secondaryKey the data value used to break ties between equal conditioning values
* @return the index of the nearest data value
*/
private int findNearestPair(double key, double secondaryKey) {
int low = 0;
int high = m_CondValues.size();
int middle = 0;
while (low < high) {
middle = (low + high) / 2;
double current = m_CondValues.elementAt(middle).doubleValue();
if (current == key) {
double secondary = m_Values.elementAt(middle).doubleValue();
if (secondary == secondaryKey) {
return middle;
}
if (secondary > secondaryKey) {
high = middle;
} else if (secondary < secondaryKey) {
low = middle + 1;
}
}
if (current > key) {
high = middle;
} else if (current < key) {
low = middle + 1;
}
}
return low;
}
/** Calculate covariance and value means */
private void calculateCovariance() {
double sumValues = 0, sumConds = 0;
for (int i = 0; i < m_Values.size(); i++) {
sumValues += m_Values.elementAt(i).doubleValue()
* m_Weights.elementAt(i).doubleValue();
sumConds += m_CondValues.elementAt(i).doubleValue()
* m_Weights.elementAt(i).doubleValue();
}
m_ValueMean = sumValues / m_SumOfWeights;
m_CondMean = sumConds / m_SumOfWeights;
double c00 = 0, c01 = 0, c10 = 0, c11 = 0;
for (int i = 0; i < m_Values.size(); i++) {
double x = m_Values.elementAt(i).doubleValue();
double y = m_CondValues.elementAt(i).doubleValue();
double weight = m_Weights.elementAt(i).doubleValue();
c00 += (x - m_ValueMean) * (x - m_ValueMean) * weight;
c01 += (x - m_ValueMean) * (y - m_CondMean) * weight;
c11 += (y - m_CondMean) * (y - m_CondMean) * weight;
}
c00 /= (m_SumOfWeights - 1.0);
c01 /= (m_SumOfWeights - 1.0);
c10 = c01;
c11 /= (m_SumOfWeights - 1.0);
m_Covariance = new Matrix(2, 2);
m_Covariance.set(0, 0, c00);
m_Covariance.set(0, 1, c01);
m_Covariance.set(1, 0, c10);
m_Covariance.set(1, 1, c11);
}
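  /*
   * Illustrative sketch (hypothetical helper, not part of the original class):
   * calculateCovariance() builds the 2x2 sample covariance matrix of the
   * (value, conditioning value) pairs:
   *   [[ Cov(value, value), Cov(value, cond) ],
   *    [ Cov(cond, value),  Cov(cond, cond)  ]]
   * The Pearson correlation implied by that matrix, for reference, is:
   */
  private double impliedCorrelation() {
    if (m_Covariance == null) {
      calculateCovariance();
    }
    double c00 = m_Covariance.get(0, 0); // variance of the values
    double c11 = m_Covariance.get(1, 1); // variance of the conditioning values
    double c01 = m_Covariance.get(0, 1); // covariance between the two
    return c01 / Math.sqrt(c00 * c11);
  }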
/**
* Add a new data value to the current estimator.
*
* @param data the new data value
* @param given the new value that data is conditional upon
* @param weight the weight assigned to the data value
*/
@Override
public void addValue(double data, double given, double weight) {
int insertIndex = findNearestPair(given, data);
if ((m_Values.size() <= insertIndex)
|| (m_CondValues.elementAt(insertIndex).doubleValue() != given)
|| (m_Values.elementAt(insertIndex).doubleValue() != data)) {
m_CondValues.insertElementAt(new Double(given), insertIndex);
m_Values.insertElementAt(new Double(data), insertIndex);
m_Weights.insertElementAt(new Double(weight), insertIndex);
} else {
double newWeight = m_Weights.elementAt(insertIndex).doubleValue();
newWeight += weight;
m_Weights.setElementAt(new Double(newWeight), insertIndex);
}
m_SumOfWeights += weight;
// Invalidate any previously calculated covariance matrix
m_Covariance = null;
}
/**
* Get a probability estimator for a value
*
* @param given the new value that data is conditional upon
* @return the estimator for the supplied value given the condition
*/
@Override
public Estimator getEstimator(double given) {
if (m_Covariance == null) {
calculateCovariance();
}
Estimator result = new MahalanobisEstimator(m_Covariance, given
- m_CondMean, m_ValueMean);
return result;
}
/**
* Get a probability estimate for a value
*
* @param data the value to estimate the probability of
* @param given the new value that data is conditional upon
* @return the estimated probability of the supplied value
*/
@Override
public double getProbability(double data, double given) {
return getEstimator(given).getProbability(data);
}
/** Display a representation of this estimator */
@Override
public String toString() {
if (m_Covariance == null) {
calculateCovariance();
}
String result = "NN Conditional Estimator. " + m_CondValues.size()
+ " data points. Mean = " + Utils.doubleToString(m_ValueMean, 4, 2)
+ " Conditional mean = " + Utils.doubleToString(m_CondMean, 4, 2);
result += " Covariance Matrix: \n" + m_Covariance;
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class.
*
* @param argv should contain a sequence of numeric values
*/
public static void main(String[] argv) {
try {
int seed = 42;
if (argv.length > 0) {
seed = Integer.parseInt(argv[0]);
}
NNConditionalEstimator newEst = new NNConditionalEstimator();
      // Create numPoints random points and add them
Random r = new Random(seed);
int numPoints = 50;
if (argv.length > 2) {
numPoints = Integer.parseInt(argv[2]);
}
for (int i = 0; i < numPoints; i++) {
int x = Math.abs(r.nextInt() % 100);
int y = Math.abs(r.nextInt() % 100);
System.out.println("# " + x + " " + y);
newEst.addValue(x, y, 1);
}
// System.out.println(newEst);
int cond;
if (argv.length > 1) {
cond = Integer.parseInt(argv[1]);
} else {
cond = Math.abs(r.nextInt() % 100);
}
System.out.println("## Conditional = " + cond);
Estimator result = newEst.getEstimator(cond);
for (int i = 0; i <= 100; i += 5) {
System.out.println(" " + i + " " + result.getProbability(i));
}
} catch (Exception e) {
System.out.println(e.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/NormalEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* NormalEstimator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Aggregateable;
import weka.core.RevisionUtils;
import weka.core.Statistics;
import weka.core.Utils;
/**
* Simple probability estimator that places a single normal distribution over
* the observed values.
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class NormalEstimator extends Estimator implements IncrementalEstimator,
Aggregateable<NormalEstimator> {
/** for serialization */
private static final long serialVersionUID = 93584379632315841L;
/** The sum of the weights */
private double m_SumOfWeights;
/** The sum of the values seen */
private double m_SumOfValues;
/** The sum of the values squared */
private double m_SumOfValuesSq;
/** The current mean */
private double m_Mean;
/** The current standard deviation */
private double m_StandardDev;
/** The precision of numeric values ( = minimum std dev permitted) */
private double m_Precision;
/**
* Round a data value using the defined precision for this estimator
*
* @param data the value to round
* @return the rounded data value
*/
private double round(double data) {
return Math.rint(data / m_Precision) * m_Precision;
}
// ===============
// Public methods.
// ===============
/**
* Constructor that takes a precision argument.
*
* @param precision the precision to which numeric values are given. For
* example, if the precision is stated to be 0.1, the values in the
* interval (0.25,0.35] are all treated as 0.3.
*/
public NormalEstimator(double precision) {
m_Precision = precision;
// Allow at most 3 sd's within one interval
m_StandardDev = m_Precision / (2 * 3);
}
/**
* Add a new data value to the current estimator.
*
* @param data the new data value
* @param weight the weight assigned to the data value
*/
@Override
public void addValue(double data, double weight) {
if (weight == 0) {
return;
}
data = round(data);
m_SumOfWeights += weight;
m_SumOfValues += data * weight;
m_SumOfValuesSq += data * data * weight;
computeParameters();
}
/**
* Compute the parameters of the distribution
*/
protected void computeParameters() {
if (m_SumOfWeights > 0) {
m_Mean = m_SumOfValues / m_SumOfWeights;
double stdDev = Math.sqrt(Math.abs(m_SumOfValuesSq - m_Mean
* m_SumOfValues)
/ m_SumOfWeights);
// If the stdDev ~= 0, we really have no idea of scale yet,
// so stick with the default. Otherwise...
if (stdDev > 1e-10) {
m_StandardDev = Math.max(m_Precision / (2 * 3),
// allow at most 3sd's within one interval
stdDev);
}
}
}
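  /*
   * Illustrative sketch (hypothetical helper, not part of the original class):
   * computeParameters() works purely from the three sufficient statistics.
   * The variance it uses is the maximum-likelihood estimate E[x^2] - E[x]^2,
   * spelled out below.
   */
  private double varianceFromSufficientStatistics() {
    double mean = m_SumOfValues / m_SumOfWeights;             // E[x]
    double meanOfSquares = m_SumOfValuesSq / m_SumOfWeights;  // E[x^2]
    return Math.abs(meanOfSquares - mean * mean); // abs() guards against rounding error
  }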
/**
* Get a probability estimate for a value
*
* @param data the value to estimate the probability of
* @return the estimated probability of the supplied value
*/
@Override
public double getProbability(double data) {
data = round(data);
double zLower = (data - m_Mean - (m_Precision / 2)) / m_StandardDev;
double zUpper = (data - m_Mean + (m_Precision / 2)) / m_StandardDev;
double pLower = Statistics.normalProbability(zLower);
double pUpper = Statistics.normalProbability(zUpper);
return pUpper - pLower;
}
/**
* Display a representation of this estimator
*/
@Override
public String toString() {
return "Normal Distribution. Mean = " + Utils.doubleToString(m_Mean, 4)
+ " StandardDev = " + Utils.doubleToString(m_StandardDev, 4)
+ " WeightSum = " + Utils.doubleToString(m_SumOfWeights, 4)
+ " Precision = " + m_Precision + "\n";
}
/**
* Returns default capabilities of the classifier.
*
* @return the capabilities of this classifier
*/
@Override
public Capabilities getCapabilities() {
Capabilities result = super.getCapabilities();
result.disableAll();
// class
if (!m_noClass) {
result.enable(Capability.NOMINAL_CLASS);
result.enable(Capability.MISSING_CLASS_VALUES);
} else {
result.enable(Capability.NO_CLASS);
}
// attributes
result.enable(Capability.NUMERIC_ATTRIBUTES);
return result;
}
/**
* Return the value of the mean of this normal estimator.
*
* @return the mean
*/
public double getMean() {
return m_Mean;
}
/**
* Return the value of the standard deviation of this normal estimator.
*
* @return the standard deviation
*/
public double getStdDev() {
return m_StandardDev;
}
/**
* Return the value of the precision of this normal estimator.
*
* @return the precision
*/
public double getPrecision() {
return m_Precision;
}
/**
* Return the sum of the weights for this normal estimator.
*
* @return the sum of the weights
*/
public double getSumOfWeights() {
return m_SumOfWeights;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
@Override
public NormalEstimator aggregate(NormalEstimator toAggregate)
throws Exception {
m_SumOfWeights += toAggregate.m_SumOfWeights;
m_SumOfValues += toAggregate.m_SumOfValues;
m_SumOfValuesSq += toAggregate.m_SumOfValuesSq;
if (toAggregate.m_Precision < m_Precision) {
m_Precision = toAggregate.m_Precision;
}
computeParameters();
return this;
}
@Override
public void finalizeAggregation() throws Exception {
// nothing to do
}
public static void testAggregation() {
NormalEstimator ne = new NormalEstimator(0.01);
NormalEstimator one = new NormalEstimator(0.01);
NormalEstimator two = new NormalEstimator(0.01);
java.util.Random r = new java.util.Random(1);
for (int i = 0; i < 100; i++) {
double z = r.nextDouble();
ne.addValue(z, 1);
if (i < 50) {
one.addValue(z, 1);
} else {
two.addValue(z, 1);
}
}
try {
System.out.println("\n\nFull\n");
System.out.println(ne.toString());
System.out.println("Prob (0): " + ne.getProbability(0));
System.out.println("\nOne\n" + one.toString());
System.out.println("Prob (0): " + one.getProbability(0));
System.out.println("\nTwo\n" + two.toString());
System.out.println("Prob (0): " + two.getProbability(0));
one = one.aggregate(two);
System.out.println("\nAggregated\n");
System.out.println(one.toString());
System.out.println("Prob (0): " + one.getProbability(0));
} catch (Exception ex) {
ex.printStackTrace();
}
}
/**
* Main method for testing this class.
*
* @param argv should contain a sequence of numeric values
*/
public static void main(String[] argv) {
try {
if (argv.length == 0) {
System.out.println("Please specify a set of instances.");
return;
}
NormalEstimator newEst = new NormalEstimator(0.01);
for (int i = 0; i < argv.length; i++) {
double current = Double.valueOf(argv[i]).doubleValue();
System.out.println(newEst);
System.out.println("Prediction for " + current + " = "
+ newEst.getProbability(current));
newEst.addValue(current, 1);
}
NormalEstimator.testAggregation();
} catch (Exception e) {
System.out.println(e.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/PoissonEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* PoissonEstimator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Simple probability estimator that places a single Poisson distribution
* over the observed values.
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class PoissonEstimator
extends Estimator
implements IncrementalEstimator {
/** for serialization */
private static final long serialVersionUID = 7669362595289236662L;
/** The number of values seen */
private double m_NumValues;
/** The sum of the values seen */
private double m_SumOfValues;
/**
* The average number of times
* an event occurs in an interval.
*/
private double m_Lambda;
/**
* Calculates the log factorial of a number.
*
* @param x input number.
* @return log factorial of x.
*/
private double logFac(double x) {
double result = 0;
for (double i = 2; i <= x; i++) {
result += Math.log(i);
}
return result;
}
/**
* Returns value for Poisson distribution
*
* @param x the argument to the kernel function
* @return the value for a Poisson kernel
*/
private double Poisson(double x) {
return Math.exp(-m_Lambda + (x * Math.log(m_Lambda)) - logFac(x));
}
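  /*
   * Illustrative sketch (hypothetical helper, not part of the original class):
   * Poisson(x) above evaluates the probability mass function
   * P(X = x) = lambda^x * e^(-lambda) / x! in log space to avoid overflow of
   * the factorial. The direct form, usable only for small integer x, would be:
   */
  private double poissonDirect(int x) {
    double factorial = 1;
    for (int i = 2; i <= x; i++) {
      factorial *= i; // overflows quickly; hence the log-space version above
    }
    return Math.pow(m_Lambda, x) * Math.exp(-m_Lambda) / factorial;
  }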
/**
* Add a new data value to the current estimator.
*
* @param data the new data value
* @param weight the weight assigned to the data value
*/
public void addValue(double data, double weight) {
m_NumValues += weight;
m_SumOfValues += data * weight;
if (m_NumValues != 0) {
m_Lambda = m_SumOfValues / m_NumValues;
}
}
/**
* Get a probability estimate for a value
*
* @param data the value to estimate the probability of
* @return the estimated probability of the supplied value
*/
public double getProbability(double data) {
return Poisson(data);
}
/** Display a representation of this estimator */
public String toString() {
return "Poisson Lambda = " + Utils.doubleToString(m_Lambda, 4, 2) + "\n";
}
/**
* Returns default capabilities of the classifier.
*
* @return the capabilities of this classifier
*/
public Capabilities getCapabilities() {
Capabilities result = super.getCapabilities();
result.disableAll();
// class
if (!m_noClass) {
result.enable(Capability.NOMINAL_CLASS);
result.enable(Capability.MISSING_CLASS_VALUES);
} else {
result.enable(Capability.NO_CLASS);
}
// attributes
result.enable(Capability.NUMERIC_ATTRIBUTES);
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class.
*
* @param argv should contain a sequence of numeric values
*/
public static void main(String [] argv) {
try {
if (argv.length == 0) {
System.out.println("Please specify a set of instances.");
return;
}
PoissonEstimator newEst = new PoissonEstimator();
for(int i = 0; i < argv.length; i++) {
double current = Double.valueOf(argv[i]).doubleValue();
System.out.println(newEst);
System.out.println("Prediction for " + current
+ " = " + newEst.getProbability(current));
newEst.addValue(current, 1);
}
} catch (Exception e) {
System.out.println(e.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/UnivariateDensityEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* UnivariateDensityEstimator.java
* Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import weka.core.RevisionHandler;
/**
* Interface that can be implemented by simple weighted univariate
* density estimators.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public interface UnivariateDensityEstimator extends RevisionHandler {
/**
* Adds a value to the density estimator.
*
* @param value the value to add
* @param weight the weight of the value
*/
void addValue(double value, double weight);
/**
* Returns the natural logarithm of the density estimate at the given
* point.
*
* @param value the value at which to evaluate
* @return the natural logarithm of the density estimate at the given
* value
*/
double logDensity(double value);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/UnivariateEqualFrequencyHistogramEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
 * UnivariateEqualFrequencyHistogramEstimator.java
* Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.TreeMap;
import weka.core.RevisionUtils;
import weka.core.Statistics;
import weka.core.Utils;
/**
* Simple histogram density estimator. Uses equal-frequency histograms based on
* the specified number of bins (default: 10).
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class UnivariateEqualFrequencyHistogramEstimator implements UnivariateDensityEstimator, UnivariateIntervalEstimator, UnivariateQuantileEstimator, Serializable {
/** For serialization */
private static final long serialVersionUID = -3180287591539683137L;
/** The collection used to store the weighted values. */
protected TreeMap<Double, Double> m_TM = new TreeMap<Double, Double>();
/** The interval boundaries. */
protected double[] m_Boundaries = null;
/** The weight of each interval. */
protected double[] m_Weights = null;
/** The weighted sum of values */
protected double m_WeightedSum = 0;
/** The weighted sum of squared values */
protected double m_WeightedSumSquared = 0;
/** The total sum of weights. */
protected double m_SumOfWeights = 0;
/** The number of bins to use. */
protected int m_NumBins = 10;
/** The current bandwidth (only computed when needed) */
protected double m_Width = Double.MAX_VALUE;
/** The exponent to use in computation of bandwidth (default: -0.25) */
protected double m_Exponent = -0.25;
/** The minimum allowed value of the kernel width (default: 1.0E-6) */
protected double m_MinWidth = 1.0E-6;
/** Constant for Gaussian density. */
public static final double CONST = -0.5 * Math.log(2 * Math.PI);
/** The number of intervals used to approximate prediction interval. */
protected int m_NumIntervals = 1000;
/** Whether boundaries are updated or only weights. */
protected boolean m_UpdateWeightsOnly = false;
/**
* Returns a string describing the estimator.
*/
public String globalInfo() {
return "Provides a univariate histogram estimator based on equal-frequency bins.";
}
/**
* Gets the number of bins
*
* @return the number of bins.
*/
public int getNumBins() {
return this.m_NumBins;
}
/**
* Sets the number of bins
*
* @param numBins the number of bins
*/
public void setNumBins(final int numBins) {
this.m_NumBins = numBins;
}
/**
* Triggers construction of estimator based on current data and then
* initializes the statistics.
*/
public void initializeStatistics() {
this.updateBoundariesAndOrWeights();
this.m_TM = new TreeMap<Double, Double>();
this.m_WeightedSum = 0;
this.m_WeightedSumSquared = 0;
this.m_SumOfWeights = 0;
this.m_Weights = null;
}
/**
   * Sets whether only the weights should be updated.
   *
   * @param flag true if only the weights should be updated
*/
public void setUpdateWeightsOnly(final boolean flag) {
this.m_UpdateWeightsOnly = flag;
}
/**
   * Gets whether only the weights should be updated.
   *
   * @return true if only the weights are updated
*/
public boolean getUpdateWeightsOnly() {
return this.m_UpdateWeightsOnly;
}
/**
* Adds a value to the density estimator.
*
* @param value the value to add
* @param weight the weight of the value
*/
@Override
public void addValue(final double value, final double weight) {
// Add data point to collection
this.m_WeightedSum += value * weight;
this.m_WeightedSumSquared += value * value * weight;
this.m_SumOfWeights += weight;
if (this.m_TM.get(value) == null) {
this.m_TM.put(value, weight);
} else {
this.m_TM.put(value, this.m_TM.get(value) + weight);
}
// Make sure estimator is updated
if (!this.getUpdateWeightsOnly()) {
this.m_Boundaries = null;
}
this.m_Weights = null;
}
/**
* Updates the boundaries and/or the weights if necessary.
*/
protected void updateBoundariesAndOrWeights() {
// Do we need to update?
if (this.m_Weights != null) {
return;
}
// Update widths for cases that are out of bounds,
// using same code as in kernel estimator
// First, compute variance for scaling
double mean = this.m_WeightedSum / this.m_SumOfWeights;
double variance = this.m_WeightedSumSquared / this.m_SumOfWeights - mean * mean;
if (variance < 0) {
variance = 0;
}
// Compute kernel bandwidth
this.m_Width = Math.sqrt(variance) * Math.pow(this.m_SumOfWeights, this.m_Exponent);
if (this.m_Width <= this.m_MinWidth) {
this.m_Width = this.m_MinWidth;
}
// Do we need to update weights only
if (this.getUpdateWeightsOnly()) {
this.updateWeightsOnly();
} else {
this.updateBoundariesAndWeights();
}
}
/**
* Updates the weights only.
*/
protected void updateWeightsOnly() throws IllegalArgumentException {
// Get values and keys from tree map
Iterator<Map.Entry<Double, Double>> itr = this.m_TM.entrySet().iterator();
int j = 1;
this.m_Weights = new double[this.m_Boundaries.length - 1];
while (itr.hasNext()) {
Map.Entry<Double, Double> entry = itr.next();
double value = entry.getKey();
double weight = entry.getValue();
if ((value < this.m_Boundaries[0]) || (value > this.m_Boundaries[this.m_Boundaries.length - 1])) {
throw new IllegalArgumentException("Out-of-range value during weight update");
}
while (value > this.m_Boundaries[j]) {
j++;
}
this.m_Weights[j - 1] += weight;
}
}
/**
* Updates the boundaries and weights.
*/
protected void updateBoundariesAndWeights() {
// Get values and keys from tree map
double[] values = new double[this.m_TM.size()];
double[] weights = new double[this.m_TM.size()];
Iterator<Map.Entry<Double, Double>> itr = this.m_TM.entrySet().iterator();
int j = 0;
while (itr.hasNext()) {
Map.Entry<Double, Double> entry = itr.next();
values[j] = entry.getKey();
weights[j] = entry.getValue();
j++;
}
double freq = this.m_SumOfWeights / this.m_NumBins;
double[] cutPoints = new double[this.m_NumBins - 1];
double[] binWeights = new double[this.m_NumBins];
double sumOfWeights = this.m_SumOfWeights;
// Compute break points
double weightSumSoFar = 0, lastWeightSum = 0;
int cpindex = 0, lastIndex = -1;
for (int i = 0; i < values.length - 1; i++) {
// Update weight statistics
weightSumSoFar += weights[i];
sumOfWeights -= weights[i];
// Have we passed the ideal size?
if (weightSumSoFar >= freq) {
// Is this break point worse than the last one?
if (((freq - lastWeightSum) < (weightSumSoFar - freq)) && (lastIndex != -1)) {
cutPoints[cpindex] = (values[lastIndex] + values[lastIndex + 1]) / 2;
weightSumSoFar -= lastWeightSum;
binWeights[cpindex] = lastWeightSum;
lastWeightSum = weightSumSoFar;
lastIndex = i;
} else {
cutPoints[cpindex] = (values[i] + values[i + 1]) / 2;
binWeights[cpindex] = weightSumSoFar;
weightSumSoFar = 0;
lastWeightSum = 0;
lastIndex = -1;
}
cpindex++;
freq = (sumOfWeights + weightSumSoFar) / ((cutPoints.length + 1) - cpindex);
} else {
lastIndex = i;
lastWeightSum = weightSumSoFar;
}
}
// Check whether there was another possibility for a cut point
if ((cpindex < cutPoints.length) && (lastIndex != -1)) {
cutPoints[cpindex] = (values[lastIndex] + values[lastIndex + 1]) / 2;
binWeights[cpindex] = lastWeightSum;
cpindex++;
binWeights[cpindex] = weightSumSoFar - lastWeightSum;
} else {
binWeights[cpindex] = weightSumSoFar;
}
// Did we find any cutpoints?
if (cpindex == 0) {
this.m_Boundaries = null;
this.m_Weights = null;
} else {
// Need to add weight of last data point to right-most bin
binWeights[cpindex] += weights[values.length - 1];
// Copy over boundaries and weights
this.m_Boundaries = new double[cpindex + 2];
this.m_Boundaries[0] = this.m_TM.firstKey();
this.m_Boundaries[cpindex + 1] = this.m_TM.lastKey();
System.arraycopy(cutPoints, 0, this.m_Boundaries, 1, cpindex);
this.m_Weights = new double[cpindex + 1];
System.arraycopy(binWeights, 0, this.m_Weights, 0, cpindex + 1);
}
}
/**
* Returns the interval for the given confidence value.
*
* @param conf the confidence value in the interval [0, 1]
* @return the interval
* @throws InterruptedException
*/
@Override
public double[][] predictIntervals(final double conf) throws InterruptedException {
// Update the bandwidth
this.updateBoundariesAndOrWeights();
// Compute minimum and maximum value, and delta
double val = Statistics.normalInverse(1.0 - (1.0 - conf) / 2);
double min = this.m_TM.firstKey() - val * this.m_Width;
double max = this.m_TM.lastKey() + val * this.m_Width;
double delta = (max - min) / this.m_NumIntervals;
// Create array with estimated probabilities
double[] probabilities = new double[this.m_NumIntervals];
double leftVal = Math.exp(this.logDensity(min));
for (int i = 0; i < this.m_NumIntervals; i++) {
double rightVal = Math.exp(this.logDensity(min + (i + 1) * delta));
probabilities[i] = 0.5 * (leftVal + rightVal) * delta;
leftVal = rightVal;
}
// Sort array based on area of bin estimates
int[] sortedIndices = Utils.sort(probabilities);
// Mark the intervals to use
double sum = 0;
boolean[] toUse = new boolean[probabilities.length];
int k = 0;
while ((sum < conf) && (k < toUse.length)) {
toUse[sortedIndices[toUse.length - (k + 1)]] = true;
sum += probabilities[sortedIndices[toUse.length - (k + 1)]];
k++;
}
// Don't need probabilities anymore
probabilities = null;
// Create final list of intervals
ArrayList<double[]> intervals = new ArrayList<double[]>();
// The current interval
double[] interval = null;
// Iterate through kernels
boolean haveStartedInterval = false;
for (int i = 0; i < this.m_NumIntervals; i++) {
// Should the current bin be used?
if (toUse[i]) {
// Do we need to create a new interval?
if (haveStartedInterval == false) {
haveStartedInterval = true;
interval = new double[2];
interval[0] = min + i * delta;
}
// Regardless, we should update the upper boundary
interval[1] = min + (i + 1) * delta;
} else {
// We need to finalize and store the last interval
// if necessary.
if (haveStartedInterval) {
haveStartedInterval = false;
intervals.add(interval);
}
}
}
// Add last interval if there is one
if (haveStartedInterval) {
intervals.add(interval);
}
return intervals.toArray(new double[0][0]);
}
/**
* Returns the quantile for the given percentage.
*
* @param percentage the percentage
* @return the quantile
*/
@Override
public double predictQuantile(final double percentage) {
// Update the bandwidth
this.updateBoundariesAndOrWeights();
// Compute minimum and maximum value, and delta
double val = Statistics.normalInverse(1.0 - (1.0 - 0.95) / 2);
double min = this.m_TM.firstKey() - val * this.m_Width;
double max = this.m_TM.lastKey() + val * this.m_Width;
double delta = (max - min) / this.m_NumIntervals;
double sum = 0;
double leftVal = Math.exp(this.logDensity(min));
for (int i = 0; i < this.m_NumIntervals; i++) {
if (sum >= percentage) {
return min + i * delta;
}
double rightVal = Math.exp(this.logDensity(min + (i + 1) * delta));
sum += 0.5 * (leftVal + rightVal) * delta;
leftVal = rightVal;
}
return max;
}
/**
* Returns the natural logarithm of the density estimate at the given point.
*
* @param value the value at which to evaluate
* @return the natural logarithm of the density estimate at the given value
*/
@Override
public double logDensity(final double value) {
// Update boundaries if necessary
this.updateBoundariesAndOrWeights();
if (this.m_Boundaries == null) {
return Math.log(Double.MIN_VALUE);
}
// Find the bin
int index = Arrays.binarySearch(this.m_Boundaries, value);
// Is the value outside?
if ((index == -1) || (index == -this.m_Boundaries.length - 1)) {
// Use normal density outside
double val = 0;
if (index == -1) { // Smaller than minimum
val = this.m_TM.firstKey() - value;
} else {
val = value - this.m_TM.lastKey();
}
return (CONST - Math.log(this.m_Width) - 0.5 * (val * val / (this.m_Width * this.m_Width))) - Math.log(this.m_SumOfWeights + 2);
}
// Is value exactly equal to right-most boundary?
if (index == this.m_Boundaries.length - 1) {
index--;
} else {
// Need to reverse index if necessary
if (index < 0) {
index = -index - 2;
}
}
// Figure out the width of the bin containing the value
double width = this.m_Boundaries[index + 1] - this.m_Boundaries[index];
// Density component from the smeared-out data point
double densSmearedOut = 1.0 / ((this.m_SumOfWeights + 2) * (this.m_Boundaries[this.m_Boundaries.length - 1] - this.m_Boundaries[0]));
// Return log of density
if (this.m_Weights[index] <= 0) {
/*
* System.out.println(value); System.out.println(this); System.exit(1);
*/
// Just use one smeared-out data point
return Math.log(densSmearedOut);
} else {
return Math.log(densSmearedOut + this.m_Weights[index] / ((this.m_SumOfWeights + 2) * width));
}
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Returns textual description of this estimator.
*/
@Override
public String toString() {
StringBuffer text = new StringBuffer();
text.append("EqualFrequencyHistogram estimator\n\n" + "Bandwidth for out of range cases " + this.m_Width + ", total weight " + this.m_SumOfWeights);
if (this.m_Boundaries != null) {
text.append("\nLeft boundary\tRight boundary\tWeight\tDensity\n");
for (int i = 0; i < this.m_Boundaries.length - 1; i++) {
text.append(this.m_Boundaries[i] + "\t" + this.m_Boundaries[i + 1] + "\t" + this.m_Weights[i] + "\t" + Math.exp(this.logDensity((this.m_Boundaries[i + 1] + this.m_Boundaries[i]) / 2)) + "\n");
}
}
return text.toString();
}
/**
* Main method, used for testing this class.
* @throws InterruptedException
*/
public static void main(final String[] args) throws InterruptedException {
// Get random number generator initialized by system
Random r = new Random();
// Create density estimator
UnivariateEqualFrequencyHistogramEstimator e = new UnivariateEqualFrequencyHistogramEstimator();
// Output the density estimator
System.out.println(e);
// Monte Carlo integration
double sum = 0;
for (int i = 0; i < 1000; i++) {
sum += Math.exp(e.logDensity(r.nextDouble() * 10.0 - 5.0));
}
System.out.println("Approximate integral: " + 10.0 * sum / 1000);
// Add Gaussian values into it
for (int i = 0; i < 1000; i++) {
e.addValue(0.1 * r.nextGaussian() - 3, 1);
e.addValue(r.nextGaussian() * 0.25, 3);
}
// Monte Carlo integration
sum = 0;
int points = 10000000;
for (int i = 0; i < points; i++) {
double value = r.nextDouble() * 20.0 - 10.0;
sum += Math.exp(e.logDensity(value));
}
// Output the density estimator
System.out.println(e);
System.out.println("Approximate integral: " + 20.0 * sum / points);
// Check interval estimates
double[][] Intervals = e.predictIntervals(0.9);
System.out.println("Printing histogram intervals ---------------------");
for (double[] interval : Intervals) {
System.out.println("Left: " + interval[0] + "\t Right: " + interval[1]);
}
System.out.println("Finished printing histogram intervals ---------------------");
double Covered = 0;
for (int i = 0; i < 1000; i++) {
double val = -1;
if (r.nextDouble() < 0.25) {
val = 0.1 * r.nextGaussian() - 3.0;
} else {
val = r.nextGaussian() * 0.25;
}
for (double[] interval : Intervals) {
if (val >= interval[0] && val <= interval[1]) {
Covered++;
break;
}
}
}
System.out.println("Coverage at 0.9 level for histogram intervals: " + Covered / 1000);
for (int j = 1; j < 5; j++) {
double numTrain = Math.pow(10, j);
System.out.println("Number of training cases: " + numTrain);
// Compare performance to normal estimator on normally distributed data
UnivariateEqualFrequencyHistogramEstimator eHistogram = new UnivariateEqualFrequencyHistogramEstimator();
UnivariateNormalEstimator eNormal = new UnivariateNormalEstimator();
// Add training cases
for (int i = 0; i < numTrain; i++) {
double val = r.nextGaussian() * 1.5 + 0.5;
/*
* if (j == 4) { System.err.println(val); }
*/
eHistogram.addValue(val, 1);
eNormal.addValue(val, 1);
}
// Monte Carlo integration
sum = 0;
points = 10000000;
for (int i = 0; i < points; i++) {
double value = r.nextDouble() * 20.0 - 10.0;
sum += Math.exp(eHistogram.logDensity(value));
}
System.out.println(eHistogram);
System.out.println("Approximate integral for histogram estimator: " + 20.0 * sum / points);
// Evaluate estimators
double loglikelihoodHistogram = 0, loglikelihoodNormal = 0;
for (int i = 0; i < 1000; i++) {
double val = r.nextGaussian() * 1.5 + 0.5;
loglikelihoodHistogram += eHistogram.logDensity(val);
loglikelihoodNormal += eNormal.logDensity(val);
}
System.out.println("Loglikelihood for histogram estimator: " + loglikelihoodHistogram / 1000);
System.out.println("Loglikelihood for normal estimator: " + loglikelihoodNormal / 1000);
// Check interval estimates
double[][] histogramIntervals = eHistogram.predictIntervals(0.95);
double[][] normalIntervals = eNormal.predictIntervals(0.95);
System.out.println("Printing histogram intervals ---------------------");
for (double[] histogramInterval : histogramIntervals) {
System.out.println("Left: " + histogramInterval[0] + "\t Right: " + histogramInterval[1]);
}
System.out.println("Finished printing histogram intervals ---------------------");
System.out.println("Printing normal intervals ---------------------");
for (double[] normalInterval : normalIntervals) {
System.out.println("Left: " + normalInterval[0] + "\t Right: " + normalInterval[1]);
}
System.out.println("Finished printing normal intervals ---------------------");
double histogramCovered = 0;
double normalCovered = 0;
for (int i = 0; i < 1000; i++) {
double val = r.nextGaussian() * 1.5 + 0.5;
for (double[] histogramInterval : histogramIntervals) {
if (val >= histogramInterval[0] && val <= histogramInterval[1]) {
histogramCovered++;
break;
}
}
for (double[] normalInterval : normalIntervals) {
if (val >= normalInterval[0] && val <= normalInterval[1]) {
normalCovered++;
break;
}
}
}
System.out.println("Coverage at 0.95 level for histogram intervals: " + histogramCovered / 1000);
System.out.println("Coverage at 0.95 level for normal intervals: " + normalCovered / 1000);
histogramIntervals = eHistogram.predictIntervals(0.8);
normalIntervals = eNormal.predictIntervals(0.8);
histogramCovered = 0;
normalCovered = 0;
for (int i = 0; i < 1000; i++) {
double val = r.nextGaussian() * 1.5 + 0.5;
for (double[] histogramInterval : histogramIntervals) {
if (val >= histogramInterval[0] && val <= histogramInterval[1]) {
histogramCovered++;
break;
}
}
for (double[] normalInterval : normalIntervals) {
if (val >= normalInterval[0] && val <= normalInterval[1]) {
normalCovered++;
break;
}
}
}
System.out.println("Coverage at 0.8 level for histogram intervals: " + histogramCovered / 1000);
System.out.println("Coverage at 0.8 level for normal intervals: " + normalCovered / 1000);
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/UnivariateIntervalEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* UnivariateIntervalEstimator.java
* Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
/**
* Interface that can be implemented by simple weighted univariate
* interval estimators.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public interface UnivariateIntervalEstimator {
/**
* Adds a value to the interval estimator.
*
* @param value the value to add
* @param weight the weight of the value
*/
void addValue(double value, double weight);
/**
* Returns the intervals at the given confidence value. Each row has
* one interval. The first element in each row is the lower bound,
* the second element the upper one.
*
* @param confidenceValue the value at which to evaluate
* @return the interval
* @throws InterruptedException
*/
double[][] predictIntervals(double confidenceValue) throws InterruptedException;
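/*
* A minimal, hypothetical example of consuming the result (added for
* illustration only; "estimator" stands for any implementation of this
* interface):
*
* double[][] intervals = estimator.predictIntervals(0.95);
* for (double[] interval : intervals) {
* System.out.println("[" + interval[0] + ", " + interval[1] + "]");
* }
*/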
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/UnivariateKernelEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* UnivariateKernelEstimator.java
* Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.TreeMap;
import weka.core.RevisionUtils;
import weka.core.Statistics;
import weka.core.Utils;
/**
* Simple weighted kernel density estimator.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class UnivariateKernelEstimator implements UnivariateDensityEstimator, UnivariateIntervalEstimator, UnivariateQuantileEstimator, Serializable {
/** For serialization */
private static final long serialVersionUID = -1163983347810498880L;
/** The collection used to store the weighted values. */
protected TreeMap<Double, Double> m_TM = new TreeMap<Double, Double>();
/** The weighted sum of values */
protected double m_WeightedSum = 0;
/** The weighted sum of squared values */
protected double m_WeightedSumSquared = 0;
/** The weight of the values collected so far */
protected double m_SumOfWeights = 0;
/** The current bandwidth (only computed when needed) */
protected double m_Width = Double.MAX_VALUE;
/** The exponent to use in computation of bandwidth (default: -0.25) */
protected double m_Exponent = -0.25;
/** The minimum allowed value of the kernel width (default: 1.0E-6) */
protected double m_MinWidth = 1.0E-6;
/** Constant for Gaussian density. */
public static final double CONST = -0.5 * Math.log(2 * Math.PI);
/** Threshold at which further kernels are no longer added to sum. */
protected double m_Threshold = 1.0E-6;
/** The number of intervals used to approximate prediction interval. */
protected int m_NumIntervals = 1000;
/**
* Returns a string describing the estimator.
*/
public String globalInfo() {
return "Provides a univariate kernel estimator.";
}
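/*
* Illustrative usage sketch (added for documentation purposes, not part of the
* original source; the values are made up): values are added incrementally and
* the bandwidth is recomputed lazily whenever a density, interval or quantile
* is requested.
*
* UnivariateKernelEstimator est = new UnivariateKernelEstimator();
* est.addValue(0.2, 1.0);
* est.addValue(1.1, 0.5);
* double logDens = est.logDensity(0.5);
* double[][] intervals = est.predictIntervals(0.9);
*/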
/**
* Adds a value to the density estimator.
*
* @param value the value to add
* @param weight the weight of the value
*/
@Override
public void addValue(final double value, final double weight) {
this.m_WeightedSum += value * weight;
this.m_WeightedSumSquared += value * value * weight;
this.m_SumOfWeights += weight;
if (this.m_TM.get(value) == null) {
this.m_TM.put(value, weight);
} else {
this.m_TM.put(value, this.m_TM.get(value) + weight);
}
}
/**
* Updates bandwidth: the sample standard deviation is multiplied by the total
* weight to the power of the given exponent.
*
* If the total weight is not greater than zero, the width is set to
* Double.MAX_VALUE. If that is not the case, but the width becomes smaller
* than m_MinWidth, the width is set to the value of m_MinWidth.
*/
public void updateWidth() {
// OK, need to do some work
if (this.m_SumOfWeights > 0) {
// Compute variance for scaling
double mean = this.m_WeightedSum / this.m_SumOfWeights;
double variance = this.m_WeightedSumSquared / this.m_SumOfWeights - mean * mean;
if (variance < 0) {
variance = 0;
}
// Compute kernel bandwidth
this.m_Width = Math.sqrt(variance) * Math.pow(this.m_SumOfWeights, this.m_Exponent);
if (this.m_Width <= this.m_MinWidth) {
this.m_Width = this.m_MinWidth;
}
} else {
this.m_Width = Double.MAX_VALUE;
}
}
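/*
* Worked example of the bandwidth formula above (illustrative only, the numbers
* are made up): with a sample standard deviation of 2.0, a total weight of 100
* and the default exponent of -0.25,
*
* double width = 2.0 * Math.pow(100, -0.25); // = 2.0 / Math.sqrt(10), roughly 0.632
*
* so the kernel width shrinks slowly as more weighted data arrives, but is
* never allowed to drop below m_MinWidth.
*/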
/**
* Returns the interval for the given confidence value.
*
* @param conf the confidence value in the interval [0, 1]
* @return the interval
* @throws InterruptedException
*/
@Override
public double[][] predictIntervals(final double conf) throws InterruptedException {
// Update the bandwidth
this.updateWidth();
// Compute minimum and maximum value, and delta
double val = Statistics.normalInverse(1.0 - (1.0 - conf) / 2);
double min = this.m_TM.firstKey() - val * this.m_Width;
double max = this.m_TM.lastKey() + val * this.m_Width;
double delta = (max - min) / this.m_NumIntervals;
// Create array with estimated probabilities
double[] probabilities = new double[this.m_NumIntervals];
double leftVal = Math.exp(this.logDensity(min));
for (int i = 0; i < this.m_NumIntervals; i++) {
double rightVal = Math.exp(this.logDensity(min + (i + 1) * delta));
probabilities[i] = 0.5 * (leftVal + rightVal) * delta;
leftVal = rightVal;
}
// Sort array based on area of bin estimates
int[] sortedIndices = Utils.sort(probabilities);
// Mark the intervals to use
double sum = 0;
boolean[] toUse = new boolean[probabilities.length];
int k = 0;
while ((sum < conf) && (k < toUse.length)) {
toUse[sortedIndices[toUse.length - (k + 1)]] = true;
sum += probabilities[sortedIndices[toUse.length - (k + 1)]];
k++;
}
// Don't need probabilities anymore
probabilities = null;
// Create final list of intervals
ArrayList<double[]> intervals = new ArrayList<double[]>();
// The current interval
double[] interval = null;
// Iterate through kernels
boolean haveStartedInterval = false;
for (int i = 0; i < this.m_NumIntervals; i++) {
// Should the current bin be used?
if (toUse[i]) {
// Do we need to create a new interval?
if (haveStartedInterval == false) {
haveStartedInterval = true;
interval = new double[2];
interval[0] = min + i * delta;
}
// Regardless, we should update the upper boundary
interval[1] = min + (i + 1) * delta;
} else {
// We need to finalize and store the last interval
// if necessary.
if (haveStartedInterval) {
haveStartedInterval = false;
intervals.add(interval);
}
}
}
// Add last interval if there is one
if (haveStartedInterval) {
intervals.add(interval);
}
return intervals.toArray(new double[0][0]);
}
/**
* Returns the quantile for the given percentage.
*
* @param percentage the percentage
* @return the quantile
*/
@Override
public double predictQuantile(final double percentage) {
// Update the bandwidth
this.updateWidth();
// Compute minimum and maximum value, and delta
double val = Statistics.normalInverse(1.0 - (1.0 - 0.95) / 2);
double min = this.m_TM.firstKey() - val * this.m_Width;
double max = this.m_TM.lastKey() + val * this.m_Width;
double delta = (max - min) / this.m_NumIntervals;
double sum = 0;
double leftVal = Math.exp(this.logDensity(min));
for (int i = 0; i < this.m_NumIntervals; i++) {
if (sum >= percentage) {
return min + i * delta;
}
double rightVal = Math.exp(this.logDensity(min + (i + 1) * delta));
sum += 0.5 * (leftVal + rightVal) * delta;
leftVal = rightVal;
}
return max;
}
/**
* Computes the logarithm of the sum of x and y, given the logarithms of x and y.
*
* This is based on Tobias P. Mann's description in "Numerically Stable Hidden
* Markov Implementation" (2006).
*/
protected double logOfSum(final double logOfX, final double logOfY) {
// Check for cases where log of zero is present
if (Double.isNaN(logOfX)) {
return logOfY;
}
if (Double.isNaN(logOfY)) {
return logOfX;
}
// Otherwise return the proper result, taking care of possible overflow
if (logOfX > logOfY) {
return logOfX + Math.log(1 + Math.exp(logOfY - logOfX));
} else {
return logOfY + Math.log(1 + Math.exp(logOfX - logOfY));
}
}
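/*
* Illustrative check of the log-of-sum trick above (not part of the original
* source): for x = 0.5 and y = 0.25,
*
* double logOfX = Math.log(0.5); // roughly -0.693
* double logOfY = Math.log(0.25); // roughly -1.386
* double logSum = logOfX + Math.log(1 + Math.exp(logOfY - logOfX));
* // logSum is roughly Math.log(0.75), i.e. about -0.288
*
* Working on the log scale this way avoids underflow when the individual
* densities are too small to be represented directly as doubles.
*/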
/**
* Compute running sum of density values and weights.
*/
protected void runningSum(final Set<Map.Entry<Double, Double>> c, final double value, final double[] sums) {
// Auxiliary variables
double offset = CONST - Math.log(this.m_Width);
double logFactor = Math.log(this.m_Threshold) - Math.log(1 - this.m_Threshold);
double logSumOfWeights = Math.log(this.m_SumOfWeights);
// Iterate through values
Iterator<Map.Entry<Double, Double>> itr = c.iterator();
while (itr.hasNext()) {
Map.Entry<Double, Double> entry = itr.next();
// Skip entry if weight is zero because it cannot contribute to sum
if (entry.getValue() > 0) {
double diff = (entry.getKey() - value) / this.m_Width;
double logDensity = offset - 0.5 * diff * diff;
double logWeight = Math.log(entry.getValue());
sums[0] = this.logOfSum(sums[0], logWeight + logDensity);
sums[1] = this.logOfSum(sums[1], logWeight);
// Can we stop assuming worst case?
if (logDensity + logSumOfWeights < this.logOfSum(logFactor + sums[0], logDensity + sums[1])) {
break;
}
}
}
}
/**
* Returns the natural logarithm of the density estimate at the given point.
*
* @param value the value at which to evaluate
* @return the natural logarithm of the density estimate at the given value
*/
@Override
public double logDensity(final double value) {
// Update the bandwidth
this.updateWidth();
// Array used to keep running sums
double[] sums = new double[2];
sums[0] = Double.NaN;
sums[1] = Double.NaN;
// Examine right-hand side of the value
this.runningSum(this.m_TM.tailMap(value, true).entrySet(), value, sums);
// Examine left-hand side of the value
this.runningSum(this.m_TM.headMap(value, false).descendingMap().entrySet(), value, sums);
// Need to normalize
return sums[0] - Math.log(this.m_SumOfWeights);
}
/**
* Returns textual description of this estimator.
*/
@Override
public String toString() {
return "Kernel estimator with bandwidth " + this.m_Width + " and total weight " + this.m_SumOfWeights + " based on\n" + this.m_TM.toString();
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method, used for testing this class.
* @throws InterruptedException
*/
public static void main(final String[] args) throws InterruptedException {
// Get random number generator initialized by system
Random r = new Random();
// Create density estimator
UnivariateKernelEstimator e = new UnivariateKernelEstimator();
// Output the density estimator
System.out.println(e);
// Monte Carlo integration
double sum = 0;
for (int i = 0; i < 1000; i++) {
sum += Math.exp(e.logDensity(r.nextDouble() * 10.0 - 5.0));
}
System.out.println("Approximate integral: " + 10.0 * sum / 1000);
// Add Gaussian values into it
for (int i = 0; i < 1000; i++) {
e.addValue(0.1 * r.nextGaussian() - 3, 1);
e.addValue(r.nextGaussian() * 0.25, 3);
}
// Monte Carlo integration
sum = 0;
int points = 10000;
for (int i = 0; i < points; i++) {
double value = r.nextDouble() * 10.0 - 5.0;
sum += Math.exp(e.logDensity(value));
}
System.out.println("Approximate integral: " + 10.0 * sum / points);
// Check interval estimates
double[][] Intervals = e.predictIntervals(0.9);
System.out.println("Printing kernel intervals ---------------------");
for (double[] interval : Intervals) {
System.out.println("Left: " + interval[0] + "\t Right: " + interval[1]);
}
System.out.println("Finished printing kernel intervals ---------------------");
double Covered = 0;
for (int i = 0; i < 1000; i++) {
double val = -1;
if (r.nextDouble() < 0.25) {
val = 0.1 * r.nextGaussian() - 3.0;
} else {
val = r.nextGaussian() * 0.25;
}
for (double[] interval : Intervals) {
if (val >= interval[0] && val <= interval[1]) {
Covered++;
break;
}
}
}
System.out.println("Coverage at 0.9 level for kernel intervals: " + Covered / 1000);
// Compare performance to normal estimator on normally distributed data
UnivariateKernelEstimator eKernel = new UnivariateKernelEstimator();
UnivariateNormalEstimator eNormal = new UnivariateNormalEstimator();
for (int j = 1; j < 5; j++) {
double numTrain = Math.pow(10, j);
System.out.println("Number of training cases: " + numTrain);
// Add training cases
for (int i = 0; i < numTrain; i++) {
double val = r.nextGaussian() * 1.5 + 0.5;
eKernel.addValue(val, 1);
eNormal.addValue(val, 1);
}
// Monte Carlo integration
sum = 0;
points = 10000;
for (int i = 0; i < points; i++) {
double value = r.nextDouble() * 20.0 - 10.0;
sum += Math.exp(eKernel.logDensity(value));
}
System.out.println("Approximate integral for kernel estimator: " + 20.0 * sum / points);
// Evaluate estimators
double loglikelihoodKernel = 0, loglikelihoodNormal = 0;
for (int i = 0; i < 1000; i++) {
double val = r.nextGaussian() * 1.5 + 0.5;
loglikelihoodKernel += eKernel.logDensity(val);
loglikelihoodNormal += eNormal.logDensity(val);
}
System.out.println("Loglikelihood for kernel estimator: " + loglikelihoodKernel / 1000);
System.out.println("Loglikelihood for normal estimator: " + loglikelihoodNormal / 1000);
// Check interval estimates
double[][] kernelIntervals = eKernel.predictIntervals(0.95);
double[][] normalIntervals = eNormal.predictIntervals(0.95);
System.out.println("Printing kernel intervals ---------------------");
for (double[] kernelInterval : kernelIntervals) {
System.out.println("Left: " + kernelInterval[0] + "\t Right: " + kernelInterval[1]);
}
System.out.println("Finished printing kernel intervals ---------------------");
System.out.println("Printing normal intervals ---------------------");
for (double[] normalInterval : normalIntervals) {
System.out.println("Left: " + normalInterval[0] + "\t Right: " + normalInterval[1]);
}
System.out.println("Finished printing normal intervals ---------------------");
double kernelCovered = 0;
double normalCovered = 0;
for (int i = 0; i < 1000; i++) {
double val = r.nextGaussian() * 1.5 + 0.5;
for (double[] kernelInterval : kernelIntervals) {
if (val >= kernelInterval[0] && val <= kernelInterval[1]) {
kernelCovered++;
break;
}
}
for (double[] normalInterval : normalIntervals) {
if (val >= normalInterval[0] && val <= normalInterval[1]) {
normalCovered++;
break;
}
}
}
System.out.println("Coverage at 0.95 level for kernel intervals: " + kernelCovered / 1000);
System.out.println("Coverage at 0.95 level for normal intervals: " + normalCovered / 1000);
kernelIntervals = eKernel.predictIntervals(0.8);
normalIntervals = eNormal.predictIntervals(0.8);
kernelCovered = 0;
normalCovered = 0;
for (int i = 0; i < 1000; i++) {
double val = r.nextGaussian() * 1.5 + 0.5;
for (double[] kernelInterval : kernelIntervals) {
if (val >= kernelInterval[0] && val <= kernelInterval[1]) {
kernelCovered++;
break;
}
}
for (double[] normalInterval : normalIntervals) {
if (val >= normalInterval[0] && val <= normalInterval[1]) {
normalCovered++;
break;
}
}
}
System.out.println("Coverage at 0.8 level for kernel intervals: " + kernelCovered / 1000);
System.out.println("Coverage at 0.8 level for normal intervals: " + normalCovered / 1000);
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/UnivariateMixtureEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* UnivariateMixtureEstimator.java
* Copyright (C) 2014 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.ContingencyTables;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.Statistics;
import weka.core.Utils;
/**
* Simple weighted mixture density estimator. Uses a mixture of Gaussians
* and applies the leave-one-out bootstrap for model selection. Can alternatively use normalized entropy.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision: 8034 $
*/
public class UnivariateMixtureEstimator implements UnivariateDensityEstimator, UnivariateIntervalEstimator, UnivariateQuantileEstimator, OptionHandler, Serializable {
/** Constant for normal distribution. */
private static double m_normConst = Math.log(Math.sqrt(2 * Math.PI));
/**
* Fast univariate mixture model implementation.
*/
public class MM {
/** Means */
protected double[] m_Means = null;
/** Standard deviations */
protected double[] m_StdDevs = null;
/** The priors, on log scale */
protected double[] m_LogPriors = null;
/** The number of actual components */
protected int m_K;
/**
* Returns string describing the estimator.
*/
@Override
public String toString() {
StringBuffer sb = new StringBuffer();
sb.append("Mixture model estimator\n\n");
for (int i = 0; i < this.m_LogPriors.length; i++) {
sb.append("Mean: " + this.m_Means[i] + "\tStd. dev.: " + this.m_StdDevs[i] + "\tPrior prob.: " + Math.exp(this.m_LogPriors[i]) + "\n");
}
return sb.toString();
}
/**
* Returns the smallest distance from the given value to the current means.
* Assumes m_Means has at least one element (i.e. m_K >= 1).
*/
protected double smallestDistance(final double val) {
double min = Math.abs(val - this.m_Means[0]);
for (int i = 1; i < this.m_K; i++) {
if (Math.abs(val - this.m_Means[i]) < min) {
min = Math.abs(val - this.m_Means[i]);
}
}
return min;
}
/**
* Returns the index of the nearest mean.
* Assumes m_Means has at least one element (i.e. m_K >= 1).
*/
protected int nearestMean(final double val) {
double min = Math.abs(val - this.m_Means[0]);
int index = 0;
for (int i = 1; i < this.m_K; i++) {
if (Math.abs(val - this.m_Means[i]) < min) {
min = Math.abs(val - this.m_Means[i]);
index = i;
}
}
return index;
}
/**
* Initializes the model. Assumes K >= 1, values.length >= 1,
* and values.length = weights.length.
*/
public void initializeModel(final int K, final double[] values, final double[] weights, final Random r) {
// Initialize means using farthest points
this.m_Means = new double[K];
// Randomly choose first point
double furthestVal = values[r.nextInt(values.length)];
// Find K maximally distant points (if possible)
this.m_K = 0;
do {
this.m_Means[this.m_K] = furthestVal;
this.m_K++;
if (this.m_K >= K) {
break;
}
double maxMinDist = this.smallestDistance(values[0]);
furthestVal = values[0];
for (int i = 1; i < values.length; i++) {
double minDist = this.smallestDistance(values[i]);
if (minDist > maxMinDist) {
maxMinDist = minDist;
furthestVal = values[i];
}
}
if (maxMinDist <= 0) {
break;
}
} while (true);
// Shrink array of means if necessary
if (this.m_K < K) {
double[] tempMeans = new double[this.m_K];
System.arraycopy(this.m_Means, 0, tempMeans, 0, this.m_K);
this.m_Means = tempMeans;
}
// Establish initial cluster assignments
double[][] probs = new double[this.m_K][values.length];
for (int i = 0; i < values.length; i++) {
probs[this.nearestMean(values[i])][i] = 1.0;
}
// Compute initial parameters
this.m_StdDevs = new double[this.m_K];
this.m_LogPriors = new double[this.m_K];
this.estimateParameters(values, weights, probs);
}
/**
* Estimate parameters.
*/
protected void estimateParameters(final double[] values, final double[] weights, final double[][] probs) {
double totalSumOfWeights = 0;
for (int j = 0; j < this.m_K; j++) {
double sum = 0;
double sumWeights = 0;
for (int i = 0; i < values.length; i++) {
double weight = probs[j][i] * weights[i];
sum += weight * values[i];
sumWeights += weight;
}
if (sumWeights <= 0) {
this.m_Means[j] = 0;
} else {
this.m_Means[j] = sum / sumWeights;
}
totalSumOfWeights += sumWeights;
}
for (int j = 0; j < this.m_K; j++) {
double sum = 0;
double sumWeights = 0;
for (int i = 0; i < values.length; i++) {
double weight = probs[j][i] * weights[i];
double diff = values[i] - this.m_Means[j];
sum += weight * diff * diff;
sumWeights += weight;
}
if ((sum <= 0) || (sumWeights <= 0)) {
this.m_StdDevs[j] = 1.0e-6; // Hack to prevent unpleasantness
} else {
this.m_StdDevs[j] = Math.sqrt(sum / sumWeights);
if (this.m_StdDevs[j] < 1.0e-6) {
this.m_StdDevs[j] = 1.0e-6;
}
}
if (sumWeights <= 0) {
this.m_LogPriors[j] = -Double.MAX_VALUE;
} else {
this.m_LogPriors[j] = Math.log(sumWeights / totalSumOfWeights);
}
}
}
/**
* Computes loglikelihood of current model.
*/
public double loglikelihood(final double[] values, final double[] weights) {
double sum = 0;
double sumOfWeights = 0;
for (int i = 0; i < values.length; i++) {
sum += weights[i] * this.logDensity(values[i]);
sumOfWeights += weights[i];
}
return sum / sumOfWeights;
}
/**
* Returns average of squared errors for current model.
*/
public double MSE() {
double mse = 0;
for (int i = 0; i < this.m_K; i++) {
mse += this.m_StdDevs[i] * this.m_StdDevs[i] * Math.exp(this.m_LogPriors[i]);
}
return mse;
}
/**
* Log of the density function of the normal distribution.
*/
protected double logNormalDens(final double x, final double mean, final double stdDev) {
double diff = x - mean;
return -(diff * diff / (2 * stdDev * stdDev)) - m_normConst - Math.log(stdDev);
}
/**
* Returns the log of the joint densities, one per cluster.
*/
protected double[] logJointDensities(final double value) {
double[] a = new double[this.m_K];
for (int i = 0; i < this.m_K; i++) {
a[i] = this.m_LogPriors[i] + this.logNormalDens(value, this.m_Means[i], this.m_StdDevs[i]);
}
return a;
}
/**
* Computes log of density for given value.
*/
public double logDensity(final double value) {
double[] a = this.logJointDensities(value);
double max = a[Utils.maxIndex(a)];
double sum = 0.0;
for (int i = 0; i < a.length; i++) {
sum += Math.exp(a[i] - max);
}
return max + Math.log(sum);
}
/**
* Returns the interval for the given confidence value.
*
* @param conf the confidence value in the interval [0, 1]
* @return the interval
* @throws InterruptedException
*/
public double[][] predictIntervals(final double conf) throws InterruptedException {
// Compute minimum and maximum value, and delta
double val = Statistics.normalInverse(1.0 - (1.0 - conf) / 2);
double min = Double.MAX_VALUE;
double max = -Double.MAX_VALUE;
for (int i = 0; i < this.m_Means.length; i++) {
double l = this.m_Means[i] - val * this.m_StdDevs[i];
if (l < min) {
min = l;
}
double r = this.m_Means[i] + val * this.m_StdDevs[i];
if (r > max) {
max = r;
}
}
double delta = (max - min) / UnivariateMixtureEstimator.this.m_NumIntervals;
// Create array with estimated probabilities
double[] probabilities = new double[UnivariateMixtureEstimator.this.m_NumIntervals];
double leftVal = Math.exp(this.logDensity(min));
for (int i = 0; i < UnivariateMixtureEstimator.this.m_NumIntervals; i++) {
double rightVal = Math.exp(this.logDensity(min + (i + 1) * delta));
probabilities[i] = 0.5 * (leftVal + rightVal) * delta;
leftVal = rightVal;
}
// Sort array based on area of bin estimates
int[] sortedIndices = Utils.sort(probabilities);
// Mark the intervals to use
double sum = 0;
boolean[] toUse = new boolean[probabilities.length];
int k = 0;
while ((sum < conf) && (k < toUse.length)) {
toUse[sortedIndices[toUse.length - (k + 1)]] = true;
sum += probabilities[sortedIndices[toUse.length - (k + 1)]];
k++;
}
// Don't need probabilities anymore
probabilities = null;
// Create final list of intervals
ArrayList<double[]> intervals = new ArrayList<double[]>();
// The current interval
double[] interval = null;
// Calculate actual intervals
boolean haveStartedInterval = false;
for (int i = 0; i < UnivariateMixtureEstimator.this.m_NumIntervals; i++) {
// Should the current bin be used?
if (toUse[i]) {
// Do we need to create a new interval?
if (haveStartedInterval == false) {
haveStartedInterval = true;
interval = new double[2];
interval[0] = min + i * delta;
}
// Regardless, we should update the upper boundary
interval[1] = min + (i + 1) * delta;
} else {
// We need to finalize and store the last interval
// if necessary.
if (haveStartedInterval) {
haveStartedInterval = false;
intervals.add(interval);
}
}
}
// Add last interval if there is one
if (haveStartedInterval) {
intervals.add(interval);
}
return intervals.toArray(new double[0][0]);
}
/**
* Returns the quantile for the given percentage.
*
* @param percentage the percentage
* @return the quantile
*/
public double predictQuantile(final double percentage) {
// Compute minimum and maximum value, and delta
double valRight = Statistics.normalInverse(percentage);
double valLeft = Statistics.normalInverse(0.001);
double min = Double.MAX_VALUE;
double max = -Double.MAX_VALUE;
for (int i = 0; i < this.m_Means.length; i++) {
double l = this.m_Means[i] - valLeft * this.m_StdDevs[i];
if (l < min) {
min = l;
}
double r = this.m_Means[i] + valRight * this.m_StdDevs[i];
if (r > max) {
max = r;
}
}
double delta = (max - min) / UnivariateMixtureEstimator.this.m_NumIntervals;
double sum = 0;
double leftVal = Math.exp(this.logDensity(min));
for (int i = 0; i < UnivariateMixtureEstimator.this.m_NumIntervals; i++) {
if (sum >= percentage) {
return min + i * delta;
}
double rightVal = Math.exp(this.logDensity(min + (i + 1) * delta));
sum += 0.5 * (leftVal + rightVal) * delta;
leftVal = rightVal;
}
return max;
}
}
/** For serialization */
private static final long serialVersionUID = -2035274930137353656L;
/** The values used for this estimator */
protected double[] m_Values = new double[1000];
/** The weights used for this estimator */
protected double[] m_Weights = new double[1000];
/** The number of values that have been seen */
protected int m_NumValues;
/** The current mixture model */
protected MM m_MixtureModel;
/** The number of components to use (default is -1). */
protected int m_NumComponents = -1;
/** The maximum number of components to use (default is 5) */
protected int m_MaxNumComponents = 5;
/** The random number seed to use (default is 1). */
protected int m_Seed = 1;
/** The number of Bootstrap runs to use to select the number of components (default is 10) */
protected int m_NumBootstrapRuns = 10;
/** The number of intervals used to approximate prediction interval. */
protected int m_NumIntervals = 1000;
/** Whether to use normalized entropy instead of the bootstrap. */
protected boolean m_UseNormalizedEntropy = false;
/** Whether to output debug info. */
protected boolean m_Debug = false;
/** The random number generator. */
protected Random m_Random = new Random(this.m_Seed);
/**
* Returns a string describing the estimator.
*/
public String globalInfo() {
return "Estimates a univariate mixture model.";
}
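/*
* Illustrative usage sketch (added for documentation purposes, not part of the
* original source; the settings shown are arbitrary): by default the number of
* components is chosen via the leave-one-out bootstrap, while setting the
* number of components explicitly skips model selection.
*
* UnivariateMixtureEstimator est = new UnivariateMixtureEstimator();
* est.setMaxNumComponents(3);
* est.setUseNormalizedEntropy(true); // or leave false to use the bootstrap
* est.addValue(-1.0, 1.0);
* est.addValue(1.5, 2.0);
* double logDens = est.logDensity(0.0);
*/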
/**
* @return whether normalized entropy is used
*/
public boolean getUseNormalizedEntropy() {
return this.m_UseNormalizedEntropy;
}
/**
* @param useNormalizedEntropy whether to use normalized entropy
*/
public void setUseNormalizedEntropy(final boolean useNormalizedEntropy) {
this.m_UseNormalizedEntropy = useNormalizedEntropy;
}
/**
* The tool tip for this property.
*/
public String numBootstrapRunsToolTipText() {
return "The number of bootstrap runs used to choose the number of components.";
}
/**
* Returns the number of Bootstrap runs.
*
* @return the number of Bootstrap runs
*/
public int getNumBootstrapRuns() {
return this.m_NumBootstrapRuns;
}
/**
* Sets the number of Bootstrap runs.
*
* @param numBootstrapRuns the number of bootstrap runs
*/
public void setNumBootstrapRuns(final int numBootstrapRuns) {
this.m_NumBootstrapRuns = numBootstrapRuns;
}
/**
* The tool tip for this property.
*/
public String numComponentsToolTipText() {
return "The number of mixture components to use.";
}
/**
* Returns the number of components to use.
*
* @return the m_NumComponents
*/
public int getNumComponents() {
return this.m_NumComponents;
}
/**
* Sets the number of components to use.
*
* @param numComponents the number of components to use
*/
public void setNumComponents(final int numComponents) {
this.m_NumComponents = numComponents;
}
/**
* Returns the tip text for this property
* @return tip text for this property suitable for
* displaying in the explorer/experimenter gui
*/
public String seedTipText() {
return "The random number seed to be used.";
}
/**
* Set the seed for random number generation.
*
* @param seed the seed
*/
public void setSeed(final int seed) {
this.m_Seed = seed;
this.m_Random = new Random(seed);
}
/**
* Gets the seed for random number generation.
*
* @return the seed for the random number generation
*/
public int getSeed() {
return this.m_Seed;
}
/**
* The tool tip for this property.
*/
public String maxNumComponentsToolTipText() {
return "The maximum number of mixture components to use.";
}
/**
* Returns the maximum number of components to use.
*
* @return the maximum number of components to use
*/
public int getMaxNumComponents() {
return this.m_MaxNumComponents;
}
/**
* Sets the maximum number of components to use.
*
* @param maxNumComponents the maximum number of components to evaluate
*/
public void setMaxNumComponents(final int maxNumComponents) {
this.m_MaxNumComponents = maxNumComponents;
}
/**
* Adds a value to the density estimator.
*
* @param value the value to add
* @param weight the weight of the value
*/
@Override
public void addValue(final double value, final double weight) {
// Do we need to add value at all?
if (!Utils.eq(weight, 0)) {
// Invalidate current model
this.m_MixtureModel = null;
// Do we need to expand the arrays?
if (this.m_NumValues == this.m_Values.length) {
double[] newWeights = new double[2 * this.m_NumValues];
double[] newValues = new double[2 * this.m_NumValues];
System.arraycopy(this.m_Values, 0, newValues, 0, this.m_NumValues);
System.arraycopy(this.m_Weights, 0, newWeights, 0, this.m_NumValues);
this.m_Values = newValues;
this.m_Weights = newWeights;
}
// Add values
this.m_Values[this.m_NumValues] = value;
this.m_Weights[this.m_NumValues] = weight;
this.m_NumValues++;
}
}
/**
* Build mixture model. Assumes K >= 1, values.length >= 1,
* and values.length = weights.length.
*/
public MM buildModel(final int K, final double[] values, final double[] weights) {
// Initialize model using k-means
MM model = null;
double bestMSE = Double.MAX_VALUE;
int numAttempts = 0;
while (numAttempts < 5) {
// Initialize model
MM tempModel = new UnivariateMixtureEstimator().new MM();
tempModel.initializeModel(K, values, weights, this.m_Random);
// Run k-means until MSE converges
double oldMSE = Double.MAX_VALUE;
double MSE = tempModel.MSE();
if (this.m_Debug) {
System.err.println("MSE: " + MSE);
}
double[][] probs = new double[tempModel.m_K][values.length];
while (Utils.sm(MSE, oldMSE)) {
// Compute memberships
for (int j = 0; j < probs.length; j++) {
Arrays.fill(probs[j], 0);
}
for (int i = 0; i < values.length; i++) {
probs[tempModel.nearestMean(values[i])][i] = 1.0;
}
// Estimate parameters
tempModel.estimateParameters(values, weights, probs);
// Compute MSE for updated model
oldMSE = MSE;
MSE = tempModel.MSE();
if (this.m_Debug) {
System.err.println("MSE: " + MSE);
}
}
if (MSE < bestMSE) {
bestMSE = MSE;
model = tempModel;
}
if (this.m_Debug) {
System.err.println("Best MSE: " + bestMSE);
}
numAttempts++;
}
// Run until likelihood converges
double oldLogLikelihood = -Double.MAX_VALUE;
double loglikelihood = model.loglikelihood(values, weights);
double[][] probs = new double[model.m_K][values.length];
while (Utils.gr(loglikelihood, oldLogLikelihood)) {
// Establish membership probabilities
for (int i = 0; i < values.length; i++) {
double[] p = Utils.logs2probs(model.logJointDensities(values[i]));
for (int j = 0; j < p.length; j++) {
probs[j][i] = p[j];
}
}
// Estimate parameters
model.estimateParameters(values, weights, probs);
// Compute loglikelihood for updated model
oldLogLikelihood = loglikelihood;
loglikelihood = model.loglikelihood(values, weights);
}
return model;
}
/**
* Creates a new dataset of the same size using random sampling with
* replacement according to the given weight vector. The weights in the
* returned dataset are set to the number of times each value was sampled.
*/
public double[][] resampleWithWeights(final Random random, final boolean[] sampled) {
// Walker's method, see pp. 232 of "Stochastic Simulation" by B.D. Ripley
double[] P = new double[this.m_Weights.length];
System.arraycopy(this.m_Weights, 0, P, 0, this.m_Weights.length);
Utils.normalize(P);
double[] Q = new double[this.m_Weights.length];
int[] A = new int[this.m_Weights.length];
int[] W = new int[this.m_Weights.length];
int M = this.m_Weights.length;
int NN = -1;
int NP = M;
for (int I = 0; I < M; I++) {
if (P[I] < 0) {
throw new IllegalArgumentException("Weights have to be positive.");
}
Q[I] = M * P[I];
if (Q[I] < 1.0) {
W[++NN] = I;
} else {
W[--NP] = I;
}
}
if (NN > -1 && NP < M) {
for (int S = 0; S < M - 1; S++) {
int I = W[S];
int J = W[NP];
A[I] = J;
Q[J] += Q[I] - 1.0;
if (Q[J] < 1.0) {
NP++;
}
if (NP >= M) {
break;
}
}
// A[W[M]] = W[M];
}
for (int I = 0; I < M; I++) {
Q[I] += I;
}
// Do we need to keep track of how many copies to use?
int[] counts = new int[M];
int count = 0;
for (int i = 0; i < this.m_Weights.length; i++) {
int ALRV;
double U = M * random.nextDouble();
int I = (int) U;
if (U < Q[I]) {
ALRV = I;
} else {
ALRV = A[I];
}
counts[ALRV]++;
if (!sampled[ALRV]) {
sampled[ALRV] = true;
count++;
}
}
// Generate output
double[][] output = new double[2][count];
int index = 0;
for (int i = 0; i < M; i++) {
if (counts[i] > 0) {
output[0][index] = this.m_Values[i];
output[1][index] = counts[i];
index++;
}
}
return output;
}
/**
* Selects the number of components using leave-one-out Bootstrap, estimating loglikelihood.
*
* @return the number of components to use
*/
protected int findNumComponentsUsingBootStrap() {
if (this.m_NumComponents > 0) {
return this.m_NumComponents;
}
if (this.m_MaxNumComponents <= 1) {
return 1;
}
double bestLogLikelihood = -Double.MAX_VALUE;
int bestNumComponents = 1;
for (int i = 1; i <= this.m_MaxNumComponents; i++) {
double logLikelihood = 0;
for (int k = 0; k < this.m_NumBootstrapRuns; k++) {
boolean[] inBag = new boolean[this.m_NumValues];
double[][] output = this.resampleWithWeights(this.m_Random, inBag);
MM mixtureModel = this.buildModel(i, output[0], output[1]);
double locLogLikelihood = 0;
double totalWeight = 0;
for (int j = 0; j < this.m_NumValues; j++) {
if (!inBag[j]) {
double weight = this.m_Weights[j];
locLogLikelihood += weight * mixtureModel.logDensity(this.m_Values[j]);
totalWeight += weight;
}
}
locLogLikelihood /= totalWeight;
logLikelihood += locLogLikelihood;
}
logLikelihood /= this.m_NumBootstrapRuns;
if (this.m_Debug) {
System.err.println("Loglikelihood: " + logLikelihood + "\tNumber of components: " + i);
}
if (logLikelihood > bestLogLikelihood) {
bestNumComponents = i;
bestLogLikelihood = logLikelihood;
}
}
return bestNumComponents;
}
/**
* Calculates the entropy for the given model and data.
*/
protected double entropy(final MM mixtureModel) {
double entropy = 0;
for (int j = 0; j < this.m_NumValues; j++) {
entropy += this.m_Weights[j] * ContingencyTables.entropy(Utils.logs2probs(mixtureModel.logJointDensities(this.m_Values[j])));
}
entropy *= Utils.log2; // Need natural logarithm, not base-2 logarithm
return entropy / this.m_NumValues;
}
/**
* Selects the number of components using normalized entropy.
*
* @return the model to use
*/
protected MM findModelUsingNormalizedEntropy() {
if (this.m_NumComponents > 0) {
return this.buildModel(this.m_NumComponents, this.m_Values, this.m_Weights);
}
if (this.m_MaxNumComponents <= 1) {
return this.buildModel(1, this.m_Values, this.m_Weights);
}
// Loglikelihood for one cluster
MM bestMixtureModel = this.buildModel(1, this.m_Values, this.m_Weights);
double loglikelihoodForOneCluster = bestMixtureModel.loglikelihood(this.m_Values, this.m_Weights);
double bestNormalizedEntropy = 1;
for (int i = 2; i <= this.m_MaxNumComponents; i++) {
MM mixtureModel = this.buildModel(i, this.m_Values, this.m_Weights);
double loglikelihood = mixtureModel.loglikelihood(this.m_Values, this.m_Weights);
if (loglikelihood < loglikelihoodForOneCluster) {
// This appears to happen in practice, hopefully not because of a bug...
if (this.m_Debug) {
System.err.println("Likelihood for one cluster greater than for " + i + " clusters.");
}
continue;
}
double entropy = this.entropy(mixtureModel);
double normalizedEntropy = entropy / (loglikelihood - loglikelihoodForOneCluster);
if (this.m_Debug) {
System.err.println(
"Entropy: " + entropy + "\tLogLikelihood: " + loglikelihood + "\tLoglikelihood for one cluster: " + loglikelihoodForOneCluster + "\tNormalized entropy: " + normalizedEntropy + "\tNumber of components: " + i);
}
if (normalizedEntropy < bestNormalizedEntropy) {
bestMixtureModel = mixtureModel;
bestNormalizedEntropy = normalizedEntropy;
}
}
return bestMixtureModel;
}
/**
* Updates the model based on the current data.
* Uses the leave-one-out bootstrap, or alternatively normalized entropy, to choose the number of components.
*/
protected void updateModel() {
if (this.m_MixtureModel != null) {
return;
} else if (this.m_NumValues > 0) {
// Shrink arrays if necessary
if (this.m_Values.length > this.m_NumValues) {
double[] values = new double[this.m_NumValues];
double[] weights = new double[this.m_NumValues];
System.arraycopy(this.m_Values, 0, values, 0, this.m_NumValues);
System.arraycopy(this.m_Weights, 0, weights, 0, this.m_NumValues);
this.m_Values = values;
this.m_Weights = weights;
}
if (this.m_UseNormalizedEntropy) {
this.m_MixtureModel = this.findModelUsingNormalizedEntropy();
} else {
this.m_MixtureModel = this.buildModel(this.findNumComponentsUsingBootStrap(), this.m_Values, this.m_Weights);
}
}
}
/**
* Returns the interval for the given confidence value.
*
* @param conf the confidence value in the interval [0, 1]
* @return the interval
* @throws InterruptedException
*/
@Override
public double[][] predictIntervals(final double conf) throws InterruptedException {
this.updateModel();
return this.m_MixtureModel.predictIntervals(conf);
}
/**
* Returns the quantile for the given percentage.
*
* @param percentage the percentage
* @return the quantile
*/
@Override
public double predictQuantile(final double percentage) {
this.updateModel();
return this.m_MixtureModel.predictQuantile(percentage);
}
/**
* Returns the natural logarithm of the density estimate at the given
* point.
*
* @param value the value at which to evaluate
* @return the natural logarithm of the density estimate at the given
* value
*/
@Override
public double logDensity(final double value) {
this.updateModel();
if (this.m_MixtureModel == null) {
return Math.log(Double.MIN_VALUE);
}
return this.m_MixtureModel.logDensity(value);
}
/**
* Returns textual description of this estimator.
*/
@Override
public String toString() {
this.updateModel();
if (this.m_MixtureModel == null) {
return "";
}
return this.m_MixtureModel.toString();
}
/**
* Returns an enumeration that lists the command-line options that are available
*
* @return the list of options as an enumeration
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> options = new Vector<Option>();
options.addElement(new Option("\tNumber of components to use (default: -1).", "N", 1, "-N"));
options.addElement(new Option("\tMaximum number of components to use (default: 5).", "M", 1, "-M"));
options.addElement(new Option("\tSeed for the random number generator (default: 1).", "S", 1, "-S"));
options.addElement(new Option("\tThe number of bootstrap runs to use (default: 10).", "B", 1, "-B"));
options.addElement(new Option("\tUse normalized entropy instead of bootstrap.", "E", 0, "-E"));
return options.elements();
}
/**
* Sets options based on the given array of strings.
*
* @param options the list of options to parse
*/
@Override
public void setOptions(final String[] options) throws Exception {
String optionString = Utils.getOption("N", options);
if (optionString.length() > 0) {
this.setNumComponents(Integer.parseInt(optionString));
} else {
this.setNumComponents(-1);
}
optionString = Utils.getOption("M", options);
if (optionString.length() > 0) {
this.setMaxNumComponents(Integer.parseInt(optionString));
} else {
this.setMaxNumComponents(5);
}
optionString = Utils.getOption("S", options);
if (optionString.length() > 0) {
this.setSeed(Integer.parseInt(optionString));
} else {
this.setSeed(1);
}
optionString = Utils.getOption("B", options);
if (optionString.length() > 0) {
this.setNumBootstrapRuns(Integer.parseInt(optionString));
} else {
this.setNumBootstrapRuns(10);
}
this.m_UseNormalizedEntropy = Utils.getFlag("E", options);
Utils.checkForRemainingOptions(options);
}
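/*
* Example of the option format parsed above (illustrative only, the values are
* arbitrary): the same flags advertised by listOptions() can be passed as a
* string array, e.g.
*
* UnivariateMixtureEstimator est = new UnivariateMixtureEstimator();
* est.setOptions(new String[] {"-M", "5", "-S", "1", "-E"});
*
* which allows at most 5 components, uses random seed 1 and, because of -E,
* selects the number of components by normalized entropy instead of the
* leave-one-out bootstrap.
*/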
/**
* Returns the current set of options.
*
* @return the current set of options as a string
*/
@Override
public String[] getOptions() {
Vector<String> options = new Vector<String>();
options.add("-N");
options.add("" + this.getNumComponents());
options.add("-M");
options.add("" + this.getMaxNumComponents());
options.add("-S");
options.add("" + this.getSeed());
options.add("-B");
options.add("" + this.getNumBootstrapRuns());
if (this.m_UseNormalizedEntropy) {
options.add("-E");
}
return options.toArray(new String[0]);
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision: 10971 $");
}
/**
* Main method, used for testing this class.
*/
public static void main(final String[] args) throws Exception {
// Get random number generator initialized by system
Random r = new Random();
// Create density estimator
UnivariateMixtureEstimator e = new UnivariateMixtureEstimator();
e.setOptions(Arrays.copyOf(args, args.length));
// Output the density estimator
System.out.println(e);
// Monte Carlo integration
double sum = 0;
for (int i = 0; i < 100000; i++) {
sum += Math.exp(e.logDensity(r.nextDouble() * 10.0 - 5.0));
}
System.out.println("Approximate integral: " + 10.0 * sum / 100000);
// Add Gaussian values into it
for (int i = 0; i < 100000; i++) {
e.addValue(r.nextGaussian() * 0.5 - 1, 1);
e.addValue(r.nextGaussian() * 0.5 + 1, 3);
}
// Output the density estimator
System.out.println(e);
// Monte Carlo integration
sum = 0;
for (int i = 0; i < 100000; i++) {
sum += Math.exp(e.logDensity(r.nextDouble() * 10.0 - 5.0));
}
System.out.println("Approximate integral: " + 10.0 * sum / 100000);
// Create density estimator
e = new UnivariateMixtureEstimator();
e.setOptions(Arrays.copyOf(args, args.length));
// Add Gaussian values into it
for (int i = 0; i < 100000; i++) {
e.addValue(r.nextGaussian() * 0.5 - 1, 1);
e.addValue(r.nextGaussian() * 0.5 + 1, 1);
e.addValue(r.nextGaussian() * 0.5 + 1, 1);
e.addValue(r.nextGaussian() * 0.5 + 1, 1);
}
// Output the density estimator
System.out.println(e);
// Monte Carlo integration
sum = 0;
for (int i = 0; i < 100000; i++) {
sum += Math.exp(e.logDensity(r.nextDouble() * 10.0 - 5.0));
}
System.out.println("Approximate integral: " + 10.0 * sum / 100000);
// Create density estimator
e = new UnivariateMixtureEstimator();
e.setOptions(Arrays.copyOf(args, args.length));
// Add Gaussian values into it
for (int i = 0; i < 100000; i++) {
e.addValue(r.nextGaussian() * 5.0 + 3.0, 1);
}
// Output the density estimator
System.out.println(e);
// Check interval estimates
double[][] intervals = e.predictIntervals(0.95);
System.out.println("Lower: " + intervals[0][0] + " Upper: " + intervals[0][1]);
double covered = 0;
for (int i = 0; i < 100000; i++) {
double val = r.nextGaussian() * 5.0 + 3.0;
if (val >= intervals[0][0] && val <= intervals[0][1]) {
covered++;
}
}
System.out.println("Coverage: " + covered / 100000);
intervals = e.predictIntervals(0.8);
System.out.println("Lower: " + intervals[0][0] + " Upper: " + intervals[0][1]);
covered = 0;
for (int i = 0; i < 100000; i++) {
double val = r.nextGaussian() * 5.0 + 3.0;
if (val >= intervals[0][0] && val <= intervals[0][1]) {
covered++;
}
}
System.out.println("Coverage: " + covered / 100000);
// Output quantile
System.out.println("95% quantile: " + e.predictQuantile(0.95));
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/UnivariateNormalEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* UnivariateNormalEstimator.java
* Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
import java.io.Serializable;
import java.util.Random;
import weka.core.RevisionUtils;
import weka.core.Statistics;
/**
* Simple weighted normal density estimator.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public class UnivariateNormalEstimator implements UnivariateDensityEstimator,
UnivariateIntervalEstimator,
UnivariateQuantileEstimator,
Serializable {
/** For serialization */
private static final long serialVersionUID = -1669009817825826548L;
/** The weighted sum of values */
protected double m_WeightedSum = 0;
/** The weighted sum of squared values */
protected double m_WeightedSumSquared = 0;
/** The weight of the values collected so far */
protected double m_SumOfWeights = 0;
/** The mean value (only updated when needed) */
protected double m_Mean = 0;
/** The variance (only updated when needed) */
protected double m_Variance = Double.MAX_VALUE;
/** The minimum allowed value of the variance (default: 1.0E-6 * 1.0E-6) */
protected double m_MinVar = 1.0E-6 * 1.0E-6;
/** Constant for Gaussian density */
public static final double CONST = Math.log(2 * Math.PI);
/**
* Returns a string describing the estimator.
*/
public String globalInfo() {
return "Estimates a univariate normal density.";
}
/**
* Adds a value to the density estimator.
*
* @param value the value to add
* @param weight the weight of the value
*/
public void addValue(double value, double weight) {
m_WeightedSum += value * weight;
m_WeightedSumSquared += value * value * weight;
m_SumOfWeights += weight;
}
/**
* Updates mean and variance based on sufficient statistics.
* Variance is set to m_MinVar if it becomes smaller than that
* value. It is set to Double.MAX_VALUE if the sum of weights is
* zero.
*/
protected void updateMeanAndVariance() {
// Compute mean
m_Mean = 0;
if (m_SumOfWeights > 0) {
m_Mean = m_WeightedSum / m_SumOfWeights;
}
// Compute variance
m_Variance = Double.MAX_VALUE;
if (m_SumOfWeights > 0) {
m_Variance = m_WeightedSumSquared / m_SumOfWeights - m_Mean * m_Mean;
}
// Hack for case where variance is 0
if (m_Variance <= m_MinVar) {
m_Variance = m_MinVar;
}
}
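// The sufficient statistics above yield the usual weighted estimates:
//   mean     = sum_i(w_i * x_i) / sum_i(w_i)
//   variance = sum_i(w_i * x_i^2) / sum_i(w_i) - mean^2
// For example, values {1, 3} with unit weights give mean = 2 and
// variance = 5 - 4 = 1.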
/**
* Returns the interval for the given confidence value.
*
* @param conf the confidence value in the interval [0, 1]
* @return the interval
*/
public double[][] predictIntervals(double conf) {
updateMeanAndVariance();
double val = Statistics.normalInverse(1.0 - (1.0 - conf) / 2.0);
double[][] arr = new double[1][2];
arr[0][1] = m_Mean + val * Math.sqrt(m_Variance);
arr[0][0] = m_Mean - val * Math.sqrt(m_Variance);
return arr;
}
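// Example: for conf = 0.95, Statistics.normalInverse(0.975) is roughly 1.96,
// so the returned interval is mean +/- 1.96 * sqrt(variance), the familiar
// two-sided 95% interval of a normal distribution.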
/**
* Returns the quantile for the given percentage.
*
* @param percentage the percentage
* @return the quantile
*/
public double predictQuantile(double percentage) {
updateMeanAndVariance();
return m_Mean + Statistics.normalInverse(percentage) * Math.sqrt(m_Variance);
}
/**
* Returns the natural logarithm of the density estimate at the given
* point.
*
* @param value the value at which to evaluate
* @return the natural logarithm of the density estimate at the given
* value
*/
public double logDensity(double value) {
updateMeanAndVariance();
// Return natural logarithm of density
double val = -0.5 * (CONST + Math.log(m_Variance) +
(value - m_Mean) * (value - m_Mean) / m_Variance);
return val;
}
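// This is the log of the normal density,
//   log p(x) = -0.5 * (log(2 * pi) + log(variance) + (x - mean)^2 / variance),
// i.e. the log of 1 / sqrt(2 * pi * variance) * exp(-(x - mean)^2 / (2 * variance)).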
/**
* Returns textual description of this estimator.
*/
public String toString() {
updateMeanAndVariance();
return "Mean: " + m_Mean + "\t" + "Variance: " + m_Variance;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method, used for testing this class.
*/
public static void main(String[] args) {
// Get random number generator initialized by system
Random r = new Random();
// Create density estimator
UnivariateNormalEstimator e = new UnivariateNormalEstimator();
// Output the density estimator
System.out.println(e);
// Monte Carlo integration
double sum = 0;
for (int i = 0; i < 100000; i++) {
sum += Math.exp(e.logDensity(r.nextDouble() * 10.0 - 5.0));
}
System.out.println("Approximate integral: " + 10.0 * sum / 100000);
// Add Gaussian values into it
for (int i = 0; i < 100000; i++) {
e.addValue(r.nextGaussian(), 1);
e.addValue(r.nextGaussian() * 2.0, 3);
}
// Output the density estimator
System.out.println(e);
// Monte Carlo integration
sum = 0;
for (int i = 0; i < 100000; i++) {
sum += Math.exp(e.logDensity(r.nextDouble() * 10.0 - 5.0));
}
System.out.println("Approximate integral: " + 10.0 * sum / 100000);
// Create density estimator
e = new UnivariateNormalEstimator();
// Add Gaussian values into it
for (int i = 0; i < 100000; i++) {
e.addValue(r.nextGaussian(), 1);
e.addValue(r.nextGaussian() * 2.0, 1);
e.addValue(r.nextGaussian() * 2.0, 1);
e.addValue(r.nextGaussian() * 2.0, 1);
}
// Output the density estimator
System.out.println(e);
// Monte Carlo integration
sum = 0;
for (int i = 0; i < 100000; i++) {
sum += Math.exp(e.logDensity(r.nextDouble() * 10.0 - 5.0));
}
System.out.println("Approximate integral: " + 10.0 * sum / 100000);
// Create density estimator
e = new UnivariateNormalEstimator();
// Add Gaussian values into it
for (int i = 0; i < 100000; i++) {
e.addValue(r.nextGaussian() * 5.0 + 3.0, 1);
}
// Output the density estimator
System.out.println(e);
// Check interval estimates
double[][] intervals = e.predictIntervals(0.95);
System.out.println("Lower: " + intervals[0][0] + " Upper: " + intervals[0][1]);
double covered = 0;
for (int i = 0; i < 100000; i++) {
double val = r.nextGaussian() * 5.0 + 3.0;
if (val >= intervals[0][0] && val <= intervals[0][1]) {
covered++;
}
}
System.out.println("Coverage: " + covered / 100000);
intervals = e.predictIntervals(0.8);
System.out.println("Lower: " + intervals[0][0] + " Upper: " + intervals[0][1]);
covered = 0;
for (int i = 0; i < 100000; i++) {
double val = r.nextGaussian() * 5.0 + 3.0;
if (val >= intervals[0][0] && val <= intervals[0][1]) {
covered++;
}
}
System.out.println("Coverage: " + covered / 100000);
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/estimators/UnivariateQuantileEstimator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* UnivariateQuantileEstimator.java
* Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.estimators;
/**
* Interface that can be implemented by simple weighted univariate
* quantile estimators.
*
* @author Eibe Frank (eibe@cs.waikato.ac.nz)
* @version $Revision$
*/
public interface UnivariateQuantileEstimator {
/**
* Adds a value to the interval estimator.
*
* @param value the value to add
* @param weight the weight of the value
*/
void addValue(double value, double weight);
/**
* Returns the quantile for the given percentage.
*
* @param quantile the percentage for which to return the quantile
* @return the quantile
*/
double predictQuantile(double quantile);
}
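// A small usage sketch (hypothetical values) using the UnivariateNormalEstimator
// defined earlier in this library, which implements this interface:
//   UnivariateQuantileEstimator est = new UnivariateNormalEstimator();
//   est.addValue(1.0, 1);
//   est.addValue(3.0, 1);
//   double median = est.predictQuantile(0.5); // approximately 2.0 for the fitted normal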
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/AveragingResultProducer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* AveragingResultProducer.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Hashtable;
import java.util.Vector;
import weka.core.AdditionalMeasureProducer;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> Takes the results from a ResultProducer and submits
* the average to the result listener. Normally used with a
* CrossValidationResultProducer to perform n x m fold cross validation. For
* non-numeric result fields, the first value is used.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -F <field name>
* The name of the field to average over.
* (default "Fold")
* </pre>
*
* <pre>
* -X <num results>
* The number of results expected per average.
* (default 10)
* </pre>
*
* <pre>
* -S
* Calculate standard deviations.
* (default only averages)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a ResultProducer.
* eg: weka.experiment.CrossValidationResultProducer
* </pre>
*
* <pre>
* Options specific to result producer weka.experiment.CrossValidationResultProducer:
* </pre>
*
* <pre>
* -X <number of folds>
* The number of folds to use for the cross-validation.
* (default 10)
* </pre>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
* If a directory name is specified then individual
* outputs will be gzipped, otherwise all output will be
* zipped to the named file. Use in conjunction with -D. (default splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the result producer.
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class AveragingResultProducer implements ResultListener, ResultProducer,
OptionHandler, AdditionalMeasureProducer, RevisionHandler {
/** for serialization */
static final long serialVersionUID = 2551284958501991352L;
/** The dataset of interest */
protected Instances m_Instances;
/** The ResultListener to send results to */
protected ResultListener m_ResultListener = new CSVResultListener();
/** The ResultProducer used to generate results */
protected ResultProducer m_ResultProducer = new CrossValidationResultProducer();
/** The names of any additional measures to look for in SplitEvaluators */
protected String[] m_AdditionalMeasures = null;
/** The number of results expected to average over for each run */
protected int m_ExpectedResultsPerAverage = 10;
/** True if standard deviation fields should be produced */
protected boolean m_CalculateStdDevs;
/**
* The name of the field that will contain the number of results averaged
* over.
*/
protected String m_CountFieldName = "Num_"
+ CrossValidationResultProducer.FOLD_FIELD_NAME;
/** The name of the key field to average over */
protected String m_KeyFieldName = CrossValidationResultProducer.FOLD_FIELD_NAME;
/** The index of the field to average over in the resultproducers key */
protected int m_KeyIndex = -1;
/** Collects the keys from a single run */
protected ArrayList<Object[]> m_Keys = new ArrayList<Object[]>();
/** Collects the results from a single run */
protected ArrayList<Object[]> m_Results = new ArrayList<Object[]>();
/**
* Returns a string describing this result producer
*
* @return a description of the result producer suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "Takes the results from a ResultProducer "
+ "and submits the average to the result listener. Normally used with "
+ "a CrossValidationResultProducer to perform n x m fold cross "
+ "validation. For non-numeric result fields, the first value is used.";
}
/**
* Scans through the key field names of the result producer to find the index
* of the key field to average over. Sets the value of m_KeyIndex to the
* index, or -1 if no matching key field was found.
*
* @return the index of the key field to average over
*/
protected int findKeyIndex() {
m_KeyIndex = -1;
try {
if (m_ResultProducer != null) {
String[] keyNames = m_ResultProducer.getKeyNames();
for (int i = 0; i < keyNames.length; i++) {
if (keyNames[i].equals(m_KeyFieldName)) {
m_KeyIndex = i;
break;
}
}
}
} catch (Exception ex) {
}
return m_KeyIndex;
}
/**
* Determines if there are any constraints (imposed by the destination) on the
* result columns to be produced by resultProducers. Null should be returned
* if there are NO constraints, otherwise a list of column names should be
* returned as an array of Strings.
*
* @param rp the ResultProducer to which the constraints will apply
* @return an array of column names to which resultProducer's results will be
* restricted.
* @throws Exception if constraints can't be determined
*/
@Override
public String[] determineColumnConstraints(ResultProducer rp)
throws Exception {
return null;
}
/**
* Simulates a run to collect the keys the sub-resultproducer could generate.
* Does some checking on the keys and determines the template key.
*
* @param run the run number
* @return a template key (null for the field being averaged)
* @throws Exception if an error occurs
*/
protected Object[] determineTemplate(int run) throws Exception {
if (m_Instances == null) {
throw new Exception("No Instances set");
}
m_ResultProducer.setInstances(m_Instances);
// Clear the collected results
m_Keys.clear();
m_Results.clear();
m_ResultProducer.doRunKeys(run);
checkForMultipleDifferences();
Object[] template = m_Keys.get(0).clone();
template[m_KeyIndex] = null;
// Check for duplicate keys
checkForDuplicateKeys(template);
return template;
}
/**
* Gets the keys for a specified run number. Different run numbers correspond
* to different randomizations of the data. Keys produced should be sent to
* the current ResultListener
*
* @param run the run number to get keys for.
* @throws Exception if a problem occurs while getting the keys
*/
@Override
public void doRunKeys(int run) throws Exception {
// Generate the template
Object[] template = determineTemplate(run);
String[] newKey = new String[template.length - 1];
System.arraycopy(template, 0, newKey, 0, m_KeyIndex);
System.arraycopy(template, m_KeyIndex + 1, newKey, m_KeyIndex,
template.length - m_KeyIndex - 1);
m_ResultListener.acceptResult(this, newKey, null);
}
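// The two arraycopy calls above drop the key entry at m_KeyIndex (the field
// being averaged over), yielding a key with one fewer column; the same pattern
// recurs in doRun(), doAverageResult(), getKeyNames() and getKeyTypes() below.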
/**
* Gets the results for a specified run number. Different run numbers
* correspond to different randomizations of the data. Results produced should
* be sent to the current ResultListener
*
* @param run the run number to get results for.
* @throws Exception if a problem occurs while getting the results
*/
@Override
public void doRun(int run) throws Exception {
// Generate the key and ask whether the result is required
Object[] template = determineTemplate(run);
String[] newKey = new String[template.length - 1];
System.arraycopy(template, 0, newKey, 0, m_KeyIndex);
System.arraycopy(template, m_KeyIndex + 1, newKey, m_KeyIndex,
template.length - m_KeyIndex - 1);
if (m_ResultListener.isResultRequired(this, newKey)) {
// Clear the collected keys
m_Keys.clear();
m_Results.clear();
m_ResultProducer.doRun(run);
// Average the results collected
// System.err.println("Number of results collected: " + m_Keys.size());
// Check that the keys only differ on the selected key field
checkForMultipleDifferences();
template = m_Keys.get(0).clone();
template[m_KeyIndex] = null;
// Check for duplicate keys
checkForDuplicateKeys(template);
// Calculate the average and submit it if necessary
doAverageResult(template);
}
}
/**
* Compares a key to a template to see whether they match. Null fields in the
* template are ignored in the matching.
*
* @param template the template to match against
* @param test the key to test
* @return true if the test key matches the template on all non-null template
* fields
*/
protected boolean matchesTemplate(Object[] template, Object[] test) {
if (template.length != test.length) {
return false;
}
for (int i = 0; i < test.length; i++) {
if ((template[i] != null) && (!template[i].equals(test[i]))) {
return false;
}
}
return true;
}
/**
* Asks the resultlistener whether an average result is required, and if so,
* calculates it.
*
* @param template the template to match keys against when calculating the
* average
* @throws Exception if an error occurs
*/
protected void doAverageResult(Object[] template) throws Exception {
// Generate the key and ask whether the result is required
String[] newKey = new String[template.length - 1];
System.arraycopy(template, 0, newKey, 0, m_KeyIndex);
System.arraycopy(template, m_KeyIndex + 1, newKey, m_KeyIndex,
template.length - m_KeyIndex - 1);
if (m_ResultListener.isResultRequired(this, newKey)) {
Object[] resultTypes = m_ResultProducer.getResultTypes();
Stats[] stats = new Stats[resultTypes.length];
for (int i = 0; i < stats.length; i++) {
stats[i] = new Stats();
}
Object[] result = getResultTypes();
int numMatches = 0;
for (int i = 0; i < m_Keys.size(); i++) {
Object[] currentKey = m_Keys.get(i);
// Skip non-matching keys
if (!matchesTemplate(template, currentKey)) {
continue;
}
// Add the results to the stats accumulator
Object[] currentResult = m_Results.get(i);
numMatches++;
for (int j = 0; j < resultTypes.length; j++) {
if (resultTypes[j] instanceof Double) {
if (currentResult[j] == null) {
// set the stats object for this result to null---
// more than likely this is an additional measure field
// not supported by the low level split evaluator
if (stats[j] != null) {
stats[j] = null;
}
/*
* throw new Exception("Null numeric result field found:\n" +
* DatabaseUtils.arrayToString(currentKey) + " -- " +
* DatabaseUtils .arrayToString(currentResult));
*/
}
if (stats[j] != null) {
double currentVal = ((Double) currentResult[j]).doubleValue();
stats[j].add(currentVal);
}
}
}
}
if (numMatches != m_ExpectedResultsPerAverage) {
throw new Exception("Expected " + m_ExpectedResultsPerAverage
+ " results matching key \"" + DatabaseUtils.arrayToString(template)
+ "\" but got " + numMatches);
}
result[0] = new Double(numMatches);
Object[] currentResult = m_Results.get(0);
int k = 1;
for (int j = 0; j < resultTypes.length; j++) {
if (resultTypes[j] instanceof Double) {
if (stats[j] != null) {
stats[j].calculateDerived();
result[k++] = new Double(stats[j].mean);
} else {
result[k++] = null;
}
if (getCalculateStdDevs()) {
if (stats[j] != null) {
result[k++] = new Double(stats[j].stdDev);
} else {
result[k++] = null;
}
}
} else {
result[k++] = currentResult[j];
}
}
m_ResultListener.acceptResult(this, newKey, result);
}
}
/**
* Checks whether any duplicate results (with respect to a key template) were
* received.
*
* @param template the template key.
* @throws Exception if duplicate results are detected
*/
protected void checkForDuplicateKeys(Object[] template) throws Exception {
Hashtable<Object, Object> hash = new Hashtable<Object, Object>();
int numMatches = 0;
for (int i = 0; i < m_Keys.size(); i++) {
Object[] current = m_Keys.get(i);
// Skip non-matching keys
if (!matchesTemplate(template, current)) {
continue;
}
if (hash.containsKey(current[m_KeyIndex])) {
throw new Exception("Duplicate result received:"
+ DatabaseUtils.arrayToString(current));
}
numMatches++;
hash.put(current[m_KeyIndex], current[m_KeyIndex]);
}
if (numMatches != m_ExpectedResultsPerAverage) {
throw new Exception("Expected " + m_ExpectedResultsPerAverage
+ " results matching key \"" + DatabaseUtils.arrayToString(template)
+ "\" but got " + numMatches);
}
}
/**
* Checks that the keys for a run only differ in one key field. If they differ
* in more than one field, a more sophisticated averager will submit multiple
* results - for now an exception is thrown. Currently assumes that the most
* differences will be shown between the first and last result received.
*
* @throws Exception if the keys differ on fields other than the key averaging
* field
*/
protected void checkForMultipleDifferences() throws Exception {
Object[] firstKey = m_Keys.get(0);
Object[] lastKey = m_Keys.get(m_Keys.size() - 1);
/*
* System.err.println("First key:" + DatabaseUtils.arrayToString(firstKey));
* System.err.println("Last key :" + DatabaseUtils.arrayToString(lastKey));
*/
for (int i = 0; i < firstKey.length; i++) {
if ((i != m_KeyIndex) && !firstKey[i].equals(lastKey[i])) {
throw new Exception("Keys differ on fields other than \""
+ m_KeyFieldName + "\" -- time to implement multiple averaging");
}
}
}
/**
* Prepare for the results to be received.
*
* @param rp the ResultProducer that will generate the results
* @throws Exception if an error occurs during preprocessing.
*/
@Override
public void preProcess(ResultProducer rp) throws Exception {
if (m_ResultListener == null) {
throw new Exception("No ResultListener set");
}
m_ResultListener.preProcess(this);
}
/**
* Prepare to generate results. The ResultProducer should call
* preProcess(this) on the ResultListener it is to send results to.
*
* @throws Exception if an error occurs during preprocessing.
*/
@Override
public void preProcess() throws Exception {
if (m_ResultProducer == null) {
throw new Exception("No ResultProducer set");
}
// Tell the resultproducer to send results to us
m_ResultProducer.setResultListener(this);
findKeyIndex();
if (m_KeyIndex == -1) {
throw new Exception("No key field called " + m_KeyFieldName
+ " produced by " + m_ResultProducer.getClass().getName());
}
m_ResultProducer.preProcess();
}
/**
* When this method is called, it indicates that no more results will be sent
* that need to be grouped together in any way.
*
* @param rp the ResultProducer that generated the results
* @throws Exception if an error occurs
*/
@Override
public void postProcess(ResultProducer rp) throws Exception {
m_ResultListener.postProcess(this);
}
/**
* When this method is called, it indicates that no more requests to generate
* results for the current experiment will be sent. The ResultProducer should
* call preProcess(this) on the ResultListener it is to send results to.
*
* @throws Exception if an error occurs
*/
@Override
public void postProcess() throws Exception {
m_ResultProducer.postProcess();
}
/**
* Accepts results from a ResultProducer.
*
* @param rp the ResultProducer that generated the results
* @param key an array of Objects (Strings or Doubles) that uniquely identify
* a result for a given ResultProducer with given compatibilityState
* @param result the results stored in an array. The objects stored in the
* array may be Strings, Doubles, or null (for the missing value).
* @throws Exception if the result could not be accepted.
*/
@Override
public void acceptResult(ResultProducer rp, Object[] key, Object[] result)
throws Exception {
if (m_ResultProducer != rp) {
throw new Error("Unrecognized ResultProducer sending results!!");
}
m_Keys.add(key);
m_Results.add(result);
}
/**
* Determines whether the results for a specified key must be generated.
*
* @param rp the ResultProducer wanting to generate the results
* @param key an array of Objects (Strings or Doubles) that uniquely identify
* a result for a given ResultProducer with given compatibilityState
* @return true if the result should be generated
* @throws Exception if it could not be determined if the result is needed.
*/
@Override
public boolean isResultRequired(ResultProducer rp, Object[] key)
throws Exception {
if (m_ResultProducer != rp) {
throw new Error("Unrecognized ResultProducer sending results!!");
}
return true;
}
/**
* Gets the names of each of the columns produced for a single run.
*
* @return an array containing the name of each column
* @throws Exception if key names cannot be generated
*/
@Override
public String[] getKeyNames() throws Exception {
if (m_KeyIndex == -1) {
throw new Exception("No key field called " + m_KeyFieldName
+ " produced by " + m_ResultProducer.getClass().getName());
}
String[] keyNames = m_ResultProducer.getKeyNames();
String[] newKeyNames = new String[keyNames.length - 1];
System.arraycopy(keyNames, 0, newKeyNames, 0, m_KeyIndex);
System.arraycopy(keyNames, m_KeyIndex + 1, newKeyNames, m_KeyIndex,
keyNames.length - m_KeyIndex - 1);
return newKeyNames;
}
/**
* Gets the data types of each of the columns produced for a single run. This
* method should really be static.
*
* @return an array containing objects of the type of each column. The objects
* should be Strings, or Doubles.
* @throws Exception if the key types could not be determined (perhaps because
* of a problem from a nested sub-resultproducer)
*/
@Override
public Object[] getKeyTypes() throws Exception {
if (m_KeyIndex == -1) {
throw new Exception("No key field called " + m_KeyFieldName
+ " produced by " + m_ResultProducer.getClass().getName());
}
Object[] keyTypes = m_ResultProducer.getKeyTypes();
// Find and remove the key field that is being averaged over
Object[] newKeyTypes = new Object[keyTypes.length - 1];
System.arraycopy(keyTypes, 0, newKeyTypes, 0, m_KeyIndex);
System.arraycopy(keyTypes, m_KeyIndex + 1, newKeyTypes, m_KeyIndex,
keyTypes.length - m_KeyIndex - 1);
return newKeyTypes;
}
/**
* Gets the names of each of the columns produced for a single run. A new
* result field is added for the number of results used to produce each
* average. If only averages are being produced the names are not altered, if
* standard deviations are produced then "Dev_" and "Avg_" are prepended to
* each result deviation and average field respectively.
*
* @return an array containing the name of each column
* @throws Exception if the result names could not be determined (perhaps
* because of a problem from a nested sub-resultproducer)
*/
@Override
public String[] getResultNames() throws Exception {
String[] resultNames = m_ResultProducer.getResultNames();
// Add in the names of our extra Result fields
if (getCalculateStdDevs()) {
Object[] resultTypes = m_ResultProducer.getResultTypes();
int numNumeric = 0;
for (Object resultType : resultTypes) {
if (resultType instanceof Double) {
numNumeric++;
}
}
String[] newResultNames = new String[resultNames.length + 1 + numNumeric];
newResultNames[0] = m_CountFieldName;
int j = 1;
for (int i = 0; i < resultNames.length; i++) {
newResultNames[j++] = "Avg_" + resultNames[i];
if (resultTypes[i] instanceof Double) {
newResultNames[j++] = "Dev_" + resultNames[i];
}
}
return newResultNames;
} else {
String[] newResultNames = new String[resultNames.length + 1];
newResultNames[0] = m_CountFieldName;
System.arraycopy(resultNames, 0, newResultNames, 1, resultNames.length);
return newResultNames;
}
}
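// Illustrative mapping (hypothetical result name): with key field "Fold", a
// single numeric result "Percent_correct" and standard deviations enabled,
// the names produced are { "Num_Fold", "Avg_Percent_correct", "Dev_Percent_correct" };
// without -S they are simply { "Num_Fold", "Percent_correct" }.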
/**
* Gets the data types of each of the columns produced for a single run.
*
* @return an array containing objects of the type of each column. The objects
* should be Strings, or Doubles.
* @throws Exception if the result types could not be determined (perhaps
* because of a problem from a nested sub-resultproducer)
*/
@Override
public Object[] getResultTypes() throws Exception {
Object[] resultTypes = m_ResultProducer.getResultTypes();
// Add in the types of our extra Result fields
if (getCalculateStdDevs()) {
int numNumeric = 0;
for (Object resultType : resultTypes) {
if (resultType instanceof Double) {
numNumeric++;
}
}
Object[] newResultTypes = new Object[resultTypes.length + 1 + numNumeric];
newResultTypes[0] = new Double(0);
int j = 1;
for (Object resultType : resultTypes) {
newResultTypes[j++] = resultType;
if (resultType instanceof Double) {
newResultTypes[j++] = new Double(0);
}
}
return newResultTypes;
} else {
Object[] newResultTypes = new Object[resultTypes.length + 1];
newResultTypes[0] = new Double(0);
System.arraycopy(resultTypes, 0, newResultTypes, 1, resultTypes.length);
return newResultTypes;
}
}
/**
* Gets a description of the internal settings of the result producer,
* sufficient for distinguishing a ResultProducer instance from another with
* different settings (ignoring those settings set through this interface).
* For example, a cross-validation ResultProducer may have a setting for the
* number of folds. For a given state, the results produced should be
* compatible. Typically if a ResultProducer is an OptionHandler, this string
* will represent the command line arguments required to set the
* ResultProducer to that state.
*
* @return the description of the ResultProducer state, or null if no state is
* defined
*/
@Override
public String getCompatibilityState() {
String result = // "-F " + Utils.quote(getKeyFieldName())
" -X " + getExpectedResultsPerAverage() + " ";
if (getCalculateStdDevs()) {
result += "-S ";
}
if (m_ResultProducer == null) {
result += "<null ResultProducer>";
} else {
result += "-W " + m_ResultProducer.getClass().getName();
result += " -- " + m_ResultProducer.getCompatibilityState();
}
return result.trim();
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(2);
newVector.addElement(new Option(
"\tThe name of the field to average over.\n" + "\t(default \"Fold\")",
"F", 1, "-F <field name>"));
newVector.addElement(new Option(
"\tThe number of results expected per average.\n" + "\t(default 10)",
"X", 1, "-X <num results>"));
newVector.addElement(new Option("\tCalculate standard deviations.\n"
+ "\t(default only averages)", "S", 0, "-S"));
newVector.addElement(new Option(
"\tThe full class name of a ResultProducer.\n"
+ "\teg: weka.experiment.CrossValidationResultProducer", "W", 1,
"-W <class name>"));
if ((m_ResultProducer != null)
&& (m_ResultProducer instanceof OptionHandler)) {
newVector.addElement(new Option("", "", 0,
"\nOptions specific to result producer "
+ m_ResultProducer.getClass().getName() + ":"));
newVector.addAll(Collections.list(((OptionHandler) m_ResultProducer)
.listOptions()));
}
return newVector.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -F <field name>
* The name of the field to average over.
* (default "Fold")
* </pre>
*
* <pre>
* -X <num results>
* The number of results expected per average.
* (default 10)
* </pre>
*
* <pre>
* -S
* Calculate standard deviations.
* (default only averages)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a ResultProducer.
* eg: weka.experiment.CrossValidationResultProducer
* </pre>
*
* <pre>
* Options specific to result producer weka.experiment.CrossValidationResultProducer:
* </pre>
*
* <pre>
* -X <number of folds>
* The number of folds to use for the cross-validation.
* (default 10)
* </pre>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
* If a directory name is specified then individual
* outputs will be gzipped, otherwise all output will be
* zipped to the named file. Use in conjunction with -D. (default splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the result producer.
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String keyFieldName = Utils.getOption('F', options);
if (keyFieldName.length() != 0) {
setKeyFieldName(keyFieldName);
} else {
setKeyFieldName(CrossValidationResultProducer.FOLD_FIELD_NAME);
}
String numResults = Utils.getOption('X', options);
if (numResults.length() != 0) {
setExpectedResultsPerAverage(Integer.parseInt(numResults));
} else {
setExpectedResultsPerAverage(10);
}
setCalculateStdDevs(Utils.getFlag('S', options));
String rpName = Utils.getOption('W', options);
if (rpName.length() == 0) {
throw new Exception("A ResultProducer must be specified with"
+ " the -W option.");
}
// Do it first without options, so if an exception is thrown during
// the option setting, listOptions will contain options for the actual
// RP.
setResultProducer((ResultProducer) Utils.forName(ResultProducer.class,
rpName, null));
if (getResultProducer() instanceof OptionHandler) {
((OptionHandler) getResultProducer()).setOptions(Utils
.partitionOptions(options));
}
}
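// A hypothetical option string matching the parsing above:
//   -F Fold -X 10 -S -W weka.experiment.CrossValidationResultProducer -- -X 10
// Everything after "--" is partitioned off and handed to the nested result
// producer, as stated in the class documentation.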
/**
* Gets the current settings of the result producer.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
String[] seOptions = new String[0];
if ((m_ResultProducer != null)
&& (m_ResultProducer instanceof OptionHandler)) {
seOptions = ((OptionHandler) m_ResultProducer).getOptions();
}
String[] options = new String[seOptions.length + 8];
int current = 0;
options[current++] = "-F";
options[current++] = "" + getKeyFieldName();
options[current++] = "-X";
options[current++] = "" + getExpectedResultsPerAverage();
if (getCalculateStdDevs()) {
options[current++] = "-S";
}
if (getResultProducer() != null) {
options[current++] = "-W";
options[current++] = getResultProducer().getClass().getName();
}
options[current++] = "--";
System.arraycopy(seOptions, 0, options, current, seOptions.length);
current += seOptions.length;
while (current < options.length) {
options[current++] = "";
}
return options;
}
/**
* Set a list of method names for additional measures to look for in
* SplitEvaluators. This could contain many measures (of which only a subset
* may be producible by the current resultProducer) if an experiment is the
* type that iterates over a set of properties.
*
* @param additionalMeasures an array of measure names, null if none
*/
@Override
public void setAdditionalMeasures(String[] additionalMeasures) {
m_AdditionalMeasures = additionalMeasures;
if (m_ResultProducer != null) {
System.err.println("AveragingResultProducer: setting additional "
+ "measures for " + "ResultProducer");
m_ResultProducer.setAdditionalMeasures(m_AdditionalMeasures);
}
}
/**
* Returns an enumeration of any additional measure names that might be in the
* result producer
*
* @return an enumeration of the measure names
*/
@Override
public Enumeration<String> enumerateMeasures() {
Vector<String> newVector = new Vector<String>();
if (m_ResultProducer instanceof AdditionalMeasureProducer) {
Enumeration<String> en = ((AdditionalMeasureProducer) m_ResultProducer)
.enumerateMeasures();
while (en.hasMoreElements()) {
String mname = en.nextElement();
newVector.addElement(mname);
}
}
return newVector.elements();
}
/**
* Returns the value of the named measure
*
* @param additionalMeasureName the name of the measure to query for its value
* @return the value of the named measure
* @throws IllegalArgumentException if the named measure is not supported
*/
@Override
public double getMeasure(String additionalMeasureName) {
if (m_ResultProducer instanceof AdditionalMeasureProducer) {
return ((AdditionalMeasureProducer) m_ResultProducer)
.getMeasure(additionalMeasureName);
} else {
throw new IllegalArgumentException("AveragingResultProducer: "
+ "Can't return value for : " + additionalMeasureName + ". "
+ m_ResultProducer.getClass().getName() + " "
+ "is not an AdditionalMeasureProducer");
}
}
/**
* Sets the dataset that results will be obtained for.
*
* @param instances a value of type 'Instances'.
*/
@Override
public void setInstances(Instances instances) {
m_Instances = instances;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String calculateStdDevsTipText() {
return "Record standard deviations for each run.";
}
/**
* Get the value of CalculateStdDevs.
*
* @return Value of CalculateStdDevs.
*/
public boolean getCalculateStdDevs() {
return m_CalculateStdDevs;
}
/**
* Set the value of CalculateStdDevs.
*
* @param newCalculateStdDevs Value to assign to CalculateStdDevs.
*/
public void setCalculateStdDevs(boolean newCalculateStdDevs) {
m_CalculateStdDevs = newCalculateStdDevs;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String expectedResultsPerAverageTipText() {
return "Set the expected number of results to average per run. "
+ "For example if a CrossValidationResultProducer is being used "
+ "(with the number of folds set to 10), then the expected number "
+ "of results per run is 10.";
}
/**
* Get the value of ExpectedResultsPerAverage.
*
* @return Value of ExpectedResultsPerAverage.
*/
public int getExpectedResultsPerAverage() {
return m_ExpectedResultsPerAverage;
}
/**
* Set the value of ExpectedResultsPerAverage.
*
* @param newExpectedResultsPerAverage Value to assign to
* ExpectedResultsPerAverage.
*/
public void setExpectedResultsPerAverage(int newExpectedResultsPerAverage) {
m_ExpectedResultsPerAverage = newExpectedResultsPerAverage;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String keyFieldNameTipText() {
return "Set the field name that will be unique for a run.";
}
/**
* Get the value of KeyFieldName.
*
* @return Value of KeyFieldName.
*/
public String getKeyFieldName() {
return m_KeyFieldName;
}
/**
* Set the value of KeyFieldName.
*
* @param newKeyFieldName Value to assign to KeyFieldName.
*/
public void setKeyFieldName(String newKeyFieldName) {
m_KeyFieldName = newKeyFieldName;
m_CountFieldName = "Num_" + m_KeyFieldName;
findKeyIndex();
}
/**
* Sets the object to send results of each run to.
*
* @param listener a value of type 'ResultListener'
*/
@Override
public void setResultListener(ResultListener listener) {
m_ResultListener = listener;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String resultProducerTipText() {
return "Set the resultProducer for which results are to be averaged.";
}
/**
* Get the ResultProducer.
*
* @return the ResultProducer.
*/
public ResultProducer getResultProducer() {
return m_ResultProducer;
}
/**
* Set the ResultProducer.
*
* @param newResultProducer new ResultProducer to use.
*/
public void setResultProducer(ResultProducer newResultProducer) {
m_ResultProducer = newResultProducer;
m_ResultProducer.setResultListener(this);
findKeyIndex();
}
/**
* Gets a text description of the result producer.
*
* @return a text description of the result producer.
*/
@Override
public String toString() {
String result = "AveragingResultProducer: ";
result += getCompatibilityState();
if (m_Instances == null) {
result += ": <null Instances>";
} else {
result += ": " + Utils.backQuoteChars(m_Instances.relationName());
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
} // AveragingResultProducer
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/CSVResultListener.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CSVResultListener.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.PrintWriter;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.gui.FilePropertyMetadata;
import javax.swing.*;
/**
* <!-- globalinfo-start --> Takes results from a result producer and assembles
* them into comma separated value form.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -O <file name>
* The filename where output will be stored. Use - for stdout.
* (default temp file)
* </pre>
*
* <!-- options-end -->
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class CSVResultListener implements ResultListener, OptionHandler,
RevisionHandler {
/** for serialization */
static final long serialVersionUID = -623185072785174658L;
/** The ResultProducer sending us results */
protected ResultProducer m_RP;
/** The destination output file, null sends to System.out */
protected File m_OutputFile = null;
/** The name of the output file. Empty for temporary file. */
protected String m_OutputFileName = "";
/** The destination for results (typically connected to the output file) */
protected transient PrintWriter m_Out = new PrintWriter(System.out, true);
/**
* Sets temporary file.
*/
public CSVResultListener() {
File resultsFile;
try {
resultsFile = File.createTempFile("weka_experiment", ".csv");
resultsFile.deleteOnExit();
} catch (Exception e) {
System.err.println("Cannot create temp file, writing to standard out.");
resultsFile = new File("-");
}
setOutputFile(resultsFile);
setOutputFileName("");
}
/**
* Returns a string describing this result listener
*
* @return a description of the result listener suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "Takes results from a result producer and assembles them into "
+ "comma separated value form.";
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(1);
newVector.addElement(new Option(
"\tThe filename where output will be stored. Use - for stdout.\n"
+ "\t(default temp file)", "O", 1, "-O <file name>"));
return newVector.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -O <file name>
* The filename where output will be stored. Use - for stdout.
* (default temp file)
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String fName = Utils.getOption('O', options);
if (fName.length() != 0) {
setOutputFile(new File(fName));
} else {
File resultsFile;
try {
resultsFile = File.createTempFile("weka_experiment", null);
resultsFile.deleteOnExit();
} catch (Exception e) {
System.err.println("Cannot create temp file, writing to standard out.");
resultsFile = new File("-");
}
setOutputFile(resultsFile);
setOutputFileName("");
}
}
/**
* Gets the current settings of the Classifier.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
String[] options = new String[2];
int current = 0;
options[current++] = "-O";
options[current++] = getOutputFile().getName();
while (current < options.length) {
options[current++] = "";
}
return options;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String outputFileTipText() {
return "File to save to. Use '-' to write to standard out.";
}
/**
* Get the value of OutputFile.
*
* @return Value of OutputFile.
*/
@FilePropertyMetadata(fileChooserDialogType = JFileChooser.SAVE_DIALOG, directoriesOnly = false)
public File getOutputFile() {
return m_OutputFile;
}
/**
* Set the value of OutputFile. Also sets the OutputFileName.
*
* @param newOutputFile Value to assign to OutputFile.
*/
public void setOutputFile(File newOutputFile) {
m_OutputFile = newOutputFile;
setOutputFileName(newOutputFile.getName());
}
/**
* Get the value of OutputFileName.
*
* @return Value of OutputFile.
*/
public String outputFileName() {
return m_OutputFileName;
}
/**
* Set the value of OutputFileName. Must be used AFTER setOutputFile.
*
* @param name the name of OutputFile.
*/
public void setOutputFileName(String name) {
m_OutputFileName = name;
}
/**
* Prepare for the results to be received.
*
* @param rp the ResultProducer that will generate the results
* @throws Exception if an error occurs during preprocessing.
*/
@Override
public void preProcess(ResultProducer rp) throws Exception {
m_RP = rp;
if ((m_OutputFile == null) || (m_OutputFile.getName().equals("-"))) {
m_Out = new PrintWriter(System.out, true);
} else {
m_Out = new PrintWriter(new BufferedOutputStream(new FileOutputStream(
m_OutputFile)), true);
}
printResultNames(m_RP);
}
/**
* Perform any postprocessing. When this method is called, it indicates that
* no more results will be sent that need to be grouped together in any way.
*
* @param rp the ResultProducer that generated the results
* @throws Exception if an error occurs
*/
@Override
public void postProcess(ResultProducer rp) throws Exception {
if (!(m_OutputFile == null) && !(m_OutputFile.getName().equals("-"))) {
m_Out.close();
}
}
/**
* Determines if there are any constraints (imposed by the destination) on the
* result columns to be produced by resultProducers. Null should be returned
* if there are NO constraints, otherwise a list of column names should be
* returned as an array of Strings.
*
* @param rp the ResultProducer to which the constraints will apply
* @return an array of column names to which resultProducer's results will be
* restricted.
* @throws Exception if an error occurs.
*/
@Override
public String[] determineColumnConstraints(ResultProducer rp)
throws Exception {
return null;
}
/**
* Just prints out each result as it is received.
*
* @param rp the ResultProducer that generated the result
* @param key The key for the results.
* @param result The actual results.
* @throws Exception if the result could not be accepted.
*/
@Override
public void acceptResult(ResultProducer rp, Object[] key, Object[] result)
throws Exception {
if (m_RP != rp) {
throw new Error("Unrecognized ResultProducer sending results!!");
}
for (int i = 0; i < key.length; i++) {
if (i != 0) {
m_Out.print(',');
}
if (key[i] == null) {
m_Out.print("?");
} else {
m_Out.print(Utils.quote(key[i].toString()));
}
}
for (Object element : result) {
m_Out.print(',');
if (element == null) {
m_Out.print("?");
} else {
m_Out.print(Utils.quote(element.toString()));
}
}
m_Out.println("");
}
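// Example of an emitted row (hypothetical key and result values):
//   iris,weka.classifiers.rules.ZeroR,1,0.33
// Key fields are written first, every result value is preceded by a comma,
// nulls appear as "?", and Utils.quote is relied on to add quoting where needed.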
/**
* Always says a result is required. (The header row for the CSV output is
* written by preProcess(), not here.)
*
* @param rp the ResultProducer wanting to generate the result
* @param key The key for which a result may be needed.
* @return true if the result should be calculated.
* @throws Exception if it could not be determined if the result is needed.
*/
@Override
public boolean isResultRequired(ResultProducer rp, Object[] key)
throws Exception {
return true;
}
/**
* Prints the names of each field out as the first row of the CSV output.
*
* @param rp the ResultProducer generating our results.
* @throws Exception if the field names could not be determined.
*/
private void printResultNames(ResultProducer rp) throws Exception {
String[] key = rp.getKeyNames();
for (int i = 0; i < key.length; i++) {
if (i != 0) {
m_Out.print(',');
}
if (key[i] == null) {
m_Out.print("?");
} else {
m_Out.print("Key_" + key[i].toString());
}
}
String[] result = rp.getResultNames();
for (String element : result) {
m_Out.print(',');
if (element == null) {
m_Out.print("?");
} else {
m_Out.print(element.toString());
}
}
m_Out.println("");
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
} // CSVResultListener
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/ClassifierSplitEvaluator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ClassifierSplitEvaluator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.ByteArrayOutputStream;
import java.io.ObjectOutputStream;
import java.io.ObjectStreamClass;
import java.io.Serializable;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.evaluation.AbstractEvaluationMetric;
import weka.classifiers.rules.ZeroR;
import weka.core.AdditionalMeasureProducer;
import weka.core.Attribute;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Summarizable;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> A SplitEvaluator that produces results for a
* classification scheme on a nominal class attribute.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* -no-size
* Skips the determination of sizes (train/test/classifier)
* (default: sizes are determined)
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the classifier.
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class ClassifierSplitEvaluator implements SplitEvaluator, OptionHandler,
AdditionalMeasureProducer, RevisionHandler {
/** for serialization */
static final long serialVersionUID = -8511241602760467265L;
/** The template classifier */
protected Classifier m_Template = new ZeroR();
/** The classifier used for evaluation */
protected Classifier m_Classifier;
/** Holds the most recently used Evaluation object */
protected Evaluation m_Evaluation;
/** The names of any additional measures to look for in SplitEvaluators */
protected String[] m_AdditionalMeasures = null;
/**
* Array of booleans corresponding to the measures in m_AdditionalMeasures
* indicating which of the AdditionalMeasures the current classifier can
* produce
*/
protected boolean[] m_doesProduce = null;
/**
* The number of additional measures that need to be filled in after taking
* into account column constraints imposed by the final destination for
* results
*/
protected int m_numberAdditionalMeasures = 0;
/** Holds the statistics for the most recent application of the classifier */
protected String m_result = null;
/** The classifier options (if any) */
protected String m_ClassifierOptions = "";
/** The classifier version */
protected String m_ClassifierVersion = "";
/** The length of a key */
private static final int KEY_SIZE = 3;
/** The length of a result */
private static final int RESULT_SIZE = 32;
/** The number of IR statistics */
private static final int NUM_IR_STATISTICS = 16;
/** The number of averaged IR statistics */
private static final int NUM_WEIGHTED_IR_STATISTICS = 10;
/** The number of unweighted averaged IR statistics */
private static final int NUM_UNWEIGHTED_IR_STATISTICS = 2;
/** Class index for information retrieval statistics (default 0) */
private int m_IRclass = 0;
/** Flag for prediction and target columns output. */
private boolean m_predTargetColumn = false;
/** Attribute index of instance identifier (default -1) */
private int m_attID = -1;
/** whether to skip determination of sizes (train/test/classifier). */
private boolean m_NoSizeDetermination;
protected final List<AbstractEvaluationMetric> m_pluginMetrics =
new ArrayList<AbstractEvaluationMetric>();
protected int m_numPluginStatistics = 0;
/**
* No args constructor.
*/
public ClassifierSplitEvaluator() {
updateOptions();
List<AbstractEvaluationMetric> pluginMetrics = AbstractEvaluationMetric
.getPluginMetrics();
if (pluginMetrics != null) {
for (AbstractEvaluationMetric m : pluginMetrics) {
System.err.println(m.getMetricName());
if (m.appliesToNominalClass()) {
m_pluginMetrics.add(m);
m_numPluginStatistics += m.getStatisticNames().size();
}
}
}
}
/**
* Returns a string describing this split evaluator
*
* @return a description of the split evaluator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return " A SplitEvaluator that produces results for a classification "
+ "scheme on a nominal class attribute.";
}
/**
   * Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(5);
newVector
.addElement(new Option("\tThe full class name of the classifier.\n"
+ "\teg: weka.classifiers.bayes.NaiveBayes", "W", 1, "-W <class name>"));
newVector.addElement(new Option(
"\tThe index of the class for which IR statistics\n"
+ "\tare to be output. (default 1)", "C", 1, "-C <index>"));
newVector.addElement(new Option(
"\tThe index of an attribute to output in the\n"
+ "\tresults. This attribute should identify an\n"
+ "\tinstance in order to know which instances are\n"
+ "\tin the test set of a cross validation. if 0\n"
+ "\tno output (default 0).", "I", 1, "-I <index>"));
newVector.addElement(new Option(
"\tAdd target and prediction columns to the result\n"
+ "\tfor each fold.", "P", 0, "-P"));
newVector.addElement(new Option(
"\tSkips the determination of sizes (train/test/classifier)\n"
+ "\t(default: sizes are determined)", "no-size", 0, "-no-size"));
if ((m_Template != null) && (m_Template instanceof OptionHandler)) {
newVector.addElement(new Option("", "", 0,
"\nOptions specific to classifier " + m_Template.getClass().getName()
+ ":"));
newVector.addAll(Collections.list(((OptionHandler) m_Template)
.listOptions()));
}
return newVector.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* -no-size
* Skips the determination of sizes (train/test/classifier)
* (default: sizes are determined)
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the classifier.
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String cName = Utils.getOption('W', options);
if (cName.length() == 0) {
throw new Exception("A classifier must be specified with"
+ " the -W option.");
}
// Do it first without options, so if an exception is thrown during
// the option setting, listOptions will contain options for the actual
// Classifier.
setClassifier(AbstractClassifier.forName(cName, null));
if (getClassifier() instanceof OptionHandler) {
((OptionHandler) getClassifier()).setOptions(Utils
.partitionOptions(options));
updateOptions();
}
String indexName = Utils.getOption('C', options);
if (indexName.length() != 0) {
m_IRclass = (new Integer(indexName)).intValue() - 1;
} else {
m_IRclass = 0;
}
String attID = Utils.getOption('I', options);
if (attID.length() != 0) {
m_attID = (new Integer(attID)).intValue() - 1;
} else {
m_attID = -1;
}
m_predTargetColumn = Utils.getFlag('P', options);
m_NoSizeDetermination = Utils.getFlag("no-size", options);
}
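  /*
   * Illustrative sketch (assumption, not part of the original source): one way
   * to configure this evaluator via setOptions(). Everything after "--" is
   * handed to the classifier itself; the classifier class and its -M option
   * below are placeholder choices, not defaults of this class.
   *
   * <pre>
   * ClassifierSplitEvaluator se = new ClassifierSplitEvaluator();
   * se.setOptions(Utils.splitOptions("-W weka.classifiers.trees.J48 -C 1 -I 0 -P -- -M 5"));
   * </pre>
   */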
/**
* Gets the current settings of the Classifier.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result;
String[] classifierOptions;
result = new Vector<String>();
classifierOptions = new String[0];
if ((m_Template != null) && (m_Template instanceof OptionHandler)) {
classifierOptions = ((OptionHandler) m_Template).getOptions();
}
if (getClassifier() != null) {
result.add("-W");
result.add(getClassifier().getClass().getName());
}
result.add("-I");
result.add("" + (m_attID + 1));
if (getPredTargetColumn()) {
result.add("-P");
}
result.add("-C");
result.add("" + (m_IRclass + 1));
if (getNoSizeDetermination()) {
result.add("-no-size");
}
result.add("--");
result.addAll(Arrays.asList(classifierOptions));
return result.toArray(new String[result.size()]);
}
/**
* Set a list of method names for additional measures to look for in
* Classifiers. This could contain many measures (of which only a subset may
   * be producible by the current Classifier) if an experiment is the type that
* iterates over a set of properties.
*
* @param additionalMeasures a list of method names
*/
@Override
public void setAdditionalMeasures(String[] additionalMeasures) {
// System.err.println("ClassifierSplitEvaluator: setting additional measures");
m_AdditionalMeasures = additionalMeasures;
// determine which (if any) of the additional measures this classifier
// can produce
if (m_AdditionalMeasures != null && m_AdditionalMeasures.length > 0) {
m_doesProduce = new boolean[m_AdditionalMeasures.length];
if (m_Template instanceof AdditionalMeasureProducer) {
Enumeration<String> en = ((AdditionalMeasureProducer) m_Template)
.enumerateMeasures();
while (en.hasMoreElements()) {
String mname = en.nextElement();
for (int j = 0; j < m_AdditionalMeasures.length; j++) {
if (mname.compareToIgnoreCase(m_AdditionalMeasures[j]) == 0) {
m_doesProduce[j] = true;
}
}
}
}
} else {
m_doesProduce = null;
}
}
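  /*
   * Illustrative sketch (assumption): additional measures are method names
   * exposed by classifiers implementing AdditionalMeasureProducer; the measure
   * names below are examples taken from tree learners and may not apply to
   * every classifier.
   *
   * <pre>
   * se.setAdditionalMeasures(new String[] { "measureTreeSize", "measureNumLeaves" });
   * </pre>
   */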
/**
* Returns an enumeration of any additional measure names that might be in the
* classifier
*
* @return an enumeration of the measure names
*/
@Override
public Enumeration<String> enumerateMeasures() {
Vector<String> newVector = new Vector<String>();
if (m_Template instanceof AdditionalMeasureProducer) {
Enumeration<String> en = ((AdditionalMeasureProducer) m_Template)
.enumerateMeasures();
while (en.hasMoreElements()) {
String mname = en.nextElement();
newVector.add(mname);
}
}
return newVector.elements();
}
/**
* Returns the value of the named measure
*
* @param additionalMeasureName the name of the measure to query for its value
* @return the value of the named measure
* @throws IllegalArgumentException if the named measure is not supported
*/
@Override
public double getMeasure(String additionalMeasureName) {
if (m_Template instanceof AdditionalMeasureProducer) {
if (m_Classifier == null) {
throw new IllegalArgumentException("ClassifierSplitEvaluator: "
+ "Can't return result for measure, "
+ "classifier has not been built yet.");
}
return ((AdditionalMeasureProducer) m_Classifier)
.getMeasure(additionalMeasureName);
} else {
throw new IllegalArgumentException("ClassifierSplitEvaluator: "
+ "Can't return value for : " + additionalMeasureName + ". "
+ m_Template.getClass().getName() + " "
+ "is not an AdditionalMeasureProducer");
}
}
/**
* Gets the data types of each of the key columns produced for a single run.
* The number of key fields must be constant for a given SplitEvaluator.
*
* @return an array containing objects of the type of each key column. The
* objects should be Strings, or Doubles.
*/
@Override
public Object[] getKeyTypes() {
Object[] keyTypes = new Object[KEY_SIZE];
keyTypes[0] = "";
keyTypes[1] = "";
keyTypes[2] = "";
return keyTypes;
}
/**
* Gets the names of each of the key columns produced for a single run. The
* number of key fields must be constant for a given SplitEvaluator.
*
* @return an array containing the name of each key column
*/
@Override
public String[] getKeyNames() {
String[] keyNames = new String[KEY_SIZE];
keyNames[0] = "Scheme";
keyNames[1] = "Scheme_options";
keyNames[2] = "Scheme_version_ID";
return keyNames;
}
/**
   * Gets the key describing the current SplitEvaluator. For example, this may
* contain the name of the classifier used for classifier predictive
* evaluation. The number of key fields must be constant for a given
* SplitEvaluator.
*
* @return an array of objects containing the key.
*/
@Override
public Object[] getKey() {
Object[] key = new Object[KEY_SIZE];
key[0] = m_Template.getClass().getName();
key[1] = m_ClassifierOptions;
key[2] = m_ClassifierVersion;
return key;
}
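  /*
   * Illustrative sketch (assumption): for a J48 template with default options
   * the key returned here would look roughly like
   * { "weka.classifiers.trees.J48", "-C 0.25 -M 2", "<serialVersionUID of J48>" },
   * i.e. scheme class name, option string and serialized version ID.
   */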
/**
* Gets the data types of each of the result columns produced for a single
* run. The number of result fields must be constant for a given
* SplitEvaluator.
*
* @return an array containing objects of the type of each result column. The
* objects should be Strings, or Doubles.
*/
@Override
public Object[] getResultTypes() {
int addm = (m_AdditionalMeasures != null) ? m_AdditionalMeasures.length : 0;
int overall_length = RESULT_SIZE + addm;
overall_length += NUM_IR_STATISTICS;
overall_length += NUM_WEIGHTED_IR_STATISTICS;
overall_length += NUM_UNWEIGHTED_IR_STATISTICS;
if (getAttributeID() >= 0) {
overall_length += 1;
}
if (getPredTargetColumn()) {
overall_length += 2;
}
overall_length += m_numPluginStatistics;
Object[] resultTypes = new Object[overall_length];
Double doub = new Double(0);
int current = 0;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
// IR stats
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
// Unweighted IR stats
resultTypes[current++] = doub;
resultTypes[current++] = doub;
// Weighted IR stats
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
// Timing stats
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
// sizes
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
// Prediction interval statistics
resultTypes[current++] = doub;
resultTypes[current++] = doub;
// ID/Targets/Predictions
if (getAttributeID() >= 0) {
resultTypes[current++] = "";
}
if (getPredTargetColumn()) {
resultTypes[current++] = "";
resultTypes[current++] = "";
}
// Classifier defined extras
resultTypes[current++] = "";
// add any additional measures
for (int i = 0; i < addm; i++) {
resultTypes[current++] = doub;
}
// plugin metrics
for (int i = 0; i < m_numPluginStatistics; i++) {
resultTypes[current++] = doub;
}
if (current != overall_length) {
throw new Error("ResultTypes didn't fit RESULT_SIZE");
}
return resultTypes;
}
/**
* Gets the names of each of the result columns produced for a single run. The
* number of result fields must be constant for a given SplitEvaluator.
*
* @return an array containing the name of each result column
*/
@Override
public String[] getResultNames() {
int addm = (m_AdditionalMeasures != null) ? m_AdditionalMeasures.length : 0;
int overall_length = RESULT_SIZE + addm;
overall_length += NUM_IR_STATISTICS;
overall_length += NUM_WEIGHTED_IR_STATISTICS;
overall_length += NUM_UNWEIGHTED_IR_STATISTICS;
if (getAttributeID() >= 0) {
overall_length += 1;
}
if (getPredTargetColumn()) {
overall_length += 2;
}
overall_length += m_numPluginStatistics;
String[] resultNames = new String[overall_length];
int current = 0;
resultNames[current++] = "Number_of_training_instances";
resultNames[current++] = "Number_of_testing_instances";
// Basic performance stats - right vs wrong
resultNames[current++] = "Number_correct";
resultNames[current++] = "Number_incorrect";
resultNames[current++] = "Number_unclassified";
resultNames[current++] = "Percent_correct";
resultNames[current++] = "Percent_incorrect";
resultNames[current++] = "Percent_unclassified";
resultNames[current++] = "Kappa_statistic";
// Sensitive stats - certainty of predictions
resultNames[current++] = "Mean_absolute_error";
resultNames[current++] = "Root_mean_squared_error";
resultNames[current++] = "Relative_absolute_error";
resultNames[current++] = "Root_relative_squared_error";
// SF stats
resultNames[current++] = "SF_prior_entropy";
resultNames[current++] = "SF_scheme_entropy";
resultNames[current++] = "SF_entropy_gain";
resultNames[current++] = "SF_mean_prior_entropy";
resultNames[current++] = "SF_mean_scheme_entropy";
resultNames[current++] = "SF_mean_entropy_gain";
// K&B stats
resultNames[current++] = "KB_information";
resultNames[current++] = "KB_mean_information";
resultNames[current++] = "KB_relative_information";
// IR stats
resultNames[current++] = "True_positive_rate";
resultNames[current++] = "Num_true_positives";
resultNames[current++] = "False_positive_rate";
resultNames[current++] = "Num_false_positives";
resultNames[current++] = "True_negative_rate";
resultNames[current++] = "Num_true_negatives";
resultNames[current++] = "False_negative_rate";
resultNames[current++] = "Num_false_negatives";
resultNames[current++] = "IR_precision";
resultNames[current++] = "IR_recall";
resultNames[current++] = "F_measure";
resultNames[current++] = "Matthews_correlation";
resultNames[current++] = "Area_under_ROC";
resultNames[current++] = "Area_under_PRC";
// Weighted IR stats
resultNames[current++] = "Weighted_avg_true_positive_rate";
resultNames[current++] = "Weighted_avg_false_positive_rate";
resultNames[current++] = "Weighted_avg_true_negative_rate";
resultNames[current++] = "Weighted_avg_false_negative_rate";
resultNames[current++] = "Weighted_avg_IR_precision";
resultNames[current++] = "Weighted_avg_IR_recall";
resultNames[current++] = "Weighted_avg_F_measure";
resultNames[current++] = "Weighted_avg_matthews_correlation";
resultNames[current++] = "Weighted_avg_area_under_ROC";
resultNames[current++] = "Weighted_avg_area_under_PRC";
// Unweighted IR stats
resultNames[current++] = "Unweighted_macro_avg_F_measure";
resultNames[current++] = "Unweighted_micro_avg_F_measure";
// Timing stats
resultNames[current++] = "Elapsed_Time_training";
resultNames[current++] = "Elapsed_Time_testing";
resultNames[current++] = "UserCPU_Time_training";
resultNames[current++] = "UserCPU_Time_testing";
resultNames[current++] = "UserCPU_Time_millis_training";
resultNames[current++] = "UserCPU_Time_millis_testing";
// sizes
resultNames[current++] = "Serialized_Model_Size";
resultNames[current++] = "Serialized_Train_Set_Size";
resultNames[current++] = "Serialized_Test_Set_Size";
// Prediction interval statistics
resultNames[current++] = "Coverage_of_Test_Cases_By_Regions";
resultNames[current++] = "Size_of_Predicted_Regions";
// ID/Targets/Predictions
if (getAttributeID() >= 0) {
resultNames[current++] = "Instance_ID";
}
if (getPredTargetColumn()) {
resultNames[current++] = "Targets";
resultNames[current++] = "Predictions";
}
// Classifier defined extras
resultNames[current++] = "Summary";
// add any additional measures
for (int i = 0; i < addm; i++) {
resultNames[current++] = m_AdditionalMeasures[i];
}
for (AbstractEvaluationMetric m : m_pluginMetrics) {
List<String> statNames = m.getStatisticNames();
for (String s : statNames) {
resultNames[current++] = s;
}
}
if (current != overall_length) {
throw new Error("ResultNames didn't fit RESULT_SIZE");
}
return resultNames;
}
/**
* Gets the results for the supplied train and test datasets. Now performs a
* deep copy of the classifier before it is built and evaluated (just in case
* the classifier is not initialized properly in buildClassifier()).
*
* @param train the training Instances.
* @param test the testing Instances.
* @return the results stored in an array. The objects stored in the array may
* be Strings, Doubles, or null (for the missing value).
* @throws Exception if a problem occurs while getting the results
*/
@Override
public Object[] getResult(Instances train, Instances test) throws Exception {
if (train.classAttribute().type() != Attribute.NOMINAL) {
throw new Exception("Class attribute is not nominal!");
}
if (m_Template == null) {
throw new Exception("No classifier has been specified");
}
int addm = (m_AdditionalMeasures != null) ? m_AdditionalMeasures.length : 0;
int overall_length = RESULT_SIZE + addm;
overall_length += NUM_IR_STATISTICS;
overall_length += NUM_WEIGHTED_IR_STATISTICS;
overall_length += NUM_UNWEIGHTED_IR_STATISTICS;
if (getAttributeID() >= 0) {
overall_length += 1;
}
if (getPredTargetColumn()) {
overall_length += 2;
}
overall_length += m_numPluginStatistics;
ThreadMXBean thMonitor = ManagementFactory.getThreadMXBean();
boolean canMeasureCPUTime = thMonitor.isThreadCpuTimeSupported();
if (canMeasureCPUTime && !thMonitor.isThreadCpuTimeEnabled()) {
thMonitor.setThreadCpuTimeEnabled(true);
}
Object[] result = new Object[overall_length];
Evaluation eval = new Evaluation(train);
m_Classifier = AbstractClassifier.makeCopy(m_Template);
double[] predictions;
long thID = Thread.currentThread().getId();
long CPUStartTime = -1, trainCPUTimeElapsed = -1, testCPUTimeElapsed = -1, trainTimeStart, trainTimeElapsed, testTimeStart, testTimeElapsed;
// training classifier
trainTimeStart = System.currentTimeMillis();
if (canMeasureCPUTime) {
CPUStartTime = thMonitor.getThreadUserTime(thID);
}
m_Classifier.buildClassifier(train);
if (canMeasureCPUTime) {
trainCPUTimeElapsed = thMonitor.getThreadUserTime(thID) - CPUStartTime;
}
trainTimeElapsed = System.currentTimeMillis() - trainTimeStart;
// testing classifier
testTimeStart = System.currentTimeMillis();
if (canMeasureCPUTime) {
CPUStartTime = thMonitor.getThreadUserTime(thID);
}
predictions = eval.evaluateModel(m_Classifier, test);
if (canMeasureCPUTime) {
testCPUTimeElapsed = thMonitor.getThreadUserTime(thID) - CPUStartTime;
}
testTimeElapsed = System.currentTimeMillis() - testTimeStart;
thMonitor = null;
m_result = eval.toSummaryString();
// The results stored are all per instance -- can be multiplied by the
// number of instances to get absolute numbers
int current = 0;
result[current++] = new Double(train.numInstances());
result[current++] = new Double(eval.numInstances());
result[current++] = new Double(eval.correct());
result[current++] = new Double(eval.incorrect());
result[current++] = new Double(eval.unclassified());
result[current++] = new Double(eval.pctCorrect());
result[current++] = new Double(eval.pctIncorrect());
result[current++] = new Double(eval.pctUnclassified());
result[current++] = new Double(eval.kappa());
result[current++] = new Double(eval.meanAbsoluteError());
result[current++] = new Double(eval.rootMeanSquaredError());
result[current++] = new Double(eval.relativeAbsoluteError());
result[current++] = new Double(eval.rootRelativeSquaredError());
result[current++] = new Double(eval.SFPriorEntropy());
result[current++] = new Double(eval.SFSchemeEntropy());
result[current++] = new Double(eval.SFEntropyGain());
result[current++] = new Double(eval.SFMeanPriorEntropy());
result[current++] = new Double(eval.SFMeanSchemeEntropy());
result[current++] = new Double(eval.SFMeanEntropyGain());
// K&B stats
result[current++] = new Double(eval.KBInformation());
result[current++] = new Double(eval.KBMeanInformation());
result[current++] = new Double(eval.KBRelativeInformation());
// IR stats
result[current++] = new Double(eval.truePositiveRate(m_IRclass));
result[current++] = new Double(eval.numTruePositives(m_IRclass));
result[current++] = new Double(eval.falsePositiveRate(m_IRclass));
result[current++] = new Double(eval.numFalsePositives(m_IRclass));
result[current++] = new Double(eval.trueNegativeRate(m_IRclass));
result[current++] = new Double(eval.numTrueNegatives(m_IRclass));
result[current++] = new Double(eval.falseNegativeRate(m_IRclass));
result[current++] = new Double(eval.numFalseNegatives(m_IRclass));
result[current++] = new Double(eval.precision(m_IRclass));
result[current++] = new Double(eval.recall(m_IRclass));
result[current++] = new Double(eval.fMeasure(m_IRclass));
result[current++] = new Double(
eval.matthewsCorrelationCoefficient(m_IRclass));
result[current++] = new Double(eval.areaUnderROC(m_IRclass));
result[current++] = new Double(eval.areaUnderPRC(m_IRclass));
// Weighted IR stats
result[current++] = new Double(eval.weightedTruePositiveRate());
result[current++] = new Double(eval.weightedFalsePositiveRate());
result[current++] = new Double(eval.weightedTrueNegativeRate());
result[current++] = new Double(eval.weightedFalseNegativeRate());
result[current++] = new Double(eval.weightedPrecision());
result[current++] = new Double(eval.weightedRecall());
result[current++] = new Double(eval.weightedFMeasure());
result[current++] = new Double(eval.weightedMatthewsCorrelation());
result[current++] = new Double(eval.weightedAreaUnderROC());
result[current++] = new Double(eval.weightedAreaUnderPRC());
// Unweighted IR stats
result[current++] = new Double(eval.unweightedMacroFmeasure());
result[current++] = new Double(eval.unweightedMicroFmeasure());
// Timing stats
result[current++] = new Double(trainTimeElapsed / 1000.0);
result[current++] = new Double(testTimeElapsed / 1000.0);
if (canMeasureCPUTime) {
result[current++] =
new Double((trainCPUTimeElapsed / 1000000.0) / 1000.0);
result[current++] = new Double((testCPUTimeElapsed / 1000000.0) / 1000.0);
result[current++] =
new Double(trainCPUTimeElapsed / 1000000.0);
result[current++] = new Double(testCPUTimeElapsed / 1000000.0);
} else {
result[current++] = new Double(Utils.missingValue());
result[current++] = new Double(Utils.missingValue());
result[current++] = new Double(Utils.missingValue());
result[current++] = new Double(Utils.missingValue());
}
// sizes
if (m_NoSizeDetermination) {
result[current++] = -1.0;
result[current++] = -1.0;
result[current++] = -1.0;
} else {
ByteArrayOutputStream bastream = new ByteArrayOutputStream();
ObjectOutputStream oostream = new ObjectOutputStream(bastream);
oostream.writeObject(m_Classifier);
result[current++] = new Double(bastream.size());
bastream = new ByteArrayOutputStream();
oostream = new ObjectOutputStream(bastream);
oostream.writeObject(train);
result[current++] = new Double(bastream.size());
bastream = new ByteArrayOutputStream();
oostream = new ObjectOutputStream(bastream);
oostream.writeObject(test);
result[current++] = new Double(bastream.size());
}
// Prediction interval statistics
result[current++] =
new Double(eval.coverageOfTestCasesByPredictedRegions());
result[current++] = new Double(eval.sizeOfPredictedRegions());
// IDs
if (getAttributeID() >= 0) {
String idsString = "";
if (test.attribute(m_attID).isNumeric()) {
if (test.numInstances() > 0) {
idsString += test.instance(0).value(m_attID);
}
for (int i = 1; i < test.numInstances(); i++) {
idsString += "|" + test.instance(i).value(m_attID);
}
} else {
if (test.numInstances() > 0) {
idsString += test.instance(0).stringValue(m_attID);
}
for (int i = 1; i < test.numInstances(); i++) {
idsString += "|" + test.instance(i).stringValue(m_attID);
}
}
result[current++] = idsString;
}
if (getPredTargetColumn()) {
if (test.classAttribute().isNumeric()) {
// Targets
if (test.numInstances() > 0) {
String targetsString = "";
targetsString += test.instance(0).value(test.classIndex());
for (int i = 1; i < test.numInstances(); i++) {
targetsString += "|" + test.instance(i).value(test.classIndex());
}
result[current++] = targetsString;
}
// Predictions
if (predictions.length > 0) {
String predictionsString = "";
predictionsString += predictions[0];
for (int i = 1; i < predictions.length; i++) {
predictionsString += "|" + predictions[i];
}
result[current++] = predictionsString;
}
} else {
// Targets
if (test.numInstances() > 0) {
String targetsString = "";
targetsString += test.instance(0).stringValue(test.classIndex());
for (int i = 1; i < test.numInstances(); i++) {
targetsString += "|"
+ test.instance(i).stringValue(test.classIndex());
}
result[current++] = targetsString;
}
// Predictions
if (predictions.length > 0) {
String predictionsString = "";
predictionsString += test.classAttribute()
.value((int) predictions[0]);
for (int i = 1; i < predictions.length; i++) {
predictionsString += "|"
+ test.classAttribute().value((int) predictions[i]);
}
result[current++] = predictionsString;
}
}
}
if (m_Classifier instanceof Summarizable) {
result[current++] = ((Summarizable) m_Classifier).toSummaryString();
} else {
result[current++] = null;
}
for (int i = 0; i < addm; i++) {
if (m_doesProduce[i]) {
try {
double dv = ((AdditionalMeasureProducer) m_Classifier)
.getMeasure(m_AdditionalMeasures[i]);
if (!Utils.isMissingValue(dv)) {
Double value = new Double(dv);
result[current++] = value;
} else {
result[current++] = null;
}
} catch (Exception ex) {
System.err.println(ex);
}
} else {
result[current++] = null;
}
}
// get the actual metrics from the evaluation object
List<AbstractEvaluationMetric> metrics = eval.getPluginMetrics();
if (metrics != null) {
for (AbstractEvaluationMetric m : metrics) {
if (m.appliesToNominalClass()) {
List<String> statNames = m.getStatisticNames();
for (String s : statNames) {
result[current++] = new Double(m.getStatistic(s));
}
}
}
}
if (current != overall_length) {
throw new Error("Results didn't fit RESULT_SIZE");
}
m_Evaluation = eval;
return result;
}
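  /*
   * Illustrative sketch (assumption, not part of the original source): driving
   * getResult() directly on a single fold of a cross-validation. The dataset
   * location and classifier are placeholders.
   *
   * <pre>
   * Instances data = new weka.core.converters.ConverterUtils.DataSource("iris.arff").getDataSet();
   * data.setClassIndex(data.numAttributes() - 1);
   * data.randomize(new java.util.Random(1));
   * data.stratify(10);
   * Instances train = data.trainCV(10, 0);
   * Instances test = data.testCV(10, 0);
   * ClassifierSplitEvaluator se = new ClassifierSplitEvaluator();
   * se.setClassifier(new weka.classifiers.trees.J48());
   * Object[] row = se.getResult(train, test); // aligns one-to-one with getResultNames()
   * </pre>
   */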
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String classifierTipText() {
return "The classifier to use.";
}
/**
* Get the value of Classifier.
*
* @return Value of Classifier.
*/
public Classifier getClassifier() {
return m_Template;
}
/**
* Sets the classifier.
*
* @param newClassifier the new classifier to use.
*/
public void setClassifier(Classifier newClassifier) {
m_Template = newClassifier;
updateOptions();
}
/**
* Get the value of ClassForIRStatistics.
*
* @return Value of ClassForIRStatistics.
*/
public int getClassForIRStatistics() {
return m_IRclass;
}
/**
* Set the value of ClassForIRStatistics.
*
* @param v Value to assign to ClassForIRStatistics.
*/
public void setClassForIRStatistics(int v) {
m_IRclass = v;
}
/**
   * Gets the index of the attribute identifying the instances.
   *
   * @return index of the attribute to output.
*/
public int getAttributeID() {
return m_attID;
}
/**
* Set the index of Attibute Identifying the instances
*
* @param v index the attribute to output
*/
public void setAttributeID(int v) {
m_attID = v;
}
/**
   * @return true if the prediction and target columns are to be output.
*/
public boolean getPredTargetColumn() {
return m_predTargetColumn;
}
/**
* Set the flag for prediction and target output.
*
   * @param v true if the two columns are to be output, false otherwise.
*/
public void setPredTargetColumn(boolean v) {
m_predTargetColumn = v;
}
/**
   * Returns whether the size determination (train/test/classifier) is skipped.
*
* @return true if size determination skipped
*/
public boolean getNoSizeDetermination() {
return m_NoSizeDetermination;
}
/**
   * Sets whether the size determination (train/test/classifier) is skipped.
   *
   * @param value true if the size determination is to be skipped
*/
public void setNoSizeDetermination(boolean value) {
m_NoSizeDetermination = value;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String noSizeDeterminationTipText() {
return "If enabled, the size determination for train/test/classifier is skipped.";
}
/**
* Updates the options that the current classifier is using.
*/
protected void updateOptions() {
if (m_Template instanceof OptionHandler) {
m_ClassifierOptions = Utils.joinOptions(((OptionHandler) m_Template)
.getOptions());
} else {
m_ClassifierOptions = "";
}
if (m_Template instanceof Serializable) {
ObjectStreamClass obs = ObjectStreamClass.lookup(m_Template.getClass());
m_ClassifierVersion = "" + obs.getSerialVersionUID();
} else {
m_ClassifierVersion = "";
}
}
/**
   * Set the Classifier to use, given its class name. A new classifier will be
* instantiated.
*
* @param newClassifierName the Classifier class name.
* @throws Exception if the class name is invalid.
*/
public void setClassifierName(String newClassifierName) throws Exception {
try {
setClassifier((Classifier) Class.forName(newClassifierName).newInstance());
} catch (Exception ex) {
throw new Exception("Can't find Classifier with class name: "
+ newClassifierName);
}
}
/**
* Gets the raw output from the classifier
*
   * @return the raw output from the classifier
*/
@Override
public String getRawResultOutput() {
StringBuffer result = new StringBuffer();
if (m_Classifier == null) {
return "<null> classifier";
}
result.append(toString());
result.append("Classifier model: \n" + m_Classifier.toString() + '\n');
// append the performance statistics
if (m_result != null) {
result.append(m_result);
if (m_doesProduce != null) {
for (int i = 0; i < m_doesProduce.length; i++) {
if (m_doesProduce[i]) {
try {
double dv = ((AdditionalMeasureProducer) m_Classifier)
.getMeasure(m_AdditionalMeasures[i]);
if (!Utils.isMissingValue(dv)) {
Double value = new Double(dv);
result.append(m_AdditionalMeasures[i] + " : " + value + '\n');
} else {
result.append(m_AdditionalMeasures[i] + " : " + '?' + '\n');
}
} catch (Exception ex) {
System.err.println(ex);
}
}
}
}
}
return result.toString();
}
/**
* Returns a text description of the split evaluator.
*
* @return a text description of the split evaluator.
*/
@Override
public String toString() {
String result = "ClassifierSplitEvaluator: ";
if (m_Template == null) {
return result + "<null> classifier";
}
return result + m_Template.getClass().getName() + " " + m_ClassifierOptions
+ "(version " + m_ClassifierVersion + ")";
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/Compute.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Compute.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.rmi.Remote;
import java.rmi.RemoteException;
/**
* Interface to something that can accept remote connections and execute
* a task.
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public interface Compute extends Remote {
/**
* Execute a task
* @param t Task to be executed
* @exception RemoteException if something goes wrong.
* @return a unique ID for the task
*/
Object executeTask(Task t) throws RemoteException;
/**
* Check on the status of a <code>Task</code>
*
* @param taskId the ID for the Task to be checked
* @return the status of the Task
* @exception Exception if an error occurs
*/
Object checkStatus(Object taskId) throws Exception;
}
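/*
 * Illustrative sketch (assumption, not part of the original source): a client
 * would typically obtain a Compute stub from the RMI registry and submit a
 * Task. The registry name "RemoteEngine" and the variable someTask (an
 * implementation of weka.experiment.Task) are placeholders.
 *
 * <pre>
 * Compute engine = (Compute) java.rmi.Naming.lookup("//somehost/RemoteEngine");
 * Object taskId = engine.executeTask(someTask);
 * Object status = engine.checkStatus(taskId);
 * </pre>
 */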
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/CostSensitiveClassifierSplitEvaluator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CostSensitiveClassifierSplitEvaluator.java
* Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileReader;
import java.io.ObjectOutputStream;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.CostMatrix;
import weka.classifiers.Evaluation;
import weka.core.AdditionalMeasureProducer;
import weka.core.Attribute;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Summarizable;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> SplitEvaluator that produces results for a
* classification scheme on a nominal class attribute, including weighted
* misclassification costs.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <pre>
* -D <directory>
* Name of a directory to search for cost files when loading
* costs on demand (default current directory).
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the classifier.
*
* @author Len Trigg (len@reeltwo.com)
* @version $Revision$
*/
public class CostSensitiveClassifierSplitEvaluator extends
ClassifierSplitEvaluator {
/** for serialization */
static final long serialVersionUID = -8069566663019501276L;
/**
* The directory used when loading cost files on demand, null indicates
* current directory
*/
protected File m_OnDemandDirectory = new File(System.getProperty("user.dir"));
/** The length of a result */
private static final int RESULT_SIZE = 33;
/**
* Returns a string describing this split evaluator
*
* @return a description of the split evaluator suitable for displaying in the
* explorer/experimenter gui
*/
@Override
public String globalInfo() {
return " SplitEvaluator that produces results for a classification scheme "
+ "on a nominal class attribute, including weighted misclassification "
+ "costs.";
}
/**
   * Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(1);
newVector.addAll(Collections.list(super.listOptions()));
newVector.addElement(new Option(
"\tName of a directory to search for cost files when loading\n"
+ "\tcosts on demand (default current directory).", "D", 1,
"-D <directory>"));
return newVector.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <pre>
* -D <directory>
* Name of a directory to search for cost files when loading
* costs on demand (default current directory).
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the classifier.
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String demandDir = Utils.getOption('D', options);
if (demandDir.length() != 0) {
setOnDemandDirectory(new File(demandDir));
}
super.setOptions(options);
Utils.checkForRemainingOptions(options);
}
/**
* Gets the current settings of the Classifier.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> options = new Vector<String>();
options.add("-D");
options.add("" + getOnDemandDirectory());
Collections.addAll(options, super.getOptions());
return options.toArray(new String[0]);
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String onDemandDirectoryTipText() {
return "The directory to look in for cost files. This directory will be "
+ "searched for cost files when loading on demand.";
}
/**
* Returns the directory that will be searched for cost files when loading on
* demand.
*
* @return The cost file search directory.
*/
public File getOnDemandDirectory() {
return m_OnDemandDirectory;
}
/**
* Sets the directory that will be searched for cost files when loading on
* demand.
*
* @param newDir The cost file search directory.
*/
public void setOnDemandDirectory(File newDir) {
if (newDir.isDirectory()) {
m_OnDemandDirectory = newDir;
} else {
m_OnDemandDirectory = new File(newDir.getParent());
}
}
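  /*
   * Illustrative sketch (assumption): getResult() below resolves the cost file
   * from the training set's relation name inside this directory, i.e.
   * <onDemandDirectory>/<relationName> + CostMatrix.FILE_EXTENSION. For example:
   *
   * <pre>
   * CostSensitiveClassifierSplitEvaluator se = new CostSensitiveClassifierSplitEvaluator();
   * se.setOnDemandDirectory(new File("/path/to/costs"));
   * </pre>
   */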
/**
* Gets the data types of each of the result columns produced for a single
* run. The number of result fields must be constant for a given
* SplitEvaluator.
*
* @return an array containing objects of the type of each result column. The
* objects should be Strings, or Doubles.
*/
@Override
public Object[] getResultTypes() {
int addm = (m_AdditionalMeasures != null) ? m_AdditionalMeasures.length : 0;
Object[] resultTypes = new Object[RESULT_SIZE + addm];
Double doub = new Double(0);
int current = 0;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
// Timing stats
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
// sizes
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = "";
// add any additional measures
for (int i = 0; i < addm; i++) {
resultTypes[current++] = doub;
}
if (current != RESULT_SIZE + addm) {
throw new Error("ResultTypes didn't fit RESULT_SIZE");
}
return resultTypes;
}
/**
* Gets the names of each of the result columns produced for a single run. The
* number of result fields must be constant for a given SplitEvaluator.
*
* @return an array containing the name of each result column
*/
@Override
public String[] getResultNames() {
int addm = (m_AdditionalMeasures != null) ? m_AdditionalMeasures.length : 0;
String[] resultNames = new String[RESULT_SIZE + addm];
int current = 0;
resultNames[current++] = "Number_of_training_instances";
resultNames[current++] = "Number_of_testing_instances";
// Basic performance stats - right vs wrong
resultNames[current++] = "Number_correct";
resultNames[current++] = "Number_incorrect";
resultNames[current++] = "Number_unclassified";
resultNames[current++] = "Percent_correct";
resultNames[current++] = "Percent_incorrect";
resultNames[current++] = "Percent_unclassified";
resultNames[current++] = "Total_cost";
resultNames[current++] = "Average_cost";
// Sensitive stats - certainty of predictions
resultNames[current++] = "Mean_absolute_error";
resultNames[current++] = "Root_mean_squared_error";
resultNames[current++] = "Relative_absolute_error";
resultNames[current++] = "Root_relative_squared_error";
// SF stats
resultNames[current++] = "SF_prior_entropy";
resultNames[current++] = "SF_scheme_entropy";
resultNames[current++] = "SF_entropy_gain";
resultNames[current++] = "SF_mean_prior_entropy";
resultNames[current++] = "SF_mean_scheme_entropy";
resultNames[current++] = "SF_mean_entropy_gain";
// K&B stats
resultNames[current++] = "KB_information";
resultNames[current++] = "KB_mean_information";
resultNames[current++] = "KB_relative_information";
// Timing stats
resultNames[current++] = "Elapsed_Time_training";
resultNames[current++] = "Elapsed_Time_testing";
resultNames[current++] = "UserCPU_Time_training";
resultNames[current++] = "UserCPU_Time_testing";
resultNames[current++] = "UserCPU_Time_millis_training";
resultNames[current++] = "UserCPU_Time_millis_testing";
// sizes
resultNames[current++] = "Serialized_Model_Size";
resultNames[current++] = "Serialized_Train_Set_Size";
resultNames[current++] = "Serialized_Test_Set_Size";
// Classifier defined extras
resultNames[current++] = "Summary";
// add any additional measures
for (int i = 0; i < addm; i++) {
resultNames[current++] = m_AdditionalMeasures[i];
}
if (current != RESULT_SIZE + addm) {
throw new Error("ResultNames didn't fit RESULT_SIZE");
}
return resultNames;
}
/**
* Gets the results for the supplied train and test datasets. Now performs a
* deep copy of the classifier before it is built and evaluated (just in case
* the classifier is not initialized properly in buildClassifier()).
*
* @param train the training Instances.
* @param test the testing Instances.
* @return the results stored in an array. The objects stored in the array may
* be Strings, Doubles, or null (for the missing value).
* @throws Exception if a problem occurs while getting the results
*/
@Override
public Object[] getResult(Instances train, Instances test) throws Exception {
if (train.classAttribute().type() != Attribute.NOMINAL) {
throw new Exception("Class attribute is not nominal!");
}
if (m_Template == null) {
throw new Exception("No classifier has been specified");
}
ThreadMXBean thMonitor = ManagementFactory.getThreadMXBean();
boolean canMeasureCPUTime = thMonitor.isThreadCpuTimeSupported();
if (canMeasureCPUTime && !thMonitor.isThreadCpuTimeEnabled()) {
thMonitor.setThreadCpuTimeEnabled(true);
}
int addm = (m_AdditionalMeasures != null) ? m_AdditionalMeasures.length : 0;
Object[] result = new Object[RESULT_SIZE + addm];
long thID = Thread.currentThread().getId();
long CPUStartTime = -1, trainCPUTimeElapsed = -1, testCPUTimeElapsed = -1, trainTimeStart, trainTimeElapsed, testTimeStart, testTimeElapsed;
String costName = train.relationName() + CostMatrix.FILE_EXTENSION;
File costFile = new File(getOnDemandDirectory(), costName);
if (!costFile.exists()) {
throw new Exception("On-demand cost file doesn't exist: " + costFile);
}
CostMatrix costMatrix = new CostMatrix(new BufferedReader(new FileReader(
costFile)));
Evaluation eval = new Evaluation(train, costMatrix);
m_Classifier = AbstractClassifier.makeCopy(m_Template);
trainTimeStart = System.currentTimeMillis();
if (canMeasureCPUTime) {
CPUStartTime = thMonitor.getThreadUserTime(thID);
}
m_Classifier.buildClassifier(train);
if (canMeasureCPUTime) {
trainCPUTimeElapsed = thMonitor.getThreadUserTime(thID) - CPUStartTime;
}
trainTimeElapsed = System.currentTimeMillis() - trainTimeStart;
testTimeStart = System.currentTimeMillis();
if (canMeasureCPUTime) {
CPUStartTime = thMonitor.getThreadUserTime(thID);
}
eval.evaluateModel(m_Classifier, test);
if (canMeasureCPUTime) {
testCPUTimeElapsed = thMonitor.getThreadUserTime(thID) - CPUStartTime;
}
testTimeElapsed = System.currentTimeMillis() - testTimeStart;
thMonitor = null;
m_result = eval.toSummaryString();
// The results stored are all per instance -- can be multiplied by the
// number of instances to get absolute numbers
int current = 0;
result[current++] = new Double(train.numInstances());
result[current++] = new Double(eval.numInstances());
result[current++] = new Double(eval.correct());
result[current++] = new Double(eval.incorrect());
result[current++] = new Double(eval.unclassified());
result[current++] = new Double(eval.pctCorrect());
result[current++] = new Double(eval.pctIncorrect());
result[current++] = new Double(eval.pctUnclassified());
result[current++] = new Double(eval.totalCost());
result[current++] = new Double(eval.avgCost());
result[current++] = new Double(eval.meanAbsoluteError());
result[current++] = new Double(eval.rootMeanSquaredError());
result[current++] = new Double(eval.relativeAbsoluteError());
result[current++] = new Double(eval.rootRelativeSquaredError());
result[current++] = new Double(eval.SFPriorEntropy());
result[current++] = new Double(eval.SFSchemeEntropy());
result[current++] = new Double(eval.SFEntropyGain());
result[current++] = new Double(eval.SFMeanPriorEntropy());
result[current++] = new Double(eval.SFMeanSchemeEntropy());
result[current++] = new Double(eval.SFMeanEntropyGain());
// K&B stats
result[current++] = new Double(eval.KBInformation());
result[current++] = new Double(eval.KBMeanInformation());
result[current++] = new Double(eval.KBRelativeInformation());
// Timing stats
result[current++] = new Double(trainTimeElapsed / 1000.0);
result[current++] = new Double(testTimeElapsed / 1000.0);
if (canMeasureCPUTime) {
result[current++] =
new Double((trainCPUTimeElapsed / 1000000.0) / 1000.0);
result[current++] = new Double((testCPUTimeElapsed / 1000000.0) / 1000.0);
result[current++] =
new Double(trainCPUTimeElapsed / 1000000.0);
result[current++] = new Double(testCPUTimeElapsed / 1000000.0);
} else {
result[current++] = new Double(Utils.missingValue());
result[current++] = new Double(Utils.missingValue());
result[current++] = new Double(Utils.missingValue());
result[current++] = new Double(Utils.missingValue());
}
// sizes
ByteArrayOutputStream bastream = new ByteArrayOutputStream();
ObjectOutputStream oostream = new ObjectOutputStream(bastream);
oostream.writeObject(m_Classifier);
result[current++] = new Double(bastream.size());
bastream = new ByteArrayOutputStream();
oostream = new ObjectOutputStream(bastream);
oostream.writeObject(train);
result[current++] = new Double(bastream.size());
bastream = new ByteArrayOutputStream();
oostream = new ObjectOutputStream(bastream);
oostream.writeObject(test);
result[current++] = new Double(bastream.size());
if (m_Classifier instanceof Summarizable) {
result[current++] = ((Summarizable) m_Classifier).toSummaryString();
} else {
result[current++] = null;
}
for (int i = 0; i < addm; i++) {
if (m_doesProduce[i]) {
try {
double dv = ((AdditionalMeasureProducer) m_Classifier)
.getMeasure(m_AdditionalMeasures[i]);
if (!Utils.isMissingValue(dv)) {
Double value = new Double(dv);
result[current++] = value;
} else {
result[current++] = null;
}
} catch (Exception ex) {
System.err.println(ex);
}
} else {
result[current++] = null;
}
}
if (current != RESULT_SIZE + addm) {
throw new Error("Results didn't fit RESULT_SIZE");
}
m_Evaluation = eval;
return result;
}
/**
* Returns a text description of the split evaluator.
*
* @return a text description of the split evaluator.
*/
@Override
public String toString() {
String result = "CostSensitiveClassifierSplitEvaluator: ";
if (m_Template == null) {
return result + "<null> classifier";
}
return result + m_Template.getClass().getName() + " " + m_ClassifierOptions
+ "(version " + m_ClassifierVersion + ")";
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
} // CostSensitiveClassifierSplitEvaluator
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/CrossValidationResultProducer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CrossValidationResultProducer.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.File;
import java.util.Calendar;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.TimeZone;
import java.util.Vector;
import weka.core.AdditionalMeasureProducer;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
 * <!-- globalinfo-start --> Carries out an n-fold cross-validation for each
 * run, using the set SplitEvaluator to generate some results. If the class
 * attribute is nominal, the dataset is stratified. Results are generated for
 * each fold, so you may wish to use this in conjunction with an
 * AveragingResultProducer to obtain averages for each run.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -X <number of folds>
* The number of folds to use for the cross-validation.
* (default 10)
* </pre>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
 * If a directory name is specified then the individual
 * outputs will be gzipped, otherwise all output will be
 * zipped to the named file. Use in conjunction with -D. (default splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the split evaluator.
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class CrossValidationResultProducer implements ResultProducer,
OptionHandler, AdditionalMeasureProducer, RevisionHandler {
/** for serialization */
static final long serialVersionUID = -1580053925080091917L;
/** The dataset of interest */
protected Instances m_Instances;
/** The ResultListener to send results to */
protected ResultListener m_ResultListener = new CSVResultListener();
/** The number of folds in the cross-validation */
protected int m_NumFolds = 10;
/** Save raw output of split evaluators --- for debugging purposes */
protected boolean m_debugOutput = false;
/** The output zipper to use for saving raw splitEvaluator output */
protected OutputZipper m_ZipDest = null;
/** The destination output file/directory for raw output */
protected File m_OutputFile = new File(new File(
System.getProperty("user.dir")), "splitEvalutorOut.zip");
/** The SplitEvaluator used to generate results */
protected SplitEvaluator m_SplitEvaluator = new ClassifierSplitEvaluator();
/** The names of any additional measures to look for in SplitEvaluators */
protected String[] m_AdditionalMeasures = null;
/** The name of the key field containing the dataset name */
public static String DATASET_FIELD_NAME = "Dataset";
/** The name of the key field containing the run number */
public static String RUN_FIELD_NAME = "Run";
/** The name of the key field containing the fold number */
public static String FOLD_FIELD_NAME = "Fold";
/** The name of the result field containing the timestamp */
public static String TIMESTAMP_FIELD_NAME = "Date_time";
/**
* Returns a string describing this result producer
*
* @return a description of the result producer suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "Generates for each run, carries out an n-fold cross-validation, "
+ "using the set SplitEvaluator to generate some results. If the class "
+ "attribute is nominal, the dataset is stratified. Results for each fold "
+ "are generated, so you may wish to use this in addition with an "
+ "AveragingResultProducer to obtain averages for each run.";
}
/**
* Sets the dataset that results will be obtained for.
*
* @param instances a value of type 'Instances'.
*/
@Override
public void setInstances(Instances instances) {
m_Instances = instances;
}
/**
* Sets the object to send results of each run to.
*
* @param listener a value of type 'ResultListener'
*/
@Override
public void setResultListener(ResultListener listener) {
m_ResultListener = listener;
}
/**
* Set a list of method names for additional measures to look for in
* SplitEvaluators. This could contain many measures (of which only a subset
 * may be producible by the current SplitEvaluator) if an experiment is the
* type that iterates over a set of properties.
*
* @param additionalMeasures an array of measure names, null if none
*/
@Override
public void setAdditionalMeasures(String[] additionalMeasures) {
m_AdditionalMeasures = additionalMeasures;
if (m_SplitEvaluator != null) {
System.err.println("CrossValidationResultProducer: setting additional "
+ "measures for " + "split evaluator");
m_SplitEvaluator.setAdditionalMeasures(m_AdditionalMeasures);
}
}
/**
* Returns an enumeration of any additional measure names that might be in the
* SplitEvaluator
*
* @return an enumeration of the measure names
*/
@Override
public Enumeration<String> enumerateMeasures() {
Vector<String> newVector = new Vector<String>();
if (m_SplitEvaluator instanceof AdditionalMeasureProducer) {
Enumeration<String> en = ((AdditionalMeasureProducer) m_SplitEvaluator)
.enumerateMeasures();
while (en.hasMoreElements()) {
String mname = en.nextElement();
newVector.addElement(mname);
}
}
return newVector.elements();
}
/**
* Returns the value of the named measure
*
* @param additionalMeasureName the name of the measure to query for its value
* @return the value of the named measure
* @throws IllegalArgumentException if the named measure is not supported
*/
@Override
public double getMeasure(String additionalMeasureName) {
if (m_SplitEvaluator instanceof AdditionalMeasureProducer) {
return ((AdditionalMeasureProducer) m_SplitEvaluator)
.getMeasure(additionalMeasureName);
} else {
throw new IllegalArgumentException("CrossValidationResultProducer: "
+ "Can't return value for : " + additionalMeasureName + ". "
+ m_SplitEvaluator.getClass().getName() + " "
+ "is not an AdditionalMeasureProducer");
}
}
/**
* Gets a Double representing the current date and time. eg: 1:46pm on
* 20/5/1999 -> 19990520.1346
*
* @return a value of type Double
*/
public static Double getTimestamp() {
Calendar now = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
double timestamp = now.get(Calendar.YEAR) * 10000
+ (now.get(Calendar.MONTH) + 1) * 100 + now.get(Calendar.DAY_OF_MONTH)
+ now.get(Calendar.HOUR_OF_DAY) / 100.0 + now.get(Calendar.MINUTE)
/ 10000.0;
return new Double(timestamp);
}
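  // Worked example of the encoding above: 20 May 1999 at 13:46 UTC gives
  //   1999 * 10000 + (4 + 1) * 100 + 20 + 13 / 100.0 + 46 / 10000.0 = 19990520.1346
  // which matches the "1:46pm on 20/5/1999 -> 19990520.1346" example in the
  // Javadoc; Calendar.MONTH is zero-based, hence the "+ 1".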
/**
* Prepare to generate results.
*
* @throws Exception if an error occurs during preprocessing.
*/
@Override
public void preProcess() throws Exception {
if (m_SplitEvaluator == null) {
throw new Exception("No SplitEvalutor set");
}
if (m_ResultListener == null) {
throw new Exception("No ResultListener set");
}
m_ResultListener.preProcess(this);
}
/**
* Perform any postprocessing. When this method is called, it indicates that
* no more requests to generate results for the current experiment will be
* sent.
*
* @throws Exception if an error occurs
*/
@Override
public void postProcess() throws Exception {
m_ResultListener.postProcess(this);
if (m_debugOutput) {
if (m_ZipDest != null) {
m_ZipDest.finished();
m_ZipDest = null;
}
}
}
/**
* Gets the keys for a specified run number. Different run numbers correspond
* to different randomizations of the data. Keys produced should be sent to
* the current ResultListener
*
* @param run the run number to get keys for.
* @throws Exception if a problem occurs while getting the keys
*/
@Override
public void doRunKeys(int run) throws Exception {
if (m_Instances == null) {
throw new Exception("No Instances set");
}
/*
* // Randomize on a copy of the original dataset Instances runInstances =
* new Instances(m_Instances); runInstances.randomize(new Random(run)); if
* (runInstances.classAttribute().isNominal()) {
* runInstances.stratify(m_NumFolds); }
*/
for (int fold = 0; fold < m_NumFolds; fold++) {
// Add in some fields to the key like run and fold number, dataset name
Object[] seKey = m_SplitEvaluator.getKey();
Object[] key = new Object[seKey.length + 3];
key[0] = Utils.backQuoteChars(m_Instances.relationName());
key[1] = "" + run;
key[2] = "" + (fold + 1);
System.arraycopy(seKey, 0, key, 3, seKey.length);
if (m_ResultListener.isResultRequired(this, key)) {
try {
m_ResultListener.acceptResult(this, key, null);
} catch (Exception ex) {
// Save the train and test datasets for debugging purposes?
throw ex;
}
}
}
}
/**
* Gets the results for a specified run number. Different run numbers
* correspond to different randomizations of the data. Results produced should
* be sent to the current ResultListener
*
* @param run the run number to get results for.
* @throws Exception if a problem occurs while getting the results
*/
@Override
public void doRun(int run) throws Exception {
if (getRawOutput()) {
if (m_ZipDest == null) {
m_ZipDest = new OutputZipper(m_OutputFile);
}
}
if (m_Instances == null) {
throw new Exception("No Instances set");
}
// Randomize on a copy of the original dataset
Instances runInstances = new Instances(m_Instances);
Random random = new Random(run);
runInstances.randomize(random);
if (runInstances.classAttribute().isNominal()) {
runInstances.stratify(m_NumFolds);
}
for (int fold = 0; fold < m_NumFolds; fold++) {
// Add in some fields to the key like run and fold number, dataset name
Object[] seKey = m_SplitEvaluator.getKey();
Object[] key = new Object[seKey.length + 3];
key[0] = Utils.backQuoteChars(m_Instances.relationName());
key[1] = "" + run;
key[2] = "" + (fold + 1);
System.arraycopy(seKey, 0, key, 3, seKey.length);
if (m_ResultListener.isResultRequired(this, key)) {
Instances train = runInstances.trainCV(m_NumFolds, fold, random);
Instances test = runInstances.testCV(m_NumFolds, fold);
try {
Object[] seResults = m_SplitEvaluator.getResult(train, test);
Object[] results = new Object[seResults.length + 1];
results[0] = getTimestamp();
System.arraycopy(seResults, 0, results, 1, seResults.length);
if (m_debugOutput) {
String resultName = ("" + run + "." + (fold + 1) + "."
+ Utils.backQuoteChars(runInstances.relationName()) + "." + m_SplitEvaluator
.toString()).replace(' ', '_');
resultName = Utils.removeSubstring(resultName, "weka.classifiers.");
resultName = Utils.removeSubstring(resultName, "weka.filters.");
resultName = Utils.removeSubstring(resultName,
"weka.attributeSelection.");
m_ZipDest.zipit(m_SplitEvaluator.getRawResultOutput(), resultName);
}
m_ResultListener.acceptResult(this, key, results);
} catch (Exception ex) {
// Save the train and test datasets for debugging purposes?
throw ex;
}
}
}
}
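  // Sketch of what doRun produces (dataset name is illustrative): for run 1,
  // fold 3 of a dataset named "iris" the key array is
  //   { "iris", "1", "3", <SplitEvaluator key fields...> }
  // and the matching result array starts with the timestamp Double, followed
  // by whatever the SplitEvaluator returned for that train/test split.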
/**
* Gets the names of each of the columns produced for a single run. This
* method should really be static.
*
* @return an array containing the name of each column
*/
@Override
public String[] getKeyNames() {
String[] keyNames = m_SplitEvaluator.getKeyNames();
// Add in the names of our extra key fields
String[] newKeyNames = new String[keyNames.length + 3];
newKeyNames[0] = DATASET_FIELD_NAME;
newKeyNames[1] = RUN_FIELD_NAME;
newKeyNames[2] = FOLD_FIELD_NAME;
System.arraycopy(keyNames, 0, newKeyNames, 3, keyNames.length);
return newKeyNames;
}
/**
* Gets the data types of each of the columns produced for a single run. This
* method should really be static.
*
* @return an array containing objects of the type of each column. The objects
* should be Strings, or Doubles.
*/
@Override
public Object[] getKeyTypes() {
Object[] keyTypes = m_SplitEvaluator.getKeyTypes();
// Add in the types of our extra fields
Object[] newKeyTypes = new String[keyTypes.length + 3];
newKeyTypes[0] = new String();
newKeyTypes[1] = new String();
newKeyTypes[2] = new String();
System.arraycopy(keyTypes, 0, newKeyTypes, 3, keyTypes.length);
return newKeyTypes;
}
/**
* Gets the names of each of the columns produced for a single run. This
* method should really be static.
*
* @return an array containing the name of each column
*/
@Override
public String[] getResultNames() {
String[] resultNames = m_SplitEvaluator.getResultNames();
// Add in the names of our extra Result fields
String[] newResultNames = new String[resultNames.length + 1];
newResultNames[0] = TIMESTAMP_FIELD_NAME;
System.arraycopy(resultNames, 0, newResultNames, 1, resultNames.length);
return newResultNames;
}
/**
* Gets the data types of each of the columns produced for a single run. This
* method should really be static.
*
* @return an array containing objects of the type of each column. The objects
* should be Strings, or Doubles.
*/
@Override
public Object[] getResultTypes() {
Object[] resultTypes = m_SplitEvaluator.getResultTypes();
// Add in the types of our extra Result fields
Object[] newResultTypes = new Object[resultTypes.length + 1];
newResultTypes[0] = new Double(0);
System.arraycopy(resultTypes, 0, newResultTypes, 1, resultTypes.length);
return newResultTypes;
}
/**
* Gets a description of the internal settings of the result producer,
* sufficient for distinguishing a ResultProducer instance from another with
* different settings (ignoring those settings set through this interface).
* For example, a cross-validation ResultProducer may have a setting for the
* number of folds. For a given state, the results produced should be
* compatible. Typically if a ResultProducer is an OptionHandler, this string
* will represent the command line arguments required to set the
* ResultProducer to that state.
*
* @return the description of the ResultProducer state, or null if no state is
* defined
*/
@Override
public String getCompatibilityState() {
String result = "-X " + m_NumFolds + " ";
if (m_SplitEvaluator == null) {
result += "<null SplitEvaluator>";
} else {
result += "-W " + m_SplitEvaluator.getClass().getName();
}
return result + " --";
}
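  // With the defaults above this returns
  //   "-X 10 -W weka.experiment.ClassifierSplitEvaluator --"
  // so two producers are only considered compatible when both the fold count
  // and the SplitEvaluator class agree.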
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String outputFileTipText() {
return "Set the destination for saving raw output. If the rawOutput "
+ "option is selected, then output from the splitEvaluator for "
+ "individual folds is saved. If the destination is a directory, "
+ "then each output is saved to an individual gzip file; if the "
+ "destination is a file, then each output is saved as an entry "
+ "in a zip file.";
}
/**
* Get the value of OutputFile.
*
* @return Value of OutputFile.
*/
public File getOutputFile() {
return m_OutputFile;
}
/**
* Set the value of OutputFile.
*
* @param newOutputFile Value to assign to OutputFile.
*/
public void setOutputFile(File newOutputFile) {
m_OutputFile = newOutputFile;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numFoldsTipText() {
return "Number of folds to use in cross validation.";
}
/**
* Get the value of NumFolds.
*
* @return Value of NumFolds.
*/
public int getNumFolds() {
return m_NumFolds;
}
/**
* Set the value of NumFolds.
*
* @param newNumFolds Value to assign to NumFolds.
*/
public void setNumFolds(int newNumFolds) {
m_NumFolds = newNumFolds;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String rawOutputTipText() {
return "Save raw output (useful for debugging). If set, then output is "
+ "sent to the destination specified by outputFile";
}
/**
* Get if raw split evaluator output is to be saved
*
 * @return true if raw split evaluator output is to be saved
*/
public boolean getRawOutput() {
return m_debugOutput;
}
/**
* Set to true if raw split evaluator output is to be saved
*
* @param d true if output is to be saved
*/
public void setRawOutput(boolean d) {
m_debugOutput = d;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String splitEvaluatorTipText() {
return "The evaluator to apply to the cross validation folds. "
+ "This may be a classifier, regression scheme etc.";
}
/**
* Get the SplitEvaluator.
*
* @return the SplitEvaluator.
*/
public SplitEvaluator getSplitEvaluator() {
return m_SplitEvaluator;
}
/**
* Set the SplitEvaluator.
*
* @param newSplitEvaluator new SplitEvaluator to use.
*/
public void setSplitEvaluator(SplitEvaluator newSplitEvaluator) {
m_SplitEvaluator = newSplitEvaluator;
m_SplitEvaluator.setAdditionalMeasures(m_AdditionalMeasures);
}
/**
 * Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(4);
newVector.addElement(new Option(
"\tThe number of folds to use for the cross-validation.\n"
+ "\t(default 10)", "X", 1, "-X <number of folds>"));
newVector.addElement(new Option("Save raw split evaluator output.", "D", 0,
"-D"));
newVector.addElement(new Option(
"\tThe filename where raw output will be stored.\n"
+ "\tIf a directory name is specified then then individual\n"
+ "\toutputs will be gzipped, otherwise all output will be\n"
+ "\tzipped to the named file. Use in conjuction with -D."
+ "\t(default splitEvalutorOut.zip)", "O", 1,
"-O <file/directory name/path>"));
newVector.addElement(new Option(
"\tThe full class name of a SplitEvaluator.\n"
+ "\teg: weka.experiment.ClassifierSplitEvaluator", "W", 1,
"-W <class name>"));
if ((m_SplitEvaluator != null)
&& (m_SplitEvaluator instanceof OptionHandler)) {
newVector.addElement(new Option("", "", 0,
"\nOptions specific to split evaluator "
+ m_SplitEvaluator.getClass().getName() + ":"));
newVector.addAll(Collections.list(((OptionHandler) m_SplitEvaluator)
.listOptions()));
}
return newVector.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -X <number of folds>
* The number of folds to use for the cross-validation.
* (default 10)
* </pre>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
 * If a directory name is specified then individual
 * outputs will be gzipped, otherwise all output will be
 * zipped to the named file. Use in conjunction with -D. (default splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the split evaluator.
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
setRawOutput(Utils.getFlag('D', options));
String fName = Utils.getOption('O', options);
if (fName.length() != 0) {
setOutputFile(new File(fName));
}
String numFolds = Utils.getOption('X', options);
if (numFolds.length() != 0) {
setNumFolds(Integer.parseInt(numFolds));
} else {
setNumFolds(10);
}
String seName = Utils.getOption('W', options);
if (seName.length() == 0) {
throw new Exception("A SplitEvaluator must be specified with"
+ " the -W option.");
}
// Do it first without options, so if an exception is thrown during
// the option setting, listOptions will contain options for the actual
// SE.
setSplitEvaluator((SplitEvaluator) Utils.forName(SplitEvaluator.class,
seName, null));
if (getSplitEvaluator() instanceof OptionHandler) {
((OptionHandler) getSplitEvaluator()).setOptions(Utils
.partitionOptions(options));
}
}
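  // Example option string accepted above (file name and classifier are
  // illustrative):
  //   -X 5 -D -O /tmp/rawOut.zip -W weka.experiment.ClassifierSplitEvaluator -- -W weka.classifiers.rules.ZeroR
  // i.e. 5-fold cross-validation with raw output saved, with everything after
  // "--" handed on to the split evaluator.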
/**
* Gets the current settings of the result producer.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> options = new Vector<String>();
options.add("-X");
options.add("" + getNumFolds());
if (getRawOutput()) {
options.add("-D");
}
options.add("-O");
options.add(getOutputFile().getName());
if (getSplitEvaluator() != null) {
options.add("-W");
options.add(getSplitEvaluator().getClass().getName());
}
if ((m_SplitEvaluator != null)
&& (m_SplitEvaluator instanceof OptionHandler)) {
String[] opts = ((OptionHandler) m_SplitEvaluator).getOptions();
if (opts.length > 0) {
options.add("--");
Collections.addAll(options, opts);
}
}
return options.toArray(new String[0]);
}
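  // With the defaults this yields roughly
  //   { "-X", "10", "-O", "splitEvalutorOut.zip", "-W",
  //     "weka.experiment.ClassifierSplitEvaluator", "--", ... }
  // where everything after "--" comes from the split evaluator's own getOptions().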
/**
 * Gets a text description of the result producer.
*
* @return a text description of the result producer.
*/
@Override
public String toString() {
String result = "CrossValidationResultProducer: ";
result += getCompatibilityState();
if (m_Instances == null) {
result += ": <null Instances>";
} else {
result += ": " + Utils.backQuoteChars(m_Instances.relationName());
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Quick test of timestamp
*
* @param args the commandline options
*/
public static void main(String[] args) {
System.err.println(Utils.doubleToString(getTimestamp().doubleValue(), 4));
}
} // CrossValidationResultProducer
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/CrossValidationSplitResultProducer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* CrossValidationSplitResultProducer.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.util.Random;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> Carries out one split of a repeated k-fold
* cross-validation, using the set SplitEvaluator to generate some results. Note
* that the run number is actually the nth split of a repeated k-fold
* cross-validation, i.e. if k=10, run number 100 is the 10th fold of the 10th
* cross-validation run. This producer's sole purpose is to allow more
* fine-grained distribution of cross-validation experiments. If the class
* attribute is nominal, the dataset is stratified.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -X <number of folds>
* The number of folds to use for the cross-validation.
* (default 10)
* </pre>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
 * If a directory name is specified then individual
 * outputs will be gzipped, otherwise all output will be
 * zipped to the named file. Use in conjunction with -D. (default splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the split evaluator.
*
* @author Len Trigg
* @author Eibe Frank
* @version $Revision$
*/
public class CrossValidationSplitResultProducer extends
CrossValidationResultProducer {
/** for serialization */
static final long serialVersionUID = 1403798164046795073L;
/**
* Returns a string describing this result producer
*
* @return a description of the result producer suitable for displaying in the
* explorer/experimenter gui
*/
@Override
public String globalInfo() {
return "Carries out one split of a repeated k-fold cross-validation, "
+ "using the set SplitEvaluator to generate some results. "
+ "Note that the run number is actually the nth split of a repeated "
+ "k-fold cross-validation, i.e. if k=10, run number 100 is the 10th "
+ "fold of the 10th cross-validation run. This producer's sole purpose "
+ "is to allow more fine-grained distribution of cross-validation "
+ "experiments. If the class attribute is nominal, the dataset is stratified.";
}
/**
* Gets the keys for a specified run number. Different run numbers correspond
* to different randomizations of the data. Keys produced should be sent to
* the current ResultListener
*
* @param run the run number to get keys for.
* @throws Exception if a problem occurs while getting the keys
*/
@Override
public void doRunKeys(int run) throws Exception {
if (m_Instances == null) {
throw new Exception("No Instances set");
}
// Add in some fields to the key like run and fold number, dataset name
Object[] seKey = m_SplitEvaluator.getKey();
Object[] key = new Object[seKey.length + 3];
key[0] = Utils.backQuoteChars(m_Instances.relationName());
key[2] = "" + (((run - 1) % m_NumFolds) + 1);
key[1] = "" + (((run - 1) / m_NumFolds) + 1);
System.arraycopy(seKey, 0, key, 3, seKey.length);
if (m_ResultListener.isResultRequired(this, key)) {
try {
m_ResultListener.acceptResult(this, key, null);
} catch (Exception ex) {
// Save the train and test datasets for debugging purposes?
throw ex;
}
}
}
/**
* Gets the results for a specified run number. Different run numbers
* correspond to different randomizations of the data. Results produced should
* be sent to the current ResultListener
*
* @param run the run number to get results for.
* @throws Exception if a problem occurs while getting the results
*/
@Override
public void doRun(int run) throws Exception {
if (getRawOutput()) {
if (m_ZipDest == null) {
m_ZipDest = new OutputZipper(m_OutputFile);
}
}
if (m_Instances == null) {
throw new Exception("No Instances set");
}
// Compute run and fold number from given run
int fold = (run - 1) % m_NumFolds;
run = ((run - 1) / m_NumFolds) + 1;
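    // For example, with m_NumFolds == 10 an incoming run of 100 becomes
    // fold == 9 (the 10th fold) and run == 10 (the 10th cross-validation
    // repetition), as described in the class Javadoc.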
// Randomize on a copy of the original dataset
Instances runInstances = new Instances(m_Instances);
Random random = new Random(run);
runInstances.randomize(random);
if (runInstances.classAttribute().isNominal()) {
runInstances.stratify(m_NumFolds);
}
// Add in some fields to the key like run and fold number, dataset name
Object[] seKey = m_SplitEvaluator.getKey();
Object[] key = new Object[seKey.length + 3];
key[0] = Utils.backQuoteChars(m_Instances.relationName());
key[1] = "" + run;
key[2] = "" + (fold + 1);
System.arraycopy(seKey, 0, key, 3, seKey.length);
if (m_ResultListener.isResultRequired(this, key)) {
// Just to make behaviour absolutely consistent with
// CrossValidationResultProducer
for (int tempFold = 0; tempFold < fold; tempFold++) {
runInstances.trainCV(m_NumFolds, tempFold, random);
}
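      // Each trainCV call above consumes values from the shared Random, so
      // replaying the earlier folds leaves the generator in the same state it
      // would have after CrossValidationResultProducer's own fold loop,
      // giving an identically shuffled training set for the requested fold.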
Instances train = runInstances.trainCV(m_NumFolds, fold, random);
Instances test = runInstances.testCV(m_NumFolds, fold);
try {
Object[] seResults = m_SplitEvaluator.getResult(train, test);
Object[] results = new Object[seResults.length + 1];
results[0] = getTimestamp();
System.arraycopy(seResults, 0, results, 1, seResults.length);
if (m_debugOutput) {
String resultName = ("" + run + "." + (fold + 1) + "."
+ Utils.backQuoteChars(runInstances.relationName()) + "." + m_SplitEvaluator
.toString()).replace(' ', '_');
resultName = Utils.removeSubstring(resultName, "weka.classifiers.");
resultName = Utils.removeSubstring(resultName, "weka.filters.");
resultName = Utils.removeSubstring(resultName,
"weka.attributeSelection.");
m_ZipDest.zipit(m_SplitEvaluator.getRawResultOutput(), resultName);
}
m_ResultListener.acceptResult(this, key, results);
} catch (Exception ex) {
// Save the train and test datasets for debugging purposes?
throw ex;
}
}
}
/**
 * Gets a text description of the result producer.
*
* @return a text description of the result producer.
*/
@Override
public String toString() {
String result = "CrossValidationSplitResultProducer: ";
result += getCompatibilityState();
if (m_Instances == null) {
result += ": <null Instances>";
} else {
result += ": " + Utils.backQuoteChars(m_Instances.relationName());
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
} // CrossValidationSplitResultProducer
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/DatabaseResultListener.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* DatabaseResultListener.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.util.ArrayList;
import weka.core.RevisionUtils;
/**
* <!-- globalinfo-start --> Takes results from a result producer and sends them
* to a database.
* <p/>
* <!-- globalinfo-end -->
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class DatabaseResultListener extends DatabaseUtils implements
ResultListener {
/** for serialization */
static final long serialVersionUID = 7388014746954652818L;
/** The ResultProducer to listen to */
protected ResultProducer m_ResultProducer;
/** The name of the current results table */
protected String m_ResultsTableName;
/** Holds the name of the key field to cache upon, or null if no caching */
protected String m_CacheKeyName = "";
/** Stores the index of the key column holding the cache key data */
protected int m_CacheKeyIndex;
/** Stores the key for which the cache is valid */
protected Object[] m_CacheKey;
/** Stores the cached values */
protected ArrayList<String> m_Cache = new ArrayList<String>();
/**
* Returns a string describing this result listener
*
* @return a description of the result listener suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "Takes results from a result producer and sends them to a "
+ "database.";
}
/**
* Sets up the database drivers
*
* @throws Exception if an error occurs
*/
public DatabaseResultListener() throws Exception {
super();
}
/**
* Prepare for the results to be received.
*
* @param rp the ResultProducer that will generate the results
* @throws Exception if an error occurs during preprocessing.
*/
@Override
public void preProcess(ResultProducer rp) throws Exception {
m_ResultProducer = rp;
// Connect to the database and find out what table corresponds to this
// ResultProducer
updateResultsTableName(m_ResultProducer);
}
/**
* Perform any postprocessing. When this method is called, it indicates that
* no more results will be sent that need to be grouped together in any way.
*
* @param rp the ResultProducer that generated the results
* @throws Exception if an error occurs
*/
@Override
public void postProcess(ResultProducer rp) throws Exception {
if (m_ResultProducer != rp) {
throw new Error("Unrecognized ResultProducer calling postProcess!!");
}
disconnectFromDatabase();
}
/**
* Determines if there are any constraints (imposed by the destination) on any
* additional measures produced by resultProducers. Null should be returned if
* there are NO constraints, otherwise a list of column names should be
* returned as an array of Strings. In the case of DatabaseResultListener, the
* structure of an existing database will impose constraints.
*
* @param rp the ResultProducer to which the constraints will apply
 * @return an array of column names to which the resultProducer's results will be
* restricted.
* @throws Exception if an error occurs.
*/
@Override
public String[] determineColumnConstraints(ResultProducer rp)
throws Exception {
ArrayList<String> cNames = new ArrayList<String>();
updateResultsTableName(rp);
DatabaseMetaData dbmd = m_Connection.getMetaData();
ResultSet rs;
// gets a result set where each row is info on a column
if (m_checkForUpperCaseNames) {
rs = dbmd.getColumns(null, null, m_ResultsTableName.toUpperCase(), null);
} else {
rs = dbmd.getColumns(null, null, m_ResultsTableName, null);
}
boolean tableExists = false;
int numColumns = 0;
while (rs.next()) {
tableExists = true;
// column four contains the column name
String name = rs.getString(4);
if (name.toLowerCase().startsWith("measure")) {
numColumns++;
cNames.add(name);
}
}
// no constraints on any additional measures if the table does not exist
if (!tableExists) {
return null;
}
// a zero element array indicates maximum constraint
String[] columnNames = new String[numColumns];
for (int i = 0; i < numColumns; i++) {
columnNames[i] = (cNames.get(i));
}
return columnNames;
}
/**
* Submit the result to the appropriate table of the database
*
* @param rp the ResultProducer that generated the result
* @param key The key for the results.
* @param result The actual results.
* @throws Exception if the result couldn't be sent to the database
*/
@Override
public void acceptResult(ResultProducer rp, Object[] key, Object[] result)
throws Exception {
if (m_ResultProducer != rp) {
throw new Error("Unrecognized ResultProducer calling acceptResult!!");
}
// null result could occur from a chain of doRunKeys calls
if (result != null) {
putResultInTable(m_ResultsTableName, rp, key, result);
}
}
/**
 * Determines whether a result is required for the given key, i.e. whether it
 * is not already present in the results table (or in the key cache, if one
 * is configured).
*
* @param rp the ResultProducer wanting to generate the result
* @param key The key for which a result may be needed.
* @return true if the result should be calculated.
* @throws Exception if the database couldn't be queried
*/
@Override
public boolean isResultRequired(ResultProducer rp, Object[] key)
throws Exception {
if (m_ResultProducer != rp) {
throw new Error("Unrecognized ResultProducer calling isResultRequired!");
}
if (m_Debug) {
System.err.print("Is result required...");
for (Object element : key) {
System.err.print(" " + element);
}
System.err.flush();
}
boolean retval = false;
// Check the key cache first
if (!m_CacheKeyName.equals("")) {
if (!isCacheValid(key)) {
loadCache(rp, key);
}
retval = !isKeyInCache(rp, key);
} else {
// Ask whether the results are needed
retval = !isKeyInTable(m_ResultsTableName, rp, key);
}
if (m_Debug) {
System.err.println(" ..." + (retval ? "required" : "not required")
+ (m_CacheKeyName.equals("") ? "" : " (cache)"));
System.err.flush();
}
return retval;
}
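  // When a cache key such as "Fold" is configured, a single SELECT in
  // loadCache() pulls all cached-key values for the surrounding key into
  // m_Cache, so repeated isResultRequired() calls for the same run become
  // in-memory lookups instead of one database query each.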
/**
* Determines the table name that results will be inserted into. If required:
* a connection will be opened, an experiment index table created, and the
* results table created.
*
* @param rp the ResultProducer
* @throws Exception if an error occurs
*/
protected void updateResultsTableName(ResultProducer rp) throws Exception {
if (!isConnected()) {
connectToDatabase();
}
if (!experimentIndexExists()) {
createExperimentIndex();
}
String tableName = getResultsTableName(rp);
if (tableName == null) {
tableName = createExperimentIndexEntry(rp);
}
if (!tableExists(tableName)) {
createResultsTable(rp, tableName);
}
m_ResultsTableName = tableName;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String cacheKeyNameTipText() {
return "Set the name of the key field by which to cache.";
}
/**
* Get the value of CacheKeyName.
*
* @return Value of CacheKeyName.
*/
public String getCacheKeyName() {
return m_CacheKeyName;
}
/**
* Set the value of CacheKeyName.
*
* @param newCacheKeyName Value to assign to CacheKeyName.
*/
public void setCacheKeyName(String newCacheKeyName) {
m_CacheKeyName = newCacheKeyName;
}
/**
* Checks whether the current cache contents are valid for the supplied key.
*
* @param key the results key
* @return true if the cache contents are valid for the key given
*/
protected boolean isCacheValid(Object[] key) {
if (m_CacheKey == null) {
return false;
}
if (m_CacheKey.length != key.length) {
return false;
}
for (int i = 0; i < key.length; i++) {
if ((i != m_CacheKeyIndex) && (!m_CacheKey[i].equals(key[i]))) {
return false;
}
}
return true;
}
/**
* Returns true if the supplied key is in the key cache (and thus we do not
* need to execute a database query).
*
* @param rp the ResultProducer the key belongs to.
* @param key the result key
* @return true if the key is in the key cache
* @throws Exception if an error occurs
*/
protected boolean isKeyInCache(ResultProducer rp, Object[] key)
throws Exception {
for (int i = 0; i < m_Cache.size(); i++) {
if (m_Cache.get(i).equals(key[m_CacheKeyIndex])) {
return true;
}
}
return false;
}
/**
* Executes a database query to fill the key cache
*
* @param rp the ResultProducer the key belongs to
* @param key the key
* @throws Exception if an error occurs
*/
protected void loadCache(ResultProducer rp, Object[] key) throws Exception {
System.err.print(" (updating cache)");
System.err.flush();
m_Cache.clear();
m_CacheKey = null;
String query = "SELECT Key_" + m_CacheKeyName + " FROM "
+ m_ResultsTableName;
String[] keyNames = rp.getKeyNames();
if (keyNames.length != key.length) {
throw new Exception("Key names and key values of different lengths");
}
m_CacheKeyIndex = -1;
for (int i = 0; i < keyNames.length; i++) {
if (keyNames[i].equalsIgnoreCase(m_CacheKeyName)) {
m_CacheKeyIndex = i;
break;
}
}
if (m_CacheKeyIndex == -1) {
throw new Exception("No key field named " + m_CacheKeyName
+ " (as specified for caching)");
}
boolean first = true;
for (int i = 0; i < key.length; i++) {
if ((key[i] != null) && (i != m_CacheKeyIndex)) {
if (first) {
query += " WHERE ";
first = false;
} else {
query += " AND ";
}
query += "Key_" + keyNames[i] + '=';
if (key[i] instanceof String) {
query += "'" + DatabaseUtils.processKeyString(key[i].toString())
+ "'";
} else {
query += key[i].toString();
}
}
}
ResultSet rs = select(query);
while (rs.next()) {
String keyVal = rs.getString(1);
if (!rs.wasNull()) {
m_Cache.add(keyVal);
}
}
close(rs);
m_CacheKey = key.clone();
}
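  // Shape of the generated query (table, field and key values are
  // illustrative): caching on "Fold" with key fields Dataset, Run, Fold,
  // Scheme, ... yields something like
  //   SELECT Key_Fold FROM <results table>
  //     WHERE Key_Dataset='iris' AND Key_Run='1' AND Key_Scheme='...'
  // i.e. every key column except the cached one constrains the lookup, and
  // each returned Fold value is added to m_Cache.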
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/DatabaseResultProducer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* DatabaseResultProducer.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.AdditionalMeasureProducer;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> Examines a database and extracts out the results
* produced by the specified ResultProducer and submits them to the specified
* ResultListener. If a result needs to be generated, the ResultProducer is used
* to obtain the result.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -F <field name>
* The name of the database field to cache over.
* eg: "Fold" (default none)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a ResultProducer.
* eg: weka.experiment.CrossValidationResultProducer
* </pre>
*
* <pre>
* Options specific to result producer weka.experiment.CrossValidationResultProducer:
* </pre>
*
* <pre>
* -X <number of folds>
* The number of folds to use for the cross-validation.
* (default 10)
* </pre>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
 * If a directory name is specified then individual
 * outputs will be gzipped, otherwise all output will be
 * zipped to the named file. Use in conjunction with -D. (default splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class DatabaseResultProducer extends DatabaseResultListener implements
ResultProducer, OptionHandler, AdditionalMeasureProducer {
/** for serialization */
static final long serialVersionUID = -5620660780203158666L;
/** The dataset of interest */
protected Instances m_Instances;
/** The ResultListener to send results to */
protected ResultListener m_ResultListener = new CSVResultListener();
/** The names of any additional measures to look for in SplitEvaluators */
protected String[] m_AdditionalMeasures = null;
/**
* Returns a string describing this result producer
*
* @return a description of the result producer suitable for displaying in the
* explorer/experimenter gui
*/
@Override
public String globalInfo() {
return "Examines a database and extracts out "
+ "the results produced by the specified ResultProducer "
+ "and submits them to the specified ResultListener. If a result needs "
+ "to be generated, the ResultProducer is used to obtain the result.";
}
/**
 * Creates the DatabaseResultProducer, letting the parent constructor do its
* thing.
*
* @throws Exception if an error occurs
*/
public DatabaseResultProducer() throws Exception {
super();
m_ResultProducer = new CrossValidationResultProducer();
}
/**
* Gets the keys for a specified run number. Different run numbers correspond
* to different randomizations of the data. Keys produced should be sent to
* the current ResultListener
*
* @param run the run number to get keys for.
* @throws Exception if a problem occurs while getting the keys
*/
@Override
public void doRunKeys(int run) throws Exception {
if (m_ResultProducer == null) {
throw new Exception("No ResultProducer set");
}
if (m_ResultListener == null) {
throw new Exception("No ResultListener set");
}
if (m_Instances == null) {
throw new Exception("No Instances set");
}
// Tell the resultproducer to send results to us
m_ResultProducer.setResultListener(this);
m_ResultProducer.setInstances(m_Instances);
m_ResultProducer.doRunKeys(run);
}
/**
* Gets the results for a specified run number. Different run numbers
* correspond to different randomizations of the data. Results produced should
* be sent to the current ResultListener
*
* @param run the run number to get results for.
* @throws Exception if a problem occurs while getting the results
*/
@Override
public void doRun(int run) throws Exception {
if (m_ResultProducer == null) {
throw new Exception("No ResultProducer set");
}
if (m_ResultListener == null) {
throw new Exception("No ResultListener set");
}
if (m_Instances == null) {
throw new Exception("No Instances set");
}
// Tell the resultproducer to send results to us
m_ResultProducer.setResultListener(this);
m_ResultProducer.setInstances(m_Instances);
m_ResultProducer.doRun(run);
}
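  // Note the wiring above: this producer registers itself as the listener of
  // the wrapped ResultProducer, so each result flows from the wrapped
  // producer into acceptResult() below and from there to the database and/or
  // the real m_ResultListener.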
/**
* Prepare for the results to be received.
*
* @param rp the ResultProducer that will generate the results
* @throws Exception if an error occurs during preprocessing.
*/
@Override
public void preProcess(ResultProducer rp) throws Exception {
super.preProcess(rp);
if (m_ResultListener == null) {
throw new Exception("No ResultListener set");
}
m_ResultListener.preProcess(this);
}
/**
* When this method is called, it indicates that no more results will be sent
* that need to be grouped together in any way.
*
* @param rp the ResultProducer that generated the results
* @throws Exception if an error occurs
*/
@Override
public void postProcess(ResultProducer rp) throws Exception {
super.postProcess(rp);
m_ResultListener.postProcess(this);
}
/**
* Prepare to generate results. The ResultProducer should call
* preProcess(this) on the ResultListener it is to send results to.
*
* @throws Exception if an error occurs during preprocessing.
*/
@Override
public void preProcess() throws Exception {
if (m_ResultProducer == null) {
throw new Exception("No ResultProducer set");
}
m_ResultProducer.setResultListener(this);
m_ResultProducer.preProcess();
}
/**
* When this method is called, it indicates that no more requests to generate
* results for the current experiment will be sent. The ResultProducer should
 * call postProcess(this) on the ResultListener it is to send results to.
*
* @throws Exception if an error occurs
*/
@Override
public void postProcess() throws Exception {
m_ResultProducer.postProcess();
}
/**
* Accepts results from a ResultProducer.
*
* @param rp the ResultProducer that generated the results
* @param key an array of Objects (Strings or Doubles) that uniquely identify
* a result for a given ResultProducer with given compatibilityState
* @param result the results stored in an array. The objects stored in the
* array may be Strings, Doubles, or null (for the missing value).
* @throws Exception if the result could not be accepted.
*/
@Override
public void acceptResult(ResultProducer rp, Object[] key, Object[] result)
throws Exception {
if (m_ResultProducer != rp) {
throw new Error("Unrecognized ResultProducer sending results!!");
}
// System.err.println("DBRP::acceptResult");
// Is the result needed by the listener?
boolean isRequiredByListener = m_ResultListener.isResultRequired(this, key);
// Is the result already in the database?
boolean isRequiredByDatabase = super.isResultRequired(rp, key);
// Insert it into the database here
if (isRequiredByDatabase) {
// We could alternatively throw an exception if we only want values
// that are already in the database
if (result != null) {
// null result could occur from a chain of doRunKeys calls
super.acceptResult(rp, key, result);
}
}
// Pass it on
if (isRequiredByListener) {
m_ResultListener.acceptResult(this, key, result);
}
}
/**
* Determines whether the results for a specified key must be generated.
*
* @param rp the ResultProducer wanting to generate the results
* @param key an array of Objects (Strings or Doubles) that uniquely identify
* a result for a given ResultProducer with given compatibilityState
* @return true if the result should be generated
* @throws Exception if it could not be determined if the result is needed.
*/
@Override
public boolean isResultRequired(ResultProducer rp, Object[] key)
throws Exception {
if (m_ResultProducer != rp) {
throw new Error("Unrecognized ResultProducer sending results!!");
}
// System.err.println("DBRP::isResultRequired");
// Is the result needed by the listener?
boolean isRequiredByListener = m_ResultListener.isResultRequired(this, key);
// Is the result already in the database?
boolean isRequiredByDatabase = super.isResultRequired(rp, key);
if (!isRequiredByDatabase && isRequiredByListener) {
// Pass the result through to the listener
Object[] result = getResultFromTable(m_ResultsTableName, rp, key);
System.err.println("Got result from database: "
+ DatabaseUtils.arrayToString(result));
m_ResultListener.acceptResult(this, key, result);
return false;
}
return (isRequiredByListener || isRequiredByDatabase);
}
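  // Summary of the cases above: if the database already holds the result but
  // the listener still needs it, the stored row is forwarded immediately and
  // no recomputation is requested (return false); otherwise the result has to
  // be generated whenever either the listener or the database lacks it.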
/**
* Gets the names of each of the columns produced for a single run.
*
* @return an array containing the name of each column
* @throws Exception if something goes wrong.
*/
@Override
public String[] getKeyNames() throws Exception {
return m_ResultProducer.getKeyNames();
}
/**
* Gets the data types of each of the columns produced for a single run. This
* method should really be static.
*
* @return an array containing objects of the type of each column. The objects
* should be Strings, or Doubles.
* @throws Exception if something goes wrong.
*/
@Override
public Object[] getKeyTypes() throws Exception {
return m_ResultProducer.getKeyTypes();
}
/**
 * Gets the names of each of the columns produced for a single run. These are
 * obtained directly from the wrapped ResultProducer.
*
* @return an array containing the name of each column
* @throws Exception if something goes wrong.
*/
@Override
public String[] getResultNames() throws Exception {
return m_ResultProducer.getResultNames();
}
/**
* Gets the data types of each of the columns produced for a single run.
*
* @return an array containing objects of the type of each column. The objects
* should be Strings, or Doubles.
* @throws Exception if something goes wrong.
*/
@Override
public Object[] getResultTypes() throws Exception {
return m_ResultProducer.getResultTypes();
}
/**
* Gets a description of the internal settings of the result producer,
* sufficient for distinguishing a ResultProducer instance from another with
* different settings (ignoring those settings set through this interface).
* For example, a cross-validation ResultProducer may have a setting for the
* number of folds. For a given state, the results produced should be
* compatible. Typically if a ResultProducer is an OptionHandler, this string
* will represent the command line arguments required to set the
* ResultProducer to that state.
*
* @return the description of the ResultProducer state, or null if no state is
* defined
*/
@Override
public String getCompatibilityState() {
String result = "";
if (m_ResultProducer == null) {
result += "<null ResultProducer>";
} else {
result += "-W " + m_ResultProducer.getClass().getName();
result += " -- " + m_ResultProducer.getCompatibilityState();
}
return result.trim();
}
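  // With the default CrossValidationResultProducer this returns
  //   "-W weka.experiment.CrossValidationResultProducer -- -X 10 -W weka.experiment.ClassifierSplitEvaluator --"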
/**
 * Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(2);
newVector.addElement(new Option(
"\tThe name of the database field to cache over.\n"
+ "\teg: \"Fold\" (default none)", "F", 1, "-F <field name>"));
newVector.addElement(new Option(
"\tThe full class name of a ResultProducer.\n"
+ "\teg: weka.experiment.CrossValidationResultProducer", "W", 1,
"-W <class name>"));
if ((m_ResultProducer != null)
&& (m_ResultProducer instanceof OptionHandler)) {
newVector.addElement(new Option("", "", 0,
"\nOptions specific to result producer "
+ m_ResultProducer.getClass().getName() + ":"));
newVector.addAll(Collections.list(((OptionHandler) m_ResultProducer)
.listOptions()));
}
return newVector.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -F <field name>
* The name of the database field to cache over.
* eg: "Fold" (default none)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a ResultProducer.
* eg: weka.experiment.CrossValidationResultProducer
* </pre>
*
* <pre>
* Options specific to result producer weka.experiment.CrossValidationResultProducer:
* </pre>
*
* <pre>
* -X <number of folds>
* The number of folds to use for the cross-validation.
* (default 10)
* </pre>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
 * If a directory name is specified then individual
 * outputs will be gzipped, otherwise all output will be
 * zipped to the named file. Use in conjunction with -D. (default splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
 * All options after -- will be passed to the result producer.
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
setCacheKeyName(Utils.getOption('F', options));
String rpName = Utils.getOption('W', options);
if (rpName.length() == 0) {
throw new Exception("A ResultProducer must be specified with"
+ " the -W option.");
}
// Do it first without options, so if an exception is thrown during
// the option setting, listOptions will contain options for the actual
// RP.
setResultProducer((ResultProducer) Utils.forName(ResultProducer.class,
rpName, null));
if (getResultProducer() instanceof OptionHandler) {
((OptionHandler) getResultProducer()).setOptions(Utils
.partitionOptions(options));
}
}
/**
* Gets the current settings of the result producer.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
String[] seOptions = new String[0];
if ((m_ResultProducer != null)
&& (m_ResultProducer instanceof OptionHandler)) {
seOptions = ((OptionHandler) m_ResultProducer).getOptions();
}
String[] options = new String[seOptions.length + 8];
int current = 0;
if (!getCacheKeyName().equals("")) {
options[current++] = "-F";
options[current++] = getCacheKeyName();
}
if (getResultProducer() != null) {
options[current++] = "-W";
options[current++] = getResultProducer().getClass().getName();
}
options[current++] = "--";
System.arraycopy(seOptions, 0, options, current, seOptions.length);
current += seOptions.length;
while (current < options.length) {
options[current++] = "";
}
return options;
}
/**
* Set a list of method names for additional measures to look for in
* SplitEvaluators. This could contain many measures (of which only a subset
 * may be producible by the current resultProducer) if an experiment is the
* type that iterates over a set of properties.
*
* @param additionalMeasures an array of measure names, null if none
*/
@Override
public void setAdditionalMeasures(String[] additionalMeasures) {
m_AdditionalMeasures = additionalMeasures;
if (m_ResultProducer != null) {
System.err.println("DatabaseResultProducer: setting additional "
+ "measures for " + "ResultProducer");
m_ResultProducer.setAdditionalMeasures(m_AdditionalMeasures);
}
}
/**
* Returns an enumeration of any additional measure names that might be in the
* result producer
*
* @return an enumeration of the measure names
*/
@Override
public Enumeration<String> enumerateMeasures() {
Vector<String> newVector = new Vector<String>();
if (m_ResultProducer instanceof AdditionalMeasureProducer) {
Enumeration<String> en = ((AdditionalMeasureProducer) m_ResultProducer)
.enumerateMeasures();
while (en.hasMoreElements()) {
String mname = en.nextElement();
newVector.addElement(mname);
}
}
return newVector.elements();
}
/**
* Returns the value of the named measure
*
* @param additionalMeasureName the name of the measure to query for its value
* @return the value of the named measure
* @throws IllegalArgumentException if the named measure is not supported
*/
@Override
public double getMeasure(String additionalMeasureName) {
if (m_ResultProducer instanceof AdditionalMeasureProducer) {
return ((AdditionalMeasureProducer) m_ResultProducer)
.getMeasure(additionalMeasureName);
} else {
throw new IllegalArgumentException("DatabaseResultProducer: "
+ "Can't return value for : " + additionalMeasureName + ". "
+ m_ResultProducer.getClass().getName() + " "
+ "is not an AdditionalMeasureProducer");
}
}
/**
* Sets the dataset that results will be obtained for.
*
* @param instances a value of type 'Instances'.
*/
@Override
public void setInstances(Instances instances) {
m_Instances = instances;
}
/**
* Sets the object to send results of each run to.
*
* @param listener a value of type 'ResultListener'
*/
@Override
public void setResultListener(ResultListener listener) {
m_ResultListener = listener;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String resultProducerTipText() {
return "Set the result producer to use. If some results are not found "
+ "in the source database then this result producer is used to generate "
+ "them.";
}
/**
* Get the ResultProducer.
*
* @return the ResultProducer.
*/
public ResultProducer getResultProducer() {
return m_ResultProducer;
}
/**
* Set the ResultProducer.
*
* @param newResultProducer new ResultProducer to use.
*/
public void setResultProducer(ResultProducer newResultProducer) {
m_ResultProducer = newResultProducer;
}
/**
 * Gets a text description of the result producer.
*
* @return a text description of the result producer.
*/
@Override
public String toString() {
String result = "DatabaseResultProducer: ";
result += getCompatibilityState();
if (m_Instances == null) {
result += ": <null Instances>";
} else {
result += ": " + Utils.backQuoteChars(m_Instances.relationName());
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
} // DatabaseResultProducer
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/DatabaseUtils.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* DatabaseUtils.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.WekaException;
import weka.core.WekaPackageClassLoaderManager;
import weka.core.logging.Logger;
import java.io.File;
import java.io.FileInputStream;
import java.io.Serializable;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.Driver;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Properties;
import java.util.StringTokenizer;
import java.util.Vector;
/**
* DatabaseUtils provides utility functions for accessing the experiment
* database. The jdbc driver and database to be used default to "jdbc.idbDriver"
* and "jdbc:idb=experiments.prp". These may be changed by creating a java
* properties file called DatabaseUtils.props in user.home or the current
* directory. eg:
* <p>
*
* <code><pre>
* jdbcDriver=jdbc.idbDriver
* jdbcURL=jdbc:idb=experiments.prp
* </pre></code>
* <p>
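 *
 * For instance, to point the experiment framework at a different database, the
 * same two keys can be overridden (the driver class and URL below are purely
 * illustrative assumptions, not values shipped with this class):
 * <p>
 *
 * <code><pre>
 * jdbcDriver=com.mysql.jdbc.Driver
 * jdbcURL=jdbc:mysql://localhost:3306/experiments
 * </pre></code>
 * <p>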
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class DatabaseUtils implements Serializable, RevisionHandler {
/** for serialization. */
static final long serialVersionUID = -8252351994547116729L;
/** The name of the table containing the index to experiments. */
public static final String EXP_INDEX_TABLE = "Experiment_index";
/** The name of the column containing the experiment type (ResultProducer). */
public static final String EXP_TYPE_COL = "Experiment_type";
/** The name of the column containing the experiment setup (parameters). */
public static final String EXP_SETUP_COL = "Experiment_setup";
/** The name of the column containing the results table name. */
public static final String EXP_RESULT_COL = "Result_table";
/** The prefix for result table names. */
public static final String EXP_RESULT_PREFIX = "Results";
/** The name of the properties file. */
public final static String PROPERTY_FILE =
"weka/experiment/DatabaseUtils.props";
/** Holds the jdbc drivers to be used (only to stop them being gc'ed). */
protected Vector<String> DRIVERS = new Vector<String>();
/** keeping track of drivers that couldn't be loaded. */
protected static Vector<String> DRIVERS_ERRORS;
/** Properties associated with the database connection. */
protected Properties PROPERTIES;
/* Type mapping used for reading experiment results */
/** Type mapping for STRING used for reading experiment results. */
public static final int STRING = 0;
/** Type mapping for BOOL used for reading experiment results. */
public static final int BOOL = 1;
/** Type mapping for DOUBLE used for reading experiment results. */
public static final int DOUBLE = 2;
/** Type mapping for BYTE used for reading experiment results. */
public static final int BYTE = 3;
/** Type mapping for SHORT used for reading experiment results. */
public static final int SHORT = 4;
/** Type mapping for INTEGER used for reading experiment results. */
public static final int INTEGER = 5;
/** Type mapping for LONG used for reading experiment results. */
public static final int LONG = 6;
/** Type mapping for FLOAT used for reading experiment results. */
public static final int FLOAT = 7;
/** Type mapping for DATE used for reading experiment results. */
public static final int DATE = 8;
/** Type mapping for TEXT used for reading, e.g., text blobs. */
public static final int TEXT = 9;
/** Type mapping for TIME used for reading TIME columns. */
public static final int TIME = 10;
/** Type mapping for TIMESTAMP used for reading java.sql.Timestamp columns */
public static final int TIMESTAMP = 11;
/** Database URL. */
protected String m_DatabaseURL;
/** The prepared statement used for database queries. */
protected transient PreparedStatement m_PreparedStatement;
/** The database connection. */
protected transient Connection m_Connection;
/** True if debugging output should be printed. */
protected boolean m_Debug = false;
/** Database username. */
protected String m_userName = "";
/** Database Password. */
protected String m_password = "";
/* mappings used for creating Tables. Can be overridden in DatabaseUtils.props */
/** string type for the create table statement. */
protected String m_stringType = "LONGVARCHAR";
/** integer type for the create table statement. */
protected String m_intType = "INT";
/** double type for the create table statement. */
protected String m_doubleType = "DOUBLE";
/** For databases where Tables and Columns are created in upper case. */
protected boolean m_checkForUpperCaseNames = false;
/** For databases where Tables and Columns are created in lower case. */
protected boolean m_checkForLowerCaseNames = false;
/** setAutoCommit on the database? */
protected boolean m_setAutoCommit = true;
/** create index on the database? */
protected boolean m_createIndex = false;
/** the keywords for the current database type. */
protected HashSet<String> m_Keywords = new HashSet<String>();
/** the character to mask SQL keywords (by appending this character). */
protected String m_KeywordsMaskChar = "_";
/**
* Reads properties and sets up the database drivers.
*
* @throws Exception if an error occurs
*/
public DatabaseUtils() throws Exception {
this((Properties) null);
}
/**
* Reads the properties from the specified file and sets up the database
* drivers.
*
* @param propsFile the props file to load, ignored if null or pointing to a
* directory
* @throws Exception if an error occurs
*/
public DatabaseUtils(File propsFile) throws Exception {
this(loadProperties(propsFile));
}
/**
* Uses the specified properties to set up the database drivers.
*
* @param props the properties to use, ignored if null
* @throws Exception if an error occurs
*/
public DatabaseUtils(Properties props) throws Exception {
if (DRIVERS_ERRORS == null) {
DRIVERS_ERRORS = new Vector<String>();
}
initialize(props);
}
/**
* Initializes the database connection.
*
* @param propsFile the props file to load, ignored if null or pointing to a
* directory
*/
public void initialize(File propsFile) {
initialize(loadProperties(propsFile));
}
/**
* Initializes the database connection.
*
* @param props the properties to obtain the parameters from, ignored if null
*/
public void initialize(Properties props) {
try {
if (props != null) {
PROPERTIES = props;
} else {
PROPERTIES = Utils.readProperties(PROPERTY_FILE);
}
// Register the drivers in jdbc DriverManager
String drivers = PROPERTIES.getProperty("jdbcDriver", "jdbc.idbDriver");
if (drivers == null) {
throw new Exception("No database drivers (JDBC) specified");
}
// The call to newInstance() is necessary on some platforms
// (with some java VM implementations)
StringTokenizer st = new StringTokenizer(drivers, ", ");
while (st.hasMoreTokens()) {
String driver = st.nextToken();
boolean result;
try {
// Class.forName(driver);
Object driverImpl =
WekaPackageClassLoaderManager.objectForName(driver);
DRIVERS.addElement(driver);
result = true;
} catch (Exception e) {
result = false;
}
if (!result && !DRIVERS_ERRORS.contains(driver)) {
Logger.log(Logger.Level.WARNING,
"Trying to add database driver (JDBC): " + driver + " - "
+ "Warning, not in CLASSPATH?");
} else if (m_Debug) {
System.err.println("Trying to add database driver (JDBC): " + driver
+ " - " + (result ? "Success!" : "Warning, not in CLASSPATH?"));
}
if (!result) {
DRIVERS_ERRORS.add(driver);
}
}
} catch (Exception ex) {
System.err.println("Problem reading properties. Fix before continuing.");
System.err.println(ex);
}
m_DatabaseURL =
PROPERTIES.getProperty("jdbcURL", "jdbc:idb=experiments.prp");
m_stringType = PROPERTIES.getProperty("CREATE_STRING", "LONGVARCHAR");
m_intType = PROPERTIES.getProperty("CREATE_INT", "INT");
m_doubleType = PROPERTIES.getProperty("CREATE_DOUBLE", "DOUBLE");
m_checkForUpperCaseNames =
PROPERTIES.getProperty("checkUpperCaseNames", "false").equals("true");
m_checkForLowerCaseNames =
PROPERTIES.getProperty("checkLowerCaseNames", "false").equals("true");
m_setAutoCommit =
PROPERTIES.getProperty("setAutoCommit", "true").equals("true");
m_createIndex =
PROPERTIES.getProperty("createIndex", "false").equals("true");
setKeywords(PROPERTIES.getProperty("Keywords",
"AND,ASC,BY,DESC,FROM,GROUP,INSERT,ORDER,SELECT,UPDATE,WHERE"));
setKeywordsMaskChar(PROPERTIES.getProperty("KeywordsMaskChar", "_"));
}
/**
* returns key column headings in their original case. Used for those
* databases that create uppercase column names.
*
* @param columnName the column to retrieve the original case for
* @return the original case
*/
public String attributeCaseFix(String columnName) {
if (m_checkForUpperCaseNames) {
String ucname = columnName.toUpperCase();
if (ucname.equals(EXP_TYPE_COL.toUpperCase())) {
return EXP_TYPE_COL;
} else if (ucname.equals(EXP_SETUP_COL.toUpperCase())) {
return EXP_SETUP_COL;
} else if (ucname.equals(EXP_RESULT_COL.toUpperCase())) {
return EXP_RESULT_COL;
} else {
return columnName;
}
} else if (m_checkForLowerCaseNames) {
String ucname = columnName.toLowerCase();
if (ucname.equals(EXP_TYPE_COL.toLowerCase())) {
return EXP_TYPE_COL;
} else if (ucname.equals(EXP_SETUP_COL.toLowerCase())) {
return EXP_SETUP_COL;
} else if (ucname.equals(EXP_RESULT_COL.toLowerCase())) {
return EXP_RESULT_COL;
} else {
return columnName;
}
} else {
return columnName;
}
}
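  // A brief illustration (not part of the original source): with
  // checkUpperCaseNames=true in DatabaseUtils.props, a column reported by the
  // driver as "EXPERIMENT_TYPE" is mapped back to the original constant:
  //
  //   attributeCaseFix("EXPERIMENT_TYPE");  // returns "Experiment_type"
  //   attributeCaseFix("SOME_OTHER_COL");   // returned unchanged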
/**
* translates the column data type string to an integer value that indicates
* which data type / get()-Method to use in order to retrieve values from the
* database (see DatabaseUtils.Properties, InstanceQuery()). Blanks in the
* type are replaced with underscores "_", since Java property names can't
* contain blanks.
*
* @param type the column type as retrieved with
* java.sql.MetaData.getColumnTypeName(int)
* @return an integer value that indicates which data type / get()-Method to
 *         use in order to retrieve values from the database
*/
public int translateDBColumnType(String type) {
try {
// Oracle, e.g., has datatypes like "DOUBLE PRECISION"
// BUT property names can't have blanks in the name (unless escaped with
// a backslash), hence also check for names where the blanks are
// replaced with underscores "_":
String value = PROPERTIES.getProperty(type);
String typeUnderscore = type.replaceAll(" ", "_");
if (value == null) {
value = PROPERTIES.getProperty(typeUnderscore);
}
return Integer.parseInt(value);
} catch (NumberFormatException e) {
e.printStackTrace();
throw new IllegalArgumentException("Unknown data type: " + type + ". "
+ "Add entry in " + PROPERTY_FILE + ".\n"
+ "If the type contains blanks, either escape them with a backslash "
+ "or use underscores instead of blanks.");
}
}
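  // A sketch of how the type mapping is driven by DatabaseUtils.props (the
  // exact entries below are illustrative; the shipped props file may differ):
  //
  //   VARCHAR=0            -> STRING
  //   DOUBLE=2             -> DOUBLE
  //   DOUBLE_PRECISION=2   -> DOUBLE ("DOUBLE PRECISION" with the blank
  //                           replaced by an underscore)
  //
  // so translateDBColumnType("DOUBLE PRECISION") would return DOUBLE (2).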
/**
* Converts an array of objects to a string by inserting a space between each
* element. Null elements are printed as ?
*
* @param array the array of objects
* @return a value of type 'String'
*/
public static String arrayToString(Object[] array) {
String result = "";
if (array == null) {
result = "<null>";
} else {
for (Object element : array) {
if (element == null) {
result += " ?";
} else {
result += " " + element;
}
}
}
return result;
}
/**
* Returns the name associated with a SQL type.
*
* @param type the SQL type
* @return the name of the type
*/
public static String typeName(int type) {
switch (type) {
case Types.BIGINT:
return "BIGINT ";
case Types.BINARY:
return "BINARY";
case Types.BIT:
return "BIT";
case Types.CHAR:
return "CHAR";
case Types.DATE:
return "DATE";
case Types.DECIMAL:
return "DECIMAL";
case Types.DOUBLE:
return "DOUBLE";
case Types.FLOAT:
return "FLOAT";
case Types.INTEGER:
return "INTEGER";
case Types.LONGVARBINARY:
return "LONGVARBINARY";
case Types.LONGVARCHAR:
return "LONGVARCHAR";
case Types.NULL:
return "NULL";
case Types.NUMERIC:
return "NUMERIC";
case Types.OTHER:
return "OTHER";
case Types.REAL:
return "REAL";
case Types.SMALLINT:
return "SMALLINT";
case Types.TIME:
return "TIME";
case Types.TIMESTAMP:
return "TIMESTAMP";
case Types.TINYINT:
return "TINYINT";
case Types.VARBINARY:
return "VARBINARY";
case Types.VARCHAR:
return "VARCHAR";
default:
return "Unknown";
}
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String databaseURLTipText() {
return "Set the URL to the database.";
}
/**
* Get the value of DatabaseURL.
*
* @return Value of DatabaseURL.
*/
public String getDatabaseURL() {
return m_DatabaseURL;
}
/**
* Set the value of DatabaseURL.
*
* @param newDatabaseURL Value to assign to DatabaseURL.
*/
public void setDatabaseURL(String newDatabaseURL) {
m_DatabaseURL = newDatabaseURL;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String debugTipText() {
return "Whether debug information is printed.";
}
/**
 * Sets whether debugging output should be printed to stderr.
*
* @param d true if output should be printed
*/
public void setDebug(boolean d) {
m_Debug = d;
}
/**
 * Gets whether debugging output is printed to stderr.
*
* @return true if output should be printed
*/
public boolean getDebug() {
return m_Debug;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String usernameTipText() {
return "The user to use for connecting to the database.";
}
/**
* Set the database username.
*
* @param username Username for Database.
*/
public void setUsername(String username) {
m_userName = username;
}
/**
* Get the database username.
*
* @return Database username
*/
public String getUsername() {
return m_userName;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String passwordTipText() {
return "The password to use for connecting to the database.";
}
/**
* Set the database password.
*
* @param password Password for Database.
*/
public void setPassword(String password) {
m_password = password;
}
/**
* Get the database password.
*
* @return Password for Database.
*/
public String getPassword() {
return m_password;
}
/**
* Opens a connection to the database.
*
* @throws Exception if an error occurs
*/
public void connectToDatabase() throws Exception {
if (m_Debug) {
System.err.println("Connecting to " + m_DatabaseURL);
}
if (m_Connection == null) {
try {
connectUsingDriverManager();
} catch (Exception ex) {
connectViaPackageLoadedDriver();
}
if (m_Connection == null) {
throw new SQLException("Unable to find a suitable driver for "
+ m_DatabaseURL);
}
m_Connection.setAutoCommit(m_setAutoCommit);
}
}
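  /*
   * A minimal usage sketch (not part of the original source; the URL and
   * credentials are assumptions for illustration only):
   *
   *   DatabaseUtils utils = new DatabaseUtils();
   *   utils.setDatabaseURL("jdbc:mysql://localhost:3306/experiments");
   *   utils.setUsername("weka");
   *   utils.setPassword("secret");
   *   utils.connectToDatabase();
   *   try {
   *     // ... run queries via select()/update()/execute() ...
   *   } finally {
   *     utils.disconnectFromDatabase();
   *   }
   */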
/**
* Open a connection by trying all package-loaded JDBC drivers
*
* @throws Exception if a problem occurs
*/
protected void connectViaPackageLoadedDriver() throws Exception {
java.util.Properties info = new java.util.Properties();
if (m_userName.length() > 0) {
info.put("user", m_userName);
}
if (m_password.length() > 0) {
info.put("password", m_password);
}
for (String driverClass : DRIVERS) {
Driver driver = getDriver(driverClass);
if (driver != null) {
Connection conn = driver.connect(m_DatabaseURL, info);
if (conn != null) {
m_Connection = conn;
break;
}
}
}
}
/**
* Try to get a driver from Weka package classloaders
*
* @param driverClass the name of the driver class to try and instantiate
* @return the instantiated driver class
* @throws Exception if a problem occurs
*/
protected Driver getDriver(String driverClass) throws Exception {
Object result = WekaPackageClassLoaderManager.objectForName(driverClass);
if (!(result instanceof Driver)) {
throw new WekaException("JDBC driver " + driverClass
+ " does not implement java.sql.Driver");
}
return (Driver) result;
}
/**
* Open a connection using {@code java.sql.DriverManager}. This covers all
* JDBC drivers that are on the main CLASSPATH.
*
* @throws Exception if a problem occurs
*/
protected void connectUsingDriverManager() throws Exception {
if (m_Connection == null) {
if (m_userName.equals("")) {
try {
m_Connection = DriverManager.getConnection(m_DatabaseURL);
} catch (java.sql.SQLException e) {
// Try loading the drivers
for (int i = 0; i < DRIVERS.size(); i++) {
try {
// Class.forName(DRIVERS.elementAt(i));
WekaPackageClassLoaderManager.forName(DRIVERS.elementAt(i));
} catch (Exception ex) {
// Drop through
}
}
m_Connection = DriverManager.getConnection(m_DatabaseURL);
}
} else {
try {
m_Connection =
DriverManager.getConnection(m_DatabaseURL, m_userName, m_password);
} catch (java.sql.SQLException e) {
// Try loading the drivers
for (int i = 0; i < DRIVERS.size(); i++) {
try {
WekaPackageClassLoaderManager.forName(DRIVERS.elementAt(i));
// Class.forName(DRIVERS.elementAt(i));
} catch (Exception ex) {
// Drop through
}
}
m_Connection =
DriverManager.getConnection(m_DatabaseURL, m_userName, m_password);
}
}
}
}
/**
* Closes the connection to the database.
*
* @throws Exception if an error occurs
*/
public void disconnectFromDatabase() throws Exception {
if (m_Debug) {
System.err.println("Disconnecting from " + m_DatabaseURL);
}
if (m_Connection != null) {
m_Connection.close();
m_Connection = null;
}
}
/**
* Returns true if a database connection is active.
*
* @return a value of type 'boolean'
*/
public boolean isConnected() {
return (m_Connection != null);
}
/**
 * Returns whether the cursor is scroll sensitive (with
 * ResultSet.CONCUR_READ_ONLY concurrency) rather than supporting only forward
 * movement. Always returns false if not connected.
*
* @return true if connected and the cursor is scroll-sensitive
* @see ResultSet#TYPE_SCROLL_SENSITIVE
* @see ResultSet#TYPE_FORWARD_ONLY
* @see ResultSet#CONCUR_READ_ONLY
*/
public boolean isCursorScrollSensitive() {
boolean result;
result = false;
try {
if (isConnected()) {
result =
m_Connection.getMetaData().supportsResultSetConcurrency(
ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_READ_ONLY);
}
} catch (Exception e) {
// ignored
}
return result;
}
/**
 * Checks whether cursors are scrollable in general; returns false otherwise
 * (also when not connected).
*
* @return true if scrollable and connected
* @see #getSupportedCursorScrollType()
*/
public boolean isCursorScrollable() {
return (getSupportedCursorScrollType() != -1);
}
/**
* Returns the type of scrolling that the cursor supports, -1 if not supported
* or not connected. Checks first for TYPE_SCROLL_SENSITIVE and then for
* TYPE_SCROLL_INSENSITIVE. In both cases CONCUR_READ_ONLY as concurrency is
* used.
*
* @return the scroll type, or -1 if not connected or no scrolling supported
* @see ResultSet#TYPE_SCROLL_SENSITIVE
* @see ResultSet#TYPE_SCROLL_INSENSITIVE
*/
public int getSupportedCursorScrollType() {
int result;
result = -1;
try {
if (isConnected()) {
if (m_Connection.getMetaData().supportsResultSetConcurrency(
ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_READ_ONLY)) {
result = ResultSet.TYPE_SCROLL_SENSITIVE;
}
if (result == -1) {
if (m_Connection.getMetaData().supportsResultSetConcurrency(
ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY)) {
result = ResultSet.TYPE_SCROLL_INSENSITIVE;
}
}
}
} catch (Exception e) {
// ignored
}
return result;
}
/**
* Executes a SQL query. Caller must clean up manually with
* <code>close()</code>.
*
* @param query the SQL query
* @return true if the query generated results
* @throws SQLException if an error occurs
* @see #close()
*/
public boolean execute(String query) throws SQLException {
if (!isConnected()) {
throw new IllegalStateException("Not connected, please connect first!");
}
if (!isCursorScrollable()) {
m_PreparedStatement =
m_Connection.prepareStatement(query, ResultSet.TYPE_FORWARD_ONLY,
ResultSet.CONCUR_READ_ONLY);
} else {
m_PreparedStatement =
m_Connection.prepareStatement(query, getSupportedCursorScrollType(),
ResultSet.CONCUR_READ_ONLY);
}
return (m_PreparedStatement.execute());
}
/**
* Gets the results generated by a previous query. Caller must clean up
* manually with <code>close(ResultSet)</code>. Returns null if object has
* been deserialized.
*
* @return the result set.
* @throws SQLException if an error occurs
* @see #close(ResultSet)
*/
public ResultSet getResultSet() throws SQLException {
if (m_PreparedStatement != null) {
return m_PreparedStatement.getResultSet();
} else {
return null;
}
}
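  /*
   * A short sketch of the execute()/getResultSet() cycle (illustrative only;
   * the query is an assumption, built on this class's index table constants):
   *
   *   if (utils.execute("SELECT Experiment_type FROM Experiment_index")) {
   *     ResultSet rs = utils.getResultSet();
   *     while (rs.next()) {
   *       System.out.println(rs.getString(1));
   *     }
   *     utils.close(rs);  // closes both the ResultSet and its statement
   *   } else {
   *     utils.close();    // no result set was produced; release the statement
   *   }
   */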
/**
* Executes a SQL DDL query or an INSERT, DELETE or UPDATE.
*
* @param query the SQL DDL query
* @return the number of affected rows
* @throws SQLException if an error occurs
*/
public int update(String query) throws SQLException {
if (!isConnected()) {
throw new IllegalStateException("Not connected, please connect first!");
}
Statement statement;
if (!isCursorScrollable()) {
statement =
m_Connection.createStatement(ResultSet.TYPE_FORWARD_ONLY,
ResultSet.CONCUR_READ_ONLY);
} else {
statement =
m_Connection.createStatement(getSupportedCursorScrollType(),
ResultSet.CONCUR_READ_ONLY);
}
int result = statement.executeUpdate(query);
statement.close();
return result;
}
/**
* Executes a SQL SELECT query that returns a ResultSet. Note: the ResultSet
* object must be closed by the caller.
*
* @param query the SQL query
* @return the generated ResultSet
* @throws SQLException if an error occurs
*/
public ResultSet select(String query) throws SQLException {
if (!isConnected()) {
throw new IllegalStateException("Not connected, please connect first!");
}
Statement statement;
if (!isCursorScrollable()) {
statement =
m_Connection.createStatement(ResultSet.TYPE_FORWARD_ONLY,
ResultSet.CONCUR_READ_ONLY);
} else {
statement =
m_Connection.createStatement(getSupportedCursorScrollType(),
ResultSet.CONCUR_READ_ONLY);
}
ResultSet result = statement.executeQuery(query);
return result;
}
/**
* closes the ResultSet and the statement that generated the ResultSet to
* avoid memory leaks in JDBC drivers - in contrast to the JDBC specs, a lot
 * of JDBC drivers don't clean up correctly.
*
* @param rs the ResultSet to clean up
*/
public void close(ResultSet rs) {
try {
Statement statement = rs.getStatement();
rs.close();
statement.close();
statement = null;
rs = null;
} catch (Exception e) {
// ignored
}
}
/**
* closes the m_PreparedStatement to avoid memory leaks.
*/
public void close() {
if (m_PreparedStatement != null) {
try {
m_PreparedStatement.close();
m_PreparedStatement = null;
} catch (Exception e) {
// ignored
}
}
}
/**
* Checks that a given table exists.
*
* @param tableName the name of the table to look for.
* @return true if the table exists.
* @throws Exception if an error occurs.
*/
public boolean tableExists(String tableName) throws Exception {
if (!isConnected()) {
throw new IllegalStateException("Not connected, please connect first!");
}
if (m_Debug) {
System.err.println("Checking if table " + tableName + " exists...");
}
DatabaseMetaData dbmd = m_Connection.getMetaData();
ResultSet rs;
if (m_checkForUpperCaseNames) {
rs = dbmd.getTables(null, null, tableName.toUpperCase(), null);
} else if (m_checkForLowerCaseNames) {
rs = dbmd.getTables(null, null, tableName.toLowerCase(), null);
} else {
rs = dbmd.getTables(null, null, tableName, null);
}
boolean tableExists = rs.next();
if (rs.next()) {
throw new Exception("This table seems to exist more than once!");
}
rs.close();
if (m_Debug) {
if (tableExists) {
System.err.println("... " + tableName + " exists");
} else {
System.err.println("... " + tableName + " does not exist");
}
}
return tableExists;
}
/**
* processes the string in such a way that it can be stored in the database,
* i.e., it changes backslashes into slashes and doubles single quotes.
*
* @param s the string to work on
* @return the processed string
*/
public static String processKeyString(String s) {
return s.replaceAll("\\\\", "/").replaceAll("'", "''");
}
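  // Examples (illustrative): processKeyString("C:\data\it's") yields
  // "C:/data/it''s" - backslashes become forward slashes and single quotes
  // are doubled so the value can be embedded in a SQL string literal.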
/**
* Executes a database query to see whether a result for the supplied key is
* already in the database.
*
* @param tableName the name of the table to search for the key in
* @param rp the ResultProducer who will generate the result if required
* @param key the key for the result
* @return true if the result with that key is in the database already
* @throws Exception if an error occurs
*/
protected boolean isKeyInTable(String tableName, ResultProducer rp,
Object[] key) throws Exception {
String query = "SELECT Key_Run" + " FROM " + tableName;
String[] keyNames = rp.getKeyNames();
if (keyNames.length != key.length) {
throw new Exception("Key names and key values of different lengths");
}
boolean first = true;
for (int i = 0; i < key.length; i++) {
if (key[i] != null) {
if (first) {
query += " WHERE ";
first = false;
} else {
query += " AND ";
}
query += "Key_" + keyNames[i] + '=';
if (key[i] instanceof String) {
query += "'" + processKeyString(key[i].toString()) + "'";
} else {
query += key[i].toString();
}
}
}
boolean retval = false;
ResultSet rs = select(query);
if (rs.next()) {
retval = true;
if (rs.next()) {
throw new Exception("More than one result entry " + "for result key: "
+ query);
}
}
close(rs);
return retval;
}
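  // For illustration (the table name and key names are hypothetical): a key of
  // {1, "weka.classifiers.rules.ZeroR"} with key names {"Run", "Scheme"}
  // produces a query such as:
  //
  //   SELECT Key_Run FROM Results1
  //     WHERE Key_Run=1 AND Key_Scheme='weka.classifiers.rules.ZeroR'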
/**
* Executes a database query to extract a result for the supplied key from the
* database.
*
* @param tableName the name of the table where the result is stored
* @param rp the ResultProducer who will generate the result if required
* @param key the key for the result
 * @return the result stored in the database for the supplied key
* @throws Exception if an error occurs
*/
public Object[] getResultFromTable(String tableName, ResultProducer rp,
Object[] key) throws Exception {
String query = "SELECT ";
String[] resultNames = rp.getResultNames();
for (int i = 0; i < resultNames.length; i++) {
if (i != 0) {
query += ", ";
}
query += resultNames[i];
}
query += " FROM " + tableName;
String[] keyNames = rp.getKeyNames();
if (keyNames.length != key.length) {
throw new Exception("Key names and key values of different lengths");
}
boolean first = true;
for (int i = 0; i < key.length; i++) {
if (key[i] != null) {
if (first) {
query += " WHERE ";
first = false;
} else {
query += " AND ";
}
query += "Key_" + keyNames[i] + '=';
if (key[i] instanceof String) {
query += "'" + processKeyString(key[i].toString()) + "'";
} else {
query += key[i].toString();
}
}
}
ResultSet rs = select(query);
ResultSetMetaData md = rs.getMetaData();
int numAttributes = md.getColumnCount();
if (!rs.next()) {
throw new Exception("No result for query: " + query);
}
// Extract the columns for the result
Object[] result = new Object[numAttributes];
for (int i = 1; i <= numAttributes; i++) {
switch (translateDBColumnType(md.getColumnTypeName(i))) {
case STRING:
result[i - 1] = rs.getString(i);
if (rs.wasNull()) {
result[i - 1] = null;
}
break;
case FLOAT:
case DOUBLE:
result[i - 1] = new Double(rs.getDouble(i));
if (rs.wasNull()) {
result[i - 1] = null;
}
break;
default:
throw new Exception("Unhandled SQL result type (field " + (i + 1)
+ "): " + DatabaseUtils.typeName(md.getColumnType(i)));
}
}
if (rs.next()) {
throw new Exception("More than one result entry " + "for result key: "
+ query);
}
close(rs);
return result;
}
/**
* Executes a database query to insert a result for the supplied key into the
* database.
*
* @param tableName the name of the table where the result is stored
* @param rp the ResultProducer who will generate the result if required
* @param key the key for the result
* @param result the result to store
* @throws Exception if an error occurs
*/
public void putResultInTable(String tableName, ResultProducer rp,
Object[] key, Object[] result) throws Exception {
String query = "INSERT INTO " + tableName + " VALUES ( ";
// Add the results to the table
for (int i = 0; i < key.length; i++) {
if (i != 0) {
query += ',';
}
if (key[i] != null) {
if (key[i] instanceof String) {
query += "'" + processKeyString(key[i].toString()) + "'";
} else if (key[i] instanceof Double) {
query += safeDoubleToString((Double) key[i]);
} else {
query += key[i].toString();
}
} else {
query += "NULL";
}
}
for (Object element : result) {
query += ',';
if (element != null) {
if (element instanceof String) {
query += "'" + element.toString() + "'";
} else if (element instanceof Double) {
query += safeDoubleToString((Double) element);
} else {
query += element.toString();
// !!
// System.err.println("res: "+ result[i].toString());
}
} else {
query += "NULL";
}
}
query += ')';
if (m_Debug) {
System.err.println("Submitting result: " + query);
}
update(query);
close();
}
/**
* Inserts a + if the double is in scientific notation. MySQL doesn't
* understand the number otherwise.
*
* @param number the number to convert
* @return the number as string
*/
private String safeDoubleToString(Double number) {
// NaN is treated as NULL
if (number.isNaN()) {
return "NULL";
}
String orig = number.toString();
int pos = orig.indexOf('E');
if ((pos == -1) || (orig.charAt(pos + 1) == '-')) {
return orig;
} else {
StringBuffer buff = new StringBuffer(orig);
buff.insert(pos + 1, '+');
return new String(buff);
}
}
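  // Examples (illustrative): safeDoubleToString(1.0E10) yields "1.0E+10",
  // safeDoubleToString(1.0E-10) is returned unchanged as "1.0E-10", and
  // NaN becomes "NULL".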
/**
* Returns true if the experiment index exists.
*
* @return true if the index exists
* @throws Exception if an error occurs
*/
public boolean experimentIndexExists() throws Exception {
return tableExists(EXP_INDEX_TABLE);
}
/**
* Attempts to create the experiment index table.
*
* @throws Exception if an error occurs.
*/
public void createExperimentIndex() throws Exception {
if (m_Debug) {
System.err.println("Creating experiment index table...");
}
String query;
// Workaround for MySQL (doesn't support LONGVARBINARY)
// Also for InstantDB which attempts to interpret numbers when storing
// in LONGVARBINARY
/*
* if (m_Connection.getMetaData().getDriverName().
* equals("Mark Matthews' MySQL Driver") ||
* (m_Connection.getMetaData().getDriverName().
* indexOf("InstantDB JDBC Driver") != -1)) { query = "CREATE TABLE " +
* EXP_INDEX_TABLE + " ( " + EXP_TYPE_COL + " TEXT," + " " + EXP_SETUP_COL
* + " TEXT," + " " + EXP_RESULT_COL + " INT )"; } else {
*/
query =
"CREATE TABLE " + EXP_INDEX_TABLE + " ( " + EXP_TYPE_COL + " "
+ m_stringType + "," + " " + EXP_SETUP_COL + " " + m_stringType + ","
+ " " + EXP_RESULT_COL + " " + m_intType + " )";
// }
// Other possible fields:
// creator user name (from System properties)
// creation date
update(query);
close();
}
/**
* Attempts to insert a results entry for the table into the experiment index.
*
* @param rp the ResultProducer generating the results
* @return the name of the created results table
* @throws Exception if an error occurs.
*/
public String createExperimentIndexEntry(ResultProducer rp) throws Exception {
if (m_Debug) {
System.err.println("Creating experiment index entry...");
}
// Execute compound transaction
int numRows = 0;
// Workaround for MySQL (doesn't support transactions)
/*
* if (m_Connection.getMetaData().getDriverName().
* equals("Mark Matthews' MySQL Driver")) {
* m_Statement.execute("LOCK TABLES " + EXP_INDEX_TABLE + " WRITE");
* System.err.println("LOCKING TABLE"); } else {
*/
// }
// Get the number of rows
String query = "SELECT COUNT(*) FROM " + EXP_INDEX_TABLE;
ResultSet rs = select(query);
if (m_Debug) {
System.err.println("...getting number of rows");
}
if (rs.next()) {
numRows = rs.getInt(1);
}
close(rs);
// Add an entry in the index table
String expType = rp.getClass().getName();
String expParams = rp.getCompatibilityState();
query =
"INSERT INTO " + EXP_INDEX_TABLE + " VALUES ('" + expType + "', '"
+ expParams + "', " + numRows + " )";
if (update(query) > 0) {
if (m_Debug) {
System.err.println("...create returned resultset");
}
}
close();
// Finished compound transaction
// Workaround for MySQL (doesn't support transactions)
/*
* if (m_Connection.getMetaData().getDriverName().
* equals("Mark Matthews' MySQL Driver")) {
* m_Statement.execute("UNLOCK TABLES");
* System.err.println("UNLOCKING TABLE"); } else {
*/
if (!m_setAutoCommit) {
m_Connection.commit();
m_Connection.setAutoCommit(true);
}
// }
String tableName = getResultsTableName(rp);
if (tableName == null) {
throw new Exception("Problem adding experiment index entry");
}
// Drop any existing table by that name (shouldn't occur unless
// the experiment index is destroyed, in which case the experimental
// conditions of the existing table are unknown)
try {
query = "DROP TABLE " + tableName;
if (m_Debug) {
System.err.println(query);
}
update(query);
} catch (SQLException ex) {
System.err.println(ex.getMessage());
}
return tableName;
}
/**
* Gets the name of the experiment table that stores results from a particular
* ResultProducer.
*
* @param rp the ResultProducer
* @return the name of the table where the results for this ResultProducer are
* stored, or null if there is no table for this ResultProducer.
* @throws Exception if an error occurs
*/
public String getResultsTableName(ResultProducer rp) throws Exception {
// Get the experiment table name, or create a new table if necessary.
if (m_Debug) {
System.err.println("Getting results table name...");
}
String expType = rp.getClass().getName();
String expParams = rp.getCompatibilityState();
String query =
"SELECT " + EXP_RESULT_COL + " FROM " + EXP_INDEX_TABLE + " WHERE "
+ EXP_TYPE_COL + "='" + expType + "' AND " + EXP_SETUP_COL + "='"
+ expParams + "'";
String tableName = null;
ResultSet rs = select(query);
if (rs.next()) {
tableName = rs.getString(1);
if (rs.next()) {
throw new Exception("More than one index entry "
+ "for experiment config: " + query);
}
}
close(rs);
if (m_Debug) {
System.err.println("...results table = "
+ ((tableName == null) ? "<null>" : EXP_RESULT_PREFIX + tableName));
}
return (tableName == null) ? tableName : EXP_RESULT_PREFIX + tableName;
}
/**
* Creates a results table for the supplied result producer.
*
* @param rp the ResultProducer generating the results
* @param tableName the name of the resultsTable
* @return the name of the created results table
* @throws Exception if an error occurs.
*/
public String createResultsTable(ResultProducer rp, String tableName)
throws Exception {
if (m_Debug) {
System.err.println("Creating results table " + tableName + "...");
}
String query = "CREATE TABLE " + tableName + " ( ";
// Loop over the key fields
String[] names = rp.getKeyNames();
Object[] types = rp.getKeyTypes();
if (names.length != types.length) {
throw new Exception("key names types differ in length");
}
for (int i = 0; i < names.length; i++) {
query += "Key_" + names[i] + " ";
if (types[i] instanceof Double) {
query += m_doubleType;
} else if (types[i] instanceof String) {
// Workaround for MySQL (doesn't support LONGVARCHAR)
// Also for InstantDB which attempts to interpret numbers when storing
// in LONGVARBINARY
/*
* if (m_Connection.getMetaData().getDriverName().
* equals("Mark Matthews' MySQL Driver") ||
* (m_Connection.getMetaData().getDriverName().
* indexOf("InstantDB JDBC Driver")) != -1) { query += "TEXT "; } else {
*/
// query += "LONGVARCHAR ";
query += m_stringType + " ";
// }
} else {
throw new Exception("Unknown/unsupported field type in key");
}
query += ", ";
}
// Loop over the result fields
names = rp.getResultNames();
types = rp.getResultTypes();
if (names.length != types.length) {
throw new Exception("result names and types differ in length");
}
for (int i = 0; i < names.length; i++) {
query += names[i] + " ";
if (types[i] instanceof Double) {
query += m_doubleType;
} else if (types[i] instanceof String) {
// Workaround for MySQL (doesn't support LONGVARCHAR)
// Also for InstantDB which attempts to interpret numbers when storing
// in LONGVARBINARY
/*
* if (m_Connection.getMetaData().getDriverName().
* equals("Mark Matthews' MySQL Driver") ||
* (m_Connection.getMetaData().getDriverName().
* equals("InstantDB JDBC Driver"))) { query += "TEXT "; } else {
*/
// query += "LONGVARCHAR ";
query += m_stringType + " ";
// }
} else {
throw new Exception("Unknown/unsupported field type in key");
}
if (i < names.length - 1) {
query += ", ";
}
}
query += " )";
update(query);
if (m_Debug) {
System.err.println("table created");
}
close();
if (m_createIndex) {
query = "CREATE UNIQUE INDEX Key_IDX ON " + tableName + " (";
String[] keyNames = rp.getKeyNames();
boolean first = true;
for (String keyName : keyNames) {
if (keyName != null) {
if (first) {
first = false;
query += "Key_" + keyName;
} else {
query += ",Key_" + keyName;
}
}
}
query += ")";
update(query);
}
return tableName;
}
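  // For illustration (the table name, key fields and result fields are
  // assumptions about a typical setup), the generated statement may resemble:
  //
  //   CREATE TABLE Results1 ( Key_Run DOUBLE, Key_Scheme LONGVARCHAR,
  //     Percent_correct DOUBLE )
  //
  // followed, if createIndex=true in the props file, by a unique index on the
  // Key_ columns.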
/**
* Sets the keywords (comma-separated list) to use.
*
* @param value the list of keywords
*/
public void setKeywords(String value) {
String[] keywords;
int i;
m_Keywords.clear();
keywords = value.replaceAll(" ", "").split(",");
for (i = 0; i < keywords.length; i++) {
m_Keywords.add(keywords[i].toUpperCase());
}
}
/**
* Returns the currently stored keywords (as comma-separated list).
*
* @return the list of keywords
*/
public String getKeywords() {
String result;
Vector<String> list;
int i;
list = new Vector<String>(m_Keywords);
Collections.sort(list);
result = "";
for (i = 0; i < list.size(); i++) {
if (i > 0) {
result += ",";
}
result += list.get(i);
}
return result;
}
/**
* Sets the mask character to append to table or attribute names that are a
* reserved keyword.
*
* @param value the new character
*/
public void setKeywordsMaskChar(String value) {
m_KeywordsMaskChar = value;
}
/**
* Returns the currently set mask character.
*
* @return the character
*/
public String getKeywordsMaskChar() {
return m_KeywordsMaskChar;
}
/**
* Checks whether the given string is a reserved keyword.
*
* @param s the string to check
* @return true if the string is a keyword
* @see #m_Keywords
*/
public boolean isKeyword(String s) {
return m_Keywords.contains(s.toUpperCase());
}
/**
* If the given string is a keyword, then the mask character will be appended
* and returned. Otherwise, the same string will be returned unchanged.
*
* @param s the string to check
* @return the potentially masked string
* @see #m_KeywordsMaskChar
* @see #isKeyword(String)
*/
public String maskKeyword(String s) {
if (isKeyword(s)) {
return s + m_KeywordsMaskChar;
} else {
return s;
}
}
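  // A brief illustration: with the default keyword list, isKeyword("select")
  // returns true, so maskKeyword("select") yields "select_" while
  // maskKeyword("Percent_correct") is returned unchanged.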
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Loads a properties file from an external file.
*
* @param propsFile the properties file to load, ignored if null or pointing
* to a directory
* @return the properties, null if ignored or an error occurred
*/
private static Properties loadProperties(File propsFile) {
Properties result;
Properties defaultProps = null;
try {
defaultProps = Utils.readProperties(PROPERTY_FILE);
} catch (Exception ex) {
System.err.println("Warning, unable to read default properties file(s).");
ex.printStackTrace();
}
if (propsFile == null) {
return defaultProps;
}
if (!propsFile.exists() || propsFile.isDirectory()) {
return defaultProps;
}
try {
result = new Properties(defaultProps);
result.load(new FileInputStream(propsFile));
} catch (Exception e) {
result = null;
System.err
.println("Failed to load properties file (DatabaseUtils.java) '"
+ propsFile + "':");
e.printStackTrace();
}
return result;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/DensityBasedClustererSplitEvaluator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* DensityBasedClustererSplitEvaluator.java
* Copyright (C) 2008-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.ByteArrayOutputStream;
import java.io.ObjectOutputStream;
import java.io.ObjectStreamClass;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import weka.clusterers.AbstractClusterer;
import weka.clusterers.ClusterEvaluation;
import weka.clusterers.DensityBasedClusterer;
import weka.clusterers.EM;
import weka.core.AdditionalMeasureProducer;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.Remove;
/**
* <!-- globalinfo-start --> A SplitEvaluator that produces results for a
* density based clusterer.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -no-size
* Skips the determination of sizes (train/test/clusterer)
* (default: sizes are determined)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the density based clusterer.
* eg: weka.clusterers.EM
* </pre>
*
* <pre>
* Options specific to clusterer weka.clusterers.EM:
* </pre>
*
* <pre>
* -N <num>
* number of clusters. If omitted or -1 specified, then
* cross validation is used to select the number of clusters.
* </pre>
*
* <pre>
* -I <num>
* max iterations.
* (default 100)
* </pre>
*
* <pre>
* -V
* verbose.
* </pre>
*
* <pre>
* -M <num>
* minimum allowable standard deviation for normal density
* computation
* (default 1e-6)
* </pre>
*
* <pre>
* -O
* Display model in old format (good when there are many clusters)
* </pre>
*
* <pre>
* -S <num>
* Random number seed.
* (default 100)
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the clusterer.
*
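 * For illustration only (the EM settings shown are assumptions, not values
 * imposed by this evaluator), a complete option string might look like:
 * <p>
 *
 * <pre>
 * -W weka.clusterers.EM -- -I 100 -N 2
 * </pre>
 *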
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}org)
* @version $Revision$
*/
public class DensityBasedClustererSplitEvaluator implements SplitEvaluator,
OptionHandler, AdditionalMeasureProducer, RevisionHandler {
/** for serialization. */
private static final long serialVersionUID = 5124501059135692160L;
/** Remove the class column (if set) from the data */
protected boolean m_removeClassColumn = true;
/** The clusterer used for evaluation */
protected DensityBasedClusterer m_clusterer = new EM();
/** Holds the most recently used ClusterEvaluation object */
protected ClusterEvaluation m_Evaluation;
/** The names of any additional measures to look for in SplitEvaluators */
protected String[] m_additionalMeasures = null;
/**
* Array of booleans corresponding to the measures in m_AdditionalMeasures
* indicating which of the AdditionalMeasures the current clusterer can
* produce
*/
protected boolean[] m_doesProduce = null;
/**
* The number of additional measures that need to be filled in after taking
* into account column constraints imposed by the final destination for
* results
*/
protected int m_numberAdditionalMeasures = 0;
/** Holds the statistics for the most recent application of the clusterer */
protected String m_result = null;
/** The clusterer options (if any) */
protected String m_clustererOptions = "";
/** The clusterer version */
protected String m_clustererVersion = "";
/** whether to skip determination of sizes (train/test/clusterer). */
protected boolean m_NoSizeDetermination;
/** The length of a key */
private static final int KEY_SIZE = 3;
/** The length of a result */
private static final int RESULT_SIZE = 9;
public DensityBasedClustererSplitEvaluator() {
updateOptions();
}
/**
* Returns a string describing this split evaluator
*
* @return a description of the split evaluator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return " A SplitEvaluator that produces results for a density based clusterer. ";
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(2);
newVector.addElement(new Option(
"\tSkips the determination of sizes (train/test/clusterer)\n"
+ "\t(default: sizes are determined)", "no-size", 0, "-no-size"));
newVector.addElement(new Option(
"\tThe full class name of the density based clusterer.\n"
+ "\teg: weka.clusterers.EM", "W", 1, "-W <class name>"));
if ((m_clusterer != null) && (m_clusterer instanceof OptionHandler)) {
newVector.addElement(new Option("", "", 0,
"\nOptions specific to clusterer " + m_clusterer.getClass().getName()
+ ":"));
newVector.addAll(Collections.list(((OptionHandler) m_clusterer)
.listOptions()));
}
return newVector.elements();
}
/**
* Parses a given list of options. Valid options are:
* <p>
*
* -W classname <br>
* Specify the full class name of the clusterer to evaluate.
* <p>
*
 * All options after -- will be passed to the clusterer.
*
* @param options the list of options as an array of strings
* @exception Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
m_NoSizeDetermination = Utils.getFlag("no-size", options);
String cName = Utils.getOption('W', options);
if (cName.length() == 0) {
throw new Exception("A clusterer must be specified with"
+ " the -W option.");
}
// Do it first without options, so if an exception is thrown during
// the option setting, listOptions will contain options for the actual
// clusterer.
setClusterer((DensityBasedClusterer) AbstractClusterer.forName(cName, null));
if (getClusterer() instanceof OptionHandler) {
((OptionHandler) getClusterer()).setOptions(Utils
.partitionOptions(options));
updateOptions();
}
}
/**
* Gets the current settings of the Classifier.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result;
String[] clustererOptions;
result = new Vector<String>();
clustererOptions = new String[0];
if ((m_clusterer != null) && (m_clusterer instanceof OptionHandler)) {
clustererOptions = ((OptionHandler) m_clusterer).getOptions();
}
if (getClusterer() != null) {
result.add("-W");
result.add(getClusterer().getClass().getName());
}
if (getNoSizeDetermination()) {
result.add("-no-size");
}
result.add("--");
result.addAll(Arrays.asList(clustererOptions));
return result.toArray(new String[result.size()]);
}
/**
* Set a list of method names for additional measures to look for in
* Classifiers. This could contain many measures (of which only a subset may
 * be producible by the current Classifier) if an experiment is the type that
* iterates over a set of properties.
*
* @param additionalMeasures a list of method names
*/
@Override
public void setAdditionalMeasures(String[] additionalMeasures) {
// System.err.println("ClassifierSplitEvaluator: setting additional measures");
m_additionalMeasures = additionalMeasures;
// determine which (if any) of the additional measures this clusterer
// can produce
if (m_additionalMeasures != null && m_additionalMeasures.length > 0) {
m_doesProduce = new boolean[m_additionalMeasures.length];
if (m_clusterer instanceof AdditionalMeasureProducer) {
Enumeration<String> en = ((AdditionalMeasureProducer) m_clusterer)
.enumerateMeasures();
while (en.hasMoreElements()) {
String mname = en.nextElement();
for (int j = 0; j < m_additionalMeasures.length; j++) {
if (mname.compareToIgnoreCase(m_additionalMeasures[j]) == 0) {
m_doesProduce[j] = true;
}
}
}
}
} else {
m_doesProduce = null;
}
}
/**
* Returns an enumeration of any additional measure names that might be in the
 * clusterer
*
* @return an enumeration of the measure names
*/
@Override
public Enumeration<String> enumerateMeasures() {
Vector<String> newVector = new Vector<String>();
if (m_clusterer instanceof AdditionalMeasureProducer) {
Enumeration<String> en = ((AdditionalMeasureProducer) m_clusterer)
.enumerateMeasures();
while (en.hasMoreElements()) {
String mname = en.nextElement();
newVector.addElement(mname);
}
}
return newVector.elements();
}
/**
* Returns the value of the named measure
*
* @param additionalMeasureName the name of the measure to query for its value
* @return the value of the named measure
* @exception IllegalArgumentException if the named measure is not supported
*/
@Override
public double getMeasure(String additionalMeasureName) {
if (m_clusterer instanceof AdditionalMeasureProducer) {
return ((AdditionalMeasureProducer) m_clusterer)
.getMeasure(additionalMeasureName);
} else {
throw new IllegalArgumentException(
"DensityBasedClustererSplitEvaluator: " + "Can't return value for : "
+ additionalMeasureName + ". " + m_clusterer.getClass().getName()
+ " " + "is not an AdditionalMeasureProducer");
}
}
/**
* Gets the data types of each of the key columns produced for a single run.
* The number of key fields must be constant for a given SplitEvaluator.
*
* @return an array containing objects of the type of each key column. The
* objects should be Strings, or Doubles.
*/
@Override
public Object[] getKeyTypes() {
Object[] keyTypes = new Object[KEY_SIZE];
keyTypes[0] = "";
keyTypes[1] = "";
keyTypes[2] = "";
return keyTypes;
}
/**
* Gets the names of each of the key columns produced for a single run. The
* number of key fields must be constant for a given SplitEvaluator.
*
* @return an array containing the name of each key column
*/
@Override
public String[] getKeyNames() {
String[] keyNames = new String[KEY_SIZE];
keyNames[0] = "Scheme";
keyNames[1] = "Scheme_options";
keyNames[2] = "Scheme_version_ID";
return keyNames;
}
/**
 * Gets the key describing the current SplitEvaluator. For example, this may
* contain the name of the classifier used for classifier predictive
* evaluation. The number of key fields must be constant for a given
* SplitEvaluator.
*
* @return an array of objects containing the key.
*/
@Override
public Object[] getKey() {
Object[] key = new Object[KEY_SIZE];
key[0] = m_clusterer.getClass().getName();
key[1] = m_clustererOptions;
key[2] = m_clustererVersion;
return key;
}
/**
* Gets the data types of each of the result columns produced for a single
* run. The number of result fields must be constant for a given
* SplitEvaluator.
*
* @return an array containing objects of the type of each result column. The
* objects should be Strings, or Doubles.
*/
@Override
public Object[] getResultTypes() {
int addm = (m_additionalMeasures != null) ? m_additionalMeasures.length : 0;
int overall_length = RESULT_SIZE + addm;
Object[] resultTypes = new Object[overall_length];
Double doub = new Double(0);
int current = 0;
// number of training and testing instances
resultTypes[current++] = doub;
resultTypes[current++] = doub;
// log likelihood
resultTypes[current++] = doub;
// number of clusters
resultTypes[current++] = doub;
// timing stats
resultTypes[current++] = doub;
resultTypes[current++] = doub;
// sizes
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
// resultTypes[current++] = "";
// add any additional measures
for (int i = 0; i < addm; i++) {
resultTypes[current++] = doub;
}
if (current != overall_length) {
throw new Error("ResultTypes didn't fit RESULT_SIZE");
}
return resultTypes;
}
/**
* Gets the names of each of the result columns produced for a single run. The
* number of result fields must be constant for a given SplitEvaluator.
*
* @return an array containing the name of each result column
*/
@Override
public String[] getResultNames() {
int addm = (m_additionalMeasures != null) ? m_additionalMeasures.length : 0;
int overall_length = RESULT_SIZE + addm;
String[] resultNames = new String[overall_length];
int current = 0;
resultNames[current++] = "Number_of_training_instances";
resultNames[current++] = "Number_of_testing_instances";
// Basic performance stats
resultNames[current++] = "Log_likelihood";
resultNames[current++] = "Number_of_clusters";
// Timing stats
resultNames[current++] = "Time_training";
resultNames[current++] = "Time_testing";
// sizes
resultNames[current++] = "Serialized_Model_Size";
resultNames[current++] = "Serialized_Train_Set_Size";
resultNames[current++] = "Serialized_Test_Set_Size";
// Classifier defined extras
// resultNames[current++] = "Summary";
// add any additional measures
for (int i = 0; i < addm; i++) {
resultNames[current++] = m_additionalMeasures[i];
}
if (current != overall_length) {
throw new Error("ResultNames didn't fit RESULT_SIZE");
}
return resultNames;
}
/**
* Gets the results for the supplied train and test datasets.
*
* @param train the training Instances.
* @param test the testing Instances.
* @return the results stored in an array. The objects stored in the array may
* be Strings, Doubles, or null (for the missing value).
* @exception Exception if a problem occurs while getting the results
*/
@Override
public Object[] getResult(Instances train, Instances test) throws Exception {
if (m_clusterer == null) {
throw new Exception("No clusterer has been specified");
}
int addm = (m_additionalMeasures != null) ? m_additionalMeasures.length : 0;
int overall_length = RESULT_SIZE + addm;
if (m_removeClassColumn && train.classIndex() != -1) {
// remove the class column from the training and testing data
Remove r = new Remove();
r.setAttributeIndicesArray(new int[] { train.classIndex() });
r.setInvertSelection(false);
r.setInputFormat(train);
train = Filter.useFilter(train, r);
test = Filter.useFilter(test, r);
}
train.setClassIndex(-1);
test.setClassIndex(-1);
ClusterEvaluation eval = new ClusterEvaluation();
Object[] result = new Object[overall_length];
long trainTimeStart = System.currentTimeMillis();
m_clusterer.buildClusterer(train);
double numClusters = m_clusterer.numberOfClusters();
eval.setClusterer(m_clusterer);
long trainTimeElapsed = System.currentTimeMillis() - trainTimeStart;
long testTimeStart = System.currentTimeMillis();
eval.evaluateClusterer(test);
long testTimeElapsed = System.currentTimeMillis() - testTimeStart;
// m_result = eval.toSummaryString();
// The results stored are all per instance -- can be multiplied by the
// number of instances to get absolute numbers
int current = 0;
result[current++] = new Double(train.numInstances());
result[current++] = new Double(test.numInstances());
result[current++] = new Double(eval.getLogLikelihood());
result[current++] = new Double(numClusters);
// Timing stats
result[current++] = new Double(trainTimeElapsed / 1000.0);
result[current++] = new Double(testTimeElapsed / 1000.0);
// sizes
if (m_NoSizeDetermination) {
result[current++] = -1.0;
result[current++] = -1.0;
result[current++] = -1.0;
} else {
ByteArrayOutputStream bastream = new ByteArrayOutputStream();
ObjectOutputStream oostream = new ObjectOutputStream(bastream);
oostream.writeObject(m_clusterer);
result[current++] = new Double(bastream.size());
bastream = new ByteArrayOutputStream();
oostream = new ObjectOutputStream(bastream);
oostream.writeObject(train);
result[current++] = new Double(bastream.size());
bastream = new ByteArrayOutputStream();
oostream = new ObjectOutputStream(bastream);
oostream.writeObject(test);
result[current++] = new Double(bastream.size());
}
for (int i = 0; i < addm; i++) {
if (m_doesProduce[i]) {
try {
double dv = ((AdditionalMeasureProducer) m_clusterer)
.getMeasure(m_additionalMeasures[i]);
Double value = new Double(dv);
result[current++] = value;
} catch (Exception ex) {
System.err.println(ex);
}
} else {
result[current++] = null;
}
}
if (current != overall_length) {
throw new Error("Results didn't fit RESULT_SIZE");
}
m_Evaluation = eval;
return result;
}
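// A minimal usage sketch of this split evaluator on its own (assumes
// weka.clusterers.EM is on the classpath and that "train" and "test" are
// pre-loaded Instances; names and values line up index by index):
//
//   DensityBasedClustererSplitEvaluator se = new DensityBasedClustererSplitEvaluator();
//   se.setClusterer(new weka.clusterers.EM());
//   String[] names = se.getResultNames();        // e.g. "Log_likelihood", "Number_of_clusters"
//   Object[] values = se.getResult(train, test); // one Double (or null) per name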
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String removeClassColumnTipText() {
return "Remove the class column (if set) from the data.";
}
/**
* Set whether the class column should be removed from the data.
*
* @param r true if the class column is to be removed.
*/
public void setRemoveClassColumn(boolean r) {
m_removeClassColumn = r;
}
/**
* Get whether the class column is to be removed.
*
* @return true if the class column is to be removed.
*/
public boolean getRemoveClassColumn() {
return m_removeClassColumn;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String clustererTipText() {
return "The density based clusterer to use.";
}
/**
* Get the value of clusterer
*
* @return Value of clusterer.
*/
public DensityBasedClusterer getClusterer() {
return m_clusterer;
}
/**
* Sets the clusterer.
*
* @param newClusterer the new clusterer to use.
*/
public void setClusterer(DensityBasedClusterer newClusterer) {
m_clusterer = newClusterer;
updateOptions();
}
/**
* Returns whether the size determination (train/test/clusterer) is skipped.
*
* @return true if size determination skipped
*/
public boolean getNoSizeDetermination() {
return m_NoSizeDetermination;
}
/**
* Sets whether the size determination (train/test/clusterer) is skipped.
*
* @param value true if the size determination is to be skipped
*/
public void setNoSizeDetermination(boolean value) {
m_NoSizeDetermination = value;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String noSizeDeterminationTipText() {
return "If enabled, the size determination for train/test/clusterer is skipped.";
}
protected void updateOptions() {
if (m_clusterer instanceof OptionHandler) {
m_clustererOptions = Utils.joinOptions(((OptionHandler) m_clusterer)
.getOptions());
} else {
m_clustererOptions = "";
}
if (m_clusterer instanceof Serializable) {
ObjectStreamClass obs = ObjectStreamClass.lookup(m_clusterer.getClass());
m_clustererVersion = "" + obs.getSerialVersionUID();
} else {
m_clustererVersion = "";
}
}
/**
* Set the Clusterer to use, given its class name. A new clusterer will be
* instantiated.
*
* @param newClustererName the clusterer class name.
* @exception Exception if the class name is invalid.
*/
public void setClustererName(String newClustererName) throws Exception {
try {
setClusterer((DensityBasedClusterer) Class.forName(newClustererName)
.newInstance());
} catch (Exception ex) {
throw new Exception("Can't find Clusterer with class name: "
+ newClustererName);
}
}
/**
* Gets the raw output from the clusterer
*
* @return the raw output from the clusterer
*/
@Override
public String getRawResultOutput() {
StringBuffer result = new StringBuffer();
if (m_clusterer == null) {
return "<null> clusterer";
}
result.append(toString());
result.append("Clustering model: \n" + m_clusterer.toString() + '\n');
// append the performance statistics
if (m_result != null) {
// result.append(m_result);
if (m_doesProduce != null) {
for (int i = 0; i < m_doesProduce.length; i++) {
if (m_doesProduce[i]) {
try {
double dv = ((AdditionalMeasureProducer) m_clusterer)
.getMeasure(m_additionalMeasures[i]);
Double value = new Double(dv);
result.append(m_additionalMeasures[i] + " : " + value + '\n');
} catch (Exception ex) {
System.err.println(ex);
}
}
}
}
}
return result.toString();
}
/**
* Returns a text description of the split evaluator.
*
* @return a text description of the split evaluator.
*/
@Override
public String toString() {
String result = "DensityBasedClustererSplitEvaluator: ";
if (m_clusterer == null) {
return result + "<null> clusterer";
}
return result + m_clusterer.getClass().getName() + " " + m_clustererOptions
+ "(version " + m_clustererVersion + ")";
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/Experiment.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Experiment.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.beans.PropertyDescriptor;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.lang.reflect.Array;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Vector;
import javax.swing.DefaultListModel;
import weka.core.AdditionalMeasureProducer;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.SerializationHelper;
import weka.core.Utils;
import weka.core.converters.AbstractFileLoader;
import weka.core.converters.ConverterUtils;
import weka.core.xml.KOML;
import weka.core.xml.XMLOptions;
import weka.experiment.xml.XMLExperiment;
/**
* Holds all the necessary configuration information for a standard type
* experiment. This object is able to be serialized for storage on disk.
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -L <num>
* The lower run number to start the experiment from.
* (default 1)
* </pre>
*
* <pre>
* -U <num>
* The upper run number to end the experiment at (inclusive).
* (default 10)
* </pre>
*
* <pre>
* -T <arff file>
* The dataset to run the experiment on.
* (required, may be specified multiple times)
* </pre>
*
* <pre>
* -P <class name>
* The full class name of a ResultProducer (required).
* eg: weka.experiment.RandomSplitResultProducer
* </pre>
*
* <pre>
* -D <class name>
* The full class name of a ResultListener (required).
* eg: weka.experiment.CSVResultListener
* </pre>
*
* <pre>
* -N <string>
* A string containing any notes about the experiment.
* (default none)
* </pre>
*
* <pre>
* Options specific to result producer weka.experiment.RandomSplitResultProducer:
* </pre>
*
* <pre>
* -P <percent>
* The percentage of instances to use for training.
* (default 66)
* </pre>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
* If a directory name is specified then then individual
* outputs will be gzipped, otherwise all output will be
* zipped to the named file. Use in conjuction with -D. (default splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* -R
* Set when data is not to be randomized and the data sets' size.
* Is not to be determined via probabilistic rounding.
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the result producer.
* <p>
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class Experiment implements Serializable, OptionHandler, RevisionHandler {
/** for serialization */
static final long serialVersionUID = 44945596742646663L;
/** The filename extension that should be used for experiment files */
public static String FILE_EXTENSION = ".exp";
/** Where results will be sent */
protected ResultListener m_ResultListener = new InstancesResultListener();
/** The result producer */
protected ResultProducer m_ResultProducer = new RandomSplitResultProducer();
/** Lower run number */
protected int m_RunLower = 1;
/** Upper run number */
protected int m_RunUpper = 10;
/** An array of dataset files */
protected DefaultListModel m_Datasets = new DefaultListModel();
/** True if the exp should also iterate over a property of the RP */
protected boolean m_UsePropertyIterator = false;
/** The path to the iterator property */
protected PropertyNode[] m_PropertyPath;
/** The array of values to set the property to */
protected Object m_PropertyArray;
/** User notes about the experiment */
protected String m_Notes = "";
/**
* Method names of additional measures of objects contained in the custom
* property iterator. Only methods names beginning with "measure" and
* returning doubles are recognised
*/
protected String[] m_AdditionalMeasures = null;
/**
* True if the class attribute is the first attribute for all datasets
* involved in this experiment.
*/
protected boolean m_ClassFirst = false;
/**
* If true an experiment will advance the current data set before any custom
* iterator
*/
protected boolean m_AdvanceDataSetFirst = true;
/**
* Sets whether the first attribute is treated as the class for all datasets
* involved in the experiment. This information is not output with the result
* of the experiments!
*
* @param flag whether the class attribute is the first and not the last
*/
public void classFirst(boolean flag) {
m_ClassFirst = flag;
}
/**
* Get the value of m_AdvanceDataSetFirst.
*
* @return Value of m_AdvanceDataSetFirst.
*/
public boolean getAdvanceDataSetFirst() {
return m_AdvanceDataSetFirst;
}
/**
* Set the value of m_AdvanceDataSetFirst.
*
* @param newAdvanceDataSetFirst Value to assign to m_AdvanceDataSetFirst.
*/
public void setAdvanceDataSetFirst(boolean newAdvanceDataSetFirst) {
m_AdvanceDataSetFirst = newAdvanceDataSetFirst;
}
/**
* Gets whether the custom property iterator should be used.
*
* @return true if so
*/
public boolean getUsePropertyIterator() {
return m_UsePropertyIterator;
}
/**
* Sets whether the custom property iterator should be used.
*
* @param newUsePropertyIterator true if so
*/
public void setUsePropertyIterator(boolean newUsePropertyIterator) {
m_UsePropertyIterator = newUsePropertyIterator;
}
/**
* Gets the path of properties taken to get to the custom property to iterate
* over.
*
* @return an array of PropertyNodes
*/
public PropertyNode[] getPropertyPath() {
return m_PropertyPath;
}
/**
* Sets the path of properties taken to get to the custom property to iterate
* over.
*
* @param newPropertyPath an array of PropertyNodes
*/
public void setPropertyPath(PropertyNode[] newPropertyPath) {
m_PropertyPath = newPropertyPath;
}
/**
* Sets the array of values to set the custom property to.
*
* @param newPropArray a value of type Object which should be an array of the
* appropriate values.
*/
public void setPropertyArray(Object newPropArray) {
m_PropertyArray = newPropArray;
}
/**
* Gets the array of values to set the custom property to.
*
* @return a value of type Object which should be an array of the appropriate
* values.
*/
public Object getPropertyArray() {
return m_PropertyArray;
}
/**
* Gets the number of custom iterator values that have been defined for the
* experiment.
*
* @return the number of custom property iterator values.
*/
public int getPropertyArrayLength() {
return Array.getLength(m_PropertyArray);
}
/**
* Gets a specified value from the custom property iterator array.
*
* @param index the index of the value wanted
* @return the property array value
*/
public Object getPropertyArrayValue(int index) {
return Array.get(m_PropertyArray, index);
}
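// A minimal sketch of populating the custom property iterator (assumes the
// property path has already been set up, e.g. via the Experimenter GUI or
// setPropertyPath(); the two classifiers below are purely illustrative):
//
//   weka.classifiers.Classifier[] learners = {
//     new weka.classifiers.rules.ZeroR(),
//     new weka.classifiers.bayes.NaiveBayes()
//   };
//   exp.setPropertyArray(learners);
//   exp.setUsePropertyIterator(true);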
/*
* These may potentially want to be made un-transient if it is decided that
* experiments may be saved mid-run and later resumed
*/
/** The current run number when the experiment is running */
protected transient int m_RunNumber;
/** The current dataset number when the experiment is running */
protected transient int m_DatasetNumber;
/** The current custom property value index when the experiment is running */
protected transient int m_PropertyNumber;
/** True if the experiment has finished running */
protected transient boolean m_Finished = true;
/** The dataset currently being used */
protected transient Instances m_CurrentInstances;
/** The custom property value that has actually been set */
protected transient int m_CurrentProperty;
/**
* When an experiment is running, this returns the current run number.
*
* @return the current run number.
*/
public int getCurrentRunNumber() {
return m_RunNumber;
}
/**
* When an experiment is running, this returns the current dataset number.
*
* @return the current dataset number.
*/
public int getCurrentDatasetNumber() {
return m_DatasetNumber;
}
/**
* When an experiment is running, this returns the index of the current custom
* property value.
*
* @return the index of the current custom property value.
*/
public int getCurrentPropertyNumber() {
return m_PropertyNumber;
}
/**
* Prepares an experiment for running, initializing current iterator settings.
*
* @throws Exception if an error occurs
*/
public void initialize() throws Exception {
m_RunNumber = getRunLower();
m_DatasetNumber = 0;
m_PropertyNumber = 0;
m_CurrentProperty = -1;
m_CurrentInstances = null;
m_Finished = false;
if (m_UsePropertyIterator && (m_PropertyArray == null)) {
throw new Exception("Null array for property iterator");
}
if (getRunLower() > getRunUpper()) {
throw new Exception("Lower run number is greater than upper run number");
}
if (getDatasets().size() == 0) {
throw new Exception("No datasets have been specified");
}
if (m_ResultProducer == null) {
throw new Exception("No ResultProducer set");
}
if (m_ResultListener == null) {
throw new Exception("No ResultListener set");
}
// if (m_UsePropertyIterator && (m_PropertyArray != null)) {
determineAdditionalResultMeasures();
// }
m_ResultProducer.setResultListener(m_ResultListener);
m_ResultProducer.setAdditionalMeasures(m_AdditionalMeasures);
m_ResultProducer.preProcess();
// constrain the additional measures to be only those allowable
// by the ResultListener
String[] columnConstraints = m_ResultListener
.determineColumnConstraints(m_ResultProducer);
if (columnConstraints != null) {
m_ResultProducer.setAdditionalMeasures(columnConstraints);
}
}
/**
* Iterate over the objects in the property array to determine what (if any)
* additional measures they support
*
* @throws Exception if additional measures don't comply to the naming
* convention (starting with "measure")
*/
private void determineAdditionalResultMeasures() throws Exception {
m_AdditionalMeasures = null;
ArrayList<String> measureNames = new ArrayList<String>();
// first try the result producer, then property array if applicable
if (m_ResultProducer instanceof AdditionalMeasureProducer) {
Enumeration<String> am = ((AdditionalMeasureProducer) m_ResultProducer)
.enumerateMeasures();
while (am.hasMoreElements()) {
String mname = am.nextElement();
if (mname.startsWith("measure")) {
if (measureNames.indexOf(mname) == -1) {
measureNames.add(mname);
}
} else {
throw new Exception("Additional measures in "
+ m_ResultProducer.getClass().getName()
+ " must obey the naming convention"
+ " of starting with \"measure\"");
}
}
}
if (m_UsePropertyIterator && (m_PropertyArray != null)) {
for (int i = 0; i < Array.getLength(m_PropertyArray); i++) {
Object current = Array.get(m_PropertyArray, i);
if (current instanceof AdditionalMeasureProducer) {
Enumeration<String> am = ((AdditionalMeasureProducer) current)
.enumerateMeasures();
while (am.hasMoreElements()) {
String mname = am.nextElement();
if (mname.startsWith("measure")) {
if (measureNames.indexOf(mname) == -1) {
measureNames.add(mname);
}
} else {
throw new Exception("Additional measures in "
+ current.getClass().getName()
+ " must obey the naming convention"
+ " of starting with \"measure\"");
}
}
}
}
}
if (measureNames.size() > 0) {
m_AdditionalMeasures = new String[measureNames.size()];
for (int i = 0; i < measureNames.size(); i++) {
m_AdditionalMeasures[i] = measureNames.get(i);
}
}
}
/**
* Recursively sets the custom property value, by setting all values along the
* property path.
*
* @param propertyDepth the current position along the property path
* @param origValue the value to set the property to
* @throws Exception if an error occurs
*/
protected void setProperty(int propertyDepth, Object origValue)
throws Exception {
PropertyDescriptor current = m_PropertyPath[propertyDepth].property;
Object subVal = null;
if (propertyDepth < m_PropertyPath.length - 1) {
Method getter = current.getReadMethod();
Object getArgs[] = {};
subVal = getter.invoke(origValue, getArgs);
setProperty(propertyDepth + 1, subVal);
} else {
subVal = Array.get(m_PropertyArray, m_PropertyNumber);
}
Method setter = current.getWriteMethod();
Object[] args = { subVal };
setter.invoke(origValue, args);
}
/**
* Returns true if there are more iterations to carry out in the experiment.
*
* @return true if so
*/
public boolean hasMoreIterations() {
return !m_Finished;
}
/**
* Carries out the next iteration of the experiment.
*
* @throws Exception if an error occurs
*/
public void nextIteration() throws Exception {
if (m_UsePropertyIterator) {
if (m_CurrentProperty != m_PropertyNumber) {
setProperty(0, m_ResultProducer);
m_CurrentProperty = m_PropertyNumber;
}
}
if (m_CurrentInstances == null) {
File currentFile = (File) getDatasets().elementAt(m_DatasetNumber);
AbstractFileLoader loader = ConverterUtils.getLoaderForFile(currentFile);
loader.setFile(currentFile);
Instances data = new Instances(loader.getDataSet());
// only set class attribute if not already done by loader
if (data.classIndex() == -1) {
if (m_ClassFirst) {
data.setClassIndex(0);
} else {
data.setClassIndex(data.numAttributes() - 1);
}
}
m_CurrentInstances = data;
m_ResultProducer.setInstances(m_CurrentInstances);
}
m_ResultProducer.doRun(m_RunNumber);
advanceCounters();
}
/**
* Increments iteration counters appropriately.
*/
public void advanceCounters() {
if (m_AdvanceDataSetFirst) {
m_RunNumber++;
if (m_RunNumber > getRunUpper()) {
m_RunNumber = getRunLower();
m_DatasetNumber++;
m_CurrentInstances = null;
if (m_DatasetNumber >= getDatasets().size()) {
m_DatasetNumber = 0;
if (m_UsePropertyIterator) {
m_PropertyNumber++;
if (m_PropertyNumber >= Array.getLength(m_PropertyArray)) {
m_Finished = true;
}
} else {
m_Finished = true;
}
}
}
} else { // advance by custom iterator before data set
m_RunNumber++;
if (m_RunNumber > getRunUpper()) {
m_RunNumber = getRunLower();
if (m_UsePropertyIterator) {
m_PropertyNumber++;
if (m_PropertyNumber >= Array.getLength(m_PropertyArray)) {
m_PropertyNumber = 0;
m_DatasetNumber++;
m_CurrentInstances = null;
if (m_DatasetNumber >= getDatasets().size()) {
m_Finished = true;
}
}
} else {
m_DatasetNumber++;
m_CurrentInstances = null;
if (m_DatasetNumber >= getDatasets().size()) {
m_Finished = true;
}
}
}
}
}
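/**
* Runs all iterations of the experiment, continuing past errors, and
* optionally prints progress information for each iteration.
*
* @param verbose whether to print the current iteration (property value,
*        dataset and run number) to standard out
*/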
public void runExperiment(boolean verbose) {
while (hasMoreIterations()) {
try {
if (verbose) {
String current = "Iteration:";
if (getUsePropertyIterator()) {
int cnum = getCurrentPropertyNumber();
String ctype = getPropertyArray().getClass().getComponentType()
.getName();
int lastDot = ctype.lastIndexOf('.');
if (lastDot != -1) {
ctype = ctype.substring(lastDot + 1);
}
String cname = " " + ctype + "=" + (cnum + 1) + ":"
+ getPropertyArrayValue(cnum).getClass().getName();
current += cname;
}
String dname = ((File) getDatasets().elementAt(
getCurrentDatasetNumber())).getName();
current += " Dataset=" + dname + " Run=" + (getCurrentRunNumber());
System.out.println(current);
}
nextIteration();
} catch (Exception ex) {
ex.printStackTrace();
System.err.println(ex.getMessage());
advanceCounters(); // Try to keep plowing through
}
}
}
/**
* Runs all iterations of the experiment, continuing past errors.
*/
public void runExperiment() {
runExperiment(false);
}
/**
* Signals that the experiment is finished running, so that cleanup can be
* done.
*
* @throws Exception if an error occurs
*/
public void postProcess() throws Exception {
m_ResultProducer.postProcess();
}
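// A minimal programmatic driver (mirrors what main() below does; assumes an
// ARFF file "iris.arff" in the working directory and the default
// RandomSplitResultProducer/InstancesResultListener configured above):
//
//   Experiment exp = new Experiment();
//   exp.setRunLower(1);
//   exp.setRunUpper(10);
//   exp.getDatasets().addElement(new java.io.File("iris.arff"));
//   exp.initialize();
//   exp.runExperiment();   // iterates over runs and datasets (and property values)
//   exp.postProcess();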
/**
* Gets the datasets in the experiment.
*
* @return the datasets in the experiment.
*/
public DefaultListModel getDatasets() {
return m_Datasets;
}
/**
* Set the datasets to use in the experiment
*
* @param ds the list of datasets to use
*/
public void setDatasets(DefaultListModel ds) {
m_Datasets = ds;
}
/**
* Gets the result listener where results will be sent.
*
* @return the result listener where results will be sent.
*/
public ResultListener getResultListener() {
return m_ResultListener;
}
/**
* Sets the result listener where results will be sent.
*
* @param newResultListener the result listener where results will be sent.
*/
public void setResultListener(ResultListener newResultListener) {
m_ResultListener = newResultListener;
}
/**
* Get the result producer used for the current experiment.
*
* @return the result producer used for the current experiment.
*/
public ResultProducer getResultProducer() {
return m_ResultProducer;
}
/**
* Set the result producer used for the current experiment.
*
* @param newResultProducer result producer to use for the current experiment.
*/
public void setResultProducer(ResultProducer newResultProducer) {
m_ResultProducer = newResultProducer;
}
/**
* Get the upper run number for the experiment.
*
* @return the upper run number for the experiment.
*/
public int getRunUpper() {
return m_RunUpper;
}
/**
* Set the upper run number for the experiment.
*
* @param newRunUpper the upper run number for the experiment.
*/
public void setRunUpper(int newRunUpper) {
m_RunUpper = newRunUpper;
}
/**
* Get the lower run number for the experiment.
*
* @return the lower run number for the experiment.
*/
public int getRunLower() {
return m_RunLower;
}
/**
* Set the lower run number for the experiment.
*
* @param newRunLower the lower run number for the experiment.
*/
public void setRunLower(int newRunLower) {
m_RunLower = newRunLower;
}
/**
* Get the user notes.
*
* @return User notes associated with the experiment.
*/
public String getNotes() {
return m_Notes;
}
/**
* Set the user notes.
*
* @param newNotes New user notes.
*/
public void setNotes(String newNotes) {
m_Notes = newNotes;
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(6);
newVector.addElement(new Option(
"\tThe lower run number to start the experiment from.\n"
+ "\t(default 1)", "L", 1, "-L <num>"));
newVector.addElement(new Option(
"\tThe upper run number to end the experiment at (inclusive).\n"
+ "\t(default 10)", "U", 1, "-U <num>"));
newVector.addElement(new Option("\tThe dataset to run the experiment on.\n"
+ "\t(required, may be specified multiple times)", "T", 1,
"-T <arff file>"));
newVector.addElement(new Option(
"\tThe full class name of a ResultProducer (required).\n"
+ "\teg: weka.experiment.RandomSplitResultProducer", "P", 1,
"-P <class name>"));
newVector
.addElement(new Option(
"\tThe full class name of a ResultListener (required).\n"
+ "\teg: weka.experiment.CSVResultListener", "D", 1,
"-D <class name>"));
newVector.addElement(new Option(
"\tA string containing any notes about the experiment.\n"
+ "\t(default none)", "N", 1, "-N <string>"));
if ((m_ResultProducer != null)
&& (m_ResultProducer instanceof OptionHandler)) {
newVector.addElement(new Option("", "", 0,
"\nOptions specific to result producer "
+ m_ResultProducer.getClass().getName() + ":"));
newVector.addAll(Collections.list(((OptionHandler) m_ResultProducer)
.listOptions()));
}
return newVector.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -L <num>
* The lower run number to start the experiment from.
* (default 1)
* </pre>
*
* <pre>
* -U <num>
* The upper run number to end the experiment at (inclusive).
* (default 10)
* </pre>
*
* <pre>
* -T <arff file>
* The dataset to run the experiment on.
* (required, may be specified multiple times)
* </pre>
*
* <pre>
* -P <class name>
* The full class name of a ResultProducer (required).
* eg: weka.experiment.RandomSplitResultProducer
* </pre>
*
* <pre>
* -D <class name>
* The full class name of a ResultListener (required).
* eg: weka.experiment.CSVResultListener
* </pre>
*
* <pre>
* -N <string>
* A string containing any notes about the experiment.
* (default none)
* </pre>
*
* <pre>
* Options specific to result producer weka.experiment.RandomSplitResultProducer:
* </pre>
*
* <pre>
* -P <percent>
* The percentage of instances to use for training.
* (default 66)
* </pre>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
* If a directory name is specified then then individual
* outputs will be gzipped, otherwise all output will be
* zipped to the named file. Use in conjuction with -D. (default splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* -R
* Set when data is not to be randomized and the data sets' size.
* Is not to be determined via probabilistic rounding.
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the result producer.
* <p>
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String lowerString = Utils.getOption('L', options);
if (lowerString.length() != 0) {
setRunLower(Integer.parseInt(lowerString));
} else {
setRunLower(1);
}
String upperString = Utils.getOption('U', options);
if (upperString.length() != 0) {
setRunUpper(Integer.parseInt(upperString));
} else {
setRunUpper(10);
}
if (getRunLower() > getRunUpper()) {
throw new Exception("Lower (" + getRunLower()
+ ") is greater than upper (" + getRunUpper() + ")");
}
setNotes(Utils.getOption('N', options));
getDatasets().removeAllElements();
String dataName;
do {
dataName = Utils.getOption('T', options);
if (dataName.length() != 0) {
File dataset = new File(dataName);
getDatasets().addElement(dataset);
}
} while (dataName.length() != 0);
if (getDatasets().size() == 0) {
throw new Exception("Required: -T <arff file name>");
}
String rlName = Utils.getOption('D', options);
if (rlName.length() == 0) {
throw new Exception("Required: -D <ResultListener class name>");
}
rlName = rlName.trim();
// split off any options
int breakLoc = rlName.indexOf(' ');
String clName = rlName;
String rlOptionsString = "";
String[] rlOptions = null;
if (breakLoc != -1) {
clName = rlName.substring(0, breakLoc);
rlOptionsString = rlName.substring(breakLoc).trim();
rlOptions = Utils.splitOptions(rlOptionsString);
}
setResultListener((ResultListener) Utils.forName(ResultListener.class,
clName, rlOptions));
String rpName = Utils.getOption('P', options);
if (rpName.length() == 0) {
throw new Exception("Required: -P <ResultProducer class name>");
}
// Do it first without options, so if an exception is thrown during
// the option setting, listOptions will contain options for the actual
// RP.
// GHF -- nice idea, but it prevents you from using result producers that
// have *required* parameters
setResultProducer((ResultProducer) Utils.forName(ResultProducer.class,
rpName, Utils.partitionOptions(options))); // GHF
// GHF if (getResultProducer() instanceof OptionHandler) {
// GHF ((OptionHandler) getResultProducer())
// GHF .setOptions(Utils.partitionOptions(options));
// GHF }
}
/**
* Gets the current settings of the experiment iterator.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
// Currently no way to set custom property iterators from the command line
m_UsePropertyIterator = false;
m_PropertyPath = null;
m_PropertyArray = null;
String[] rpOptions = new String[0];
if ((m_ResultProducer != null)
&& (m_ResultProducer instanceof OptionHandler)) {
rpOptions = ((OptionHandler) m_ResultProducer).getOptions();
}
String[] options = new String[rpOptions.length + getDatasets().size() * 2
+ 11];
int current = 0;
options[current++] = "-L";
options[current++] = "" + getRunLower();
options[current++] = "-U";
options[current++] = "" + getRunUpper();
if (getDatasets().size() != 0) {
for (int i = 0; i < getDatasets().size(); i++) {
options[current++] = "-T";
options[current++] = getDatasets().elementAt(i).toString();
}
}
if (getResultListener() != null) {
options[current++] = "-D";
options[current++] = getResultListener().getClass().getName();
}
if (getResultProducer() != null) {
options[current++] = "-P";
options[current++] = getResultProducer().getClass().getName();
}
if (!getNotes().equals("")) {
options[current++] = "-N";
options[current++] = getNotes();
}
options[current++] = "--";
System.arraycopy(rpOptions, 0, options, current, rpOptions.length);
current += rpOptions.length;
while (current < options.length) {
options[current++] = "";
}
return options;
}
/**
* Gets a string representation of the experiment configuration.
*
* @return a value of type 'String'
*/
@Override
public String toString() {
String result = "Runs from: " + m_RunLower + " to: " + m_RunUpper + '\n';
result += "Datasets:";
for (int i = 0; i < m_Datasets.size(); i++) {
result += " " + m_Datasets.elementAt(i);
}
result += '\n';
result += "Custom property iterator: "
+ (m_UsePropertyIterator ? "on" : "off") + "\n";
if (m_UsePropertyIterator) {
if (m_PropertyPath == null) {
throw new Error("*** null propertyPath ***");
}
if (m_PropertyArray == null) {
throw new Error("*** null propertyArray ***");
}
if (m_PropertyPath.length > 1) {
result += "Custom property path:\n";
for (int i = 0; i < m_PropertyPath.length - 1; i++) {
PropertyNode pn = m_PropertyPath[i];
result += "" + (i + 1) + " " + pn.parentClass.getName() + "::"
+ pn.toString() + ' ' + pn.value.toString() + '\n';
}
}
result += "Custom property name:"
+ m_PropertyPath[m_PropertyPath.length - 1].toString() + '\n';
result += "Custom property values:\n";
for (int i = 0; i < Array.getLength(m_PropertyArray); i++) {
Object current = Array.get(m_PropertyArray, i);
result += " " + (i + 1) + " " + current.getClass().getName() + " "
+ current.toString() + '\n';
}
}
result += "ResultProducer: " + m_ResultProducer + '\n';
result += "ResultListener: " + m_ResultListener + '\n';
if (!getNotes().equals("")) {
result += "Notes: " + getNotes();
}
return result;
}
/**
* Loads an experiment from a file.
*
* @param filename the file to load the experiment from
* @return the experiment
* @throws Exception if loading fails
*/
public static Experiment read(String filename) throws Exception {
Experiment result;
// KOML?
if ((KOML.isPresent())
&& (filename.toLowerCase().endsWith(KOML.FILE_EXTENSION))) {
result = (Experiment) KOML.read(filename);
}
// XML?
else if (filename.toLowerCase().endsWith(".xml")) {
XMLExperiment xml = new XMLExperiment();
result = (Experiment) xml.read(filename);
}
// binary
else {
FileInputStream fi = new FileInputStream(filename);
ObjectInputStream oi = SerializationHelper.getObjectInputStream(fi);
// ObjectInputStream oi = new ObjectInputStream(new BufferedInputStream(fi));
result = (Experiment) oi.readObject();
oi.close();
}
return result;
}
/**
* Writes the experiment to disk.
*
* @param filename the file to write to
* @param exp the experiment to save
* @throws Exception if writing fails
*/
public static void write(String filename, Experiment exp) throws Exception {
// KOML?
if ((KOML.isPresent())
&& (filename.toLowerCase().endsWith(KOML.FILE_EXTENSION))) {
KOML.write(filename, exp);
}
// XML?
else if (filename.toLowerCase().endsWith(".xml")) {
XMLExperiment xml = new XMLExperiment();
xml.write(filename, exp);
}
// binary
else {
FileOutputStream fo = new FileOutputStream(filename);
ObjectOutputStream oo = new ObjectOutputStream(new BufferedOutputStream(
fo));
oo.writeObject(exp);
oo.close();
}
}
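// A minimal save/load round trip (the extension selects the format: KOML if
// present and its extension is used, ".xml" for XML, anything else is plain
// Java serialization):
//
//   Experiment.write("my" + Experiment.FILE_EXTENSION, exp);
//   Experiment restored = Experiment.read("my" + Experiment.FILE_EXTENSION);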
/**
* Configures/Runs the Experiment from the command line.
*
* @param args command line arguments to the Experiment.
*/
public static void main(String[] args) {
try {
weka.core.WekaPackageManager.loadPackages(false, true, false);
Experiment exp = null;
// get options from XML?
String xmlOption = Utils.getOption("xml", args);
if (!xmlOption.equals("")) {
args = new XMLOptions(xmlOption).toArray();
}
String expFile = Utils.getOption('l', args);
String saveFile = Utils.getOption('s', args);
boolean runExp = Utils.getFlag('r', args);
boolean verbose = Utils.getFlag("verbose", args);
if (expFile.length() == 0) {
exp = new Experiment();
try {
exp.setOptions(args);
Utils.checkForRemainingOptions(args);
} catch (Exception ex) {
ex.printStackTrace();
String result = "Usage:\n\n" + "-l <exp|xml file>\n"
+ "\tLoad experiment from file (default use cli options).\n"
+ "\tThe type is determined, based on the extension ("
+ FILE_EXTENSION + " or .xml)\n" + "-s <exp|xml file>\n"
+ "\tSave experiment to file after setting other options.\n"
+ "\tThe type is determined, based on the extension ("
+ FILE_EXTENSION + " or .xml)\n" + "\t(default don't save)\n"
+ "-r\n" + "\tRun experiment (default don't run)\n"
+ "-xml <filename | xml-string>\n"
+ "\tget options from XML-Data instead from parameters.\n"
+ "-verbose\n" + "\toutput progress information to std out." + "\n";
Enumeration<Option> enm = ((OptionHandler) exp).listOptions();
while (enm.hasMoreElements()) {
Option option = enm.nextElement();
result += option.synopsis() + "\n";
result += option.description() + "\n";
}
throw new Exception(result + "\n" + ex.getMessage());
}
} else {
exp = read(expFile);
if (exp instanceof RemoteExperiment) {
throw new Exception("Cannot run remote experiment using Experiment class. Use RemoteExperiment class instead!");
}
// allow extra datasets to be added to pre-loaded experiment from
// command line
String dataName;
do {
dataName = Utils.getOption('T', args);
if (dataName.length() != 0) {
File dataset = new File(dataName);
exp.getDatasets().addElement(dataset);
}
} while (dataName.length() != 0);
}
System.err.println("Experiment:\n" + exp.toString());
if (saveFile.length() != 0) {
write(saveFile, exp);
}
if (runExp) {
System.err.println("Initializing...");
exp.initialize();
System.err.println("Iterating...");
exp.runExperiment(verbose);
System.err.println("Postprocessing...");
exp.postProcess();
}
} catch (Exception ex) {
System.err.println(ex.getMessage());
}
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
} // Experiment
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/ExplicitTestsetResultProducer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ExplicitTestsetResultProducer.java
* Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.File;
import java.util.Calendar;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.TimeZone;
import java.util.Vector;
import weka.core.AdditionalMeasureProducer;
import weka.core.Environment;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.WekaException;
import weka.core.converters.ConverterUtils.DataSource;
/**
* <!-- globalinfo-start --> Loads the external test set and calls the
* appropriate SplitEvaluator to generate some results.<br/>
* The filename of the test set is constructed as follows:<br/>
* <dir> + / + <prefix> + <relation-name> + <suffix><br/>
* The relation-name can be modified by using the regular expression to replace
* the matching sub-string with a specified replacement string. In order to get
* rid of the string that the Weka filters add to the end of the relation name,
* just use '.*-weka' as the regular expression to find.<br/>
* The suffix determines the type of file to load, i.e., one is not restricted
* to ARFF files. As long as Weka recognizes the extension specified in the
* suffix, the data will be loaded with one of Weka's converters.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
* If a directory name is specified then then individual
* outputs will be gzipped, otherwise all output will be
* zipped to the named file. Use in conjuction with -D.
* (default: splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* -R
* Set when data is to be randomized.
* </pre>
*
* <pre>
* -dir <directory>
* The directory containing the test sets.
* (default: current directory)
* </pre>
*
* <pre>
* -prefix <string>
* An optional prefix for the test sets (before the relation name).
* (default: empty string)
* </pre>
*
* <pre>
* -suffix <string>
* The suffix to append to the test set.
* (default: _test.arff)
* </pre>
*
* <pre>
* -find <regular expression>
* The regular expression to search the relation name with.
* Not used if an empty string.
* (default: empty string)
* </pre>
*
* <pre>
* -replace <string>
* The replacement string for the all the matches of '-find'.
* (default: empty string)
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the split evaluator.
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class ExplicitTestsetResultProducer implements ResultProducer,
OptionHandler, AdditionalMeasureProducer, RevisionHandler {
/** for serialization. */
private static final long serialVersionUID = 2613585409333652530L;
/** the default suffix. */
public final static String DEFAULT_SUFFIX = "_test.arff";
/** The dataset of interest. */
protected Instances m_Instances;
/** The ResultListener to send results to. */
protected ResultListener m_ResultListener = new CSVResultListener();
/** The directory containing all the test sets. */
protected File m_TestsetDir = new File(System.getProperty("user.dir"));
/** The prefix for all the test sets. */
protected String m_TestsetPrefix = "";
/** The suffix for all the test sets. */
protected String m_TestsetSuffix = DEFAULT_SUFFIX;
/** The regular expression to search for in the relation name. */
protected String m_RelationFind = "";
/** The string to use to replace the matches of the regular expression. */
protected String m_RelationReplace = "";
/** Whether dataset is to be randomized. */
protected boolean m_randomize = false;
/** The SplitEvaluator used to generate results. */
protected SplitEvaluator m_SplitEvaluator = new ClassifierSplitEvaluator();
/** The names of any additional measures to look for in SplitEvaluators. */
protected String[] m_AdditionalMeasures = null;
/** Save raw output of split evaluators --- for debugging purposes. */
protected boolean m_debugOutput = false;
/** The output zipper to use for saving raw splitEvaluator output. */
protected OutputZipper m_ZipDest = null;
/** The destination output file/directory for raw output. */
protected File m_OutputFile = new File(new File(
System.getProperty("user.dir")), "splitEvalutorOut.zip");
/** The name of the key field containing the dataset name. */
public static String DATASET_FIELD_NAME = "Dataset";
/** The name of the key field containing the run number. */
public static String RUN_FIELD_NAME = "Run";
/** The name of the result field containing the timestamp. */
public static String TIMESTAMP_FIELD_NAME = "Date_time";
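/** Environment for substituting variables (e.g. the run number) in the test set filename. */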
protected transient Environment m_env;
/**
* Returns a string describing this result producer.
*
* @return a description of the result producer suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "Loads the external test set and calls the appropriate "
+ "SplitEvaluator to generate some results.\n"
+ "The filename of the test set is constructed as follows:\n"
+ " <dir> + / + <prefix> + <relation-name> + <suffix>\n"
+ "The relation-name can be modified by using the regular expression "
+ "to replace the matching sub-string with a specified replacement "
+ "string. In order to get rid of the string that the Weka filters "
+ "add to the end of the relation name, just use '.*-weka' as the "
+ "regular expression to find.\n"
+ "The suffix determines the type of file to load, i.e., one is "
+ "not restricted to ARFF files. As long as Weka recognizes the "
+ "extension specified in the suffix, the data will be loaded with "
+ "one of Weka's converters.";
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = new Vector<Option>();
result.addElement(new Option("Save raw split evaluator output.", "D", 0,
"-D"));
result.addElement(new Option(
"\tThe filename where raw output will be stored.\n"
+ "\tIf a directory name is specified then then individual\n"
+ "\toutputs will be gzipped, otherwise all output will be\n"
+ "\tzipped to the named file. Use in conjuction with -D.\n"
+ "\t(default: splitEvalutorOut.zip)", "O", 1,
"-O <file/directory name/path>"));
result.addElement(new Option("\tThe full class name of a SplitEvaluator.\n"
+ "\teg: weka.experiment.ClassifierSplitEvaluator", "W", 1,
"-W <class name>"));
result.addElement(new Option("\tSet when data is to be randomized.", "R",
0, "-R"));
result.addElement(new Option("\tThe directory containing the test sets.\n"
+ "\t(default: current directory)", "dir", 1, "-dir <directory>"));
result.addElement(new Option(
"\tAn optional prefix for the test sets (before the relation name).\n"
+ "(default: empty string)", "prefix", 1, "-prefix <string>"));
result
.addElement(new Option("\tThe suffix to append to the test set.\n"
+ "\t(default: " + DEFAULT_SUFFIX + ")", "suffix", 1,
"-suffix <string>"));
result.addElement(new Option(
"\tThe regular expression to search the relation name with.\n"
+ "\tNot used if an empty string.\n" + "\t(default: empty string)",
"find", 1, "-find <regular expression>"));
result.addElement(new Option(
"\tThe replacement string for the all the matches of '-find'.\n"
+ "\t(default: empty string)", "replace", 1, "-replace <string>"));
if ((m_SplitEvaluator != null)
&& (m_SplitEvaluator instanceof OptionHandler)) {
result.addElement(new Option("", "", 0,
"\nOptions specific to split evaluator "
+ m_SplitEvaluator.getClass().getName() + ":"));
result.addAll(Collections.list(((OptionHandler) m_SplitEvaluator)
.listOptions()));
}
return result.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
* If a directory name is specified then then individual
* outputs will be gzipped, otherwise all output will be
* zipped to the named file. Use in conjuction with -D.
* (default: splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* -R
* Set when data is to be randomized.
* </pre>
*
* <pre>
* -dir <directory>
* The directory containing the test sets.
* (default: current directory)
* </pre>
*
* <pre>
* -prefix <string>
* An optional prefix for the test sets (before the relation name).
* (default: empty string)
* </pre>
*
* <pre>
* -suffix <string>
* The suffix to append to the test set.
* (default: _test.arff)
* </pre>
*
* <pre>
* -find <regular expression>
* The regular expression to search the relation name with.
* Not used if an empty string.
* (default: empty string)
* </pre>
*
* <pre>
* -replace <string>
* The replacement string for the all the matches of '-find'.
* (default: empty string)
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the split evaluator.
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
setRawOutput(Utils.getFlag('D', options));
setRandomizeData(!Utils.getFlag('R', options));
tmpStr = Utils.getOption('O', options);
if (tmpStr.length() != 0) {
setOutputFile(new File(tmpStr));
}
tmpStr = Utils.getOption("dir", options);
if (tmpStr.length() > 0) {
setTestsetDir(new File(tmpStr));
} else {
setTestsetDir(new File(System.getProperty("user.dir")));
}
tmpStr = Utils.getOption("prefix", options);
if (tmpStr.length() > 0) {
setTestsetPrefix(tmpStr);
} else {
setTestsetPrefix("");
}
tmpStr = Utils.getOption("suffix", options);
if (tmpStr.length() > 0) {
setTestsetSuffix(tmpStr);
} else {
setTestsetSuffix(DEFAULT_SUFFIX);
}
tmpStr = Utils.getOption("find", options);
if (tmpStr.length() > 0) {
setRelationFind(tmpStr);
} else {
setRelationFind("");
}
tmpStr = Utils.getOption("replace", options);
if ((tmpStr.length() > 0) && (getRelationFind().length() > 0)) {
setRelationReplace(tmpStr);
} else {
setRelationReplace("");
}
tmpStr = Utils.getOption('W', options);
if (tmpStr.length() == 0) {
throw new Exception(
"A SplitEvaluator must be specified with the -W option.");
}
// Do it first without options, so if an exception is thrown during
// the option setting, listOptions will contain options for the actual
// SE.
setSplitEvaluator((SplitEvaluator) Utils.forName(SplitEvaluator.class,
tmpStr, null));
if (getSplitEvaluator() instanceof OptionHandler) {
((OptionHandler) getSplitEvaluator()).setOptions(Utils
.partitionOptions(options));
}
}
/**
* Gets the current settings of the result producer.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result;
String[] seOptions;
int i;
result = new Vector<String>();
seOptions = new String[0];
if ((m_SplitEvaluator != null)
&& (m_SplitEvaluator instanceof OptionHandler)) {
seOptions = ((OptionHandler) m_SplitEvaluator).getOptions();
}
if (getRawOutput()) {
result.add("-D");
}
if (!getRandomizeData()) {
result.add("-R");
}
result.add("-O");
result.add(getOutputFile().getName());
result.add("-dir");
result.add(getTestsetDir().getPath());
if (getTestsetPrefix().length() > 0) {
result.add("-prefix");
result.add(getTestsetPrefix());
}
result.add("-suffix");
result.add(getTestsetSuffix());
if (getRelationFind().length() > 0) {
result.add("-find");
result.add(getRelationFind());
if (getRelationReplace().length() > 0) {
result.add("-replace");
result.add(getRelationReplace());
}
}
if (getSplitEvaluator() != null) {
result.add("-W");
result.add(getSplitEvaluator().getClass().getName());
}
if (seOptions.length > 0) {
result.add("--");
for (i = 0; i < seOptions.length; i++) {
result.add(seOptions[i]);
}
}
return result.toArray(new String[result.size()]);
}
/**
* Sets the dataset that results will be obtained for.
*
* @param instances a value of type 'Instances'.
*/
@Override
public void setInstances(Instances instances) {
m_Instances = instances;
}
/**
* Set a list of method names for additional measures to look for in
* SplitEvaluators. This could contain many measures (of which only a subset
* may be produceable by the current SplitEvaluator) if an experiment is the
* type that iterates over a set of properties.
*
* @param additionalMeasures an array of measure names, null if none
*/
@Override
public void setAdditionalMeasures(String[] additionalMeasures) {
m_AdditionalMeasures = additionalMeasures;
if (m_SplitEvaluator != null) {
System.err.println("ExplicitTestsetResultProducer: setting additional "
+ "measures for split evaluator");
m_SplitEvaluator.setAdditionalMeasures(m_AdditionalMeasures);
}
}
/**
* Returns an enumeration of any additional measure names that might be in the
* SplitEvaluator.
*
* @return an enumeration of the measure names
*/
@Override
public Enumeration<String> enumerateMeasures() {
Vector<String> result = new Vector<String>();
if (m_SplitEvaluator instanceof AdditionalMeasureProducer) {
Enumeration<String> en = ((AdditionalMeasureProducer) m_SplitEvaluator)
.enumerateMeasures();
while (en.hasMoreElements()) {
String mname = en.nextElement();
result.add(mname);
}
}
return result.elements();
}
/**
* Returns the value of the named measure.
*
* @param additionalMeasureName the name of the measure to query for its value
* @return the value of the named measure
* @throws IllegalArgumentException if the named measure is not supported
*/
@Override
public double getMeasure(String additionalMeasureName) {
if (m_SplitEvaluator instanceof AdditionalMeasureProducer) {
return ((AdditionalMeasureProducer) m_SplitEvaluator)
.getMeasure(additionalMeasureName);
} else {
throw new IllegalArgumentException("ExplicitTestsetResultProducer: "
+ "Can't return value for : " + additionalMeasureName + ". "
+ m_SplitEvaluator.getClass().getName() + " "
+ "is not an AdditionalMeasureProducer");
}
}
/**
* Sets the object to send results of each run to.
*
* @param listener a value of type 'ResultListener'
*/
@Override
public void setResultListener(ResultListener listener) {
m_ResultListener = listener;
}
/**
* Gets a Double representing the current date and time. eg: 1:46pm on
* 20/5/1999 -> 19990520.1346
*
* @return a value of type Double
*/
public static Double getTimestamp() {
Calendar now = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
double timestamp = now.get(Calendar.YEAR) * 10000
+ (now.get(Calendar.MONTH) + 1) * 100 + now.get(Calendar.DAY_OF_MONTH)
+ now.get(Calendar.HOUR_OF_DAY) / 100.0 + now.get(Calendar.MINUTE)
/ 10000.0;
return new Double(timestamp);
}
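// Worked example of the encoding above: 20 May 1999, 13:46 UTC gives
// 1999*10000 + (4+1)*100 + 20 + 13/100.0 + 46/10000.0 = 19990520.1346,
// i.e. yyyyMMdd in the integer part and HHmm spread across the fraction.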
/**
* Prepare to generate results.
*
* @throws Exception if an error occurs during preprocessing.
*/
@Override
public void preProcess() throws Exception {
if (m_SplitEvaluator == null) {
throw new Exception("No SplitEvalutor set");
}
if (m_ResultListener == null) {
throw new Exception("No ResultListener set");
}
m_ResultListener.preProcess(this);
}
/**
* Perform any postprocessing. When this method is called, it indicates that
* no more requests to generate results for the current experiment will be
* sent.
*
* @throws Exception if an error occurs
*/
@Override
public void postProcess() throws Exception {
m_ResultListener.postProcess(this);
if (m_debugOutput) {
if (m_ZipDest != null) {
m_ZipDest.finished();
m_ZipDest = null;
}
}
}
/**
* Gets the keys for a specified run number. Different run numbers correspond
* to different randomizations of the data. Keys produced should be sent to
* the current ResultListener
*
* @param run the run number to get keys for.
* @throws Exception if a problem occurs while getting the keys
*/
@Override
public void doRunKeys(int run) throws Exception {
if (m_Instances == null) {
throw new Exception("No Instances set");
}
// Add in some fields to the key like run number, dataset name
Object[] seKey = m_SplitEvaluator.getKey();
Object[] key = new Object[seKey.length + 2];
key[0] = Utils.backQuoteChars(m_Instances.relationName());
key[1] = "" + run;
System.arraycopy(seKey, 0, key, 2, seKey.length);
if (m_ResultListener.isResultRequired(this, key)) {
try {
m_ResultListener.acceptResult(this, key, null);
} catch (Exception ex) {
// Save the train and test datasets for debugging purposes?
throw ex;
}
}
}
/**
* Generates a new filename for the given relation based on the current setup.
*
* @param inst the instances to create the filename for
* @return the generated filename
*/
protected String createFilename(Instances inst) {
String result;
String name;
name = inst.relationName();
if (getRelationFind().length() > 0) {
name = name.replaceAll(getRelationFind(), getRelationReplace());
}
result = getTestsetDir().getPath() + File.separator;
result += getTestsetPrefix() + name + getTestsetSuffix();
// substitute the run number (and any other variables)
// if specified
try {
result = m_env.substitute(result);
    } catch (Exception ex) {
      // variable substitution failed - fall back to the unsubstituted name
    }
return result;
}
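  // Illustrative sketch of how the filename is assembled (all values below are
  // hypothetical, not defaults of this class): with testsetDir=/data/tests,
  // testsetPrefix="test_", testsetSuffix=".arff", a training relation named
  // "iris-run1", relationFind="-run[0-9]+" and relationReplace="", the method
  // returns
  //   /data/tests/test_iris.arff
  // after which any variables embedded in the name (e.g. RUN_NUMBER, which is
  // added to m_env in doRun() below) are expanded by m_env.substitute().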
/**
* Gets the results for a specified run number. Different run numbers
* correspond to different randomizations of the data. Results produced should
* be sent to the current ResultListener
*
* @param run the run number to get results for.
* @throws Exception if a problem occurs while getting the results
*/
@Override
public void doRun(int run) throws Exception {
if (getRawOutput()) {
if (m_ZipDest == null) {
m_ZipDest = new OutputZipper(m_OutputFile);
}
}
if (m_Instances == null) {
throw new Exception("No Instances set");
}
// Add in some fields to the key like run number, dataset name
Object[] seKey = m_SplitEvaluator.getKey();
Object[] key = new Object[seKey.length + 2];
key[0] = Utils.backQuoteChars(m_Instances.relationName());
key[1] = "" + run;
System.arraycopy(seKey, 0, key, 2, seKey.length);
if (m_ResultListener.isResultRequired(this, key)) {
// training set
Instances train = new Instances(m_Instances);
if (m_randomize) {
Random rand = new Random(run);
train.randomize(rand);
}
if (m_env == null) {
m_env = new Environment();
}
m_env.addVariable("RUN_NUMBER", "" + run);
// test set
String filename = createFilename(train);
File file = new File(filename);
if (!file.exists()) {
throw new WekaException("Test set '" + filename + "' not found!");
}
Instances test = DataSource.read(filename);
// can we set the class attribute safely?
if (train.numAttributes() == test.numAttributes()) {
test.setClassIndex(train.classIndex());
} else {
throw new WekaException("Train and test set (= " + filename + ") "
+ "differ in number of attributes: " + train.numAttributes() + " != "
+ test.numAttributes());
}
// test headers
if (!train.equalHeaders(test)) {
throw new WekaException("Train and test set (= " + filename + ") "
+ "are not compatible:\n" + train.equalHeadersMsg(test));
}
try {
Object[] seResults = m_SplitEvaluator.getResult(train, test);
Object[] results = new Object[seResults.length + 1];
results[0] = getTimestamp();
System.arraycopy(seResults, 0, results, 1, seResults.length);
if (m_debugOutput) {
String resultName = ("" + run + "."
+ Utils.backQuoteChars(train.relationName()) + "." + m_SplitEvaluator
.toString()).replace(' ', '_');
resultName = Utils.removeSubstring(resultName, "weka.classifiers.");
resultName = Utils.removeSubstring(resultName, "weka.filters.");
resultName = Utils.removeSubstring(resultName,
"weka.attributeSelection.");
m_ZipDest.zipit(m_SplitEvaluator.getRawResultOutput(), resultName);
}
m_ResultListener.acceptResult(this, key, results);
} catch (Exception e) {
// Save the train and test datasets for debugging purposes?
throw e;
}
}
}
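  // Minimal usage sketch (hypothetical wiring, not taken from the original
  // source): running this producer so that each run loads a pre-built test set
  // from disk instead of splitting the training data.
  //
  //   ExplicitTestsetResultProducer rp = new ExplicitTestsetResultProducer();
  //   rp.setTestsetDir(new File("/data/tests"));      // directory holding the test files
  //   rp.setTestsetPrefix("test_");                   // filename prefix
  //   rp.setSplitEvaluator(new ClassifierSplitEvaluator());
  //   rp.setInstances(trainingData);                  // training data for the run
  //   rp.setResultListener(new CSVResultListener());  // destination for results
  //   rp.preProcess();
  //   rp.doRun(1);   // reads /data/tests/test_<relation><suffix> as the test set
  //   rp.postProcess();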
/**
* Gets the names of each of the columns produced for a single run. This
* method should really be static.
*
* @return an array containing the name of each column
*/
@Override
public String[] getKeyNames() {
String[] keyNames = m_SplitEvaluator.getKeyNames();
// Add in the names of our extra key fields
String[] newKeyNames = new String[keyNames.length + 2];
newKeyNames[0] = DATASET_FIELD_NAME;
newKeyNames[1] = RUN_FIELD_NAME;
System.arraycopy(keyNames, 0, newKeyNames, 2, keyNames.length);
return newKeyNames;
}
/**
* Gets the data types of each of the columns produced for a single run. This
* method should really be static.
*
* @return an array containing objects of the type of each column. The objects
* should be Strings, or Doubles.
*/
@Override
public Object[] getKeyTypes() {
Object[] keyTypes = m_SplitEvaluator.getKeyTypes();
// Add in the types of our extra fields
Object[] newKeyTypes = new String[keyTypes.length + 2];
newKeyTypes[0] = new String();
newKeyTypes[1] = new String();
System.arraycopy(keyTypes, 0, newKeyTypes, 2, keyTypes.length);
return newKeyTypes;
}
/**
* Gets the names of each of the columns produced for a single run. This
* method should really be static.
*
* @return an array containing the name of each column
*/
@Override
public String[] getResultNames() {
String[] resultNames = m_SplitEvaluator.getResultNames();
// Add in the names of our extra Result fields
String[] newResultNames = new String[resultNames.length + 1];
newResultNames[0] = TIMESTAMP_FIELD_NAME;
System.arraycopy(resultNames, 0, newResultNames, 1, resultNames.length);
return newResultNames;
}
/**
* Gets the data types of each of the columns produced for a single run. This
* method should really be static.
*
* @return an array containing objects of the type of each column. The objects
* should be Strings, or Doubles.
*/
@Override
public Object[] getResultTypes() {
Object[] resultTypes = m_SplitEvaluator.getResultTypes();
// Add in the types of our extra Result fields
Object[] newResultTypes = new Object[resultTypes.length + 1];
newResultTypes[0] = new Double(0);
System.arraycopy(resultTypes, 0, newResultTypes, 1, resultTypes.length);
return newResultTypes;
}
/**
* Gets a description of the internal settings of the result producer,
* sufficient for distinguishing a ResultProducer instance from another with
* different settings (ignoring those settings set through this interface).
* For example, a cross-validation ResultProducer may have a setting for the
* number of folds. For a given state, the results produced should be
* compatible. Typically if a ResultProducer is an OptionHandler, this string
* will represent the command line arguments required to set the
* ResultProducer to that state.
*
* @return the description of the ResultProducer state, or null if no state is
* defined
*/
@Override
public String getCompatibilityState() {
String result;
result = "";
if (getRandomizeData()) {
result += " -R";
}
result += " -dir " + getTestsetDir();
if (getTestsetPrefix().length() > 0) {
result += " -prefix " + getTestsetPrefix();
}
result += " -suffix " + getTestsetSuffix();
if (getRelationFind().length() > 0) {
result += " -find " + getRelationFind();
if (getRelationReplace().length() > 0) {
result += " -replace " + getRelationReplace();
}
}
if (m_SplitEvaluator == null) {
result += " <null SplitEvaluator>";
} else {
result += " -W " + m_SplitEvaluator.getClass().getName();
}
return result + " --";
}
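  // Example of the state string built above (hypothetical settings): with
  // randomization enabled, testsetDir=/data/tests, an empty prefix, suffix
  // ".arff" and a ClassifierSplitEvaluator, getCompatibilityState() returns
  //   " -R -dir /data/tests -suffix .arff -W weka.experiment.ClassifierSplitEvaluator --"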
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String outputFileTipText() {
return "Set the destination for saving raw output. If the rawOutput "
+ "option is selected, then output from the splitEvaluator for "
+ "individual train-test splits is saved. If the destination is a "
+ "directory, "
+ "then each output is saved to an individual gzip file; if the "
+ "destination is a file, then each output is saved as an entry "
+ "in a zip file.";
}
/**
* Get the value of OutputFile.
*
* @return Value of OutputFile.
*/
public File getOutputFile() {
return m_OutputFile;
}
/**
* Set the value of OutputFile.
*
* @param value Value to assign to OutputFile.
*/
public void setOutputFile(File value) {
m_OutputFile = value;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String randomizeDataTipText() {
return "Do not randomize dataset and do not perform probabilistic rounding "
+ "if true";
}
/**
* Get if dataset is to be randomized.
*
* @return true if dataset is to be randomized
*/
public boolean getRandomizeData() {
return m_randomize;
}
/**
* Set to true if dataset is to be randomized.
*
* @param value true if dataset is to be randomized
*/
public void setRandomizeData(boolean value) {
m_randomize = value;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String rawOutputTipText() {
return "Save raw output (useful for debugging). If set, then output is "
+ "sent to the destination specified by outputFile";
}
/**
* Get if raw split evaluator output is to be saved.
*
   * @return true if raw split evaluator output is to be saved
*/
public boolean getRawOutput() {
return m_debugOutput;
}
/**
* Set to true if raw split evaluator output is to be saved.
*
* @param value true if output is to be saved
*/
public void setRawOutput(boolean value) {
m_debugOutput = value;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String splitEvaluatorTipText() {
return "The evaluator to apply to the test data. "
+ "This may be a classifier, regression scheme etc.";
}
/**
* Get the SplitEvaluator.
*
* @return the SplitEvaluator.
*/
public SplitEvaluator getSplitEvaluator() {
return m_SplitEvaluator;
}
/**
* Set the SplitEvaluator.
*
* @param value new SplitEvaluator to use.
*/
public void setSplitEvaluator(SplitEvaluator value) {
m_SplitEvaluator = value;
m_SplitEvaluator.setAdditionalMeasures(m_AdditionalMeasures);
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String testsetDirTipText() {
return "The directory containing the test sets.";
}
/**
* Returns the currently set directory for the test sets.
*
* @return the directory
*/
public File getTestsetDir() {
return m_TestsetDir;
}
/**
* Sets the directory to use for the test sets.
*
* @param value the directory to use
*/
public void setTestsetDir(File value) {
m_TestsetDir = value;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String testsetPrefixTipText() {
return "The prefix to use for the filename of the test sets.";
}
/**
* Returns the currently set prefix.
*
* @return the prefix
*/
public String getTestsetPrefix() {
return m_TestsetPrefix;
}
/**
* Sets the prefix to use for the test sets.
*
* @param value the prefix
*/
public void setTestsetPrefix(String value) {
m_TestsetPrefix = value;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String testsetSuffixTipText() {
return "The suffix to use for the filename of the test sets - must contain "
+ "the file extension.";
}
/**
* Returns the currently set suffix.
*
* @return the suffix
*/
public String getTestsetSuffix() {
return m_TestsetSuffix;
}
/**
* Sets the suffix to use for the test sets.
*
* @param value the suffix
*/
public void setTestsetSuffix(String value) {
if ((value == null) || (value.length() == 0)) {
value = DEFAULT_SUFFIX;
}
m_TestsetSuffix = value;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String relationFindTipText() {
return "The regular expression to use for removing parts of the relation "
+ "name, ignored if empty.";
}
/**
* Returns the currently set regular expression to use on the relation name.
*
* @return the regular expression
*/
public String getRelationFind() {
return m_RelationFind;
}
/**
* Sets the regular expression to use on the relation name.
*
* @param value the regular expression
*/
public void setRelationFind(String value) {
m_RelationFind = value;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String relationReplaceTipText() {
return "The string to replace all matches of the regular expression with.";
}
/**
* Returns the currently set replacement string to use on the relation name.
*
* @return the replacement string
*/
public String getRelationReplace() {
return m_RelationReplace;
}
/**
* Sets the replacement string to use on the relation name.
*
* @param value the regular expression
*/
public void setRelationReplace(String value) {
m_RelationReplace = value;
}
/**
   * Gets a text description of the result producer.
*
* @return a text description of the result producer.
*/
@Override
public String toString() {
String result = "ExplicitTestsetResultProducer: ";
result += getCompatibilityState();
if (m_Instances == null) {
result += ": <null Instances>";
} else {
result += ": " + Utils.backQuoteChars(m_Instances.relationName());
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/InstanceQuery.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* InstanceQuery.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.File;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Date;
import java.util.Enumeration;
import java.util.Hashtable;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SparseInstance;
import weka.core.Utils;
/**
* Convert the results of a database query into instances. The jdbc driver and
* database to be used default to "jdbc.idbDriver" and
* "jdbc:idb=experiments.prp". These may be changed by creating a java
* properties file called DatabaseUtils.props in user.home or the current
* directory. eg:
* <p>
*
* <code><pre>
* jdbcDriver=jdbc.idbDriver
* jdbcURL=jdbc:idb=experiments.prp
* </pre></code>
* <p>
*
* Command line use just outputs the instances to System.out.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -Q <query>
* SQL query to execute.
* </pre>
*
* <pre>
* -S
* Return sparse rather than normal instances.
* </pre>
*
* <pre>
* -U <username>
* The username to use for connecting.
* </pre>
*
* <pre>
* -P <password>
* The password to use for connecting.
* </pre>
*
* <pre>
* -D
* Enables debug output.
* </pre>
*
* <!-- options-end -->
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class InstanceQuery extends DatabaseUtils implements OptionHandler,
InstanceQueryAdapter {
/** for serialization */
static final long serialVersionUID = 718158370917782584L;
/** Determines whether sparse data is created */
protected boolean m_CreateSparseData = false;
/** Query to execute */
protected String m_Query = "SELECT * from ?";
/** the custom props file to use instead of default one. */
protected File m_CustomPropsFile = null;
/**
* Sets up the database drivers
*
* @throws Exception if an error occurs
*/
public InstanceQuery() throws Exception {
super();
}
/**
* Returns an enumeration describing the available options
* <p>
*
* @return an enumeration of all options
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result = new Vector<Option>();
result.addElement(new Option("\tSQL query to execute.", "Q", 1,
"-Q <query>"));
result.addElement(new Option(
"\tReturn sparse rather than normal instances.", "S", 0, "-S"));
result.addElement(new Option("\tThe username to use for connecting.", "U",
1, "-U <username>"));
result.addElement(new Option("\tThe password to use for connecting.", "P",
1, "-P <password>"));
result.add(new Option(
"\tThe custom properties file to use instead of default ones,\n"
+ "\tcontaining the database parameters.\n" + "\t(default: none)",
"custom-props", 1, "-custom-props <file>"));
result.addElement(new Option("\tEnables debug output.", "D", 0, "-D"));
return result.elements();
}
/**
* Parses a given list of options.
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -Q <query>
* SQL query to execute.
* </pre>
*
* <pre>
* -S
* Return sparse rather than normal instances.
* </pre>
*
* <pre>
* -U <username>
* The username to use for connecting.
* </pre>
*
* <pre>
* -P <password>
* The password to use for connecting.
* </pre>
*
* <pre>
* -D
* Enables debug output.
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
setSparseData(Utils.getFlag('S', options));
tmpStr = Utils.getOption('Q', options);
if (tmpStr.length() != 0) {
setQuery(tmpStr);
}
tmpStr = Utils.getOption('U', options);
if (tmpStr.length() != 0) {
setUsername(tmpStr);
}
tmpStr = Utils.getOption('P', options);
if (tmpStr.length() != 0) {
setPassword(tmpStr);
}
tmpStr = Utils.getOption("custom-props", options);
if (tmpStr.length() == 0) {
setCustomPropsFile(null);
} else {
setCustomPropsFile(new File(tmpStr));
}
setDebug(Utils.getFlag('D', options));
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String queryTipText() {
return "The SQL query to execute against the database.";
}
/**
* Set the query to execute against the database
*
* @param q the query to execute
*/
public void setQuery(String q) {
m_Query = q;
}
/**
* Get the query to execute against the database
*
* @return the query
*/
public String getQuery() {
return m_Query;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String sparseDataTipText() {
return "Encode data as sparse instances.";
}
/**
* Sets whether data should be encoded as sparse instances
*
* @param s true if data should be encoded as a set of sparse instances
*/
public void setSparseData(boolean s) {
m_CreateSparseData = s;
}
/**
* Gets whether data is to be returned as a set of sparse instances
*
* @return true if data is to be encoded as sparse instances
*/
@Override
public boolean getSparseData() {
return m_CreateSparseData;
}
/**
* Sets the custom properties file to use.
*
* @param value the custom props file to load database parameters from, use
   *          null or a directory to disable custom properties.
* @see #initialize(File)
*/
public void setCustomPropsFile(File value) {
m_CustomPropsFile = value;
initialize(m_CustomPropsFile);
}
/**
* Returns the custom properties file in use, if any.
*
* @return the custom props file, null if none used
*/
public File getCustomPropsFile() {
return m_CustomPropsFile;
}
/**
* The tip text for this property.
*
* @return the tip text
*/
public String customPropsFileTipText() {
return "The custom properties that the user can use to override the default ones.";
}
/**
* Gets the current settings of InstanceQuery
*
* @return an array of strings suitable for passing to setOptions()
*/
@Override
public String[] getOptions() {
Vector<String> options = new Vector<String>();
options.add("-Q");
options.add(getQuery());
if (getSparseData()) {
options.add("-S");
}
if (!getUsername().equals("")) {
options.add("-U");
options.add(getUsername());
}
if (!getPassword().equals("")) {
options.add("-P");
options.add(getPassword());
}
if ((m_CustomPropsFile != null) && !m_CustomPropsFile.isDirectory()) {
options.add("-custom-props");
options.add(m_CustomPropsFile.toString());
}
if (getDebug()) {
options.add("-D");
}
return options.toArray(new String[options.size()]);
}
/**
* Makes a database query using the query set through the -Q option to convert
* a table into a set of instances
*
* @return the instances contained in the result of the query
* @throws Exception if an error occurs
*/
public Instances retrieveInstances() throws Exception {
return retrieveInstances(m_Query);
}
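  // Programmatic usage sketch (query and credentials are placeholders; the
  // command-line main() further below does the equivalent):
  //
  //   InstanceQuery iq = new InstanceQuery();
  //   iq.setUsername("user");                   // falls back to DatabaseUtils.props if omitted
  //   iq.setPassword("secret");
  //   iq.setQuery("SELECT * FROM results");
  //   Instances data = iq.retrieveInstances();  // null for INSERT/UPDATE/DELETE queries
  //   iq.disconnectFromDatabase();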
public static Instances retrieveInstances(InstanceQueryAdapter adapter,
ResultSet rs) throws Exception {
if (adapter.getDebug()) {
System.err.println("Getting metadata...");
}
ResultSetMetaData md = rs.getMetaData();
if (adapter.getDebug()) {
System.err.println("Completed getting metadata...");
}
// Determine structure of the instances
int numAttributes = md.getColumnCount();
int[] attributeTypes = new int[numAttributes];
@SuppressWarnings("unchecked")
Hashtable<String, Double>[] nominalIndexes = new Hashtable[numAttributes];
@SuppressWarnings("unchecked")
ArrayList<String>[] nominalStrings = new ArrayList[numAttributes];
for (int i = 1; i <= numAttributes; i++) {
/*
* switch (md.getColumnType(i)) { case Types.CHAR: case Types.VARCHAR:
* case Types.LONGVARCHAR: case Types.BINARY: case Types.VARBINARY: case
* Types.LONGVARBINARY:
*/
switch (adapter.translateDBColumnType(md.getColumnTypeName(i))) {
case STRING:
// System.err.println("String --> nominal");
attributeTypes[i - 1] = Attribute.NOMINAL;
nominalIndexes[i - 1] = new Hashtable<String, Double>();
nominalStrings[i - 1] = new ArrayList<String>();
break;
case TEXT:
// System.err.println("Text --> string");
attributeTypes[i - 1] = Attribute.STRING;
nominalIndexes[i - 1] = new Hashtable<String, Double>();
nominalStrings[i - 1] = new ArrayList<String>();
break;
case BOOL:
// System.err.println("boolean --> nominal");
attributeTypes[i - 1] = Attribute.NOMINAL;
nominalIndexes[i - 1] = new Hashtable<String, Double>();
nominalIndexes[i - 1].put("false", new Double(0));
nominalIndexes[i - 1].put("true", new Double(1));
nominalStrings[i - 1] = new ArrayList<String>();
nominalStrings[i - 1].add("false");
nominalStrings[i - 1].add("true");
break;
case DOUBLE:
// System.err.println("BigDecimal --> numeric");
attributeTypes[i - 1] = Attribute.NUMERIC;
break;
case BYTE:
// System.err.println("byte --> numeric");
attributeTypes[i - 1] = Attribute.NUMERIC;
break;
case SHORT:
// System.err.println("short --> numeric");
attributeTypes[i - 1] = Attribute.NUMERIC;
break;
case INTEGER:
// System.err.println("int --> numeric");
attributeTypes[i - 1] = Attribute.NUMERIC;
break;
case LONG:
// System.err.println("long --> numeric");
attributeTypes[i - 1] = Attribute.NUMERIC;
break;
case FLOAT:
// System.err.println("float --> numeric");
attributeTypes[i - 1] = Attribute.NUMERIC;
break;
case DATE:
attributeTypes[i - 1] = Attribute.DATE;
break;
case TIME:
attributeTypes[i - 1] = Attribute.DATE;
break;
case TIMESTAMP:
attributeTypes[i - 1] = Attribute.DATE;
break;
default:
// System.err.println("Unknown column type");
attributeTypes[i - 1] = Attribute.STRING;
}
}
// For sqlite
    // cache the column names here, because the while (rs.next()) loop over
    // the tuples below will close the md object:
Vector<String> columnNames = new Vector<String>();
for (int i = 0; i < numAttributes; i++) {
columnNames.add(md.getColumnLabel(i + 1));
}
// Step through the tuples
if (adapter.getDebug()) {
System.err.println("Creating instances...");
}
ArrayList<Instance> instances = new ArrayList<Instance>();
int rowCount = 0;
while (rs.next()) {
if (rowCount % 100 == 0) {
if (adapter.getDebug()) {
System.err.print("read " + rowCount + " instances \r");
System.err.flush();
}
}
double[] vals = new double[numAttributes];
for (int i = 1; i <= numAttributes; i++) {
/*
* switch (md.getColumnType(i)) { case Types.CHAR: case Types.VARCHAR:
* case Types.LONGVARCHAR: case Types.BINARY: case Types.VARBINARY: case
* Types.LONGVARBINARY:
*/
switch (adapter.translateDBColumnType(md.getColumnTypeName(i))) {
case STRING:
String str = rs.getString(i);
if (rs.wasNull()) {
vals[i - 1] = Utils.missingValue();
} else {
Double index = nominalIndexes[i - 1].get(str);
if (index == null) {
index = new Double(nominalStrings[i - 1].size());
nominalIndexes[i - 1].put(str, index);
nominalStrings[i - 1].add(str);
}
vals[i - 1] = index.doubleValue();
}
break;
case TEXT:
String txt = rs.getString(i);
if (rs.wasNull()) {
vals[i - 1] = Utils.missingValue();
} else {
Double index = nominalIndexes[i - 1].get(txt);
if (index == null) {
// Need to add one because first value in
// string attribute is dummy value.
index = new Double(nominalStrings[i - 1].size()) + 1;
nominalIndexes[i - 1].put(txt, index);
nominalStrings[i - 1].add(txt);
}
vals[i - 1] = index.doubleValue();
}
break;
case BOOL:
boolean boo = rs.getBoolean(i);
if (rs.wasNull()) {
vals[i - 1] = Utils.missingValue();
} else {
vals[i - 1] = (boo ? 1.0 : 0.0);
}
break;
case DOUBLE:
// BigDecimal bd = rs.getBigDecimal(i, 4);
double dd = rs.getDouble(i);
// Use the column precision instead of 4?
if (rs.wasNull()) {
vals[i - 1] = Utils.missingValue();
} else {
// newInst.setValue(i - 1, bd.doubleValue());
vals[i - 1] = dd;
}
break;
case BYTE:
byte by = rs.getByte(i);
if (rs.wasNull()) {
vals[i - 1] = Utils.missingValue();
} else {
vals[i - 1] = by;
}
break;
case SHORT:
short sh = rs.getShort(i);
if (rs.wasNull()) {
vals[i - 1] = Utils.missingValue();
} else {
vals[i - 1] = sh;
}
break;
case INTEGER:
int in = rs.getInt(i);
if (rs.wasNull()) {
vals[i - 1] = Utils.missingValue();
} else {
vals[i - 1] = in;
}
break;
case LONG:
long lo = rs.getLong(i);
if (rs.wasNull()) {
vals[i - 1] = Utils.missingValue();
} else {
vals[i - 1] = lo;
}
break;
case FLOAT:
float fl = rs.getFloat(i);
if (rs.wasNull()) {
vals[i - 1] = Utils.missingValue();
} else {
vals[i - 1] = fl;
}
break;
case DATE:
Date date = rs.getDate(i);
if (rs.wasNull()) {
vals[i - 1] = Utils.missingValue();
} else {
// TODO: Do a value check here.
vals[i - 1] = date.getTime();
}
break;
case TIME:
Time time = rs.getTime(i);
if (rs.wasNull()) {
vals[i - 1] = Utils.missingValue();
} else {
// TODO: Do a value check here.
vals[i - 1] = time.getTime();
}
break;
case TIMESTAMP:
Timestamp ts = rs.getTimestamp(i);
if (rs.wasNull()) {
vals[i - 1] = Utils.missingValue();
} else {
vals[i - 1] = ts.getTime();
}
break;
default:
vals[i - 1] = Utils.missingValue();
}
}
Instance newInst;
if (adapter.getSparseData()) {
newInst = new SparseInstance(1.0, vals);
} else {
newInst = new DenseInstance(1.0, vals);
}
instances.add(newInst);
rowCount++;
}
// disconnectFromDatabase(); (perhaps other queries might be made)
// Create the header and add the instances to the dataset
if (adapter.getDebug()) {
System.err.println("Creating header...");
}
ArrayList<Attribute> attribInfo = new ArrayList<Attribute>();
for (int i = 0; i < numAttributes; i++) {
/* Fix for databases that uppercase column names */
// String attribName = attributeCaseFix(md.getColumnName(i + 1));
String attribName = adapter.attributeCaseFix(columnNames.get(i));
switch (attributeTypes[i]) {
case Attribute.NOMINAL:
attribInfo.add(new Attribute(attribName, nominalStrings[i]));
break;
case Attribute.NUMERIC:
attribInfo.add(new Attribute(attribName));
break;
case Attribute.STRING:
Attribute att = new Attribute(attribName, (ArrayList<String>) null);
attribInfo.add(att);
for (int n = 0; n < nominalStrings[i].size(); n++) {
att.addStringValue(nominalStrings[i].get(n));
}
break;
case Attribute.DATE:
attribInfo.add(new Attribute(attribName, (String) null));
break;
default:
throw new Exception("Unknown attribute type");
}
}
Instances result = new Instances("QueryResult", attribInfo,
instances.size());
for (int i = 0; i < instances.size(); i++) {
result.add(instances.get(i));
}
return result;
}
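  // Illustrative summary of the conversion above: a column the adapter reports
  // as STRING becomes a NOMINAL attribute whose values are collected on the
  // fly, TEXT becomes a STRING attribute, numeric column types become NUMERIC,
  // and DATE/TIME/TIMESTAMP become DATE attributes holding millisecond times;
  // every row is then turned into one DenseInstance (or SparseInstance when
  // adapter.getSparseData() is true).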
/**
* Makes a database query to convert a table into a set of instances
*
* @param query the query to convert to instances
* @return the instances contained in the result of the query, NULL if the SQL
* query doesn't return a ResultSet, e.g., DELETE/INSERT/UPDATE
* @throws Exception if an error occurs
*/
public Instances retrieveInstances(String query) throws Exception {
if (m_Debug) {
System.err.println("Executing query: " + query);
}
connectToDatabase();
if (execute(query) == false) {
if (m_PreparedStatement.getUpdateCount() == -1) {
throw new Exception("Query didn't produce results");
} else {
if (m_Debug) {
System.err.println(m_PreparedStatement.getUpdateCount()
+ " rows affected.");
}
close();
return null;
}
}
ResultSet rs = getResultSet();
if (m_Debug) {
System.err.println("Getting metadata...");
}
Instances result = retrieveInstances(this, rs);
close(rs);
return result;
}
/**
* Test the class from the command line. The instance query should be
* specified with -Q sql_query
*
* @param args contains options for the instance query
*/
public static void main(String args[]) {
try {
InstanceQuery iq = new InstanceQuery();
String query = Utils.getOption('Q', args);
if (query.length() == 0) {
iq.setQuery("select * from Experiment_index");
} else {
iq.setQuery(query);
}
iq.setOptions(args);
try {
Utils.checkForRemainingOptions(args);
} catch (Exception e) {
System.err.println("Options for weka.experiment.InstanceQuery:\n");
Enumeration<Option> en = iq.listOptions();
while (en.hasMoreElements()) {
Option o = en.nextElement();
System.err.println(o.synopsis() + "\n" + o.description());
}
System.exit(1);
}
Instances aha = iq.retrieveInstances();
iq.disconnectFromDatabase();
// query returned no result -> exit
if (aha == null) {
return;
}
// The dataset may be large, so to make things easier we'll
// output an instance at a time (rather than having to convert
// the entire dataset to one large string)
System.out.println(new Instances(aha, 0));
for (int i = 0; i < aha.numInstances(); i++) {
System.out.println(aha.instance(i));
}
} catch (Exception e) {
e.printStackTrace();
System.err.println(e.getMessage());
}
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/InstanceQueryAdapter.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* InstanceQueryAdapter.java
* Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
/**
* An interface implemented by InstanceQuery and any user class that is
* to be passed as the first argument to
* InstanceQuery.retrieveInstances(InstanceQueryAdapter, ResultSet).
*
* @author Wes Munsil (wes_munsil@cytoanalytics.com)
* @version $Revision$
*/
public interface InstanceQueryAdapter
{
/**
* returns key column headings in their original case. Used for
* those databases that create uppercase column names.
*
* @param columnName the column to retrieve the original case for
* @return the original case
*/
public String attributeCaseFix(String columnName);
/**
   * Gets whether debugging output should be printed to stderr or not.
*
* @return true if output should be printed
*/
public boolean getDebug();
/**
* Gets whether data is to be returned as a set of sparse instances
* @return true if data is to be encoded as sparse instances
*/
public boolean getSparseData();
/**
* translates the column data type string to an integer value that indicates
* which data type / get()-Method to use in order to retrieve values from the
* database (see DatabaseUtils.Properties, InstanceQuery()). Blanks in the type
* are replaced with underscores "_", since Java property names can't contain blanks.
*
* @param type the column type as retrieved with
   *          java.sql.ResultSetMetaData.getColumnTypeName(int)
   * @return an integer value that indicates
   *         which data type / get()-Method to use in order to
   *         retrieve values from the database
*/
public int translateDBColumnType(String type);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/InstancesResultListener.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* InstancesResultListener.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Hashtable;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> Outputs the received results in arff format to a
* Writer. All results must be received before the instances can be written out.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -O <file name>
* The filename where output will be stored. Use - for stdout.
* (default temp file)
* </pre>
*
* <!-- options-end -->
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class InstancesResultListener extends CSVResultListener {
/** for serialization */
static final long serialVersionUID = -2203808461809311178L;
/** Stores the instances created so far, before assigning to a header */
protected transient ArrayList<Instance> m_Instances;
/** Stores the attribute types for each column */
protected transient int[] m_AttributeTypes;
/** For lookup of indices given a string value for each nominal attribute */
protected transient Hashtable<String, Double>[] m_NominalIndexes;
/** Contains strings seen so far for each nominal attribute */
protected transient ArrayList<String>[] m_NominalStrings;
/**
* Sets temporary file.
*/
public InstancesResultListener() {
File resultsFile;
try {
resultsFile = File.createTempFile("weka_experiment", ".arff");
resultsFile.deleteOnExit();
} catch (Exception e) {
System.err.println("Cannot create temp file, writing to standard out.");
resultsFile = new File("-");
}
setOutputFile(resultsFile);
setOutputFileName("");
}
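  // Usage sketch (file name is hypothetical): redirect the collected results
  // to a specific ARFF file instead of the temporary file chosen above.
  //
  //   InstancesResultListener listener = new InstancesResultListener();
  //   listener.setOutputFile(new File("experiment_results.arff"));
  //   // hand the listener to a ResultProducer / experiment; the ARFF file is
  //   // written once postProcess() is called after all results have arrived.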
/**
* Returns a string describing this result listener
*
* @return a description of the result listener suitable for displaying in the
* explorer/experimenter gui
*/
@Override
public String globalInfo() {
return "Outputs the received results in arff format to "
+ "a Writer. All results must be received before the instances can be "
+ "written out.";
}
/**
* Prepare for the results to be received.
*
* @param rp the ResultProducer that will generate the results
* @exception Exception if an error occurs during preprocessing.
*/
@SuppressWarnings("unchecked")
@Override
public void preProcess(ResultProducer rp) throws Exception {
m_RP = rp;
if ((m_OutputFile == null) || (m_OutputFile.getName().equals("-"))) {
m_Out = new PrintWriter(System.out, true);
} else {
m_Out = new PrintWriter(new BufferedOutputStream(new FileOutputStream(
m_OutputFile)), true);
}
Object[] keyTypes = m_RP.getKeyTypes();
Object[] resultTypes = m_RP.getResultTypes();
m_AttributeTypes = new int[keyTypes.length + resultTypes.length];
m_NominalIndexes = new Hashtable[m_AttributeTypes.length];
m_NominalStrings = new ArrayList[m_AttributeTypes.length];
m_Instances = new ArrayList<Instance>();
for (int i = 0; i < m_AttributeTypes.length; i++) {
Object attribute = null;
if (i < keyTypes.length) {
attribute = keyTypes[i];
} else {
attribute = resultTypes[i - keyTypes.length];
}
if (attribute instanceof String) {
m_AttributeTypes[i] = Attribute.NOMINAL;
m_NominalIndexes[i] = new Hashtable<String, Double>();
m_NominalStrings[i] = new ArrayList<String>();
} else if (attribute instanceof Double) {
m_AttributeTypes[i] = Attribute.NUMERIC;
} else {
throw new Exception("Unknown attribute type in column " + (i + 1));
}
}
}
/**
* Perform any postprocessing. When this method is called, it indicates that
* no more results will be sent that need to be grouped together in any way.
*
* @param rp the ResultProducer that generated the results
* @exception Exception if an error occurs
*/
@Override
public void postProcess(ResultProducer rp) throws Exception {
if (m_RP != rp) {
throw new Error("Unrecognized ResultProducer sending results!!");
}
String[] keyNames = m_RP.getKeyNames();
String[] resultNames = m_RP.getResultNames();
ArrayList<Attribute> attribInfo = new ArrayList<Attribute>();
for (int i = 0; i < m_AttributeTypes.length; i++) {
String attribName = "Unknown";
if (i < keyNames.length) {
attribName = "Key_" + keyNames[i];
} else {
attribName = resultNames[i - keyNames.length];
}
switch (m_AttributeTypes[i]) {
case Attribute.NOMINAL:
if (m_NominalStrings[i].size() > 0) {
attribInfo.add(new Attribute(attribName, m_NominalStrings[i]));
} else {
attribInfo.add(new Attribute(attribName, (ArrayList<String>) null));
}
break;
case Attribute.NUMERIC:
attribInfo.add(new Attribute(attribName));
break;
case Attribute.STRING:
attribInfo.add(new Attribute(attribName, (ArrayList<String>) null));
break;
default:
throw new Exception("Unknown attribute type");
}
}
Instances result = new Instances("InstanceResultListener", attribInfo,
m_Instances.size());
for (int i = 0; i < m_Instances.size(); i++) {
result.add(m_Instances.get(i));
}
m_Out.println(new Instances(result, 0));
for (int i = 0; i < result.numInstances(); i++) {
m_Out.println(result.instance(i));
}
if (!(m_OutputFile == null) && !(m_OutputFile.getName().equals("-"))) {
m_Out.close();
}
}
/**
* Collects each instance and adjusts the header information.
*
* @param rp the ResultProducer that generated the result
* @param key The key for the results.
* @param result The actual results.
* @exception Exception if the result could not be accepted.
*/
@Override
public void acceptResult(ResultProducer rp, Object[] key, Object[] result)
throws Exception {
if (m_RP != rp) {
throw new Error("Unrecognized ResultProducer sending results!!");
}
Instance newInst = new DenseInstance(m_AttributeTypes.length);
for (int i = 0; i < m_AttributeTypes.length; i++) {
Object val = null;
if (i < key.length) {
val = key[i];
} else {
val = result[i - key.length];
}
if (val == null) {
newInst.setValue(i, Utils.missingValue());
} else {
switch (m_AttributeTypes[i]) {
case Attribute.NOMINAL:
String str = (String) val;
Double index = m_NominalIndexes[i].get(str);
if (index == null) {
index = new Double(m_NominalStrings[i].size());
m_NominalIndexes[i].put(str, index);
m_NominalStrings[i].add(str);
}
newInst.setValue(i, index.doubleValue());
break;
case Attribute.NUMERIC:
double dou = ((Double) val).doubleValue();
newInst.setValue(i, dou);
break;
default:
newInst.setValue(i, Utils.missingValue());
}
}
}
m_Instances.add(newInst);
}
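  // Illustrative mapping performed above: for each position in the combined
  // key/result arrays, a String value is looked up (and, if unseen, appended)
  // in m_NominalIndexes/m_NominalStrings and stored as its nominal index, a
  // Double value is stored as a numeric value, and a null becomes a missing
  // value in the new instance.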
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
} // InstancesResultListener
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/LearningRateResultProducer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* LearningRateResultProducer.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.AdditionalMeasureProducer;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> Tells a sub-ResultProducer to reproduce the current
* run for varying sized subsamples of the dataset. Normally used with an
* AveragingResultProducer and CrossValidationResultProducer combo to generate
* learning curve results. For non-numeric result fields, the first value is
* used.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -X <num steps>
* The number of steps in the learning rate curve.
* (default 10)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a ResultProducer.
* eg: weka.experiment.CrossValidationResultProducer
* </pre>
*
* <pre>
* Options specific to result producer weka.experiment.AveragingResultProducer:
* </pre>
*
* <pre>
* -F <field name>
* The name of the field to average over.
* (default "Fold")
* </pre>
*
* <pre>
* -X <num results>
* The number of results expected per average.
* (default 10)
* </pre>
*
* <pre>
* -S
* Calculate standard deviations.
* (default only averages)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a ResultProducer.
* eg: weka.experiment.CrossValidationResultProducer
* </pre>
*
* <pre>
* Options specific to result producer weka.experiment.CrossValidationResultProducer:
* </pre>
*
* <pre>
* -X <number of folds>
* The number of folds to use for the cross-validation.
* (default 10)
* </pre>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
 *  If a directory name is specified then individual
 *  outputs will be gzipped, otherwise all output will be
 *  zipped to the named file. Use in conjunction with -D. (default splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the result producer.
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class LearningRateResultProducer implements ResultListener,
ResultProducer, OptionHandler, AdditionalMeasureProducer, RevisionHandler {
/** for serialization */
static final long serialVersionUID = -3841159673490861331L;
/** The dataset of interest */
protected Instances m_Instances;
/** The ResultListener to send results to */
protected ResultListener m_ResultListener = new CSVResultListener();
/** The ResultProducer used to generate results */
protected ResultProducer m_ResultProducer = new AveragingResultProducer();
/** The names of any additional measures to look for in SplitEvaluators */
protected String[] m_AdditionalMeasures = null;
/**
* The minimum number of instances to use. If this is zero, the first step
* will contain m_StepSize instances
*/
protected int m_LowerSize = 0;
/**
* The maximum number of instances to use. -1 indicates no maximum (other than
* the total number of instances)
*/
protected int m_UpperSize = -1;
/** The number of instances to add at each step */
protected int m_StepSize = 10;
/** The current dataset size during stepping */
protected int m_CurrentSize = 0;
/** The name of the key field containing the learning rate step number */
public static String STEP_FIELD_NAME = "Total_instances";
/**
* Returns a string describing this result producer
*
* @return a description of the result producer suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "Tells a sub-ResultProducer to reproduce the current run for "
+ "varying sized subsamples of the dataset. Normally used with "
+ "an AveragingResultProducer and CrossValidationResultProducer "
+ "combo to generate learning curve results. For non-numeric "
+ "result fields, the first value is used.";
}
/**
* Determines if there are any constraints (imposed by the destination) on the
* result columns to be produced by resultProducers. Null should be returned
* if there are NO constraints, otherwise a list of column names should be
* returned as an array of Strings.
*
* @param rp the ResultProducer to which the constraints will apply
   * @return an array of column names to which resultProducer's results will be
* restricted.
* @throws Exception if constraints can't be determined
*/
@Override
public String[] determineColumnConstraints(ResultProducer rp)
throws Exception {
return null;
}
/**
* Gets the keys for a specified run number. Different run numbers correspond
* to different randomizations of the data. Keys produced should be sent to
* the current ResultListener
*
* @param run the run number to get keys for.
* @throws Exception if a problem occurs while getting the keys
*/
@Override
public void doRunKeys(int run) throws Exception {
if (m_ResultProducer == null) {
throw new Exception("No ResultProducer set");
}
if (m_ResultListener == null) {
throw new Exception("No ResultListener set");
}
if (m_Instances == null) {
throw new Exception("No Instances set");
}
// Tell the resultproducer to send results to us
m_ResultProducer.setResultListener(this);
m_ResultProducer.setInstances(m_Instances);
// For each subsample size
if (m_LowerSize == 0) {
m_CurrentSize = m_StepSize;
} else {
m_CurrentSize = m_LowerSize;
}
while (m_CurrentSize <= m_Instances.numInstances()
&& ((m_UpperSize == -1) || (m_CurrentSize <= m_UpperSize))) {
m_ResultProducer.doRunKeys(run);
m_CurrentSize += m_StepSize;
}
}
/**
* Gets the results for a specified run number. Different run numbers
* correspond to different randomizations of the data. Results produced should
* be sent to the current ResultListener
*
* @param run the run number to get results for.
* @throws Exception if a problem occurs while getting the results
*/
@Override
public void doRun(int run) throws Exception {
if (m_ResultProducer == null) {
throw new Exception("No ResultProducer set");
}
if (m_ResultListener == null) {
throw new Exception("No ResultListener set");
}
if (m_Instances == null) {
throw new Exception("No Instances set");
}
// Randomize on a copy of the original dataset
Instances runInstances = new Instances(m_Instances);
runInstances.randomize(new Random(run));
/*
* if (runInstances.classAttribute().isNominal() &&
* (m_Instances.numInstances() / m_StepSize >= 1)) { //
* runInstances.stratify(m_Instances.numInstances() / m_StepSize); }
*/
// Tell the resultproducer to send results to us
m_ResultProducer.setResultListener(this);
// For each subsample size
if (m_LowerSize == 0) {
m_CurrentSize = m_StepSize;
} else {
m_CurrentSize = m_LowerSize;
}
while (m_CurrentSize <= m_Instances.numInstances()
&& ((m_UpperSize == -1) || (m_CurrentSize <= m_UpperSize))) {
m_ResultProducer.setInstances(new Instances(runInstances, 0,
m_CurrentSize));
m_ResultProducer.doRun(run);
m_CurrentSize += m_StepSize;
}
}
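  // Worked example of the stepping loop above (numbers are illustrative): with
  // 100 training instances, stepSize=10, lowerSize=0 and upperSize=-1 the
  // sub-ResultProducer is run on subsamples of size 10, 20, ..., 100; with
  // lowerSize=25 and upperSize=55 it would instead see 25, 35, 45 and 55.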
/**
* Prepare for the results to be received.
*
* @param rp the ResultProducer that will generate the results
* @throws Exception if an error occurs during preprocessing.
*/
@Override
public void preProcess(ResultProducer rp) throws Exception {
if (m_ResultListener == null) {
throw new Exception("No ResultListener set");
}
m_ResultListener.preProcess(this);
}
/**
* Prepare to generate results. The ResultProducer should call
* preProcess(this) on the ResultListener it is to send results to.
*
* @throws Exception if an error occurs during preprocessing.
*/
@Override
public void preProcess() throws Exception {
if (m_ResultProducer == null) {
throw new Exception("No ResultProducer set");
}
// Tell the resultproducer to send results to us
m_ResultProducer.setResultListener(this);
m_ResultProducer.preProcess();
}
/**
* When this method is called, it indicates that no more results will be sent
* that need to be grouped together in any way.
*
* @param rp the ResultProducer that generated the results
* @throws Exception if an error occurs
*/
@Override
public void postProcess(ResultProducer rp) throws Exception {
m_ResultListener.postProcess(this);
}
/**
* When this method is called, it indicates that no more requests to generate
* results for the current experiment will be sent. The ResultProducer should
* call preProcess(this) on the ResultListener it is to send results to.
*
* @throws Exception if an error occurs
*/
@Override
public void postProcess() throws Exception {
m_ResultProducer.postProcess();
}
/**
* Accepts results from a ResultProducer.
*
* @param rp the ResultProducer that generated the results
* @param key an array of Objects (Strings or Doubles) that uniquely identify
* a result for a given ResultProducer with given compatibilityState
* @param result the results stored in an array. The objects stored in the
* array may be Strings, Doubles, or null (for the missing value).
* @throws Exception if the result could not be accepted.
*/
@Override
public void acceptResult(ResultProducer rp, Object[] key, Object[] result)
throws Exception {
if (m_ResultProducer != rp) {
throw new Error("Unrecognized ResultProducer sending results!!");
}
// Add in current step as key field
Object[] newKey = new Object[key.length + 1];
System.arraycopy(key, 0, newKey, 0, key.length);
newKey[key.length] = new String("" + m_CurrentSize);
// Pass on to result listener
m_ResultListener.acceptResult(this, newKey, result);
}
/**
* Determines whether the results for a specified key must be generated.
*
* @param rp the ResultProducer wanting to generate the results
* @param key an array of Objects (Strings or Doubles) that uniquely identify
* a result for a given ResultProducer with given compatibilityState
* @return true if the result should be generated
* @throws Exception if it could not be determined if the result is needed.
*/
@Override
public boolean isResultRequired(ResultProducer rp, Object[] key)
throws Exception {
if (m_ResultProducer != rp) {
throw new Error("Unrecognized ResultProducer sending results!!");
}
// Add in current step as key field
Object[] newKey = new Object[key.length + 1];
System.arraycopy(key, 0, newKey, 0, key.length);
newKey[key.length] = new String("" + m_CurrentSize);
// Pass on request to result listener
return m_ResultListener.isResultRequired(this, newKey);
}
/**
* Gets the names of each of the columns produced for a single run.
*
* @return an array containing the name of each column
* @throws Exception if key names cannot be generated
*/
@Override
public String[] getKeyNames() throws Exception {
String[] keyNames = m_ResultProducer.getKeyNames();
String[] newKeyNames = new String[keyNames.length + 1];
System.arraycopy(keyNames, 0, newKeyNames, 0, keyNames.length);
// Think of a better name for this key field
newKeyNames[keyNames.length] = STEP_FIELD_NAME;
return newKeyNames;
}
/**
* Gets the data types of each of the columns produced for a single run. This
* method should really be static.
*
* @return an array containing objects of the type of each column. The objects
* should be Strings, or Doubles.
* @throws Exception if the key types could not be determined (perhaps because
* of a problem from a nested sub-resultproducer)
*/
@Override
public Object[] getKeyTypes() throws Exception {
Object[] keyTypes = m_ResultProducer.getKeyTypes();
Object[] newKeyTypes = new Object[keyTypes.length + 1];
System.arraycopy(keyTypes, 0, newKeyTypes, 0, keyTypes.length);
newKeyTypes[keyTypes.length] = "";
return newKeyTypes;
}
/**
   * Gets the names of each of the columns produced for a single run. These are
   * simply the result names of the sub-ResultProducer, passed through
   * unchanged.
*
* @return an array containing the name of each column
* @throws Exception if the result names could not be determined (perhaps
* because of a problem from a nested sub-resultproducer)
*/
@Override
public String[] getResultNames() throws Exception {
return m_ResultProducer.getResultNames();
}
/**
* Gets the data types of each of the columns produced for a single run.
*
* @return an array containing objects of the type of each column. The objects
* should be Strings, or Doubles.
* @throws Exception if the result types could not be determined (perhaps
* because of a problem from a nested sub-resultproducer)
*/
@Override
public Object[] getResultTypes() throws Exception {
return m_ResultProducer.getResultTypes();
}
/**
* Gets a description of the internal settings of the result producer,
* sufficient for distinguishing a ResultProducer instance from another with
* different settings (ignoring those settings set through this interface).
* For example, a cross-validation ResultProducer may have a setting for the
* number of folds. For a given state, the results produced should be
* compatible. Typically if a ResultProducer is an OptionHandler, this string
* will represent the command line arguments required to set the
* ResultProducer to that state.
*
* @return the description of the ResultProducer state, or null if no state is
* defined
*/
@Override
public String getCompatibilityState() {
String result = " ";
// + "-F " + Utils.quote(getKeyFieldName())
// + " -X " + getStepSize() + " ";
if (m_ResultProducer == null) {
result += "<null ResultProducer>";
} else {
result += "-W " + m_ResultProducer.getClass().getName();
result += " -- " + m_ResultProducer.getCompatibilityState();
}
return result.trim();
}
/**
   * Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(2);
newVector.addElement(new Option(
"\tThe number of steps in the learning rate curve.\n" + "\t(default 10)",
"X", 1, "-X <num steps>"));
newVector.addElement(new Option(
"\tThe full class name of a ResultProducer.\n"
+ "\teg: weka.experiment.CrossValidationResultProducer", "W", 1,
"-W <class name>"));
if ((m_ResultProducer != null)
&& (m_ResultProducer instanceof OptionHandler)) {
newVector.addElement(new Option("", "", 0,
"\nOptions specific to result producer "
+ m_ResultProducer.getClass().getName() + ":"));
newVector.addAll(Collections.list(((OptionHandler) m_ResultProducer)
.listOptions()));
}
return newVector.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -X <num steps>
* The number of steps in the learning rate curve.
* (default 10)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a ResultProducer.
* eg: weka.experiment.CrossValidationResultProducer
* </pre>
*
* <pre>
* Options specific to result producer weka.experiment.AveragingResultProducer:
* </pre>
*
* <pre>
* -F <field name>
* The name of the field to average over.
* (default "Fold")
* </pre>
*
* <pre>
* -X <num results>
* The number of results expected per average.
* (default 10)
* </pre>
*
* <pre>
* -S
* Calculate standard deviations.
* (default only averages)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a ResultProducer.
* eg: weka.experiment.CrossValidationResultProducer
* </pre>
*
* <pre>
* Options specific to result producer weka.experiment.CrossValidationResultProducer:
* </pre>
*
* <pre>
* -X <number of folds>
* The number of folds to use for the cross-validation.
* (default 10)
* </pre>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
   *  If a directory name is specified then individual
   *  outputs will be gzipped, otherwise all output will be
   *  zipped to the named file. Use in conjunction with -D. (default splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the result producer.
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String stepSize = Utils.getOption('S', options);
if (stepSize.length() != 0) {
setStepSize(Integer.parseInt(stepSize));
} else {
setStepSize(10);
}
String lowerSize = Utils.getOption('L', options);
if (lowerSize.length() != 0) {
setLowerSize(Integer.parseInt(lowerSize));
} else {
setLowerSize(0);
}
String upperSize = Utils.getOption('U', options);
if (upperSize.length() != 0) {
setUpperSize(Integer.parseInt(upperSize));
} else {
setUpperSize(-1);
}
String rpName = Utils.getOption('W', options);
if (rpName.length() == 0) {
throw new Exception("A ResultProducer must be specified with"
+ " the -W option.");
}
// Do it first without options, so if an exception is thrown during
// the option setting, listOptions will contain options for the actual
// RP.
setResultProducer((ResultProducer) Utils.forName(ResultProducer.class,
rpName, null));
if (getResultProducer() instanceof OptionHandler) {
((OptionHandler) getResultProducer()).setOptions(Utils
.partitionOptions(options));
}
}
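  /*
   * Illustrative sketch (not part of the original class): configuring this
   * producer programmatically, equivalent to the option parsing above. The
   * wrapped ResultProducer is only an example; the flags follow what
   * setOptions() actually reads (-S step size, -W wrapped producer), and the
   * lower/upper sizes default to 0 and -1 when -L/-U are omitted.
   *
   *   LearningRateResultProducer lrp = new LearningRateResultProducer();
   *   lrp.setOptions(new String[] {
   *     "-S", "100",
   *     "-W", "weka.experiment.AveragingResultProducer"
   *   });
   */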
/**
* Gets the current settings of the result producer.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
String[] seOptions = new String[0];
if ((m_ResultProducer != null)
&& (m_ResultProducer instanceof OptionHandler)) {
seOptions = ((OptionHandler) m_ResultProducer).getOptions();
}
String[] options = new String[seOptions.length + 9];
int current = 0;
options[current++] = "-S";
options[current++] = "" + getStepSize();
options[current++] = "-L";
options[current++] = "" + getLowerSize();
options[current++] = "-U";
options[current++] = "" + getUpperSize();
if (getResultProducer() != null) {
options[current++] = "-W";
options[current++] = getResultProducer().getClass().getName();
}
options[current++] = "--";
System.arraycopy(seOptions, 0, options, current, seOptions.length);
current += seOptions.length;
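    // pad any unused slots (e.g. when no ResultProducer has been set) with
    // empty strings so the array always has the declared length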
while (current < options.length) {
options[current++] = "";
}
return options;
}
/**
* Set a list of method names for additional measures to look for in
* SplitEvaluators. This could contain many measures (of which only a subset
   * may be producible by the current resultProducer) if an experiment is the
* type that iterates over a set of properties.
*
* @param additionalMeasures an array of measure names, null if none
*/
@Override
public void setAdditionalMeasures(String[] additionalMeasures) {
m_AdditionalMeasures = additionalMeasures;
if (m_ResultProducer != null) {
System.err.println("LearningRateResultProducer: setting additional "
+ "measures for " + "ResultProducer");
m_ResultProducer.setAdditionalMeasures(m_AdditionalMeasures);
}
}
/**
* Returns an enumeration of any additional measure names that might be in the
* result producer
*
* @return an enumeration of the measure names
*/
@Override
public Enumeration<String> enumerateMeasures() {
Vector<String> newVector = new Vector<String>();
if (m_ResultProducer instanceof AdditionalMeasureProducer) {
Enumeration<String> en = ((AdditionalMeasureProducer) m_ResultProducer)
.enumerateMeasures();
while (en.hasMoreElements()) {
String mname = en.nextElement();
newVector.add(mname);
}
}
return newVector.elements();
}
/**
* Returns the value of the named measure
*
* @param additionalMeasureName the name of the measure to query for its value
* @return the value of the named measure
* @throws IllegalArgumentException if the named measure is not supported
*/
@Override
public double getMeasure(String additionalMeasureName) {
if (m_ResultProducer instanceof AdditionalMeasureProducer) {
return ((AdditionalMeasureProducer) m_ResultProducer)
.getMeasure(additionalMeasureName);
} else {
throw new IllegalArgumentException("LearningRateResultProducer: "
+ "Can't return value for : " + additionalMeasureName + ". "
+ m_ResultProducer.getClass().getName() + " "
+ "is not an AdditionalMeasureProducer");
}
}
/**
* Sets the dataset that results will be obtained for.
*
* @param instances a value of type 'Instances'.
*/
@Override
public void setInstances(Instances instances) {
m_Instances = instances;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String lowerSizeTipText() {
return "Set the minmum number of instances in a dataset. Setting zero "
+ "here will actually use <stepSize> number of instances at the first "
+ "step (since it makes no sense to use zero instances :-))";
}
/**
* Get the value of LowerSize.
*
* @return Value of LowerSize.
*/
public int getLowerSize() {
return m_LowerSize;
}
/**
* Set the value of LowerSize.
*
* @param newLowerSize Value to assign to LowerSize.
*/
public void setLowerSize(int newLowerSize) {
m_LowerSize = newLowerSize;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String upperSizeTipText() {
return "Set the maximum number of instances in a dataset. Setting -1 "
+ "sets no upper limit (other than the total number of instances "
+ "in the full dataset)";
}
/**
* Get the value of UpperSize.
*
* @return Value of UpperSize.
*/
public int getUpperSize() {
return m_UpperSize;
}
/**
* Set the value of UpperSize.
*
* @param newUpperSize Value to assign to UpperSize.
*/
public void setUpperSize(int newUpperSize) {
m_UpperSize = newUpperSize;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String stepSizeTipText() {
return "Set the number of instances to add at each step.";
}
/**
* Get the value of StepSize.
*
* @return Value of StepSize.
*/
public int getStepSize() {
return m_StepSize;
}
/**
* Set the value of StepSize.
*
* @param newStepSize Value to assign to StepSize.
*/
public void setStepSize(int newStepSize) {
m_StepSize = newStepSize;
}
/**
* Sets the object to send results of each run to.
*
* @param listener a value of type 'ResultListener'
*/
@Override
public void setResultListener(ResultListener listener) {
m_ResultListener = listener;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String resultProducerTipText() {
return "Set the resultProducer for which learning rate results should be "
+ "generated.";
}
/**
* Get the ResultProducer.
*
* @return the ResultProducer.
*/
public ResultProducer getResultProducer() {
return m_ResultProducer;
}
/**
* Set the ResultProducer.
*
* @param newResultProducer new ResultProducer to use.
*/
public void setResultProducer(ResultProducer newResultProducer) {
m_ResultProducer = newResultProducer;
m_ResultProducer.setResultListener(this);
}
/**
   * Gets a text description of the result producer.
*
* @return a text description of the result producer.
*/
@Override
public String toString() {
String result = "LearningRateResultProducer: ";
result += getCompatibilityState();
if (m_Instances == null) {
result += ": <null Instances>";
} else {
result += ": " + Utils.backQuoteChars(m_Instances.relationName());
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
} // LearningRateResultProducer
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/OutputZipper.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* OutputZipper.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.util.zip.GZIPOutputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
/**
* OutputZipper writes output to either gzipped files or to a
* multi entry zip file. If the destination file is a directory
* each output string will be written to an individually named
* gzip file. If the destination file is a file, then each
* output string is appended as a named entry to the zip file until
* finished() is called to close the file.
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public class OutputZipper
implements RevisionHandler {
File m_destination;
DataOutputStream m_zipOut = null;
ZipOutputStream m_zs = null;
/**
* Constructor.
*
* @param destination a destination file or directory
* @throws Exception if something goes wrong.
*/
public OutputZipper(File destination) throws Exception {
m_destination = destination;
// if a directory is specified then use gzip format, otherwise
// use zip
if (!m_destination.isDirectory()) {
m_zs = new ZipOutputStream(new FileOutputStream(m_destination));
m_zipOut = new DataOutputStream(m_zs);
}
}
/**
* Saves a string to either an individual gzipped file or as
* an entry in a zip file.
*
* @param outString the output string to save
* @param name the name of the file/entry to save it to
* @throws Exception if something goes wrong
*/
public void zipit(String outString, String name) throws Exception {
File saveFile;
ZipEntry ze;
if (m_zipOut == null) {
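      // destination is a directory: write this output string to its own
      // gzipped file named "<name>.gz"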
saveFile = new File(m_destination, name+".gz");
DataOutputStream dout =
new DataOutputStream(new GZIPOutputStream(
new FileOutputStream(saveFile)));
dout.writeBytes(outString);
dout.close();
} else {
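      // destination is a single file: append the output string as a named
      // entry to the shared zip stream (closed later by finished())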
ze = new ZipEntry(name);
m_zs.putNextEntry(ze);
m_zipOut.writeBytes(outString);
m_zs.closeEntry();
}
}
/**
* Closes the zip file.
*
* @throws Exception if something goes wrong
*/
public void finished() throws Exception {
if (m_zipOut != null) {
m_zipOut.close();
}
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method for testing this class
*/
public static void main(String [] args) {
try {
File testF = new File(new File(System.getProperty("user.dir")),
"testOut.zip");
OutputZipper oz = new OutputZipper(testF);
/* OutputZipper oz = new OutputZipper(
new File(System.getProperty("user.dir"))); */
oz.zipit("Here is some test text to be zipped","testzip");
oz.zipit("Here is a second entry to be zipped","testzip2");
oz.finished();
} catch (Exception ex) {
ex.printStackTrace();
System.err.println(ex.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/PairedCorrectedTTester.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* PairedCorrectedTTester.java
* Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.BufferedReader;
import java.io.FileReader;
import java.util.ArrayList;
import java.util.Enumeration;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
/**
* Behaves the same as PairedTTester, only it uses the corrected resampled
* t-test statistic.
* <p/>
*
* For more information see:
* <p/>
*
* <!-- technical-plaintext-start --> Claude Nadeau, Yoshua Bengio (2001).
 * Inference for the Generalization Error. Machine Learning. <!--
* technical-plaintext-end -->
*
* <p/>
*
* <!-- technical-bibtex-start --> BibTeX:
*
* <pre>
* @article{Nadeau2001,
* author = {Claude Nadeau and Yoshua Bengio},
* journal = {Machine Learning},
* title = {Inference for the Generalization Error},
* year = {2001},
* PDF = {http://www.iro.umontreal.ca/\~lisa/bib/pub_subject/comparative/pointeurs/nadeau_MLJ1597.pdf}
* }
* </pre>
* <p/>
* <!-- technical-bibtex-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D <index,index2-index4,...>
* Specify list of columns that specify a unique
* dataset.
* First and last are valid indexes. (default none)
* </pre>
*
* <pre>
* -R <index>
* Set the index of the column containing the run number
* </pre>
*
* <pre>
* -F <index>
* Set the index of the column containing the fold number
* </pre>
*
* <pre>
* -G <index1,index2-index4,...>
* Specify list of columns that specify a unique
* 'result generator' (eg: classifier name and options).
* First and last are valid indexes. (default none)
* </pre>
*
* <pre>
* -S <significance level>
* Set the significance level for comparisons (default 0.05)
* </pre>
*
* <pre>
* -V
* Show standard deviations
* </pre>
*
* <pre>
* -L
* Produce table comparisons in Latex table format
* </pre>
*
* <pre>
* -csv
* Produce table comparisons in CSV table format
* </pre>
*
* <pre>
* -html
* Produce table comparisons in HTML table format
* </pre>
*
* <pre>
* -significance
* Produce table comparisons with only the significance values
* </pre>
*
* <pre>
* -gnuplot
* Produce table comparisons output suitable for GNUPlot
* </pre>
*
* <!-- options-end -->
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @version $Revision$
*/
public class PairedCorrectedTTester extends PairedTTester implements
TechnicalInformationHandler {
/** for serialization */
static final long serialVersionUID = -3105268939845653323L;
/**
* Returns an instance of a TechnicalInformation object, containing detailed
* information about the technical background of this class, e.g., paper
* reference or book this class is based on.
*
* @return the technical information about this class
*/
@Override
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
result = new TechnicalInformation(Type.ARTICLE);
result.setValue(Field.AUTHOR, "Claude Nadeau and Yoshua Bengio");
result.setValue(Field.YEAR, "2001");
result.setValue(Field.TITLE, "Inference for the Generalization Error");
result.setValue(Field.JOURNAL, "Machine Learning");
result
.setValue(
Field.PDF,
"http://www.iro.umontreal.ca/~lisa/bib/pub_subject/comparative/pointeurs/nadeau_MLJ1597.pdf");
return result;
}
/**
* Computes a paired t-test comparison for a specified dataset between two
* resultsets.
*
* @param datasetSpecifier the dataset specifier
* @param resultset1Index the index of the first resultset
* @param resultset2Index the index of the second resultset
* @param comparisonColumn the column containing values to compare
* @return the results of the paired comparison
* @throws Exception if an error occurs
*/
@Override
public PairedStats calculateStatistics(Instance datasetSpecifier,
int resultset1Index, int resultset2Index, int comparisonColumn)
throws Exception {
if (m_Instances.attribute(comparisonColumn).type() != Attribute.NUMERIC) {
throw new Exception("Comparison column " + (comparisonColumn + 1) + " ("
+ m_Instances.attribute(comparisonColumn).name() + ") is not numeric");
}
if (!m_ResultsetsValid) {
prepareData();
}
Resultset resultset1 = (Resultset) m_Resultsets.get(resultset1Index);
Resultset resultset2 = (Resultset) m_Resultsets.get(resultset2Index);
ArrayList<Instance> dataset1 = resultset1.dataset(datasetSpecifier);
ArrayList<Instance> dataset2 = resultset2.dataset(datasetSpecifier);
String datasetName = templateString(datasetSpecifier);
if (dataset1 == null) {
throw new Exception("No results for dataset=" + datasetName
+ " for resultset=" + resultset1.templateString());
} else if (dataset2 == null) {
throw new Exception("No results for dataset=" + datasetName
+ " for resultset=" + resultset2.templateString());
} else if (dataset1.size() != dataset2.size()) {
throw new Exception("Results for dataset=" + datasetName
+ " differ in size for resultset=" + resultset1.templateString()
+ " and resultset=" + resultset2.templateString());
}
// calculate the test/train ratio
double testTrainRatio = 0.0;
int trainSizeIndex = -1;
int testSizeIndex = -1;
// find the columns with the train/test sizes
for (int i = 0; i < m_Instances.numAttributes(); i++) {
if (m_Instances.attribute(i).name().toLowerCase()
.equals("number_of_training_instances")) {
trainSizeIndex = i;
} else if (m_Instances.attribute(i).name().toLowerCase()
.equals("number_of_testing_instances")) {
testSizeIndex = i;
}
}
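    // the correction uses the ratio of the total number of test instances to
    // the total number of training instances, summed over all results for
    // this dataset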
if (trainSizeIndex >= 0 && testSizeIndex >= 0) {
double totalTrainSize = 0.0;
double totalTestSize = 0.0;
for (int k = 0; k < dataset1.size(); k++) {
Instance current = dataset1.get(k);
totalTrainSize += current.value(trainSizeIndex);
totalTestSize += current.value(testSizeIndex);
}
testTrainRatio = totalTestSize / totalTrainSize;
}
PairedStats pairedStats = new PairedStatsCorrected(m_SignificanceLevel,
testTrainRatio);
for (int k = 0; k < dataset1.size(); k++) {
Instance current1 = dataset1.get(k);
Instance current2 = dataset2.get(k);
if (current1.isMissing(comparisonColumn)) {
System.err.println("Instance has missing value in comparison "
+ "column!\n" + current1);
continue;
}
if (current2.isMissing(comparisonColumn)) {
System.err.println("Instance has missing value in comparison "
+ "column!\n" + current2);
continue;
}
if (current1.value(m_RunColumn) != current2.value(m_RunColumn)) {
System.err.println("Run numbers do not match!\n" + current1 + current2);
}
if (m_FoldColumn != -1) {
if (current1.value(m_FoldColumn) != current2.value(m_FoldColumn)) {
System.err.println("Fold numbers do not match!\n" + current1
+ current2);
}
}
double value1 = current1.value(comparisonColumn);
double value2 = current2.value(comparisonColumn);
pairedStats.add(value1, value2);
}
pairedStats.calculateDerived();
return pairedStats;
}
/**
* Test the class from the command line.
*
   * @param args contains options for the instance t-tests
*/
public static void main(String args[]) {
try {
PairedCorrectedTTester tt = new PairedCorrectedTTester();
String datasetName = Utils.getOption('t', args);
String compareColStr = Utils.getOption('c', args);
String baseColStr = Utils.getOption('b', args);
boolean summaryOnly = Utils.getFlag('s', args);
boolean rankingOnly = Utils.getFlag('r', args);
try {
if ((datasetName.length() == 0) || (compareColStr.length() == 0)) {
throw new Exception("-t and -c options are required");
}
tt.setOptions(args);
Utils.checkForRemainingOptions(args);
} catch (Exception ex) {
String result = "";
Enumeration<Option> enu = tt.listOptions();
while (enu.hasMoreElements()) {
Option option = enu.nextElement();
result += option.synopsis() + '\n' + option.description() + '\n';
}
throw new Exception("Usage:\n\n" + "-t <file>\n"
+ "\tSet the dataset containing data to evaluate\n" + "-b <index>\n"
+ "\tSet the resultset to base comparisons against (optional)\n"
+ "-c <index>\n" + "\tSet the column to perform a comparison on\n"
+ "-s\n" + "\tSummarize wins over all resultset pairs\n\n" + "-r\n"
+ "\tGenerate a resultset ranking\n\n" + result);
}
Instances data = new Instances(new BufferedReader(new FileReader(
datasetName)));
tt.setInstances(data);
// tt.prepareData();
int compareCol = Integer.parseInt(compareColStr) - 1;
System.out.println(tt.header(compareCol));
if (rankingOnly) {
System.out.println(tt.multiResultsetRanking(compareCol));
} else if (summaryOnly) {
System.out.println(tt.multiResultsetSummary(compareCol));
} else {
System.out.println(tt.resultsetKey());
if (baseColStr.length() == 0) {
for (int i = 0; i < tt.getNumResultsets(); i++) {
System.out.println(tt.multiResultsetFull(i, compareCol));
}
} else {
int baseCol = Integer.parseInt(baseColStr) - 1;
System.out.println(tt.multiResultsetFull(baseCol, compareCol));
}
}
} catch (Exception e) {
e.printStackTrace();
System.err.println(e.getMessage());
}
}
/**
* returns the name of the tester
*
* @return the display name
*/
@Override
public String getDisplayName() {
return "Paired T-Tester (corrected)";
}
/**
* returns a string that is displayed as tooltip on the "perform test" button
* in the experimenter
*
* @return the string for the tool tip
*/
@Override
public String getToolTipText() {
return "Performs test using corrected resampled t-test statistic (Nadeau and Bengio)";
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/PairedStats.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* PairedStats.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Statistics;
import weka.core.Utils;
/**
* A class for storing stats on a paired comparison (t-test and correlation)
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class PairedStats
implements RevisionHandler {
/** The stats associated with the data in column 1 */
public Stats xStats;
/** The stats associated with the data in column 2 */
public Stats yStats;
/** The stats associated with the paired differences */
public Stats differencesStats;
/** The probability of obtaining the observed differences */
public double differencesProbability;
/** The correlation coefficient */
public double correlation;
/** The sum of the products */
public double xySum;
/** The number of data points seen */
public double count;
/**
* A significance indicator:
* 0 if the differences are not significant
* > 0 if x significantly greater than y
* < 0 if x significantly less than y
*/
public int differencesSignificance;
/** The significance level for comparisons */
public double sigLevel;
/** The degrees of freedom (if set programmatically) */
protected int m_degreesOfFreedom = 0;
/**
* Creates a new PairedStats object with the supplied significance level.
*
* @param sig the significance level for comparisons
*/
public PairedStats(double sig) {
xStats = new Stats();
yStats = new Stats();
differencesStats = new Stats();
sigLevel = sig;
}
  /**
   * Sets the degrees of freedom (if calibration is required).
   *
   * @param d the degrees of freedom (must be at least 1)
   */
public void setDegreesOfFreedom(int d) {
if (d <= 0) {
throw new IllegalArgumentException("PairedStats: degrees of freedom must be >= 1");
}
m_degreesOfFreedom = d;
}
  /**
   * Gets the degrees of freedom.
   *
   * @return the degrees of freedom
   */
public int getDegreesOfFreedom() {
return m_degreesOfFreedom;
}
/**
* Add an observed pair of values.
*
* @param value1 the value from column 1
* @param value2 the value from column 2
*/
public void add(double value1, double value2) {
xStats.add(value1);
yStats.add(value2);
differencesStats.add(value1 - value2);
xySum += value1 * value2;
count ++;
}
/**
* Removes an observed pair of values.
*
* @param value1 the value from column 1
* @param value2 the value from column 2
*/
public void subtract(double value1, double value2) {
xStats.subtract(value1);
yStats.subtract(value2);
differencesStats.subtract(value1 - value2);
xySum -= value1 * value2;
count --;
}
/**
   * Adds an array of observed pairs of values.
*
* @param value1 the array containing values from column 1
* @param value2 the array containing values from column 2
*/
public void add(double value1[], double value2[]) {
if ((value1 == null) || (value2 == null)) {
throw new NullPointerException();
}
if (value1.length != value2.length) {
throw new IllegalArgumentException("Arrays must be of the same length");
}
for (int i = 0; i < value1.length; i++) {
add(value1[i], value2[i]);
}
}
/**
   * Removes an array of observed pairs of values.
*
* @param value1 the array containing values from column 1
* @param value2 the array containing values from column 2
*/
public void subtract(double value1[], double value2[]) {
if ((value1 == null) || (value2 == null)) {
throw new NullPointerException();
}
if (value1.length != value2.length) {
throw new IllegalArgumentException("Arrays must be of the same length");
}
for (int i = 0; i < value1.length; i++) {
subtract(value1[i], value2[i]);
}
}
/**
* Calculates the derived statistics (significance etc).
*/
public void calculateDerived() {
xStats.calculateDerived();
yStats.calculateDerived();
differencesStats.calculateDerived();
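    // Sample (Pearson) correlation of the paired values:
    //   r = (sum(x*y) - sum(x)*sum(y)/k) / ((k - 1) * sd(x) * sd(y))
    // only defined when both standard deviations are positive and k > 1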
correlation = Double.NaN;
if (!Double.isNaN(xStats.stdDev) && !Double.isNaN(yStats.stdDev)
&& (xStats.stdDev > 0) && (yStats.stdDev > 0) && (count > 1)) {
correlation = (xySum - xStats.sum * yStats.sum / count)
/ ((count - 1) * xStats.stdDev * yStats.stdDev);
}
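    // Standard paired t statistic: t = mean(d) * sqrt(k) / sd(d), where d are
    // the paired differences and k is the number of pairs; the two-tailed
    // p-value is obtained below via t^2 ~ F(1, df)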
if (differencesStats.stdDev > 0) {
double tval = differencesStats.mean
* Math.sqrt(count)
/ differencesStats.stdDev;
if (m_degreesOfFreedom >= 1) {
differencesProbability = Statistics.FProbability(tval * tval, 1,
m_degreesOfFreedom);
} else {
if (count > 1) {
differencesProbability = Statistics.FProbability(tval * tval, 1,
(int) count - 1);
} else {
differencesProbability = 1;
}
}
} else {
if (differencesStats.sumSq == 0) {
differencesProbability = 1.0;
} else {
differencesProbability = 0.0;
}
}
differencesSignificance = 0;
if (differencesProbability <= sigLevel) {
if (xStats.mean > yStats.mean) {
differencesSignificance = 1;
} else {
differencesSignificance = -1;
}
}
}
/**
* Returns statistics on the paired comparison.
*
* @return the t-test statistics as a string
*/
public String toString() {
return "Analysis for " + Utils.doubleToString(count, 0)
+ " points:\n"
+ " "
+ " Column 1"
+ " Column 2"
+ " Difference\n"
+ "Minimums "
+ Utils.doubleToString(xStats.min, 17, 4)
+ Utils.doubleToString(yStats.min, 17, 4)
+ Utils.doubleToString(differencesStats.min, 17, 4) + '\n'
+ "Maximums "
+ Utils.doubleToString(xStats.max, 17, 4)
+ Utils.doubleToString(yStats.max, 17, 4)
+ Utils.doubleToString(differencesStats.max, 17, 4) + '\n'
+ "Sums "
+ Utils.doubleToString(xStats.sum, 17, 4)
+ Utils.doubleToString(yStats.sum, 17, 4)
+ Utils.doubleToString(differencesStats.sum, 17, 4) + '\n'
+ "SumSquares "
+ Utils.doubleToString(xStats.sumSq, 17, 4)
+ Utils.doubleToString(yStats.sumSq, 17, 4)
+ Utils.doubleToString(differencesStats.sumSq, 17, 4) + '\n'
+ "Means "
+ Utils.doubleToString(xStats.mean, 17, 4)
+ Utils.doubleToString(yStats.mean, 17, 4)
+ Utils.doubleToString(differencesStats.mean, 17, 4) + '\n'
+ "SDs "
+ Utils.doubleToString(xStats.stdDev, 17, 4)
+ Utils.doubleToString(yStats.stdDev, 17, 4)
+ Utils.doubleToString(differencesStats.stdDev, 17, 4) + '\n'
+ "Prob(differences) "
+ Utils.doubleToString(differencesProbability, 4)
+ " (sigflag " + differencesSignificance + ")\n"
+ "Correlation "
+ Utils.doubleToString(correlation,4) + "\n";
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Tests the paired stats object from the command line.
   * Reads lines from stdin, expecting two values per line.
*
* @param args ignored.
*/
public static void main(String [] args) {
try {
PairedStats ps = new PairedStats(0.05);
java.io.LineNumberReader r = new java.io.LineNumberReader(
new java.io.InputStreamReader(System.in));
String line;
while ((line = r.readLine()) != null) {
line = line.trim();
if (line.equals("") || line.startsWith("@") || line.startsWith("%")) {
continue;
}
java.util.StringTokenizer s
= new java.util.StringTokenizer(line, " ,\t\n\r\f");
int count = 0;
double v1 = 0, v2 = 0;
while (s.hasMoreTokens()) {
          double val = Double.parseDouble(s.nextToken());
if (count == 0) {
v1 = val;
} else if (count == 1) {
v2 = val;
} else {
System.err.println("MSG: Too many values in line \""
+ line + "\", skipped.");
break;
}
count++;
}
if (count == 2) {
ps.add(v1, v2);
}
}
ps.calculateDerived();
System.err.println(ps);
} catch (Exception ex) {
ex.printStackTrace();
System.err.println(ex.getMessage());
}
}
} // PairedStats
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/PairedStatsCorrected.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* PairedStatsCorrected.java
* Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import weka.core.RevisionUtils;
import weka.core.Statistics;
import weka.core.Utils;
/**
* A class for storing stats on a paired comparison. This version is
* based on the corrected resampled t-test statistic, which uses the
 * ratio of the number of test examples to the number of training examples.<p>
*
* For more information see:<p>
*
* Claude Nadeau and Yoshua Bengio, "Inference for the Generalization Error,"
* Machine Learning, 2001.
*
* @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
* @version $Revision$
*/
public class PairedStatsCorrected
extends PairedStats {
/** The ratio used to correct the significance test */
protected double m_testTrainRatio;
/**
* Creates a new PairedStatsCorrected object with the supplied
* significance level and train/test ratio.
*
* @param sig the significance level for comparisons
   * @param testTrainRatio the ratio of the number of test examples to the
   *        number of training examples
*/
public PairedStatsCorrected(double sig, double testTrainRatio) {
super(sig);
m_testTrainRatio = testTrainRatio;
}
/**
* Calculates the derived statistics (significance etc).
*/
public void calculateDerived() {
xStats.calculateDerived();
yStats.calculateDerived();
differencesStats.calculateDerived();
correlation = Double.NaN;
if (!Double.isNaN(xStats.stdDev) && !Double.isNaN(yStats.stdDev)
&& (xStats.stdDev > 0) && (yStats.stdDev > 0) && (count > 1)) {
correlation = (xySum - xStats.sum * yStats.sum / count)
/ ((count - 1) * xStats.stdDev * yStats.stdDev);
}
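    // Corrected resampled t statistic (Nadeau & Bengio, 2001):
    //   t = mean(d) / sqrt((1/k + n_test/n_train) * var(d))
    // where k is the number of paired results and n_test/n_train is the
    // test/train ratio supplied to the constructor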
if (differencesStats.stdDev > 0) {
double tval = differencesStats.mean
/ Math.sqrt((1 / count + m_testTrainRatio)
* differencesStats.stdDev * differencesStats.stdDev);
if (count > 1) {
differencesProbability = Statistics.FProbability(tval * tval, 1,
(int) count - 1);
      } else {
        differencesProbability = 1;
      }
} else {
if (differencesStats.sumSq == 0) {
differencesProbability = 1.0;
} else {
differencesProbability = 0.0;
}
}
differencesSignificance = 0;
if (differencesProbability <= sigLevel) {
if (xStats.mean > yStats.mean) {
differencesSignificance = 1;
} else {
differencesSignificance = -1;
}
}
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Tests the paired stats object from the command line.
   * Reads lines from stdin, expecting two values per line.
*
* @param args ignored.
*/
public static void main(String [] args) {
try {
PairedStatsCorrected ps = new PairedStatsCorrected(0.05, 1.0 / 9.0);
java.io.LineNumberReader r = new java.io.LineNumberReader(
new java.io.InputStreamReader(System.in));
String line;
while ((line = r.readLine()) != null) {
line = line.trim();
if (line.equals("") || line.startsWith("@") || line.startsWith("%")) {
continue;
}
java.util.StringTokenizer s
= new java.util.StringTokenizer(line, " ,\t\n\r\f");
int count = 0;
double v1 = 0, v2 = 0;
while (s.hasMoreTokens()) {
          double val = Double.parseDouble(s.nextToken());
if (count == 0) {
v1 = val;
} else if (count == 1) {
v2 = val;
} else {
System.err.println("MSG: Too many values in line \""
+ line + "\", skipped.");
break;
}
count++;
}
if (count == 2) {
ps.add(v1, v2);
}
}
ps.calculateDerived();
System.err.println(ps);
} catch (Exception ex) {
ex.printStackTrace();
System.err.println(ex.getMessage());
}
  }
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/PairedTTester.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* PairedTTester.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.Serializable;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Range;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* Calculates T-Test statistics on data stored in a set of instances.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D <index,index2-index4,...>
* Specify list of columns that specify a unique
* dataset.
* First and last are valid indexes. (default none)
* </pre>
*
* <pre>
* -R <index>
* Set the index of the column containing the run number
* </pre>
*
* <pre>
* -F <index>
* Set the index of the column containing the fold number
* </pre>
*
* <pre>
* -G <index1,index2-index4,...>
* Specify list of columns that specify a unique
* 'result generator' (eg: classifier name and options).
* First and last are valid indexes. (default none)
* </pre>
*
* <pre>
* -S <significance level>
* Set the significance level for comparisons (default 0.05)
* </pre>
*
* <pre>
* -V
* Show standard deviations
* </pre>
*
* <pre>
* -L
* Produce table comparisons in Latex table format
* </pre>
*
* <pre>
* -csv
* Produce table comparisons in CSV table format
* </pre>
*
* <pre>
* -html
* Produce table comparisons in HTML table format
* </pre>
*
* <pre>
* -significance
* Produce table comparisons with only the significance values
* </pre>
*
* <pre>
* -gnuplot
* Produce table comparisons output suitable for GNUPlot
* </pre>
*
* <!-- options-end -->
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class PairedTTester implements OptionHandler, Tester, RevisionHandler {
/** for serialization */
static final long serialVersionUID = 8370014624008728610L;
/** The set of instances we will analyse */
protected Instances m_Instances;
/** The index of the column containing the run number */
protected int m_RunColumn = 0;
/** The option setting for the run number column (-1 means last) */
protected int m_RunColumnSet = -1;
/** The option setting for the fold number column (-1 means none) */
protected int m_FoldColumn = -1;
/** The column to sort on (-1 means default sorting) */
protected int m_SortColumn = -1;
/** The sorting of the datasets (according to the sort column) */
protected int[] m_SortOrder = null;
/** The sorting of the columns (test base is always first) */
protected int[] m_ColOrder = null;
/** The significance level for comparisons */
protected double m_SignificanceLevel = 0.05;
/**
* The range of columns that specify a unique "dataset" (eg: scheme plus
* configuration)
*/
protected Range m_DatasetKeyColumnsRange = new Range();
/** An array containing the indexes of just the selected columns */
protected int[] m_DatasetKeyColumns;
/** The list of dataset specifiers */
protected DatasetSpecifiers m_DatasetSpecifiers = new DatasetSpecifiers();
/**
* The range of columns that specify a unique result set (eg: scheme plus
* configuration)
*/
protected Range m_ResultsetKeyColumnsRange = new Range();
/** An array containing the indexes of just the selected columns */
protected int[] m_ResultsetKeyColumns;
/** An array containing the indexes of the datasets to display */
protected int[] m_DisplayedResultsets = null;
/** Stores a vector for each resultset holding all instances in each set */
protected ArrayList<Resultset> m_Resultsets = new ArrayList<Resultset>();
/** Indicates whether the instances have been partitioned */
protected boolean m_ResultsetsValid;
/** Indicates whether standard deviations should be displayed */
protected boolean m_ShowStdDevs = false;
/** the instance of the class to produce the output. */
protected ResultMatrix m_ResultMatrix = new ResultMatrixPlainText();
/** A list of unique "dataset" specifiers that have been observed */
protected class DatasetSpecifiers implements RevisionHandler, Serializable {
/** for serialization. */
private static final long serialVersionUID = -9020938059902723401L;
/** the specifiers that have been observed */
ArrayList<Instance> m_Specifiers = new ArrayList<Instance>();
/**
* Removes all specifiers.
*/
protected void removeAllSpecifiers() {
this.m_Specifiers.clear();
}
/**
* Add an instance to the list of specifiers (if necessary)
*
* @param inst the instance to add
*/
protected void add(final Instance inst) {
for (int i = 0; i < this.m_Specifiers.size(); i++) {
Instance specifier = this.m_Specifiers.get(i);
boolean found = true;
for (int m_DatasetKeyColumn : PairedTTester.this.m_DatasetKeyColumns) {
if (inst.value(m_DatasetKeyColumn) != specifier.value(m_DatasetKeyColumn)) {
found = false;
}
}
if (found) {
return;
}
}
this.m_Specifiers.add(inst);
}
/**
* Get the template at the given position.
*
* @param i the index
* @return the template
*/
protected Instance specifier(final int i) {
return this.m_Specifiers.get(i);
}
/**
* Gets the number of specifiers.
*
* @return the current number of specifiers
*/
protected int numSpecifiers() {
return this.m_Specifiers.size();
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
/** Utility class to store the instances pertaining to a dataset */
protected class Dataset implements RevisionHandler, Serializable {
/** for serialization. */
private static final long serialVersionUID = -2801397601839433282L;
/** the template */
Instance m_Template;
/** the dataset */
ArrayList<Instance> m_Dataset;
/**
* Constructor
*
* @param template the template
*/
public Dataset(final Instance template) {
this.m_Template = template;
this.m_Dataset = new ArrayList<Instance>();
this.add(template);
}
/**
     * Returns true if the given instance matches this dataset's template on
     * those attributes that have been designated key columns (eg: scheme name
     * and scheme options)
     *
     * @param first the instance to check
     * @return true if the instance matches the template on the currently set
     *         key columns
*/
protected boolean matchesTemplate(final Instance first) {
for (int m_DatasetKeyColumn : PairedTTester.this.m_DatasetKeyColumns) {
if (first.value(m_DatasetKeyColumn) != this.m_Template.value(m_DatasetKeyColumn)) {
return false;
}
}
return true;
}
/**
* Adds the given instance to the dataset
*
* @param inst the instance to add
*/
protected void add(final Instance inst) {
this.m_Dataset.add(inst);
}
/**
* Returns a vector containing the instances in the dataset
*
* @return the current contents
*/
protected ArrayList<Instance> contents() {
return this.m_Dataset;
}
/**
* Sorts the instances in the dataset by the run number.
*
* @param runColumn a value of type 'int'
* @throws InterruptedException
*/
public void sort(final int runColumn) throws InterruptedException {
double[] runNums = new double[this.m_Dataset.size()];
for (int j = 0; j < runNums.length; j++) {
runNums[j] = this.m_Dataset.get(j).value(runColumn);
}
int[] index = Utils.stableSort(runNums);
ArrayList<Instance> newDataset = new ArrayList<Instance>(runNums.length);
for (int element : index) {
newDataset.add(this.m_Dataset.get(element));
}
this.m_Dataset = newDataset;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
/** Utility class to store the instances in a resultset */
protected class Resultset implements RevisionHandler, Serializable {
/** for serialization. */
private static final long serialVersionUID = 1543786683821339978L;
/** the template */
Instance m_Template;
/** the dataset */
ArrayList<Dataset> m_Datasets;
/**
     * Constructor
*
* @param template the template
*/
public Resultset(final Instance template) {
this.m_Template = template;
this.m_Datasets = new ArrayList<Dataset>();
this.add(template);
}
/**
     * Returns true if the given instance matches this resultset's template on
     * those attributes that have been designated key columns (eg: scheme name
     * and scheme options)
     *
     * @param first the instance to check
     * @return true if the instance matches the template on the currently set
     *         key columns
*/
protected boolean matchesTemplate(final Instance first) {
for (int m_ResultsetKeyColumn : PairedTTester.this.m_ResultsetKeyColumns) {
if (first.value(m_ResultsetKeyColumn) != this.m_Template.value(m_ResultsetKeyColumn)) {
return false;
}
}
return true;
}
/**
* Returns a string descriptive of the resultset key column values for this
* resultset
*
* @return a value of type 'String'
*/
protected String templateString() {
String result = "";
String tempResult = "";
for (int m_ResultsetKeyColumn : PairedTTester.this.m_ResultsetKeyColumns) {
tempResult = this.m_Template.toString(m_ResultsetKeyColumn) + ' ';
// compact the string
tempResult = Utils.removeSubstring(tempResult, "weka.classifiers.");
tempResult = Utils.removeSubstring(tempResult, "weka.filters.");
tempResult = Utils.removeSubstring(tempResult, "weka.attributeSelection.");
result += tempResult;
}
return result.trim();
}
/**
* Returns a vector containing all instances belonging to one dataset.
*
* @param inst a template instance
     * @return the instances belonging to the matching dataset, or null if
     *         none match
*/
public ArrayList<Instance> dataset(final Instance inst) {
for (int i = 0; i < this.m_Datasets.size(); i++) {
if (this.m_Datasets.get(i).matchesTemplate(inst)) {
return this.m_Datasets.get(i).contents();
}
}
return null;
}
/**
* Adds an instance to this resultset
*
* @param newInst a value of type 'Instance'
*/
public void add(final Instance newInst) {
for (int i = 0; i < this.m_Datasets.size(); i++) {
if (this.m_Datasets.get(i).matchesTemplate(newInst)) {
this.m_Datasets.get(i).add(newInst);
return;
}
}
Dataset newDataset = new Dataset(newInst);
this.m_Datasets.add(newDataset);
}
/**
* Sorts the instances in each dataset by the run number.
*
* @param runColumn a value of type 'int'
* @throws InterruptedException
*/
public void sort(final int runColumn) throws InterruptedException {
for (int i = 0; i < this.m_Datasets.size(); i++) {
this.m_Datasets.get(i).sort(runColumn);
}
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
} // Resultset
/**
* Returns a string descriptive of the key column values for the "datasets
*
* @param template the template
* @return a value of type 'String'
*/
protected String templateString(final Instance template) {
String result = "";
for (int m_DatasetKeyColumn : this.m_DatasetKeyColumns) {
result += template.toString(m_DatasetKeyColumn) + ' ';
}
if (result.startsWith("weka.classifiers.")) {
result = result.substring("weka.classifiers.".length());
}
return result.trim();
}
/**
* Sets the matrix to use to produce the output.
*
* @param matrix the instance to use to produce the output
* @see ResultMatrix
*/
@Override
public void setResultMatrix(final ResultMatrix matrix) {
this.m_ResultMatrix = matrix;
}
/**
* Gets the instance that produces the output.
*
* @return the instance to produce the output
*/
@Override
public ResultMatrix getResultMatrix() {
return this.m_ResultMatrix;
}
/**
* Set whether standard deviations are displayed or not.
*
* @param s true if standard deviations are to be displayed
*/
@Override
public void setShowStdDevs(final boolean s) {
this.m_ShowStdDevs = s;
}
/**
* Returns true if standard deviations have been requested.
*
* @return true if standard deviations are to be displayed.
*/
@Override
public boolean getShowStdDevs() {
return this.m_ShowStdDevs;
}
/**
* Separates the instances into resultsets and by dataset/run.
*
* @throws Exception if the TTest parameters have not been set.
*/
protected void prepareData() throws Exception {
if (this.m_Instances == null) {
throw new Exception("No instances have been set");
}
if (this.m_RunColumnSet == -1) {
this.m_RunColumn = this.m_Instances.numAttributes() - 1;
} else {
this.m_RunColumn = this.m_RunColumnSet;
}
if (this.m_ResultsetKeyColumnsRange == null) {
throw new Exception("No result specifier columns have been set");
}
this.m_ResultsetKeyColumnsRange.setUpper(this.m_Instances.numAttributes() - 1);
this.m_ResultsetKeyColumns = this.m_ResultsetKeyColumnsRange.getSelection();
if (this.m_DatasetKeyColumnsRange == null) {
throw new Exception("No dataset specifier columns have been set");
}
this.m_DatasetKeyColumnsRange.setUpper(this.m_Instances.numAttributes() - 1);
this.m_DatasetKeyColumns = this.m_DatasetKeyColumnsRange.getSelection();
// Split the data up into result sets
this.m_Resultsets.clear();
this.m_DatasetSpecifiers.removeAllSpecifiers();
for (int i = 0; i < this.m_Instances.numInstances(); i++) {
Instance current = this.m_Instances.instance(i);
if (current.isMissing(this.m_RunColumn)) {
throw new Exception("Instance has missing value in run " + "column!\n" + current);
}
for (int m_ResultsetKeyColumn : this.m_ResultsetKeyColumns) {
if (current.isMissing(m_ResultsetKeyColumn)) {
throw new Exception("Instance has missing value in resultset key " + "column " + (m_ResultsetKeyColumn + 1) + "!\n" + current);
}
}
for (int m_DatasetKeyColumn : this.m_DatasetKeyColumns) {
if (current.isMissing(m_DatasetKeyColumn)) {
throw new Exception("Instance has missing value in dataset key " + "column " + (m_DatasetKeyColumn + 1) + "!\n" + current);
}
}
boolean found = false;
for (int j = 0; j < this.m_Resultsets.size(); j++) {
Resultset resultset = this.m_Resultsets.get(j);
if (resultset.matchesTemplate(current)) {
resultset.add(current);
found = true;
break;
}
}
if (!found) {
Resultset resultset = new Resultset(current);
this.m_Resultsets.add(resultset);
}
this.m_DatasetSpecifiers.add(current);
}
// Tell each resultset to sort on the run column
for (int j = 0; j < this.m_Resultsets.size(); j++) {
Resultset resultset = this.m_Resultsets.get(j);
if (this.m_FoldColumn >= 0) {
// sort on folds first in case they are out of order
resultset.sort(this.m_FoldColumn);
}
resultset.sort(this.m_RunColumn);
}
this.m_ResultsetsValid = true;
}
/**
* Gets the number of datasets in the resultsets
*
* @return the number of datasets in the resultsets
*/
@Override
public int getNumDatasets() {
if (!this.m_ResultsetsValid) {
try {
this.prepareData();
} catch (Exception ex) {
ex.printStackTrace();
return 0;
}
}
return this.m_DatasetSpecifiers.numSpecifiers();
}
/**
* Gets the number of resultsets in the data.
*
* @return the number of resultsets in the data
*/
@Override
public int getNumResultsets() {
if (!this.m_ResultsetsValid) {
try {
this.prepareData();
} catch (Exception ex) {
ex.printStackTrace();
return 0;
}
}
return this.m_Resultsets.size();
}
/**
* Gets a string descriptive of the specified resultset.
*
* @param index the index of the resultset
* @return a descriptive string for the resultset
*/
@Override
public String getResultsetName(final int index) {
if (!this.m_ResultsetsValid) {
try {
this.prepareData();
} catch (Exception ex) {
ex.printStackTrace();
return null;
}
}
return this.m_Resultsets.get(index).templateString();
}
/**
* Checks whether the resultset with the given index shall be displayed.
*
* @param index the index of the resultset to check whether it shall be
* displayed
* @return whether the specified resultset is displayed
*/
@Override
public boolean displayResultset(final int index) {
boolean result;
int i;
result = true;
if (this.m_DisplayedResultsets != null) {
result = false;
for (i = 0; i < this.m_DisplayedResultsets.length; i++) {
if (this.m_DisplayedResultsets[i] == index) {
result = true;
break;
}
}
}
return result;
}
/**
* Computes a paired t-test comparison for a specified dataset between two
* resultsets.
*
* @param datasetSpecifier the dataset specifier
* @param resultset1Index the index of the first resultset
* @param resultset2Index the index of the second resultset
* @param comparisonColumn the column containing values to compare
* @return the results of the paired comparison
* @throws Exception if an error occurs
*/
@Override
public PairedStats calculateStatistics(final Instance datasetSpecifier, final int resultset1Index, final int resultset2Index, final int comparisonColumn) throws Exception {
if (this.m_Instances.attribute(comparisonColumn).type() != Attribute.NUMERIC) {
throw new Exception("Comparison column " + (comparisonColumn + 1) + " (" + this.m_Instances.attribute(comparisonColumn).name() + ") is not numeric");
}
if (!this.m_ResultsetsValid) {
this.prepareData();
}
Resultset resultset1 = this.m_Resultsets.get(resultset1Index);
Resultset resultset2 = this.m_Resultsets.get(resultset2Index);
ArrayList<Instance> dataset1 = resultset1.dataset(datasetSpecifier);
ArrayList<Instance> dataset2 = resultset2.dataset(datasetSpecifier);
String datasetName = this.templateString(datasetSpecifier);
if (dataset1 == null) {
throw new Exception("No results for dataset=" + datasetName + " for resultset=" + resultset1.templateString());
} else if (dataset2 == null) {
throw new Exception("No results for dataset=" + datasetName + " for resultset=" + resultset2.templateString());
} else if (dataset1.size() != dataset2.size()) {
throw new Exception("Results for dataset=" + datasetName + " differ in size for resultset=" + resultset1.templateString() + " and resultset=" + resultset2.templateString());
}
PairedStats pairedStats = new PairedStats(this.m_SignificanceLevel);
for (int k = 0; k < dataset1.size(); k++) {
Instance current1 = dataset1.get(k);
Instance current2 = dataset2.get(k);
if (current1.isMissing(comparisonColumn)) {
System.err.println("Instance has missing value in comparison " + "column!\n" + current1);
continue;
}
if (current2.isMissing(comparisonColumn)) {
System.err.println("Instance has missing value in comparison " + "column!\n" + current2);
continue;
}
if (current1.value(this.m_RunColumn) != current2.value(this.m_RunColumn)) {
System.err.println("Run numbers do not match!\n" + current1 + current2);
}
if (this.m_FoldColumn != -1) {
if (current1.value(this.m_FoldColumn) != current2.value(this.m_FoldColumn)) {
System.err.println("Fold numbers do not match!\n" + current1 + current2);
}
}
double value1 = current1.value(comparisonColumn);
double value2 = current2.value(comparisonColumn);
pairedStats.add(value1, value2);
}
pairedStats.calculateDerived();
// System.err.println("Differences stats:\n" +
// pairedStats.differencesStats);
return pairedStats;
}
/**
* Creates a key that maps resultset numbers to their descriptions.
*
* @return a value of type 'String'
*/
@Override
public String resultsetKey() {
if (!this.m_ResultsetsValid) {
try {
this.prepareData();
} catch (Exception ex) {
ex.printStackTrace();
return ex.getMessage();
}
}
String result = "";
for (int j = 0; j < this.getNumResultsets(); j++) {
result += "(" + (j + 1) + ") " + this.getResultsetName(j) + '\n';
}
return result + '\n';
}
/**
* Creates a "header" string describing the current resultsets.
*
* @param comparisonColumn a value of type 'int'
* @return a value of type 'String'
*/
@Override
public String header(final int comparisonColumn) {
if (!this.m_ResultsetsValid) {
try {
this.prepareData();
} catch (Exception ex) {
ex.printStackTrace();
return ex.getMessage();
}
}
this.initResultMatrix();
this.m_ResultMatrix.addHeader("Tester", this.getClass().getName() + " " + Utils.joinOptions(this.getOptions()));
this.m_ResultMatrix.addHeader("Analysing", this.m_Instances.attribute(comparisonColumn).name());
this.m_ResultMatrix.addHeader("Datasets", Integer.toString(this.getNumDatasets()));
this.m_ResultMatrix.addHeader("Resultsets", Integer.toString(this.getNumResultsets()));
this.m_ResultMatrix.addHeader("Confidence", this.getSignificanceLevel() + " (two tailed)");
this.m_ResultMatrix.addHeader("Sorted by", this.getSortColumnName());
this.m_ResultMatrix.addHeader("Date", (new SimpleDateFormat()).format(new Date()));
return this.m_ResultMatrix.toStringHeader() + "\n";
}
/**
* Carries out a comparison between all resultsets, counting the number of
* datasets where one resultset outperforms the other.
*
* @param comparisonColumn the index of the comparison column
* @param nonSigWin for storing the non-significant wins
* @return a 2d array where element [i][j] is the number of times resultset j
* performed significantly better than resultset i.
* @throws Exception if an error occurs
*/
@Override
public int[][] multiResultsetWins(final int comparisonColumn, final int[][] nonSigWin) throws Exception {
int numResultsets = this.getNumResultsets();
int[][] win = new int[numResultsets][numResultsets];
// int [][] nonSigWin = new int [numResultsets][numResultsets];
for (int i = 0; i < numResultsets; i++) {
for (int j = i + 1; j < numResultsets; j++) {
System.err.print("Comparing (" + (i + 1) + ") with (" + (j + 1) + ")\r");
System.err.flush();
for (int k = 0; k < this.getNumDatasets(); k++) {
try {
PairedStats pairedStats = this.calculateStatistics(this.m_DatasetSpecifiers.specifier(k), i, j, comparisonColumn);
if (pairedStats.differencesSignificance < 0) {
win[i][j]++;
} else if (pairedStats.differencesSignificance > 0) {
win[j][i]++;
}
if (pairedStats.differencesStats.mean < 0) {
nonSigWin[i][j]++;
} else if (pairedStats.differencesStats.mean > 0) {
nonSigWin[j][i]++;
}
} catch (Exception ex) {
// ex.printStackTrace();
System.err.println(ex.getMessage());
}
}
}
}
return win;
}
/**
* clears the content and fills the column and row names according to the
* given sorting
*/
protected void initResultMatrix() {
this.m_ResultMatrix.setSize(this.getNumResultsets(), this.getNumDatasets());
this.m_ResultMatrix.setShowStdDev(this.m_ShowStdDevs);
for (int i = 0; i < this.getNumDatasets(); i++) {
this.m_ResultMatrix.setRowName(i, this.templateString(this.m_DatasetSpecifiers.specifier(i)));
}
for (int j = 0; j < this.getNumResultsets(); j++) {
this.m_ResultMatrix.setColName(j, this.getResultsetName(j));
this.m_ResultMatrix.setColHidden(j, !this.displayResultset(j));
}
}
/**
* Carries out a comparison between all resultsets, counting the number of
* datasets where one resultset outperforms the other. The results are
* summarized in a table.
*
* @param comparisonColumn the index of the comparison column
* @return the results in a string
* @throws Exception if an error occurs
*/
@Override
public String multiResultsetSummary(final int comparisonColumn) throws Exception {
int[][] nonSigWin = new int[this.getNumResultsets()][this.getNumResultsets()];
int[][] win = this.multiResultsetWins(comparisonColumn, nonSigWin);
this.initResultMatrix();
this.m_ResultMatrix.setSummary(nonSigWin, win);
return this.m_ResultMatrix.toStringSummary();
}
/**
* returns a ranking of the resultsets
*
* @param comparisonColumn the column to compare with
* @return the ranking
* @throws Exception if something goes wrong
*/
@Override
public String multiResultsetRanking(final int comparisonColumn) throws Exception {
int[][] nonSigWin = new int[this.getNumResultsets()][this.getNumResultsets()];
int[][] win = this.multiResultsetWins(comparisonColumn, nonSigWin);
this.initResultMatrix();
this.m_ResultMatrix.setRanking(win);
return this.m_ResultMatrix.toStringRanking();
}
/**
* Creates a comparison table where a base resultset is compared to the other
* resultsets. Results are presented for every dataset.
*
* @param baseResultset the index of the base resultset
* @param comparisonColumn the index of the column to compare over
* @return the comparison table string
* @throws Exception if an error occurs
*/
@Override
public String multiResultsetFull(final int baseResultset, final int comparisonColumn) throws Exception {
int maxWidthMean = 2;
int maxWidthStdDev = 2;
double[] sortValues = new double[this.getNumDatasets()];
// determine max field width
for (int i = 0; i < this.getNumDatasets(); i++) {
sortValues[i] = Double.POSITIVE_INFINITY; // sorts skipped cols to end
for (int j = 0; j < this.getNumResultsets(); j++) {
if (!this.displayResultset(j)) {
continue;
}
try {
PairedStats pairedStats = this.calculateStatistics(this.m_DatasetSpecifiers.specifier(i), baseResultset, j, comparisonColumn);
if (!Double.isInfinite(pairedStats.yStats.mean) && !Double.isNaN(pairedStats.yStats.mean)) {
double width = ((Math.log(Math.abs(pairedStats.yStats.mean)) / Math.log(10)) + 1);
if (width > maxWidthMean) {
maxWidthMean = (int) width;
}
}
if (j == baseResultset) {
if (this.getSortColumn() != -1) {
sortValues[i] = this.calculateStatistics(this.m_DatasetSpecifiers.specifier(i), baseResultset, j, this.getSortColumn()).xStats.mean;
} else {
sortValues[i] = i;
}
}
if (this.m_ShowStdDevs && !Double.isInfinite(pairedStats.yStats.stdDev) && !Double.isNaN(pairedStats.yStats.stdDev)) {
double width = ((Math.log(Math.abs(pairedStats.yStats.stdDev)) / Math.log(10)) + 1);
if (width > maxWidthStdDev) {
maxWidthStdDev = (int) width;
}
}
} catch (Exception ex) {
// ex.printStackTrace();
System.err.println(ex);
}
}
}
// sort rows according to sort column
this.m_SortOrder = Utils.sort(sortValues);
// determine column order
this.m_ColOrder = new int[this.getNumResultsets()];
this.m_ColOrder[0] = baseResultset;
int index = 1;
for (int i = 0; i < this.getNumResultsets(); i++) {
if (i == baseResultset) {
continue;
}
this.m_ColOrder[index] = i;
index++;
}
// setup matrix
this.initResultMatrix();
this.m_ResultMatrix.setRowOrder(this.m_SortOrder);
this.m_ResultMatrix.setColOrder(this.m_ColOrder);
this.m_ResultMatrix.setMeanWidth(maxWidthMean);
this.m_ResultMatrix.setStdDevWidth(maxWidthStdDev);
this.m_ResultMatrix.setSignificanceWidth(1);
// make sure that test base is displayed, even though it might not be
// selected
for (int i = 0; i < this.m_ResultMatrix.getColCount(); i++) {
if ((i == baseResultset) && (this.m_ResultMatrix.getColHidden(i))) {
this.m_ResultMatrix.setColHidden(i, false);
System.err.println("Note: test base was hidden - set visible!");
}
}
// the data
for (int i = 0; i < this.getNumDatasets(); i++) {
this.m_ResultMatrix.setRowName(i, this.templateString(this.m_DatasetSpecifiers.specifier(i)));
for (int j = 0; j < this.getNumResultsets(); j++) {
try {
// calc stats
PairedStats pairedStats = this.calculateStatistics(this.m_DatasetSpecifiers.specifier(i), baseResultset, j, comparisonColumn);
// count
this.m_ResultMatrix.setCount(i, pairedStats.count);
// mean
this.m_ResultMatrix.setMean(j, i, pairedStats.yStats.mean);
// std dev
this.m_ResultMatrix.setStdDev(j, i, pairedStats.yStats.stdDev);
// significance
if (pairedStats.differencesSignificance < 0) {
this.m_ResultMatrix.setSignificance(j, i, ResultMatrix.SIGNIFICANCE_WIN);
} else if (pairedStats.differencesSignificance > 0) {
this.m_ResultMatrix.setSignificance(j, i, ResultMatrix.SIGNIFICANCE_LOSS);
} else {
this.m_ResultMatrix.setSignificance(j, i, ResultMatrix.SIGNIFICANCE_TIE);
}
} catch (Exception e) {
// e.printStackTrace();
System.err.println(e);
}
}
}
// generate output
StringBuffer result = new StringBuffer(1000);
try {
result.append(this.m_ResultMatrix.toStringMatrix());
} catch (Exception e) {
e.printStackTrace();
}
// append a key so that we can tell the difference between long
// scheme+option names
if (this.m_ResultMatrix.getEnumerateColNames()) {
result.append("\n\n" + this.m_ResultMatrix.toStringKey());
}
return result.toString();
}
/**
* Lists options understood by this object.
*
* @return an enumeration of Options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>();
newVector.addElement(new Option("\tSpecify list of columns that specify a unique\n" + "\tdataset.\n" + "\tFirst and last are valid indexes. (default none)", "D", 1, "-D <index,index2-index4,...>"));
newVector.addElement(new Option("\tSet the index of the column containing the run number", "R", 1, "-R <index>"));
newVector.addElement(new Option("\tSet the index of the column containing the fold number", "F", 1, "-F <index>"));
newVector.addElement(
new Option("\tSpecify list of columns that specify a unique\n" + "\t'result generator' (eg: classifier name and options).\n" + "\tFirst and last are valid indexes. (default none)", "G", 1, "-G <index1,index2-index4,...>"));
newVector.addElement(new Option("\tSet the significance level for comparisons (default 0.05)", "S", 1, "-S <significance level>"));
newVector.addElement(new Option("\tSet the result matrix (classname plus parameters).\n\t(default: weka.experiment.ResultMatrixPlainText)", "result-matrix", 1, "-result-matrix <result-matrix-class>"));
newVector.addElement(new Option("\tShow standard deviations", "V", 0, "-V"));
newVector.addElement(new Option("\tProduce table comparisons in Latex table format", "L", 0, "-L"));
newVector.addElement(new Option("\tProduce table comparisons in CSV table format", "csv", 0, "-csv"));
newVector.addElement(new Option("\tProduce table comparisons in HTML table format", "html", 0, "-html"));
newVector.addElement(new Option("\tProduce table comparisons with only the significance values", "significance", 0, "-significance"));
newVector.addElement(new Option("\tProduce table comparisons output suitable for GNUPlot", "gnuplot", 0, "-gnuplot"));
newVector.addElement(new Option("", "", 0, "\nOptions specific to result matrix " + this.getResultMatrix().getClass().getName() + ":"));
newVector.addAll(Collections.list(((OptionHandler) this.getResultMatrix()).listOptions()));
return newVector.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -D <index,index2-index4,...>
* Specify list of columns that specify a unique
* dataset.
* First and last are valid indexes. (default none)
* </pre>
*
* <pre>
* -R <index>
* Set the index of the column containing the run number
* </pre>
*
* <pre>
* -F <index>
* Set the index of the column containing the fold number
* </pre>
*
* <pre>
* -G <index1,index2-index4,...>
* Specify list of columns that specify a unique
* 'result generator' (eg: classifier name and options).
* First and last are valid indexes. (default none)
* </pre>
*
* <pre>
* -S <significance level>
* Set the significance level for comparisons (default 0.05)
* </pre>
*
* <pre>
* -V
* Show standard deviations
* </pre>
*
* <pre>
* -L
* Produce table comparisons in Latex table format
* </pre>
*
* <pre>
* -csv
* Produce table comparisons in CSV table format
* </pre>
*
* <pre>
* -html
* Produce table comparisons in HTML table format
* </pre>
*
* <pre>
* -significance
* Produce table comparisons with only the significance values
* </pre>
*
* <pre>
* -gnuplot
* Produce table comparisons output suitable for GNUPlot
* </pre>
*
* <!-- options-end -->
*
* @param options an array containing options to set.
* @throws Exception if invalid options are given
*/
@Override
public void setOptions(final String[] options) throws Exception {
this.setShowStdDevs(Utils.getFlag('V', options));
String outputOption = Utils.getOption("result-matrix", options);
if (outputOption.length() != 0) {
String[] resultMatrixSpec = Utils.splitOptions(outputOption);
if (resultMatrixSpec.length == 0) {
throw new Exception("Invalid ResultMatrix specification string");
}
String resultMatrixName = resultMatrixSpec[0];
resultMatrixSpec[0] = "";
ResultMatrix resultMatrix = (ResultMatrix) Utils.forName(Class.forName("weka.experiment.ResultMatrix"), resultMatrixName, resultMatrixSpec);
this.setResultMatrix(resultMatrix);
} else if (Utils.getFlag('L', options)) {
this.setResultMatrix(new ResultMatrixLatex());
} else if (Utils.getFlag("csv", options)) {
this.setResultMatrix(new ResultMatrixCSV());
} else if (Utils.getFlag("html", options)) {
this.setResultMatrix(new ResultMatrixHTML());
} else if (Utils.getFlag("significance", options)) {
this.setResultMatrix(new ResultMatrixSignificance());
} else if (Utils.getFlag("gnuplot", options)) {
this.setResultMatrix(new ResultMatrixGnuPlot());
}
String datasetList = Utils.getOption('D', options);
Range datasetRange = new Range();
if (datasetList.length() != 0) {
datasetRange.setRanges(datasetList);
}
this.setDatasetKeyColumns(datasetRange);
String indexStr = Utils.getOption('R', options);
if (indexStr.length() != 0) {
if (indexStr.equals("first")) {
this.setRunColumn(0);
} else if (indexStr.equals("last")) {
this.setRunColumn(-1);
} else {
this.setRunColumn(Integer.parseInt(indexStr) - 1);
}
} else {
this.setRunColumn(-1);
}
String foldStr = Utils.getOption('F', options);
if (foldStr.length() != 0) {
this.setFoldColumn(Integer.parseInt(foldStr) - 1);
} else {
this.setFoldColumn(-1);
}
String sigStr = Utils.getOption('S', options);
if (sigStr.length() != 0) {
this.setSignificanceLevel((new Double(sigStr)).doubleValue());
} else {
this.setSignificanceLevel(0.05);
}
String resultsetList = Utils.getOption('G', options);
Range generatorRange = new Range();
if (resultsetList.length() != 0) {
generatorRange.setRanges(resultsetList);
}
this.setResultsetKeyColumns(generatorRange);
}
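/*
 * Example (hypothetical column indices, not part of the original source): an
 * option array as parsed by setOptions() above, where columns 1-2 identify the
 * dataset, column 3 holds the run number, columns 4-6 identify the result
 * generator, a 0.05 significance level is used and output is rendered as CSV:
 *
 *   tester.setOptions(new String[] {
 *     "-D", "1,2", "-R", "3", "-G", "4-6", "-S", "0.05", "-csv"
 *   });
 */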
/**
* Gets current settings of the PairedTTester.
*
* @return an array of strings containing current options.
*/
@Override
public String[] getOptions() {
Vector<String> options = new Vector<String>();
if (!this.getResultsetKeyColumns().getRanges().equals("")) {
options.add("-G");
options.add(this.getResultsetKeyColumns().getRanges());
}
if (!this.getDatasetKeyColumns().getRanges().equals("")) {
options.add("-D");
options.add(this.getDatasetKeyColumns().getRanges());
}
options.add("-R");
options.add("" + (this.getRunColumn() + 1));
options.add("-S");
options.add("" + this.getSignificanceLevel());
if (this.getShowStdDevs()) {
options.add("-V");
}
options.add("-result-matrix");
String spec = this.getResultMatrix().getClass().getName();
if (this.getResultMatrix() instanceof OptionHandler) {
spec += " " + Utils.joinOptions(((OptionHandler) this.getResultMatrix()).getOptions());
}
options.add(spec.trim());
return options.toArray(new String[options.size()]);
}
/**
* Get the value of ResultsetKeyColumns.
*
* @return Value of ResultsetKeyColumns.
*/
@Override
public Range getResultsetKeyColumns() {
return this.m_ResultsetKeyColumnsRange;
}
/**
* Set the value of ResultsetKeyColumns.
*
* @param newResultsetKeyColumns Value to assign to ResultsetKeyColumns.
*/
@Override
public void setResultsetKeyColumns(final Range newResultsetKeyColumns) {
this.m_ResultsetKeyColumnsRange = newResultsetKeyColumns;
this.m_ResultsetsValid = false;
}
/**
* Gets the indices of the resultsets that are displayed (if
* <code>null</code> then all are displayed). The base is always displayed.
*
* @return the indices of the resultsets to display
*/
@Override
public int[] getDisplayedResultsets() {
return this.m_DisplayedResultsets;
}
/**
* Sets the indices of the resultsets to display (<code>null</code> means all).
* The base is always displayed.
*
* @param cols the indices of the resultsets to display
*/
@Override
public void setDisplayedResultsets(final int[] cols) {
this.m_DisplayedResultsets = cols;
}
/**
* Get the value of SignificanceLevel.
*
* @return Value of SignificanceLevel.
*/
@Override
public double getSignificanceLevel() {
return this.m_SignificanceLevel;
}
/**
* Set the value of SignificanceLevel.
*
* @param newSignificanceLevel Value to assign to SignificanceLevel.
*/
@Override
public void setSignificanceLevel(final double newSignificanceLevel) {
this.m_SignificanceLevel = newSignificanceLevel;
}
/**
* Get the value of DatasetKeyColumns.
*
* @return Value of DatasetKeyColumns.
*/
@Override
public Range getDatasetKeyColumns() {
return this.m_DatasetKeyColumnsRange;
}
/**
* Set the value of DatasetKeyColumns.
*
* @param newDatasetKeyColumns Value to assign to DatasetKeyColumns.
*/
@Override
public void setDatasetKeyColumns(final Range newDatasetKeyColumns) {
this.m_DatasetKeyColumnsRange = newDatasetKeyColumns;
this.m_ResultsetsValid = false;
}
/**
* Get the value of RunColumn.
*
* @return Value of RunColumn.
*/
@Override
public int getRunColumn() {
return this.m_RunColumnSet;
}
/**
* Set the value of RunColumn.
*
* @param newRunColumn Value to assign to RunColumn.
*/
@Override
public void setRunColumn(final int newRunColumn) {
this.m_RunColumnSet = newRunColumn;
this.m_ResultsetsValid = false;
}
/**
* Get the value of FoldColumn.
*
* @return Value of FoldColumn.
*/
@Override
public int getFoldColumn() {
return this.m_FoldColumn;
}
/**
* Set the value of FoldColumn.
*
* @param newFoldColumn Value to assign to FoldColumn.
*/
@Override
public void setFoldColumn(final int newFoldColumn) {
this.m_FoldColumn = newFoldColumn;
this.m_ResultsetsValid = false;
}
/**
* Returns the name of the column to sort on.
*
* @return the name of the column to sort on.
*/
@Override
public String getSortColumnName() {
if (this.getSortColumn() == -1) {
return "-";
} else {
return this.m_Instances.attribute(this.getSortColumn()).name();
}
}
/**
* Returns the column to sort on, -1 means the default sorting.
*
* @return the column to sort on.
*/
@Override
public int getSortColumn() {
return this.m_SortColumn;
}
/**
* Set the column to sort on, -1 means the default sorting.
*
* @param newSortColumn the new sort column.
*/
@Override
public void setSortColumn(final int newSortColumn) {
if (newSortColumn >= -1) {
this.m_SortColumn = newSortColumn;
}
}
/**
* Get the value of Instances.
*
* @return Value of Instances.
*/
@Override
public Instances getInstances() {
return this.m_Instances;
}
/**
* Set the value of Instances.
*
* @param newInstances Value to assign to Instances.
*/
@Override
public void setInstances(final Instances newInstances) {
this.m_Instances = newInstances;
this.m_ResultsetsValid = false;
}
/**
* retrieves all the settings from the given Tester
*
* @param tester the Tester to get the settings from
*/
@Override
public void assign(final Tester tester) {
this.setInstances(tester.getInstances());
this.setResultMatrix(tester.getResultMatrix());
this.setShowStdDevs(tester.getShowStdDevs());
this.setResultsetKeyColumns(tester.getResultsetKeyColumns());
this.setDisplayedResultsets(tester.getDisplayedResultsets());
this.setSignificanceLevel(tester.getSignificanceLevel());
this.setDatasetKeyColumns(tester.getDatasetKeyColumns());
this.setRunColumn(tester.getRunColumn());
this.setFoldColumn(tester.getFoldColumn());
this.setSortColumn(tester.getSortColumn());
}
/**
* returns a string that is displayed as tooltip on the "perform test" button
* in the experimenter
*
* @return the tool tip
*/
@Override
public String getToolTipText() {
return "Performs test using t-test statistic";
}
/**
* returns the name of the tester
*
* @return the display name
*/
@Override
public String getDisplayName() {
return "Paired T-Tester";
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Test the class from the command line.
*
* @param args contains options for the instance ttests
*/
public static void main(final String args[]) {
try {
PairedTTester tt = new PairedTTester();
String datasetName = Utils.getOption('t', args);
String compareColStr = Utils.getOption('c', args);
String baseColStr = Utils.getOption('b', args);
boolean summaryOnly = Utils.getFlag('s', args);
boolean rankingOnly = Utils.getFlag('r', args);
boolean noHeader = Utils.getFlag('n', args);
try {
if ((datasetName.length() == 0) || (compareColStr.length() == 0)) {
throw new Exception("-t and -c options are required");
}
tt.setOptions(args);
Utils.checkForRemainingOptions(args);
} catch (Exception ex) {
String result = "";
Enumeration<Option> enu = tt.listOptions();
while (enu.hasMoreElements()) {
Option option = enu.nextElement();
result += option.synopsis() + '\n' + option.description() + '\n';
}
throw new Exception(ex.getMessage() + "\n\nUsage:\n\n" + "-t <file>\n" + "\tSet the dataset containing data to evaluate\n" + "-b <index>\n" + "\tSet the resultset to base comparisons against (optional)\n" + "-c <index>\n"
+ "\tSet the column to perform a comparison on\n" + "-s\n" + "\tSummarize wins over all resultset pairs\n" + "-r\n" + "\tGenerate a resultset ranking\n" + "-n\n" + "\tDo not output header info\n" + result);
}
Instances data = new Instances(new BufferedReader(new FileReader(datasetName)));
tt.setInstances(data);
// tt.prepareData();
int compareCol = Integer.parseInt(compareColStr) - 1;
if (!noHeader) {
System.out.println(tt.header(compareCol));
}
if (rankingOnly) {
System.out.println(tt.multiResultsetRanking(compareCol));
} else if (summaryOnly) {
System.out.println(tt.multiResultsetSummary(compareCol));
} else {
// System.out.println(tt.resultsetKey());
if (baseColStr.length() == 0) {
for (int i = 0; i < tt.getNumResultsets(); i++) {
if (!tt.displayResultset(i)) {
continue;
}
System.out.println(tt.multiResultsetFull(i, compareCol));
}
} else {
int baseCol = Integer.parseInt(baseColStr) - 1;
System.out.println(tt.multiResultsetFull(baseCol, compareCol));
}
}
} catch (Exception e) {
System.err.println(e.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/PropertyNode.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* PropertyNode.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.beans.IntrospectionException;
import java.beans.PropertyDescriptor;
import java.io.IOException;
import java.io.Serializable;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
/**
* Stores information on a property of an object: the class of the object with
* the property; the property descriptor, and the current value.
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class PropertyNode implements Serializable, RevisionHandler {
/** for serialization */
private static final long serialVersionUID = -8718165742572631384L;
/** The current property value */
public Object value;
/** The class of the object with this property */
public Class<?> parentClass;
/** Other info about the property */
public PropertyDescriptor property;
/**
* Creates a mostly empty property.
*
* @param pValue a property value.
*/
public PropertyNode(Object pValue) {
this(pValue, null, null);
}
/**
* Creates a fully specified property node.
*
* @param pValue the current property value.
* @param prop the PropertyDescriptor.
* @param pClass the Class of the object with this property.
*/
public PropertyNode(Object pValue, PropertyDescriptor prop, Class<?> pClass) {
value = pValue;
property = prop;
parentClass = pClass;
}
/**
* Returns a string description of this property.
*
* @return a value of type 'String'
*/
@Override
public String toString() {
if (property == null) {
return "Available properties";
}
return property.getDisplayName();
}
/*
* Handle serialization ourselves since PropertyDescriptor isn't serializable
*/
private void writeObject(java.io.ObjectOutputStream out) throws IOException {
try {
out.writeObject(value);
} catch (Exception ex) {
throw new IOException("Can't serialize object: " + ex.getMessage());
}
out.writeObject(parentClass);
out.writeObject(property.getDisplayName());
out.writeObject(property.getReadMethod().getName());
out.writeObject(property.getWriteMethod().getName());
}
private void readObject(java.io.ObjectInputStream in) throws IOException,
ClassNotFoundException {
value = in.readObject();
parentClass = (Class<?>) in.readObject();
String name = (String) in.readObject();
String getter = (String) in.readObject();
String setter = (String) in.readObject();
/*
* System.err.println("Loading property descriptor:\n" + "\tparentClass: " +
* parentClass.getName() + "\tname: " + name + "\tgetter: " + getter +
* "\tsetter: " + setter);
*/
try {
property = new PropertyDescriptor(name, parentClass, getter, setter);
} catch (IntrospectionException ex) {
throw new ClassNotFoundException("Couldn't create property descriptor: "
+ parentClass.getName() + "::" + name);
}
}
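/*
 * Note on the custom serialization above (descriptive comment, not part of the
 * original source): because PropertyDescriptor is not serializable, writeObject()
 * stores the property's display name together with the getter and setter method
 * names, and readObject() rebuilds an equivalent PropertyDescriptor from those
 * strings by introspecting the stored parent class.
 */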
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
} // PropertyNode
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/RandomSplitResultProducer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RandomSplitResultProducer.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.File;
import java.util.Calendar;
import java.util.Collections;
import java.util.Enumeration;
import java.util.Random;
import java.util.TimeZone;
import java.util.Vector;
import weka.core.AdditionalMeasureProducer;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> Generates a single train/test split and calls the
* appropriate SplitEvaluator to generate some results.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -P <percent>
* The percentage of instances to use for training.
* (default 66)
* </pre>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
* If a directory name is specified then individual
* outputs will be gzipped, otherwise all output will be
* zipped to the named file. Use in conjunction with -D. (default splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* -R
* Set when data is not to be randomized and the data sets' size
* is not to be determined via probabilistic rounding.
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the split evaluator.
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class RandomSplitResultProducer implements ResultProducer,
OptionHandler, AdditionalMeasureProducer, RevisionHandler {
/** for serialization */
static final long serialVersionUID = 1403798165056795073L;
/** The dataset of interest */
protected Instances m_Instances;
/** The ResultListener to send results to */
protected ResultListener m_ResultListener = new CSVResultListener();
/** The percentage of instances to use for training */
protected double m_TrainPercent = 66;
/** Whether dataset is to be randomized */
protected boolean m_randomize = true;
/** The SplitEvaluator used to generate results */
protected SplitEvaluator m_SplitEvaluator = new ClassifierSplitEvaluator();
/** The names of any additional measures to look for in SplitEvaluators */
protected String[] m_AdditionalMeasures = null;
/** Save raw output of split evaluators --- for debugging purposes */
protected boolean m_debugOutput = false;
/** The output zipper to use for saving raw splitEvaluator output */
protected OutputZipper m_ZipDest = null;
/** The destination output file/directory for raw output */
protected File m_OutputFile = new File(new File(
System.getProperty("user.dir")), "splitEvalutorOut.zip");
/** The name of the key field containing the dataset name */
public static String DATASET_FIELD_NAME = "Dataset";
/** The name of the key field containing the run number */
public static String RUN_FIELD_NAME = "Run";
/** The name of the result field containing the timestamp */
public static String TIMESTAMP_FIELD_NAME = "Date_time";
/**
* Returns a string describing this result producer
*
* @return a description of the result producer suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "Generates a single train/test split and calls the appropriate "
+ "SplitEvaluator to generate some results.";
}
/**
* Sets the dataset that results will be obtained for.
*
* @param instances a value of type 'Instances'.
*/
@Override
public void setInstances(Instances instances) {
m_Instances = instances;
}
/**
* Set a list of method names for additional measures to look for in
* SplitEvaluators. This could contain many measures (of which only a subset
* may be producible by the current SplitEvaluator) if an experiment is the
* type that iterates over a set of properties.
*
* @param additionalMeasures an array of measure names, null if none
*/
@Override
public void setAdditionalMeasures(String[] additionalMeasures) {
m_AdditionalMeasures = additionalMeasures;
if (m_SplitEvaluator != null) {
System.err.println("RandomSplitResultProducer: setting additional "
+ "measures for " + "split evaluator");
m_SplitEvaluator.setAdditionalMeasures(m_AdditionalMeasures);
}
}
/**
* Returns an enumeration of any additional measure names that might be in the
* SplitEvaluator
*
* @return an enumeration of the measure names
*/
@Override
public Enumeration<String> enumerateMeasures() {
Vector<String> newVector = new Vector<String>();
if (m_SplitEvaluator instanceof AdditionalMeasureProducer) {
Enumeration<String> en = ((AdditionalMeasureProducer) m_SplitEvaluator)
.enumerateMeasures();
while (en.hasMoreElements()) {
String mname = en.nextElement();
newVector.add(mname);
}
}
return newVector.elements();
}
/**
* Returns the value of the named measure
*
* @param additionalMeasureName the name of the measure to query for its value
* @return the value of the named measure
* @throws IllegalArgumentException if the named measure is not supported
*/
@Override
public double getMeasure(String additionalMeasureName) {
if (m_SplitEvaluator instanceof AdditionalMeasureProducer) {
return ((AdditionalMeasureProducer) m_SplitEvaluator)
.getMeasure(additionalMeasureName);
} else {
throw new IllegalArgumentException("RandomSplitResultProducer: "
+ "Can't return value for : " + additionalMeasureName + ". "
+ m_SplitEvaluator.getClass().getName() + " "
+ "is not an AdditionalMeasureProducer");
}
}
/**
* Sets the object to send results of each run to.
*
* @param listener a value of type 'ResultListener'
*/
@Override
public void setResultListener(ResultListener listener) {
m_ResultListener = listener;
}
/**
* Gets a Double representing the current date and time. eg: 1:46pm on
* 20/5/1999 -> 19990520.1346
*
* @return a value of type Double
*/
public static Double getTimestamp() {
Calendar now = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
double timestamp = now.get(Calendar.YEAR) * 10000
+ (now.get(Calendar.MONTH) + 1) * 100 + now.get(Calendar.DAY_OF_MONTH)
+ now.get(Calendar.HOUR_OF_DAY) / 100.0 + now.get(Calendar.MINUTE)
/ 10000.0;
return new Double(timestamp);
}
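/*
 * Worked example (derived from the formula above): at 13:46 UTC on 20 May 1999,
 * year * 10000 + (month + 1) * 100 + day = 19990520, and
 * hour / 100.0 + minute / 10000.0 = 0.13 + 0.0046 = 0.1346,
 * giving the value 19990520.1346 quoted in the javadoc.
 */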
/**
* Prepare to generate results.
*
* @throws Exception if an error occurs during preprocessing.
*/
@Override
public void preProcess() throws Exception {
if (m_SplitEvaluator == null) {
throw new Exception("No SplitEvalutor set");
}
if (m_ResultListener == null) {
throw new Exception("No ResultListener set");
}
m_ResultListener.preProcess(this);
}
/**
* Perform any postprocessing. When this method is called, it indicates that
* no more requests to generate results for the current experiment will be
* sent.
*
* @throws Exception if an error occurs
*/
@Override
public void postProcess() throws Exception {
m_ResultListener.postProcess(this);
if (m_debugOutput) {
if (m_ZipDest != null) {
m_ZipDest.finished();
m_ZipDest = null;
}
}
}
/**
* Gets the keys for a specified run number. Different run numbers correspond
* to different randomizations of the data. Keys produced should be sent to
* the current ResultListener
*
* @param run the run number to get keys for.
* @throws Exception if a problem occurs while getting the keys
*/
@Override
public void doRunKeys(int run) throws Exception {
if (m_Instances == null) {
throw new Exception("No Instances set");
}
// Add in some fields to the key like run number, dataset name
Object[] seKey = m_SplitEvaluator.getKey();
Object[] key = new Object[seKey.length + 2];
key[0] = Utils.backQuoteChars(m_Instances.relationName());
key[1] = "" + run;
System.arraycopy(seKey, 0, key, 2, seKey.length);
if (m_ResultListener.isResultRequired(this, key)) {
try {
m_ResultListener.acceptResult(this, key, null);
} catch (Exception ex) {
// Save the train and test datasets for debugging purposes?
throw ex;
}
}
}
/**
* Gets the results for a specified run number. Different run numbers
* correspond to different randomizations of the data. Results produced should
* be sent to the current ResultListener
*
* @param run the run number to get results for.
* @throws Exception if a problem occurs while getting the results
*/
@Override
public void doRun(int run) throws Exception {
if (getRawOutput()) {
if (m_ZipDest == null) {
m_ZipDest = new OutputZipper(m_OutputFile);
}
}
if (m_Instances == null) {
throw new Exception("No Instances set");
}
// Add in some fields to the key like run number, dataset name
Object[] seKey = m_SplitEvaluator.getKey();
Object[] key = new Object[seKey.length + 2];
key[0] = Utils.backQuoteChars(m_Instances.relationName());
key[1] = "" + run;
System.arraycopy(seKey, 0, key, 2, seKey.length);
if (m_ResultListener.isResultRequired(this, key)) {
// Randomize on a copy of the original dataset
Instances runInstances = new Instances(m_Instances);
Instances train;
Instances test;
if (!m_randomize) {
// Don't do any randomization
int trainSize = Utils.round(runInstances.numInstances()
* m_TrainPercent / 100);
int testSize = runInstances.numInstances() - trainSize;
train = new Instances(runInstances, 0, trainSize);
test = new Instances(runInstances, trainSize, testSize);
} else {
Random rand = new Random(run);
runInstances.randomize(rand);
// Nominal class
if (runInstances.classAttribute().isNominal()) {
// create the subset for each class
int numClasses = runInstances.numClasses();
Instances[] subsets = new Instances[numClasses + 1];
for (int i = 0; i < numClasses + 1; i++) {
subsets[i] = new Instances(runInstances, 10);
}
// divide instances into subsets
Enumeration<Instance> e = runInstances.enumerateInstances();
while (e.hasMoreElements()) {
Instance inst = e.nextElement();
if (inst.classIsMissing()) {
subsets[numClasses].add(inst);
} else {
subsets[(int) inst.classValue()].add(inst);
}
}
// Compactify them
for (int i = 0; i < numClasses + 1; i++) {
subsets[i].compactify();
}
// merge into train and test sets
train = new Instances(runInstances, runInstances.numInstances());
test = new Instances(runInstances, runInstances.numInstances());
for (int i = 0; i < numClasses + 1; i++) {
int trainSize = Utils.probRound(subsets[i].numInstances()
* m_TrainPercent / 100, rand);
for (int j = 0; j < trainSize; j++) {
train.add(subsets[i].instance(j));
}
for (int j = trainSize; j < subsets[i].numInstances(); j++) {
test.add(subsets[i].instance(j));
}
// free memory
subsets[i] = null;
}
train.compactify();
test.compactify();
// randomize the final sets
train.randomize(rand);
test.randomize(rand);
} else {
// Numeric target
int trainSize = Utils.probRound(runInstances.numInstances()
* m_TrainPercent / 100, rand);
int testSize = runInstances.numInstances() - trainSize;
train = new Instances(runInstances, 0, trainSize);
test = new Instances(runInstances, trainSize, testSize);
}
}
try {
Object[] seResults = m_SplitEvaluator.getResult(train, test);
Object[] results = new Object[seResults.length + 1];
results[0] = getTimestamp();
System.arraycopy(seResults, 0, results, 1, seResults.length);
if (m_debugOutput) {
String resultName = ("" + run + "."
+ Utils.backQuoteChars(runInstances.relationName()) + "." + m_SplitEvaluator
.toString()).replace(' ', '_');
resultName = Utils.removeSubstring(resultName, "weka.classifiers.");
resultName = Utils.removeSubstring(resultName, "weka.filters.");
resultName = Utils.removeSubstring(resultName,
"weka.attributeSelection.");
m_ZipDest.zipit(m_SplitEvaluator.getRawResultOutput(), resultName);
}
m_ResultListener.acceptResult(this, key, results);
} catch (Exception ex) {
// Save the train and test datasets for debugging purposes?
throw ex;
}
}
}
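/*
 * Summary of the splitting strategy implemented in doRun() above (descriptive
 * comment, not part of the original source):
 * - with randomization disabled (-R), the first trainPercent% of instances in
 *   their original order form the training set and the rest form the test set;
 * - with a nominal class, the data is shuffled and then split per class value
 *   (plus one subset for instances with a missing class) using probabilistic
 *   rounding, which yields an approximately stratified train/test split;
 * - with a numeric class, the shuffled data is split once, with the training
 *   set size determined by probabilistic rounding.
 */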
/**
* Gets the names of each of the columns produced for a single run. This
* method should really be static.
*
* @return an array containing the name of each column
*/
@Override
public String[] getKeyNames() {
String[] keyNames = m_SplitEvaluator.getKeyNames();
// Add in the names of our extra key fields
String[] newKeyNames = new String[keyNames.length + 2];
newKeyNames[0] = DATASET_FIELD_NAME;
newKeyNames[1] = RUN_FIELD_NAME;
System.arraycopy(keyNames, 0, newKeyNames, 2, keyNames.length);
return newKeyNames;
}
/**
* Gets the data types of each of the columns produced for a single run. This
* method should really be static.
*
* @return an array containing objects of the type of each column. The objects
* should be Strings, or Doubles.
*/
@Override
public Object[] getKeyTypes() {
Object[] keyTypes = m_SplitEvaluator.getKeyTypes();
// Add in the types of our extra fields
Object[] newKeyTypes = new String[keyTypes.length + 2];
newKeyTypes[0] = new String();
newKeyTypes[1] = new String();
System.arraycopy(keyTypes, 0, newKeyTypes, 2, keyTypes.length);
return newKeyTypes;
}
/**
* Gets the names of each of the columns produced for a single run. This
* method should really be static.
*
* @return an array containing the name of each column
*/
@Override
public String[] getResultNames() {
String[] resultNames = m_SplitEvaluator.getResultNames();
// Add in the names of our extra Result fields
String[] newResultNames = new String[resultNames.length + 1];
newResultNames[0] = TIMESTAMP_FIELD_NAME;
System.arraycopy(resultNames, 0, newResultNames, 1, resultNames.length);
return newResultNames;
}
/**
* Gets the data types of each of the columns produced for a single run. This
* method should really be static.
*
* @return an array containing objects of the type of each column. The objects
* should be Strings, or Doubles.
*/
@Override
public Object[] getResultTypes() {
Object[] resultTypes = m_SplitEvaluator.getResultTypes();
// Add in the types of our extra Result fields
Object[] newResultTypes = new Object[resultTypes.length + 1];
newResultTypes[0] = new Double(0);
System.arraycopy(resultTypes, 0, newResultTypes, 1, resultTypes.length);
return newResultTypes;
}
/**
* Gets a description of the internal settings of the result producer,
* sufficient for distinguishing a ResultProducer instance from another with
* different settings (ignoring those settings set through this interface).
* For example, a cross-validation ResultProducer may have a setting for the
* number of folds. For a given state, the results produced should be
* compatible. Typically if a ResultProducer is an OptionHandler, this string
* will represent the command line arguments required to set the
* ResultProducer to that state.
*
* @return the description of the ResultProducer state, or null if no state is
* defined
*/
@Override
public String getCompatibilityState() {
String result = "-P " + m_TrainPercent;
if (!getRandomizeData()) {
result += " -R";
}
if (m_SplitEvaluator == null) {
result += " <null SplitEvaluator>";
} else {
result += " -W " + m_SplitEvaluator.getClass().getName();
}
return result + " --";
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String outputFileTipText() {
return "Set the destination for saving raw output. If the rawOutput "
+ "option is selected, then output from the splitEvaluator for "
+ "individual train-test splits is saved. If the destination is a "
+ "directory, "
+ "then each output is saved to an individual gzip file; if the "
+ "destination is a file, then each output is saved as an entry "
+ "in a zip file.";
}
/**
* Get the value of OutputFile.
*
* @return Value of OutputFile.
*/
public File getOutputFile() {
return m_OutputFile;
}
/**
* Set the value of OutputFile.
*
* @param newOutputFile Value to assign to OutputFile.
*/
public void setOutputFile(File newOutputFile) {
m_OutputFile = newOutputFile;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String randomizeDataTipText() {
return "Do not randomize dataset and do not perform probabilistic rounding "
+ "if false";
}
/**
* Get if dataset is to be randomized
*
* @return true if dataset is to be randomized
*/
public boolean getRandomizeData() {
return m_randomize;
}
/**
* Set to true if dataset is to be randomized
*
* @param d true if dataset is to be randomized
*/
public void setRandomizeData(boolean d) {
m_randomize = d;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String rawOutputTipText() {
return "Save raw output (useful for debugging). If set, then output is "
+ "sent to the destination specified by outputFile";
}
/**
* Get if raw split evaluator output is to be saved
*
* @return true if raw split evaluator output is to be saved
*/
public boolean getRawOutput() {
return m_debugOutput;
}
/**
* Set to true if raw split evaluator output is to be saved
*
* @param d true if output is to be saved
*/
public void setRawOutput(boolean d) {
m_debugOutput = d;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String trainPercentTipText() {
return "Set the percentage of data to use for training.";
}
/**
* Get the value of TrainPercent.
*
* @return Value of TrainPercent.
*/
public double getTrainPercent() {
return m_TrainPercent;
}
/**
* Set the value of TrainPercent.
*
* @param newTrainPercent Value to assign to TrainPercent.
*/
public void setTrainPercent(double newTrainPercent) {
m_TrainPercent = newTrainPercent;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String splitEvaluatorTipText() {
return "The evaluator to apply to the test data. "
+ "This may be a classifier, regression scheme etc.";
}
/**
* Get the SplitEvaluator.
*
* @return the SplitEvaluator.
*/
public SplitEvaluator getSplitEvaluator() {
return m_SplitEvaluator;
}
/**
* Set the SplitEvaluator.
*
* @param newSplitEvaluator new SplitEvaluator to use.
*/
public void setSplitEvaluator(SplitEvaluator newSplitEvaluator) {
m_SplitEvaluator = newSplitEvaluator;
m_SplitEvaluator.setAdditionalMeasures(m_AdditionalMeasures);
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(5);
newVector
.addElement(new Option(
"\tThe percentage of instances to use for training.\n"
+ "\t(default 66)", "P", 1, "-P <percent>"));
newVector.addElement(new Option("Save raw split evaluator output.", "D", 0,
"-D"));
newVector.addElement(new Option(
"\tThe filename where raw output will be stored.\n"
+ "\tIf a directory name is specified then then individual\n"
+ "\toutputs will be gzipped, otherwise all output will be\n"
+ "\tzipped to the named file. Use in conjuction with -D."
+ "\t(default splitEvalutorOut.zip)", "O", 1,
"-O <file/directory name/path>"));
newVector.addElement(new Option(
"\tThe full class name of a SplitEvaluator.\n"
+ "\teg: weka.experiment.ClassifierSplitEvaluator", "W", 1,
"-W <class name>"));
newVector
.addElement(new Option(
"\tSet when data is not to be randomized and the data sets' size.\n"
+ "\tIs not to be determined via probabilistic rounding.", "R", 0,
"-R"));
if ((m_SplitEvaluator != null)
&& (m_SplitEvaluator instanceof OptionHandler)) {
newVector.addElement(new Option("", "", 0,
"\nOptions specific to split evaluator "
+ m_SplitEvaluator.getClass().getName() + ":"));
newVector.addAll(Collections.list(((OptionHandler) m_SplitEvaluator)
.listOptions()));
}
return newVector.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -P <percent>
* The percentage of instances to use for training.
* (default 66)
* </pre>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
* If a directory name is specified then individual
* outputs will be gzipped, otherwise all output will be
* zipped to the named file. Use in conjunction with -D. (default splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* -R
* Set when data is not to be randomized and the data sets' size
* is not to be determined via probabilistic rounding.
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
* in the test set of a cross validation. if 0
* no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the split evaluator.
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
setRawOutput(Utils.getFlag('D', options));
setRandomizeData(!Utils.getFlag('R', options));
String fName = Utils.getOption('O', options);
if (fName.length() != 0) {
setOutputFile(new File(fName));
}
String trainPct = Utils.getOption('P', options);
if (trainPct.length() != 0) {
setTrainPercent((new Double(trainPct)).doubleValue());
} else {
setTrainPercent(66);
}
String seName = Utils.getOption('W', options);
if (seName.length() == 0) {
throw new Exception("A SplitEvaluator must be specified with"
+ " the -W option.");
}
// Do it first without options, so if an exception is thrown during
// the option setting, listOptions will contain options for the actual
// SE.
setSplitEvaluator((SplitEvaluator) Utils.forName(SplitEvaluator.class,
seName, null));
if (getSplitEvaluator() instanceof OptionHandler) {
((OptionHandler) getSplitEvaluator()).setOptions(Utils
.partitionOptions(options));
}
}
/**
* Gets the current settings of the result producer.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
String[] seOptions = new String[0];
if ((m_SplitEvaluator != null)
&& (m_SplitEvaluator instanceof OptionHandler)) {
seOptions = ((OptionHandler) m_SplitEvaluator).getOptions();
}
String[] options = new String[seOptions.length + 9];
int current = 0;
options[current++] = "-P";
options[current++] = "" + getTrainPercent();
if (getRawOutput()) {
options[current++] = "-D";
}
if (!getRandomizeData()) {
options[current++] = "-R";
}
options[current++] = "-O";
options[current++] = getOutputFile().getName();
if (getSplitEvaluator() != null) {
options[current++] = "-W";
options[current++] = getSplitEvaluator().getClass().getName();
}
options[current++] = "--";
System.arraycopy(seOptions, 0, options, current, seOptions.length);
current += seOptions.length;
while (current < options.length) {
options[current++] = "";
}
return options;
}
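/*
 * Example (illustrative only, not part of the original source): with the default
 * settings, getOptions() returns an array along the lines of
 *
 *   { "-P", "66.0", "-O", "splitEvalutorOut.zip",
 *     "-W", "weka.experiment.ClassifierSplitEvaluator", "--", ... }
 *
 * where the entries after "--" are the options of the configured split evaluator
 * and any unused trailing slots are filled with empty strings.
 */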
/**
* Gets a text description of the result producer.
*
* @return a text description of the result producer.
*/
@Override
public String toString() {
String result = "RandomSplitResultProducer: ";
result += getCompatibilityState();
if (m_Instances == null) {
result += ": <null Instances>";
} else {
result += ": " + Utils.backQuoteChars(m_Instances.relationName());
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
} // RandomSplitResultProducer
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/RegressionSplitEvaluator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RegressionSplitEvaluator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.ByteArrayOutputStream;
import java.io.ObjectOutputStream;
import java.io.ObjectStreamClass;
import java.io.Serializable;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.evaluation.AbstractEvaluationMetric;
import weka.classifiers.rules.ZeroR;
import weka.core.AdditionalMeasureProducer;
import weka.core.Attribute;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Summarizable;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> A SplitEvaluator that produces results for a
* classification scheme on a numeric class attribute.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -no-size
* Skips the determination of sizes (train/test/classifier)
* (default: sizes are determined)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* All options after -- will be passed to the classifier.
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class RegressionSplitEvaluator implements SplitEvaluator, OptionHandler,
AdditionalMeasureProducer, RevisionHandler {
/** for serialization */
static final long serialVersionUID = -328181640503349202L;
/** The template classifier */
protected Classifier m_Template = new ZeroR();
/** The classifier used for evaluation */
protected Classifier m_Classifier;
/** Holds the most recently used Evaluation object */
protected Evaluation m_Evaluation;
/** The names of any additional measures to look for in SplitEvaluators */
protected String[] m_AdditionalMeasures = null;
/**
* Array of booleans corresponding to the measures in m_AdditionalMeasures
* indicating which of the AdditionalMeasures the current classifier can
* produce
*/
protected boolean[] m_doesProduce = null;
/** Holds the statistics for the most recent application of the classifier */
protected String m_result = null;
/** The classifier options (if any) */
protected String m_ClassifierOptions = "";
/** The classifier version */
protected String m_ClassifierVersion = "";
/** whether to skip determination of sizes (train/test/classifier). */
private boolean m_NoSizeDetermination;
/** The length of a key */
private static final int KEY_SIZE = 3;
/** The length of a result */
private static final int RESULT_SIZE = 27;
protected final List<AbstractEvaluationMetric> m_pluginMetrics =
new ArrayList<AbstractEvaluationMetric>();
protected int m_numPluginStatistics = 0;
/**
* No args constructor.
*/
public RegressionSplitEvaluator() {
updateOptions();
List<AbstractEvaluationMetric> pluginMetrics = AbstractEvaluationMetric
.getPluginMetrics();
if (pluginMetrics != null) {
for (AbstractEvaluationMetric m : pluginMetrics) {
if (m.appliesToNumericClass()) {
m_pluginMetrics.add(m);
m_numPluginStatistics += m.getStatisticNames().size();
}
}
}
}
/**
* Returns a string describing this split evaluator
*
* @return a description of the split evaluator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "A SplitEvaluator that produces results for a classification "
+ "scheme on a numeric class attribute.";
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(2);
newVector.addElement(new Option(
"\tSkips the determination of sizes (train/test/classifier)\n"
+ "\t(default: sizes are determined)", "no-size", 0, "-no-size"));
newVector
.addElement(new Option("\tThe full class name of the classifier.\n"
+ "\teg: weka.classifiers.bayes.NaiveBayes", "W", 1, "-W <class name>"));
if ((m_Template != null) && (m_Template instanceof OptionHandler)) {
newVector.addElement(new Option("", "", 0,
"\nOptions specific to classifier " + m_Template.getClass().getName()
+ ":"));
newVector.addAll(Collections.list(((OptionHandler) m_Template)
.listOptions()));
}
return newVector.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -no-size
* Skips the determination of sizes (train/test/classifier)
* (default: sizes are determined)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
   * All options after -- will be passed to the classifier.
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
m_NoSizeDetermination = Utils.getFlag("no-size", options);
String cName = Utils.getOption('W', options);
if (cName.length() == 0) {
throw new Exception("A classifier must be specified with"
+ " the -W option.");
}
// Do it first without options, so if an exception is thrown during
// the option setting, listOptions will contain options for the actual
// Classifier.
setClassifier(AbstractClassifier.forName(cName, null));
if (getClassifier() instanceof OptionHandler) {
((OptionHandler) getClassifier()).setOptions(Utils
.partitionOptions(options));
updateOptions();
}
}
/**
* Gets the current settings of the Classifier.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> result;
String[] classifierOptions;
result = new Vector<String>();
classifierOptions = new String[0];
if ((m_Template != null) && (m_Template instanceof OptionHandler)) {
classifierOptions = ((OptionHandler) m_Template).getOptions();
}
if (getNoSizeDetermination()) {
result.add("-no-size");
}
if (getClassifier() != null) {
result.add("-W");
result.add(getClassifier().getClass().getName());
}
result.add("--");
result.addAll(Arrays.asList(classifierOptions));
return result.toArray(new String[result.size()]);
}
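  /*
   * Illustrative round trip between setOptions() and getOptions() (the
   * classifier named below is just an example):
   *
   *   RegressionSplitEvaluator se = new RegressionSplitEvaluator();
   *   se.setOptions(new String[] { "-W", "weka.classifiers.bayes.NaiveBayes" });
   *   String[] opts = se.getOptions();
   *   // opts contains -W, the classifier class name, "--" and any options
   *   // reported by the classifier itself
   */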
/**
* Set a list of method names for additional measures to look for in
* Classifiers. This could contain many measures (of which only a subset may
   * be producible by the current Classifier) if an experiment is the type that
* iterates over a set of properties.
*
* @param additionalMeasures an array of method names.
*/
@Override
public void setAdditionalMeasures(String[] additionalMeasures) {
m_AdditionalMeasures = additionalMeasures;
// determine which (if any) of the additional measures this classifier
// can produce
if (m_AdditionalMeasures != null && m_AdditionalMeasures.length > 0) {
m_doesProduce = new boolean[m_AdditionalMeasures.length];
if (m_Template instanceof AdditionalMeasureProducer) {
Enumeration<String> en = ((AdditionalMeasureProducer) m_Template)
.enumerateMeasures();
while (en.hasMoreElements()) {
String mname = en.nextElement();
for (int j = 0; j < m_AdditionalMeasures.length; j++) {
if (mname.compareToIgnoreCase(m_AdditionalMeasures[j]) == 0) {
m_doesProduce[j] = true;
}
}
}
}
} else {
m_doesProduce = null;
}
}
/**
* Returns an enumeration of any additional measure names that might be in the
* classifier
*
* @return an enumeration of the measure names
*/
@Override
public Enumeration<String> enumerateMeasures() {
Vector<String> newVector = new Vector<String>();
if (m_Template instanceof AdditionalMeasureProducer) {
Enumeration<String> en = ((AdditionalMeasureProducer) m_Template)
.enumerateMeasures();
while (en.hasMoreElements()) {
String mname = en.nextElement();
newVector.add(mname);
}
}
return newVector.elements();
}
/**
* Returns the value of the named measure
*
* @param additionalMeasureName the name of the measure to query for its value
* @return the value of the named measure
* @throws IllegalArgumentException if the named measure is not supported
*/
@Override
public double getMeasure(String additionalMeasureName) {
if (m_Template instanceof AdditionalMeasureProducer) {
if (m_Classifier == null) {
throw new IllegalArgumentException("ClassifierSplitEvaluator: "
+ "Can't return result for measure, "
+ "classifier has not been built yet.");
}
return ((AdditionalMeasureProducer) m_Classifier)
.getMeasure(additionalMeasureName);
} else {
throw new IllegalArgumentException("ClassifierSplitEvaluator: "
+ "Can't return value for : " + additionalMeasureName + ". "
+ m_Template.getClass().getName() + " "
+ "is not an AdditionalMeasureProducer");
}
}
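  /*
   * Illustrative use of the additional-measure API (only meaningful once
   * getResult() has built a classifier that implements
   * AdditionalMeasureProducer):
   *
   *   Enumeration<String> names = evaluator.enumerateMeasures();
   *   while (names.hasMoreElements()) {
   *     String name = names.nextElement();
   *     double value = evaluator.getMeasure(name);
   *   }
   */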
/**
* Gets the data types of each of the key columns produced for a single run.
* The number of key fields must be constant for a given SplitEvaluator.
*
* @return an array containing objects of the type of each key column. The
* objects should be Strings, or Doubles.
*/
@Override
public Object[] getKeyTypes() {
Object[] keyTypes = new Object[KEY_SIZE];
keyTypes[0] = "";
keyTypes[1] = "";
keyTypes[2] = "";
return keyTypes;
}
/**
* Gets the names of each of the key columns produced for a single run. The
* number of key fields must be constant for a given SplitEvaluator.
*
* @return an array containing the name of each key column
*/
@Override
public String[] getKeyNames() {
String[] keyNames = new String[KEY_SIZE];
keyNames[0] = "Scheme";
keyNames[1] = "Scheme_options";
keyNames[2] = "Scheme_version_ID";
return keyNames;
}
/**
   * Gets the key describing the current SplitEvaluator. For example, this may
   * contain the name of the classifier used for classifier predictive
* evaluation. The number of key fields must be constant for a given
* SplitEvaluator.
*
* @return an array of objects containing the key.
*/
@Override
public Object[] getKey() {
Object[] key = new Object[KEY_SIZE];
key[0] = m_Template.getClass().getName();
key[1] = m_ClassifierOptions;
key[2] = m_ClassifierVersion;
return key;
}
/**
* Gets the data types of each of the result columns produced for a single
* run. The number of result fields must be constant for a given
* SplitEvaluator.
*
* @return an array containing objects of the type of each result column. The
* objects should be Strings, or Doubles.
*/
@Override
public Object[] getResultTypes() {
int addm = (m_AdditionalMeasures != null) ? m_AdditionalMeasures.length : 0;
Object[] resultTypes = new Object[RESULT_SIZE + addm
+ m_numPluginStatistics];
Double doub = new Double(0);
int current = 0;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
// Timing stats
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
// sizes
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = doub;
// Prediction interval statistics
resultTypes[current++] = doub;
resultTypes[current++] = doub;
resultTypes[current++] = "";
// add any additional measures
for (int i = 0; i < addm; i++) {
resultTypes[current++] = doub;
}
// plugin metrics
for (int i = 0; i < m_numPluginStatistics; i++) {
resultTypes[current++] = doub;
}
if (current != RESULT_SIZE + addm + m_numPluginStatistics) {
throw new Error("ResultTypes didn't fit RESULT_SIZE");
}
return resultTypes;
}
/**
* Gets the names of each of the result columns produced for a single run. The
* number of result fields must be constant for a given SplitEvaluator.
*
* @return an array containing the name of each result column
*/
@Override
public String[] getResultNames() {
int addm = (m_AdditionalMeasures != null) ? m_AdditionalMeasures.length : 0;
String[] resultNames = new String[RESULT_SIZE + addm
+ m_numPluginStatistics];
int current = 0;
resultNames[current++] = "Number_of_training_instances";
resultNames[current++] = "Number_of_testing_instances";
// Sensitive stats - certainty of predictions
resultNames[current++] = "Mean_absolute_error";
resultNames[current++] = "Root_mean_squared_error";
resultNames[current++] = "Relative_absolute_error";
resultNames[current++] = "Root_relative_squared_error";
resultNames[current++] = "Correlation_coefficient";
resultNames[current++] = "Number_unclassified";
resultNames[current++] = "Percent_unclassified";
// SF stats
resultNames[current++] = "SF_prior_entropy";
resultNames[current++] = "SF_scheme_entropy";
resultNames[current++] = "SF_entropy_gain";
resultNames[current++] = "SF_mean_prior_entropy";
resultNames[current++] = "SF_mean_scheme_entropy";
resultNames[current++] = "SF_mean_entropy_gain";
// Timing stats
resultNames[current++] = "Elapsed_Time_training";
resultNames[current++] = "Elapsed_Time_testing";
resultNames[current++] = "UserCPU_Time_training";
resultNames[current++] = "UserCPU_Time_testing";
resultNames[current++] = "UserCPU_Time_millis_training";
resultNames[current++] = "UserCPU_Time_millis_testing";
// sizes
resultNames[current++] = "Serialized_Model_Size";
resultNames[current++] = "Serialized_Train_Set_Size";
resultNames[current++] = "Serialized_Test_Set_Size";
// Prediction interval statistics
resultNames[current++] = "Coverage_of_Test_Cases_By_Regions";
resultNames[current++] = "Size_of_Predicted_Regions";
// Classifier defined extras
resultNames[current++] = "Summary";
// add any additional measures
for (int i = 0; i < addm; i++) {
resultNames[current++] = m_AdditionalMeasures[i];
}
for (AbstractEvaluationMetric m : m_pluginMetrics) {
List<String> statNames = m.getStatisticNames();
for (String s : statNames) {
resultNames[current++] = s;
}
}
if (current != RESULT_SIZE + addm + m_numPluginStatistics) {
throw new Error("ResultNames didn't fit RESULT_SIZE");
}
return resultNames;
}
/**
* Gets the results for the supplied train and test datasets. Now performs a
* deep copy of the classifier before it is built and evaluated (just in case
* the classifier is not initialized properly in buildClassifier()).
*
* @param train the training Instances.
* @param test the testing Instances.
* @return the results stored in an array. The objects stored in the array may
* be Strings, Doubles, or null (for the missing value).
* @throws Exception if a problem occurs while getting the results
*/
@Override
public Object[] getResult(Instances train, Instances test) throws Exception {
if (train.classAttribute().type() != Attribute.NUMERIC) {
throw new Exception("Class attribute is not numeric!");
}
if (m_Template == null) {
throw new Exception("No classifier has been specified");
}
ThreadMXBean thMonitor = ManagementFactory.getThreadMXBean();
boolean canMeasureCPUTime = thMonitor.isThreadCpuTimeSupported();
if (canMeasureCPUTime && !thMonitor.isThreadCpuTimeEnabled()) {
thMonitor.setThreadCpuTimeEnabled(true);
}
int addm = (m_AdditionalMeasures != null) ? m_AdditionalMeasures.length : 0;
Object[] result = new Object[RESULT_SIZE + addm + m_numPluginStatistics];
long thID = Thread.currentThread().getId();
long CPUStartTime = -1, trainCPUTimeElapsed = -1, testCPUTimeElapsed = -1, trainTimeStart, trainTimeElapsed, testTimeStart, testTimeElapsed;
Evaluation eval = new Evaluation(train);
m_Classifier = AbstractClassifier.makeCopy(m_Template);
trainTimeStart = System.currentTimeMillis();
if (canMeasureCPUTime) {
CPUStartTime = thMonitor.getThreadUserTime(thID);
}
m_Classifier.buildClassifier(train);
if (canMeasureCPUTime) {
trainCPUTimeElapsed = thMonitor.getThreadUserTime(thID) - CPUStartTime;
}
trainTimeElapsed = System.currentTimeMillis() - trainTimeStart;
testTimeStart = System.currentTimeMillis();
if (canMeasureCPUTime) {
CPUStartTime = thMonitor.getThreadUserTime(thID);
}
eval.evaluateModel(m_Classifier, test);
if (canMeasureCPUTime) {
testCPUTimeElapsed = thMonitor.getThreadUserTime(thID) - CPUStartTime;
}
testTimeElapsed = System.currentTimeMillis() - testTimeStart;
thMonitor = null;
m_result = eval.toSummaryString();
// The results stored are all per instance -- can be multiplied by the
// number of instances to get absolute numbers
int current = 0;
result[current++] = new Double(train.numInstances());
result[current++] = new Double(eval.numInstances());
result[current++] = new Double(eval.meanAbsoluteError());
result[current++] = new Double(eval.rootMeanSquaredError());
result[current++] = new Double(eval.relativeAbsoluteError());
result[current++] = new Double(eval.rootRelativeSquaredError());
result[current++] = new Double(eval.correlationCoefficient());
result[current++] = new Double(eval.unclassified());
result[current++] = new Double(eval.pctUnclassified());
result[current++] = new Double(eval.SFPriorEntropy());
result[current++] = new Double(eval.SFSchemeEntropy());
result[current++] = new Double(eval.SFEntropyGain());
result[current++] = new Double(eval.SFMeanPriorEntropy());
result[current++] = new Double(eval.SFMeanSchemeEntropy());
result[current++] = new Double(eval.SFMeanEntropyGain());
// Timing stats
result[current++] = new Double(trainTimeElapsed / 1000.0);
result[current++] = new Double(testTimeElapsed / 1000.0);
if (canMeasureCPUTime) {
result[current++] =
new Double((trainCPUTimeElapsed / 1000000.0) / 1000.0);
result[current++] = new Double((testCPUTimeElapsed / 1000000.0) / 1000.0);
result[current++] =
new Double(trainCPUTimeElapsed / 1000000.0);
result[current++] = new Double(testCPUTimeElapsed / 1000000.0);
} else {
result[current++] = new Double(Utils.missingValue());
result[current++] = new Double(Utils.missingValue());
result[current++] = new Double(Utils.missingValue());
result[current++] = new Double(Utils.missingValue());
}
// sizes
if (m_NoSizeDetermination) {
result[current++] = -1.0;
result[current++] = -1.0;
result[current++] = -1.0;
} else {
ByteArrayOutputStream bastream = new ByteArrayOutputStream();
ObjectOutputStream oostream = new ObjectOutputStream(bastream);
oostream.writeObject(m_Classifier);
result[current++] = new Double(bastream.size());
bastream = new ByteArrayOutputStream();
oostream = new ObjectOutputStream(bastream);
oostream.writeObject(train);
result[current++] = new Double(bastream.size());
bastream = new ByteArrayOutputStream();
oostream = new ObjectOutputStream(bastream);
oostream.writeObject(test);
result[current++] = new Double(bastream.size());
}
// Prediction interval statistics
result[current++] =
new Double(eval.coverageOfTestCasesByPredictedRegions());
result[current++] = new Double(eval.sizeOfPredictedRegions());
if (m_Classifier instanceof Summarizable) {
result[current++] = ((Summarizable) m_Classifier).toSummaryString();
} else {
result[current++] = null;
}
for (int i = 0; i < addm; i++) {
if (m_doesProduce[i]) {
try {
double dv = ((AdditionalMeasureProducer) m_Classifier)
.getMeasure(m_AdditionalMeasures[i]);
if (!Utils.isMissingValue(dv)) {
Double value = new Double(dv);
result[current++] = value;
} else {
result[current++] = null;
}
} catch (Exception ex) {
System.err.println(ex);
}
} else {
result[current++] = null;
}
}
// get the actual metrics from the evaluation object
List<AbstractEvaluationMetric> metrics = eval.getPluginMetrics();
if (metrics != null) {
for (AbstractEvaluationMetric m : metrics) {
if (m.appliesToNumericClass()) {
List<String> statNames = m.getStatisticNames();
for (String s : statNames) {
result[current++] = new Double(m.getStatistic(s));
}
}
}
}
if (current != RESULT_SIZE + addm + m_numPluginStatistics) {
throw new Error("Results didn't fit RESULT_SIZE");
}
m_Evaluation = eval;
return result;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String classifierTipText() {
return "The classifier to use.";
}
/**
* Get the value of Classifier.
*
* @return Value of Classifier.
*/
public Classifier getClassifier() {
return m_Template;
}
/**
* Sets the classifier.
*
* @param newClassifier the new classifier to use.
*/
public void setClassifier(Classifier newClassifier) {
m_Template = newClassifier;
updateOptions();
System.err.println("RegressionSplitEvaluator: In set classifier");
}
/**
   * Returns whether the size determination (train/test/classifier) is skipped.
*
* @return true if size determination skipped
*/
public boolean getNoSizeDetermination() {
return m_NoSizeDetermination;
}
/**
   * Sets whether the size determination (train/test/classifier) is skipped.
   *
   * @param value true if the size determination is to be skipped
*/
public void setNoSizeDetermination(boolean value) {
m_NoSizeDetermination = value;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String noSizeDeterminationTipText() {
return "If enabled, the size determination for train/test/classifier is skipped.";
}
/**
* Updates the options that the current classifier is using.
*/
protected void updateOptions() {
if (m_Template instanceof OptionHandler) {
m_ClassifierOptions = Utils.joinOptions(((OptionHandler) m_Template)
.getOptions());
} else {
m_ClassifierOptions = "";
}
if (m_Template instanceof Serializable) {
ObjectStreamClass obs = ObjectStreamClass.lookup(m_Template.getClass());
m_ClassifierVersion = "" + obs.getSerialVersionUID();
} else {
m_ClassifierVersion = "";
}
}
/**
   * Set the Classifier to use, given its class name. A new classifier will be
* instantiated.
*
* @param newClassifierName the Classifier class name.
* @throws Exception if the class name is invalid.
*/
public void setClassifierName(String newClassifierName) throws Exception {
try {
setClassifier((Classifier) Class.forName(newClassifierName).newInstance());
} catch (Exception ex) {
throw new Exception("Can't find Classifier with class name: "
+ newClassifierName);
}
}
/**
* Gets the raw output from the classifier
*
* @return the raw output from the classifier
*/
@Override
public String getRawResultOutput() {
StringBuffer result = new StringBuffer();
if (m_Classifier == null) {
return "<null> classifier";
}
result.append(toString());
result.append("Classifier model: \n" + m_Classifier.toString() + '\n');
// append the performance statistics
if (m_result != null) {
result.append(m_result);
if (m_doesProduce != null) {
for (int i = 0; i < m_doesProduce.length; i++) {
if (m_doesProduce[i]) {
try {
double dv = ((AdditionalMeasureProducer) m_Classifier)
.getMeasure(m_AdditionalMeasures[i]);
if (!Utils.isMissingValue(dv)) {
Double value = new Double(dv);
result.append(m_AdditionalMeasures[i] + " : " + value + '\n');
} else {
result.append(m_AdditionalMeasures[i] + " : " + '?' + '\n');
}
} catch (Exception ex) {
System.err.println(ex);
}
}
}
}
}
return result.toString();
}
/**
* Returns a text description of the split evaluator.
*
* @return a text description of the split evaluator.
*/
@Override
public String toString() {
String result = "RegressionSplitEvaluator: ";
if (m_Template == null) {
return result + "<null> classifier";
}
return result + m_Template.getClass().getName() + " " + m_ClassifierOptions
+ "(version " + m_ClassifierVersion + ")";
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/RemoteEngine.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RemoteEngine.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.net.InetAddress;
import java.net.URL;
import java.net.URLClassLoader;
import java.rmi.Naming;
import java.rmi.RMISecurityManager;
import java.rmi.RemoteException;
import java.rmi.server.UnicastRemoteObject;
import java.util.Enumeration;
import java.util.Hashtable;
import weka.core.Queue;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* A general purpose server for executing Task objects sent via RMI.
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public class RemoteEngine extends UnicastRemoteObject implements Compute,
RevisionHandler {
/** for serialization */
private static final long serialVersionUID = -1021538162895448259L;
/** The name of the host that this engine is started on */
private String m_HostName = "local";
/** A queue of waiting tasks */
private final Queue m_TaskQueue = new Queue();
/** A queue of corresponding ID's for tasks */
private final Queue m_TaskIdQueue = new Queue();
/** A hashtable of experiment status */
private final Hashtable<String, TaskStatusInfo> m_TaskStatus = new Hashtable<String, TaskStatusInfo>();
/** Is there a task running */
private boolean m_TaskRunning = false;
/** Clean up interval (in ms) */
protected static long CLEANUPTIMEOUT = 3600000;
/**
* Constructor
*
* @param hostName name of the host
* @exception RemoteException if something goes wrong
*/
public RemoteEngine(String hostName) throws RemoteException {
super();
m_HostName = hostName;
/*
* launch a clean-up thread. Will purge any failed or finished tasks still
* in the TaskStatus hashtable after an hour
*/
Thread cleanUpThread;
cleanUpThread = new Thread() {
@Override
public void run() {
while (true) {
try {
// sleep for a while
Thread.sleep(CLEANUPTIMEOUT);
} catch (InterruptedException ie) {
}
if (m_TaskStatus.size() > 0) {
purge();
} else {
System.err.println("RemoteEngine : purge - no tasks to check.");
}
}
}
};
cleanUpThread.setPriority(Thread.MIN_PRIORITY);
cleanUpThread.setDaemon(true);
cleanUpThread.start();
}
/**
* Takes a task object and queues it for execution
*
* @param t the Task object to execute
* @return an identifier for the Task that can be used when querying Task
* status
*/
@Override
public synchronized Object executeTask(Task t) throws RemoteException {
String taskId = "" + System.currentTimeMillis() + ":";
taskId += t.hashCode();
addTaskToQueue(t, taskId);
return taskId;
// return t.execute();
}
/**
* Returns status information on a particular task
*
* @param taskId the ID of the task to check
* @return a <code>TaskStatusInfo</code> encapsulating task status info
* @exception Exception if an error occurs
*/
@Override
public Object checkStatus(Object taskId) throws Exception {
TaskStatusInfo inf = m_TaskStatus.get(taskId);
if (inf == null) {
throw new Exception("RemoteEngine (" + m_HostName + ") : Task not found.");
}
TaskStatusInfo result = new TaskStatusInfo();
result.setExecutionStatus(inf.getExecutionStatus());
result.setStatusMessage(inf.getStatusMessage());
result.setTaskResult(inf.getTaskResult());
if (inf.getExecutionStatus() == TaskStatusInfo.FINISHED
|| inf.getExecutionStatus() == TaskStatusInfo.FAILED) {
System.err.println("Finished/failed Task id : " + taskId
+ " checked by client. Removing.");
inf.setTaskResult(null);
inf = null;
m_TaskStatus.remove(taskId);
}
inf = null;
return result;
}
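  /*
   * Client-side sketch of the Compute protocol (illustrative; the host name
   * and the submitted task are placeholders). It mirrors the polling loop in
   * weka.experiment.RemoteExperiment.launchNext():
   *
   *   Compute engine = (Compute) Naming.lookup("//somehost/RemoteEngine");
   *   Object id = engine.executeTask(someTask);
   *   TaskStatusInfo info = (TaskStatusInfo) engine.checkStatus(id);
   *   while (info.getExecutionStatus() == TaskStatusInfo.TO_BE_RUN
   *     || info.getExecutionStatus() == TaskStatusInfo.PROCESSING) {
   *     Thread.sleep(2000);
   *     info = (TaskStatusInfo) engine.checkStatus(id);
   *   }
   */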
/**
* Adds a new task to the queue.
*
* @param t a <code>Task</code> value to be added
* @param taskId the id of the task to be added
*/
private synchronized void addTaskToQueue(Task t, String taskId) {
TaskStatusInfo newTask = t.getTaskStatus();
if (newTask == null) {
newTask = new TaskStatusInfo();
}
m_TaskQueue.push(t);
m_TaskIdQueue.push(taskId);
newTask.setStatusMessage("RemoteEngine (" + m_HostName + ") : task "
+ taskId + " queued at postion: " + m_TaskQueue.size());
// add task status to HashTable
m_TaskStatus.put(taskId, newTask);
System.err.println("Task id : " + taskId + " Queued.");
if (m_TaskRunning == false) {
startTask();
}
}
/**
* Checks to see if there are any waiting tasks, and if no task is currently
* running starts a waiting task.
*/
private void startTask() {
if (m_TaskRunning == false && m_TaskQueue.size() > 0) {
Thread activeTaskThread;
activeTaskThread = new Thread() {
@Override
public void run() {
m_TaskRunning = true;
Task currentTask = (Task) m_TaskQueue.pop();
String taskId = (String) m_TaskIdQueue.pop();
TaskStatusInfo tsi = m_TaskStatus.get(taskId);
tsi.setExecutionStatus(TaskStatusInfo.PROCESSING);
tsi.setStatusMessage("RemoteEngine (" + m_HostName + ") : task "
+ taskId + " running...");
try {
System.err.println("Launching task id : " + taskId + "...");
currentTask.execute();
TaskStatusInfo runStatus = currentTask.getTaskStatus();
tsi.setExecutionStatus(runStatus.getExecutionStatus());
tsi.setStatusMessage("RemoteExperiment (" + m_HostName + ") "
+ runStatus.getStatusMessage());
tsi.setTaskResult(runStatus.getTaskResult());
} catch (Error er) {
// Object initialization can raise Error, which are not subclass of
// Exception
tsi.setExecutionStatus(TaskStatusInfo.FAILED);
if (er.getCause() instanceof java.security.AccessControlException) {
tsi.setStatusMessage("RemoteEngine (" + m_HostName
+ ") : security error, check remote policy file.");
System.err.println("Task id " + taskId
+ " Failed! Check remote policy file");
} else {
tsi.setStatusMessage("RemoteEngine (" + m_HostName
+ ") : unknown initialization error.");
System.err.println("Task id " + taskId
+ " Unknown initialization error");
er.printStackTrace();
System.err.println("Detailed message " + er.getMessage());
System.err.println("Detailed cause: " + er.getCause().toString());
}
} catch (Exception ex) {
tsi.setExecutionStatus(TaskStatusInfo.FAILED);
tsi.setStatusMessage("RemoteEngine (" + m_HostName + ") : "
+ ex.getMessage());
System.err.println("Task id " + taskId + " Failed, "
+ ex.getMessage());
} finally {
if (m_TaskStatus.size() == 0) {
purgeClasses();
}
m_TaskRunning = false;
// start any waiting tasks
startTask();
}
}
};
activeTaskThread.setPriority(Thread.MIN_PRIORITY);
activeTaskThread.start();
}
}
/**
* Attempts to purge class types from the virtual machine. May take some time
* as it relies on garbage collection
*/
private void purgeClasses() {
try {
// see if we can purge classes
ClassLoader prevCl = Thread.currentThread().getContextClassLoader();
ClassLoader urlCl = URLClassLoader.newInstance(new URL[] { new URL(
"file:.") }, prevCl);
Thread.currentThread().setContextClassLoader(urlCl);
} catch (Exception ex) {
ex.printStackTrace();
}
}
/**
   * Checks the hash table for failed/finished tasks. Any that have been around
   * for CLEANUPTIMEOUT milliseconds or more are removed. Clients are expected
   * to check on the status of their remote tasks. Checking on the status of a
   * finished/failed task will remove it from the hash table, therefore any
   * failed/finished tasks left lying around for more than an hour suggest that
   * their client has died.
*
*/
private void purge() {
Enumeration<String> keys = m_TaskStatus.keys();
long currentTime = System.currentTimeMillis();
System.err.println("RemoteEngine purge. Current time : " + currentTime);
while (keys.hasMoreElements()) {
String taskId = keys.nextElement();
System.err.print("Examining task id : " + taskId + "... ");
String timeString = taskId.substring(0, taskId.indexOf(':'));
long ts = Long.valueOf(timeString).longValue();
if (currentTime - ts > CLEANUPTIMEOUT) {
TaskStatusInfo tsi = m_TaskStatus.get(taskId);
if ((tsi != null)
&& (tsi.getExecutionStatus() == TaskStatusInfo.FINISHED || tsi
.getExecutionStatus() == TaskStatusInfo.FAILED)) {
System.err.println("\nTask id : " + taskId
+ " has gone stale. Removing.");
m_TaskStatus.remove(taskId);
tsi.setTaskResult(null);
tsi = null;
}
} else {
System.err.println("ok.");
}
}
if (m_TaskStatus.size() == 0) {
purgeClasses();
}
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Main method. Gets address of the local host, creates a remote engine object
* and binds it in the RMI registry. If there is no RMI registry, then it
* tries to create one with default port 1099.
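   * <p>
   * A typical start-up might look like the following (the policy file name is
   * a placeholder; some policy granting the required permissions is needed
   * because an RMI security manager is installed):
   *
   * <pre>
   * java -Djava.security.policy=remote.policy weka.experiment.RemoteEngine -p 1099
   * </pre>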
*
   * @param args the command line arguments; -p can be used to specify a registry port other than 1099
*/
public static void main(String[] args) {
// make sure that all packages are loaded and available to
// the remote engines
weka.gui.GenericObjectEditor.determineClasses();
if (System.getSecurityManager() == null) {
System.setSecurityManager(new RMISecurityManager());
}
int port = 1099;
InetAddress localhost = null;
try {
localhost = InetAddress.getLocalHost();
System.err.println("Host name : " + localhost.getHostName());
} catch (Exception ex) {
ex.printStackTrace();
}
String name;
if (localhost != null) {
name = localhost.getHostName();
} else {
name = "localhost";
}
// get optional port
try {
String portOption = Utils.getOption("p", args);
if (!portOption.equals("")) {
port = Integer.parseInt(portOption);
}
} catch (Exception ex) {
System.err.println("Usage : -p <port>");
}
if (port != 1099) {
name = name + ":" + port;
}
name = "//" + name + "/RemoteEngine";
try {
Compute engine = new RemoteEngine(name);
try {
Naming.rebind(name, engine);
System.out.println("RemoteEngine bound in RMI registry");
} catch (RemoteException ex) {
// try to bootstrap a new registry
System.err.println("Attempting to start RMI registry on port " + port
+ "...");
java.rmi.registry.LocateRegistry.createRegistry(port);
Naming.bind(name, engine);
System.out.println("RemoteEngine bound in RMI registry");
}
} catch (Exception e) {
System.err.println("RemoteEngine exception: " + e.getMessage());
e.printStackTrace();
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/RemoteExperiment.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RemoteExperiment.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.rmi.Naming;
import java.util.ArrayList;
import java.util.Enumeration;
import javax.swing.DefaultListModel;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Queue;
import weka.core.RevisionUtils;
import weka.core.SerializedObject;
import weka.core.Utils;
import weka.core.xml.KOML;
import weka.core.xml.XMLOptions;
import weka.experiment.xml.XMLExperiment;
/**
* Holds all the necessary configuration information for a distributed
* experiment. This object is able to be serialized for storage on disk.
* <p>
*
* This class is experimental at present. Has been tested using
* CSVResultListener (sending results to standard out) and
* DatabaseResultListener (InstantDB + RmiJdbc bridge).
* <p>
*
* Getting started:
* <p>
*
* Start InstantDB (with the RMI bridge) on some machine. If using java2 then
* specify -Djava.security.policy=db.policy to the virtual machine. Where
* db.policy is as follows: <br>
*
* <pre>
* grant {
* permission java.security.AllPermission;
* };
* </pre>
* <p>
*
 * Start RemoteEngine servers on x machines as per the instructions in the
* README_Experiment_Gui file. There must be a DatabaseUtils.props in either the
* HOME or current directory of each machine, listing all necessary jdbc
* drivers.
* <p>
*
* The machine where a RemoteExperiment is started must also have a copy of
* DatabaseUtils.props listing the URL to the machine where the database server
* is running (RmiJdbc + InstantDB).
* <p>
*
* Here is an example of starting a RemoteExperiment:
* <p>
*
* <pre>
*
* java -Djava.rmi.server.codebase=file:/path to weka classes/ \
* weka.experiment.RemoteExperiment -L 1 -U 10 \
* -T /home/ml/datasets/UCI/iris.arff \
* -D "weka.experiment.DatabaseResultListener" \
* -P "weka.experiment.RandomSplitResultProducer" \
* -h rosebud.cs.waikato.ac.nz -h blackbird.cs.waikato.ac.nz -r -- \
* -W weka.experiment.ClassifierSplitEvaluator -- \
* -W weka.classifiers.bayes.NaiveBayes
*
* </pre>
* <p>
* The "codebase" property tells rmi where to serve up weka classes from. This
 * can either be a file url (as long as a shared file system is being used that
 * is accessible by the remoteEngine servers), or an http url (which of course
 * supposes that a web server is running and you have put your weka classes
 * somewhere that is web accessible). If using a file url the trailing "/" is
* *most* important unless the weka classes are in a jar file.
* <p>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -L <num>
* The lower run number to start the experiment from.
* (default 1)
* </pre>
*
* <pre>
* -U <num>
* The upper run number to end the experiment at (inclusive).
* (default 10)
* </pre>
*
* <pre>
* -T <arff file>
* The dataset to run the experiment on.
* (required, may be specified multiple times)
* </pre>
*
* <pre>
* -P <class name>
* The full class name of a ResultProducer (required).
* eg: weka.experiment.RandomSplitResultProducer
* </pre>
*
* <pre>
* -D <class name>
* The full class name of a ResultListener (required).
* eg: weka.experiment.CSVResultListener
* </pre>
*
* <pre>
* -N <string>
* A string containing any notes about the experiment.
* (default none)
* </pre>
*
* <pre>
* Options specific to result producer weka.experiment.RandomSplitResultProducer:
* </pre>
*
* <pre>
* -P <percent>
* The percentage of instances to use for training.
* (default 66)
* </pre>
*
* <pre>
* -D
* Save raw split evaluator output.
* </pre>
*
* <pre>
* -O <file/directory name/path>
* The filename where raw output will be stored.
 * If a directory name is specified then individual
 * outputs will be gzipped, otherwise all output will be
 * zipped to the named file. Use in conjunction with -D. (default splitEvalutorOut.zip)
* </pre>
*
* <pre>
* -W <class name>
* The full class name of a SplitEvaluator.
* eg: weka.experiment.ClassifierSplitEvaluator
* </pre>
*
* <pre>
* -R
 * Set when data is not to be randomized and the data sets' size
 * is not to be determined via probabilistic rounding.
* </pre>
*
* <pre>
* Options specific to split evaluator weka.experiment.ClassifierSplitEvaluator:
* </pre>
*
* <pre>
* -W <class name>
* The full class name of the classifier.
* eg: weka.classifiers.bayes.NaiveBayes
* </pre>
*
* <pre>
* -C <index>
* The index of the class for which IR statistics
* are to be output. (default 1)
* </pre>
*
* <pre>
* -I <index>
* The index of an attribute to output in the
* results. This attribute should identify an
* instance in order to know which instances are
 * in the test set of a cross validation. If 0,
 * no output (default 0).
* </pre>
*
* <pre>
* -P
* Add target and prediction columns to the result
* for each fold.
* </pre>
*
* <pre>
* Options specific to classifier weka.classifiers.rules.ZeroR:
* </pre>
*
* <pre>
* -D
* If set, classifier is run in debug mode and
* may output additional info to the console
* </pre>
*
* <!-- options-end -->
*
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public class RemoteExperiment extends Experiment {
/** for serialization */
static final long serialVersionUID = -7357668825635314937L;
/** The list of objects listening for remote experiment events */
private final ArrayList<RemoteExperimentListener> m_listeners = new ArrayList<RemoteExperimentListener>();
/** Holds the names of machines with remoteEngine servers running */
protected DefaultListModel m_remoteHosts = new DefaultListModel();
/** The queue of available hosts */
private Queue m_remoteHostsQueue = new Queue();
/** The status of each of the remote hosts */
private int[] m_remoteHostsStatus;
/** The number of times tasks have failed on each remote host */
private int[] m_remoteHostFailureCounts;
/** status of the remote host: available */
protected static final int AVAILABLE = 0;
/** status of the remote host: in use */
protected static final int IN_USE = 1;
/** status of the remote host: connection failed */
protected static final int CONNECTION_FAILED = 2;
/** status of the remote host: some other failure */
protected static final int SOME_OTHER_FAILURE = 3;
// protected static final int TO_BE_RUN=0;
// protected static final int PROCESSING=1;
// protected static final int FAILED=2;
// protected static final int FINISHED=3;
/**
* allow at most 3 failures on a host before it is removed from the list of
* usable hosts
*/
protected static final int MAX_FAILURES = 3;
/**
* Set to true if MAX_FAILURES exceeded on all hosts or connections fail on
* all hosts or user aborts experiment (via gui)
*/
private boolean m_experimentAborted = false;
/** The number of hosts removed due to exceeding max failures */
private int m_removedHosts;
/** The count of failed sub-experiments */
private int m_failedCount;
/** The count of successfully completed sub-experiments */
private int m_finishedCount;
/**
* The base experiment to split up into sub experiments for remote execution
*/
private Experiment m_baseExperiment = null;
/** The sub experiments */
protected Experiment[] m_subExperiments;
/** The queue of sub experiments waiting to be processed */
private Queue m_subExpQueue = new Queue();
/** The status of each of the sub-experiments */
protected int[] m_subExpComplete;
/** If true, then sub experiments are created on the basis of data sets. */
protected boolean m_splitByDataSet = true;
/** If true, then sub experiments are created on the basis of properties */
protected boolean m_splitByProperty = false;
/**
* Returns true if sub experiments are to be created on the basis of data
* set.
*
* @return a <code>boolean</code> value indicating whether sub experiments are
* to be created on the basis of data set (true) or run number
* (false).
*/
public boolean getSplitByDataSet() {
return m_splitByDataSet;
}
/**
* Set whether sub experiments are to be created on the basis of data set.
*
* @param sd true if sub experiments are to be created on the basis of data
* set. Otherwise sub experiments are created on the basis of run
* number.
*/
public void setSplitByDataSet(boolean sd) {
m_splitByDataSet = sd;
if (sd) {
m_splitByProperty = false; // Cannot split based on both dataset and property
}
}
/**
* Returns true if sub experiments are to be created on the basis of property.
*
   * @return a <code>boolean</code> value indicating whether sub experiments are
   *         to be created on the basis of property (true) or not (false).
*/
public boolean getSplitByProperty() {
return m_splitByProperty;
}
/**
* Set whether sub experiments are to be created on the basis of property.
*
   * @param sd true if sub experiments are to be created on the basis of
   *          property. Otherwise sub experiments are created on the basis of
   *          data set or run number.
*/
public void setSplitByProperty(boolean sd) {
m_splitByProperty = sd;
if (sd) {
m_splitByDataSet = false; // Cannot split based on both dataset and property
}
}
/**
* Construct a new RemoteExperiment using an empty Experiment as base
* Experiment
*
* @throws Exception if the base experiment is null
*/
public RemoteExperiment() throws Exception {
this(new Experiment());
}
/**
* Construct a new RemoteExperiment using a base Experiment
*
* @param base the base experiment to use
* @throws Exception if the base experiment is null
*/
public RemoteExperiment(Experiment base) throws Exception {
setBaseExperiment(base);
}
/**
   * Add an object to the list of those interested in receiving update
* information from the RemoteExperiment
*
* @param r a listener
*/
public void addRemoteExperimentListener(RemoteExperimentListener r) {
m_listeners.add(r);
}
/**
* Get the base experiment used by this remote experiment
*
* @return the base experiment
*/
public Experiment getBaseExperiment() {
return m_baseExperiment;
}
/**
* Set the base experiment. A sub experiment will be created for each run in
* the base experiment.
*
* @param base the base experiment to use.
* @throws Exception if supplied base experiment is null
*/
public void setBaseExperiment(Experiment base) throws Exception {
if (base == null) {
throw new Exception("Base experiment is null!");
}
m_baseExperiment = base;
setRunLower(m_baseExperiment.getRunLower());
setRunUpper(m_baseExperiment.getRunUpper());
setResultListener(m_baseExperiment.getResultListener());
setResultProducer(m_baseExperiment.getResultProducer());
setDatasets(m_baseExperiment.getDatasets());
setUsePropertyIterator(m_baseExperiment.getUsePropertyIterator());
setPropertyPath(m_baseExperiment.getPropertyPath());
setPropertyArray(m_baseExperiment.getPropertyArray());
setNotes(m_baseExperiment.getNotes());
m_ClassFirst = m_baseExperiment.m_ClassFirst;
m_AdvanceDataSetFirst = m_baseExperiment.m_AdvanceDataSetFirst;
}
/**
* Set the user notes.
*
* @param newNotes New user notes.
*/
@Override
public void setNotes(String newNotes) {
super.setNotes(newNotes);
m_baseExperiment.setNotes(newNotes);
}
/**
* Set the lower run number for the experiment.
*
* @param newRunLower the lower run number for the experiment.
*/
@Override
public void setRunLower(int newRunLower) {
super.setRunLower(newRunLower);
m_baseExperiment.setRunLower(newRunLower);
}
/**
* Set the upper run number for the experiment.
*
* @param newRunUpper the upper run number for the experiment.
*/
@Override
public void setRunUpper(int newRunUpper) {
super.setRunUpper(newRunUpper);
m_baseExperiment.setRunUpper(newRunUpper);
}
/**
* Sets the result listener where results will be sent.
*
* @param newResultListener the result listener where results will be sent.
*/
@Override
public void setResultListener(ResultListener newResultListener) {
super.setResultListener(newResultListener);
m_baseExperiment.setResultListener(newResultListener);
}
/**
* Set the result producer used for the current experiment.
*
* @param newResultProducer result producer to use for the current experiment.
*/
@Override
public void setResultProducer(ResultProducer newResultProducer) {
super.setResultProducer(newResultProducer);
m_baseExperiment.setResultProducer(newResultProducer);
}
/**
* Set the datasets to use in the experiment
*
* @param ds the list of datasets to use
*/
@Override
public void setDatasets(DefaultListModel ds) {
super.setDatasets(ds);
m_baseExperiment.setDatasets(ds);
}
/**
* Sets whether the custom property iterator should be used.
*
* @param newUsePropertyIterator true if so
*/
@Override
public void setUsePropertyIterator(boolean newUsePropertyIterator) {
super.setUsePropertyIterator(newUsePropertyIterator);
m_baseExperiment.setUsePropertyIterator(newUsePropertyIterator);
}
/**
* Sets the path of properties taken to get to the custom property to iterate
* over.
*
* @param newPropertyPath an array of PropertyNodes
*/
@Override
public void setPropertyPath(PropertyNode[] newPropertyPath) {
super.setPropertyPath(newPropertyPath);
m_baseExperiment.setPropertyPath(newPropertyPath);
}
/**
* Sets the array of values to set the custom property to.
*
* @param newPropArray a value of type Object which should be an array of the
* appropriate values.
*/
@Override
public void setPropertyArray(Object newPropArray) {
super.setPropertyArray(newPropArray);
m_baseExperiment.setPropertyArray(newPropArray);
}
/**
* Prepares a remote experiment for running, creates sub experiments
*
* @throws Exception if an error occurs
*/
@Override
public void initialize() throws Exception {
if (m_baseExperiment == null) {
throw new Exception("No base experiment specified!");
}
m_experimentAborted = false;
m_finishedCount = 0;
m_failedCount = 0;
m_RunNumber = getRunLower();
m_DatasetNumber = 0;
m_PropertyNumber = 0;
m_CurrentProperty = -1;
m_CurrentInstances = null;
m_Finished = false;
if (m_remoteHosts.size() == 0) {
throw new Exception("No hosts specified!");
}
// initialize all remote hosts to available
m_remoteHostsStatus = new int[m_remoteHosts.size()];
m_remoteHostFailureCounts = new int[m_remoteHosts.size()];
m_remoteHostsQueue = new Queue();
// prime the hosts queue
for (int i = 0; i < m_remoteHosts.size(); i++) {
m_remoteHostsQueue.push(new Integer(i));
}
// set up sub experiments
m_subExpQueue = new Queue();
int numExps;
if (getSplitByDataSet()) {
numExps = m_baseExperiment.getDatasets().size();
} else if (getSplitByProperty()) {
numExps = m_baseExperiment.getPropertyArrayLength();
} else {
numExps = getRunUpper() - getRunLower() + 1;
}
m_subExperiments = new Experiment[numExps];
m_subExpComplete = new int[numExps];
// create copy of base experiment
SerializedObject so = new SerializedObject(m_baseExperiment);
if (getSplitByDataSet()) {
for (int i = 0; i < m_baseExperiment.getDatasets().size(); i++) {
m_subExperiments[i] = (Experiment) so.getObject();
// one for each data set
DefaultListModel temp = new DefaultListModel();
temp.addElement(m_baseExperiment.getDatasets().get(i));
m_subExperiments[i].setDatasets(temp);
m_subExpQueue.push(new Integer(i));
}
} else if (getSplitByProperty()) {
for (int i = 0; i < m_baseExperiment.getPropertyArrayLength(); i++) {
m_subExperiments[i] = (Experiment) so.getObject();
Object[] a = new Object[1];
a[0] = m_baseExperiment.getPropertyArrayValue(i);
m_subExperiments[i].setPropertyArray(a);
m_subExpQueue.push(new Integer(i));
}
} else {
for (int i = getRunLower(); i <= getRunUpper(); i++) {
m_subExperiments[i - getRunLower()] = (Experiment) so.getObject();
// one run for each sub experiment
m_subExperiments[i - getRunLower()].setRunLower(i);
m_subExperiments[i - getRunLower()].setRunUpper(i);
m_subExpQueue.push(new Integer(i - getRunLower()));
}
}
}
/**
* Inform all listeners of progress
*
* @param status true if this is a status type of message
* @param log true if this is a log type of message
* @param finished true if the remote experiment has finished
* @param message the message.
*/
private synchronized void notifyListeners(boolean status, boolean log,
boolean finished, String message) {
if (m_listeners.size() > 0) {
for (int i = 0; i < m_listeners.size(); i++) {
RemoteExperimentListener r = (m_listeners.get(i));
r.remoteExperimentStatus(new RemoteExperimentEvent(status, log,
finished, message));
}
} else {
System.err.println(message);
}
}
/**
* Set the abort flag
*/
public void abortExperiment() {
m_experimentAborted = true;
}
/**
* Increment the number of successfully completed sub experiments
*/
protected synchronized void incrementFinished() {
m_finishedCount++;
}
/**
* Increment the overall number of failures and the number of failures for a
* particular host
*
* @param hostNum the index of the host to increment failure count
*/
protected synchronized void incrementFailed(int hostNum) {
m_failedCount++;
m_remoteHostFailureCounts[hostNum]++;
}
/**
* Push an experiment back on the queue of waiting experiments
*
* @param expNum the index of the experiment to push onto the queue
*/
protected synchronized void waitingExperiment(int expNum) {
m_subExpQueue.push(new Integer(expNum));
}
/**
* Check to see if we have failed to connect to all hosts
*
* @return true if failed to connect to all hosts
*/
private boolean checkForAllFailedHosts() {
boolean allbad = true;
for (int m_remoteHostsStatu : m_remoteHostsStatus) {
if (m_remoteHostsStatu != CONNECTION_FAILED) {
allbad = false;
break;
}
}
if (allbad) {
abortExperiment();
notifyListeners(false, true, true, "Experiment aborted! All connections "
+ "to remote hosts failed.");
}
return allbad;
}
/**
* Returns some post experiment information.
*
* @return a String containing some post experiment info
*/
private String postExperimentInfo() {
StringBuffer text = new StringBuffer();
text.append(m_finishedCount + (m_splitByDataSet ? " data sets" : " runs")
+ " completed successfully. " + m_failedCount
+ " failures during running.\n");
System.err.print(text.toString());
return text.toString();
}
/**
* Pushes a host back onto the queue of available hosts and attempts to launch
* a waiting experiment (if any).
*
* @param hostNum the index of the host to push back onto the queue of
* available hosts
*/
protected synchronized void availableHost(int hostNum) {
if (hostNum >= 0) {
if (m_remoteHostFailureCounts[hostNum] < MAX_FAILURES) {
m_remoteHostsQueue.push(new Integer(hostNum));
} else {
notifyListeners(false, true, false, "Max failures exceeded for host "
+ ((String) m_remoteHosts.elementAt(hostNum))
+ ". Removed from host list.");
m_removedHosts++;
}
}
// check for all sub exp complete or all hosts failed or failed count
// exceeded
if (m_failedCount == (MAX_FAILURES * m_remoteHosts.size())) {
abortExperiment();
notifyListeners(false, true, true, "Experiment aborted! Max failures "
+ "exceeded on all remote hosts.");
return;
}
if ((getSplitByDataSet() && (m_baseExperiment.getDatasets().size() == m_finishedCount))
|| (getSplitByProperty() && (m_baseExperiment.getPropertyArrayLength() == m_finishedCount))
|| (!getSplitByDataSet() && !getSplitByProperty() && (getRunUpper() - getRunLower() + 1) == m_finishedCount)) {
notifyListeners(false, true, false, "Experiment completed successfully.");
notifyListeners(false, true, true, postExperimentInfo());
return;
}
if (checkForAllFailedHosts()) {
return;
}
if (m_experimentAborted
&& (m_remoteHostsQueue.size() + m_removedHosts) == m_remoteHosts.size()) {
notifyListeners(false, true, true,
"Experiment aborted. All remote tasks " + "finished.");
}
if (!m_subExpQueue.empty() && !m_experimentAborted) {
if (!m_remoteHostsQueue.empty()) {
int availHost, waitingExp;
try {
availHost = ((Integer) m_remoteHostsQueue.pop()).intValue();
waitingExp = ((Integer) m_subExpQueue.pop()).intValue();
launchNext(waitingExp, availHost);
} catch (Exception ex) {
ex.printStackTrace();
}
}
}
}
/**
* Launch a sub experiment on a remote host
*
* @param wexp the index of the sub experiment to launch
* @param ah the index of the available host to launch on
*/
public void launchNext(final int wexp, final int ah) {
Thread subExpThread;
subExpThread = new Thread() {
@Override
public void run() {
m_remoteHostsStatus[ah] = IN_USE;
m_subExpComplete[wexp] = TaskStatusInfo.PROCESSING;
RemoteExperimentSubTask expSubTsk = new RemoteExperimentSubTask();
expSubTsk.setExperiment(m_subExperiments[wexp]);
String subTaskType = null;
if (getSplitByDataSet()) {
subTaskType = "dataset: "
+ ((File) m_subExperiments[wexp].getDatasets().elementAt(0))
.getName();
} else if (getSplitByProperty()) {
subTaskType = "property: " + m_subExperiments[wexp].getPropertyArrayValue(0).getClass().getName() + " :" +
m_subExperiments[wexp].getPropertyArrayValue(0);
} else {
subTaskType = "run: " + m_subExperiments[wexp].getRunLower();
}
try {
String name = "//" + ((String) m_remoteHosts.elementAt(ah))
+ "/RemoteEngine";
Compute comp = (Compute) Naming.lookup(name);
// assess the status of the sub-exp
notifyListeners(false, true, false, "Starting " + subTaskType
+ " on host " + ((String) m_remoteHosts.elementAt(ah)));
Object subTaskId = comp.executeTask(expSubTsk);
boolean finished = false;
TaskStatusInfo is = null;
while (!finished) {
try {
Thread.sleep(2000);
TaskStatusInfo cs = (TaskStatusInfo) comp.checkStatus(subTaskId);
if (cs.getExecutionStatus() == TaskStatusInfo.FINISHED) {
// push host back onto queue and try launching any waiting
// sub-experiments
notifyListeners(false, true, false, cs.getStatusMessage());
m_remoteHostsStatus[ah] = AVAILABLE;
incrementFinished();
availableHost(ah);
finished = true;
} else if (cs.getExecutionStatus() == TaskStatusInfo.FAILED) {
// a non connection related error---possibly host doesn't have
// access to data sets or security policy is not set up
// correctly or classifier(s) failed for some reason
notifyListeners(false, true, false, cs.getStatusMessage());
m_remoteHostsStatus[ah] = SOME_OTHER_FAILURE;
m_subExpComplete[wexp] = TaskStatusInfo.FAILED;
notifyListeners(false, true, false,
subTaskType + " " + cs.getStatusMessage()
+ ". Scheduling for execution on another host.");
incrementFailed(ah);
// push experiment back onto queue
waitingExperiment(wexp);
// push host back onto queue and try launching any waiting
// sub-experiments. Host is pushed back on the queue as the
// failure may be temporary---eg. with InstantDB using the
// RMI bridge, two or more threads may try to create the
// experiment index or results table simultaneously; all but
// one will throw an exception. These hosts are still usable
// however.
availableHost(ah);
finished = true;
} else {
if (is == null) {
is = cs;
notifyListeners(false, true, false, cs.getStatusMessage());
} else {
if (cs.getStatusMessage().compareTo(is.getStatusMessage()) != 0) {
notifyListeners(false, true, false, cs.getStatusMessage());
}
is = cs;
}
}
} catch (InterruptedException ie) {
}
}
} catch (Exception ce) {
m_remoteHostsStatus[ah] = CONNECTION_FAILED;
m_subExpComplete[wexp] = TaskStatusInfo.TO_BE_RUN;
System.err.println(ce);
ce.printStackTrace();
notifyListeners(false, true, false, "Connection to "
+ ((String) m_remoteHosts.elementAt(ah)) + " failed. Scheduling "
+ subTaskType + " for execution on another host.");
checkForAllFailedHosts();
waitingExperiment(wexp);
} finally {
if (isInterrupted()) {
System.err.println("Sub exp Interupted!");
}
}
}
};
subExpThread.setPriority(Thread.MIN_PRIORITY);
subExpThread.start();
}
/**
   * Overrides the one in Experiment
*
* @throws Exception never throws an exception
*/
@Override
public void nextIteration() throws Exception {
}
/**
   * Overrides the one in Experiment
*/
@Override
public void advanceCounters() {
}
/**
   * Overrides the one in Experiment
*/
@Override
public void postProcess() {
}
/**
* Add a host name to the list of remote hosts
*
* @param hostname the host name to add to the list
*/
public void addRemoteHost(String hostname) {
m_remoteHosts.addElement(hostname);
}
/**
* Get the list of remote host names
*
* @return the list of remote host names
*/
public DefaultListModel getRemoteHosts() {
return m_remoteHosts;
}
/**
* Set the list of remote host names
*
* @param list the list of remote host names
*/
public void setRemoteHosts(DefaultListModel list) {
m_remoteHosts = list;
}
/**
   * Overrides toString in Experiment
*
* @return a description of this remote experiment
*/
@Override
public String toString() {
String result = m_baseExperiment.toString();
result += "\nRemote Hosts:\n";
for (int i = 0; i < m_remoteHosts.size(); i++) {
result += ((String) m_remoteHosts.elementAt(i)) + '\n';
}
return result;
}
/**
   * Overrides runExperiment in Experiment
*/
@Override
public void runExperiment() {
int totalHosts = m_remoteHostsQueue.size();
// Try to launch sub experiments on all available hosts
for (int i = 0; i < totalHosts; i++) {
availableHost(-1);
}
}
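  /*
   * Programmatic sketch (the host name below is a placeholder and the base
   * experiment is assumed to have been configured elsewhere, e.g. via
   * Experiment.setOptions()):
   *
   *   RemoteExperiment exp = new RemoteExperiment(baseExperiment);
   *   exp.addRemoteHost("remotehost.example.org");
   *   exp.initialize();
   *   exp.runExperiment();
   */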
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* Configures/Runs the Experiment from the command line.
*
* @param args command line arguments to the Experiment.
*/
public static void main(String[] args) {
try {
weka.core.WekaPackageManager.loadPackages(false, true, false);
RemoteExperiment exp = null;
// get options from XML?
String xmlOption = Utils.getOption("xml", args);
if (!xmlOption.equals("")) {
args = new XMLOptions(xmlOption).toArray();
}
Experiment base = null;
String expFile = Utils.getOption('l', args);
String saveFile = Utils.getOption('s', args);
boolean runExp = Utils.getFlag('r', args);
ArrayList<String> remoteHosts = new ArrayList<String>();
String runHost = " ";
while (runHost.length() != 0) {
runHost = Utils.getOption('h', args);
if (runHost.length() != 0) {
remoteHosts.add(runHost);
}
}
if (expFile.length() == 0) {
base = new Experiment();
try {
base.setOptions(args);
Utils.checkForRemainingOptions(args);
} catch (Exception ex) {
ex.printStackTrace();
String result = "Usage:\n\n" + "-l <exp file>\n"
+ "\tLoad experiment from file (default use cli options)\n"
+ "-s <exp file>\n"
+ "\tSave experiment to file after setting other options\n"
+ "\t(default don't save)\n" + "-h <remote host name>\n"
+ "\tHost to run experiment on (may be specified more than once\n"
+ "\tfor multiple remote hosts)\n" + "-r \n"
+ "\tRun experiment on (default don't run)\n"
+ "-xml <filename | xml-string>\n"
+ "\tget options from XML-Data instead from parameters\n" + "\n";
Enumeration<Option> enm = ((OptionHandler) base).listOptions();
while (enm.hasMoreElements()) {
Option option = enm.nextElement();
result += option.synopsis() + "\n";
result += option.description() + "\n";
}
throw new Exception(result + "\n" + ex.getMessage());
}
} else {
Object tmp;
// KOML?
if ((KOML.isPresent())
&& (expFile.toLowerCase().endsWith(KOML.FILE_EXTENSION))) {
tmp = KOML.read(expFile);
} else
// XML?
if (expFile.toLowerCase().endsWith(".xml")) {
XMLExperiment xml = new XMLExperiment();
tmp = xml.read(expFile);
}
// binary
else {
FileInputStream fi = new FileInputStream(expFile);
ObjectInputStream oi = new ObjectInputStream(new BufferedInputStream(
fi));
tmp = oi.readObject();
oi.close();
}
if (tmp instanceof RemoteExperiment) {
exp = (RemoteExperiment) tmp;
} else {
base = (Experiment) tmp;
}
}
if (base != null) {
exp = new RemoteExperiment(base);
}
for (int i = 0; i < remoteHosts.size(); i++) {
exp.addRemoteHost(remoteHosts.get(i));
}
System.err.println("Experiment:\n" + exp.toString());
if (saveFile.length() != 0) {
// KOML?
if ((KOML.isPresent())
&& (saveFile.toLowerCase().endsWith(KOML.FILE_EXTENSION))) {
KOML.write(saveFile, exp);
} else
// XML?
if (saveFile.toLowerCase().endsWith(".xml")) {
XMLExperiment xml = new XMLExperiment();
xml.write(saveFile, exp);
}
// binary
else {
FileOutputStream fo = new FileOutputStream(saveFile);
ObjectOutputStream oo = new ObjectOutputStream(
new BufferedOutputStream(fo));
oo.writeObject(exp);
oo.close();
}
}
if (runExp) {
System.err.println("Initializing...");
exp.initialize();
System.err.println("Iterating...");
exp.runExperiment();
System.err.println("Postprocessing...");
exp.postProcess();
}
} catch (Exception ex) {
ex.printStackTrace();
System.err.println(ex.getMessage());
}
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/RemoteExperimentEvent.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RemoteExperimentEvent.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.Serializable;
/**
* Class encapsulating information on progress of a remote experiment
*
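 * A log-type message event, for example, could be constructed as follows
 * (illustrative values):
 * <pre>
 * RemoteExperimentEvent e =
 *   new RemoteExperimentEvent(false, true, false, "host1: run 3 finished");
 * </pre>
 *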
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public class RemoteExperimentEvent
implements Serializable {
/** for serialization */
private static final long serialVersionUID = 7000867987391866451L;
/** A status type message */
public boolean m_statusMessage;
/** A log type message */
public boolean m_logMessage;
/** The message */
public String m_messageString;
/** True if a remote experiment has finished */
public boolean m_experimentFinished;
/**
* Constructor
* @param status true for status type messages
* @param log true for log type messages
* @param finished true if experiment has finished
* @param message the message
*/
public RemoteExperimentEvent(boolean status, boolean log, boolean finished,
String message) {
m_statusMessage = status;
m_logMessage = log;
m_experimentFinished = finished;
m_messageString = message;
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/RemoteExperimentListener.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RemoteExperimentListener.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
/**
* Interface for classes that want to listen for updates on RemoteExperiment
* progress
*
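 * A minimal listener that simply echoes messages might look like this
 * (illustrative sketch):
 * <pre>
 * RemoteExperimentListener l = new RemoteExperimentListener() {
 *   public void remoteExperimentStatus(RemoteExperimentEvent e) {
 *     System.err.println(e.m_messageString);
 *   }
 * };
 * </pre>
 *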
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public interface RemoteExperimentListener {
/**
* Called when progress has been made in a remote experiment
* @param e the event encapsulating what happened
*/
void remoteExperimentStatus(RemoteExperimentEvent e);
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/RemoteExperimentSubTask.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* RemoteExperimentSubTask.java
* Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.File;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.SerializedObject;
/**
* Class to encapsulate an experiment as a task that can be executed on a remote
* host.
*
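 * A minimal usage sketch (illustrative; subExp is assumed to be a fully
 * configured Experiment covering a single run or dataset):
 * <pre>
 * RemoteExperimentSubTask task = new RemoteExperimentSubTask();
 * task.setExperiment(subExp);          // subExp: a fully configured Experiment
 * task.execute();
 * TaskStatusInfo status = task.getTaskStatus();
 * </pre>
 *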
* @author Mark Hall (mhall@cs.waikato.ac.nz)
* @version $Revision$
*/
public class RemoteExperimentSubTask implements Task, RevisionHandler {
/** ID added to avoid warning */
private static final long serialVersionUID = -1674092706571603720L;
/* Info on the task */
private TaskStatusInfo m_result = new TaskStatusInfo();
/* The (sub) experiment to execute */
private Experiment m_experiment;
private SerializedObject m_serializedExp;
public RemoteExperimentSubTask() {
m_result.setStatusMessage("Not running.");
m_result.setExecutionStatus(TaskStatusInfo.TO_BE_RUN);
}
/**
* Set the experiment for this sub task
*
* @param task the experiment
*/
public void setExperiment(Experiment task) {
// m_experiment = task;
try {
m_serializedExp = new SerializedObject(task);
} catch (Exception e) {
e.printStackTrace();
}
}
/**
* Get the experiment for this sub task
*
* @return this sub task's experiment
*/
public Experiment getExperiment() {
if (m_experiment != null) {
return m_experiment;
} else {
m_experiment = (Experiment) m_serializedExp.getObject();
return m_experiment;
}
}
/**
* Run the experiment
*/
@Override
public void execute() {
m_experiment = (Experiment) m_serializedExp.getObject();
// FastVector result = new FastVector();
m_result = new TaskStatusInfo();
m_result.setStatusMessage("Running...");
String goodResult = "(sub)experiment completed successfully";
String subTaskType;
if (m_experiment.getRunLower() != m_experiment.getRunUpper()) {
subTaskType = "(dataset "
+ ((File) m_experiment.getDatasets().elementAt(0)).getName();
} else {
subTaskType = "(exp run # " + m_experiment.getRunLower();
}
try {
System.err.println("Initializing " + subTaskType + ")...");
m_experiment.initialize();
System.err.println("Iterating " + subTaskType + ")...");
// Do not invoke runExperiment(): every exception will be lost
while (m_experiment.hasMoreIterations()) {
m_experiment.nextIteration();
}
System.err.println("Postprocessing " + subTaskType + ")...");
m_experiment.postProcess();
} catch (Exception ex) {
ex.printStackTrace();
String badResult = "(sub)experiment " + subTaskType + ") failed : "
+ ex.toString();
m_result.setExecutionStatus(TaskStatusInfo.FAILED);
// m_result.addElement(new Integer(RemoteExperiment.FAILED));
// m_result.addElement(badResult);
m_result.setStatusMessage(badResult);
m_result.setTaskResult("Failed");
// return m_result;
return;
}
// m_result.addElement(new Integer(RemoteExperiment.FINISHED));
// m_result.addElement(goodResult);
m_result.setExecutionStatus(TaskStatusInfo.FINISHED);
m_result.setStatusMessage(goodResult + " " + subTaskType + ").");
m_result.setTaskResult("No errors");
// return m_result;
}
@Override
public TaskStatusInfo getTaskStatus() {
return m_result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/ResultListener.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ResultListener.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.Serializable;
/**
* Interface for objects able to listen for results obtained
* by a ResultProducer
*
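 * Typical implementations write the results to a file or a database
 * (e.g. CSVResultListener, InstancesResultListener or DatabaseResultListener
 * in this package).
 *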
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public interface ResultListener extends Serializable {
/**
* Determines if there are any constraints (imposed by the
* destination) on additional result columns to be produced by
* resultProducers. Null should be returned if there are NO
* constraints, otherwise a list of column names should be
* returned as an array of Strings.
* @param rp the ResultProducer to which the constraints will apply
 * @return an array of column names to which resultProducer's
* additional results will be restricted.
* @exception Exception if an error occurs
*/
String [] determineColumnConstraints(ResultProducer rp)
throws Exception;
/**
* Prepare for the results to be received.
*
* @param rp the ResultProducer that will generate the results
* @exception Exception if an error occurs during preprocessing.
*/
void preProcess(ResultProducer rp) throws Exception;
/**
* Perform any postprocessing. When this method is called, it indicates
* that no more results will be sent that need to be grouped together
* in any way.
*
* @param rp the ResultProducer that generated the results
* @exception Exception if an error occurs
*/
void postProcess(ResultProducer rp) throws Exception;
/**
* Accepts results from a ResultProducer.
*
* @param rp the ResultProducer that generated the results
* @param key an array of Objects (Strings or Doubles) that uniquely
* identify a result for a given ResultProducer with given compatibilityState
* @param result the results stored in an array. The objects stored in
* the array may be Strings, Doubles, or null (for the missing value).
* @exception Exception if the result could not be accepted.
*/
void acceptResult(ResultProducer rp, Object [] key, Object [] result)
throws Exception;
/**
* Determines whether the results for a specified key must be
* generated.
*
* @param rp the ResultProducer wanting to generate the results
* @param key an array of Objects (Strings or Doubles) that uniquely
* identify a result for a given ResultProducer with given compatibilityState
* @return true if the result should be generated
* @exception Exception if it could not be determined if the result
* is needed.
*/
boolean isResultRequired(ResultProducer rp, Object [] key)
throws Exception;
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/ResultMatrix.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ResultMatrix.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.Serializable;
import java.util.Enumeration;
import java.util.Vector;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.Utils;
/**
* This matrix is a container for the datasets and classifier setups and their
* statistics. Derived classes output the data in different formats. Derived
* classes need to implement the following methods:
* <ul>
* <li><code>toStringMatrix()</code></li>
* <li><code>toStringKey()</code></li>
* <li><code>toStringHeader()</code></li>
* <li><code>toStringSummary()</code></li>
* <li><code>toStringRanking()</code></li>
* </ul>
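 *
 * A typical usage sketch (illustrative; ResultMatrixPlainText is one of the
 * concrete implementations shipped with Weka):
 * <pre>
 * ResultMatrix matrix = new ResultMatrixPlainText(2, 3);  // 2 cols, 3 rows
 * matrix.setColName(0, "baseline");
 * matrix.setMean(0, 0, 0.85);
 * System.out.println(matrix.toStringMatrix());
 * </pre>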
*
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
* @see #toStringMatrix()
* @see #toStringKey()
* @see #toStringHeader()
* @see #toStringSummary()
* @see #toStringRanking()
*/
public abstract class ResultMatrix implements Serializable, RevisionHandler,
OptionHandler {
/** for serialization. */
private static final long serialVersionUID = 4487179306428209739L;
/** tie. */
public final static int SIGNIFICANCE_TIE = 0;
/** win. */
public final static int SIGNIFICANCE_WIN = 1;
/** loss. */
public final static int SIGNIFICANCE_LOSS = 2;
/** tie string. */
public String TIE_STRING = " ";
/** win string. */
public String WIN_STRING = "v";
/** loss string. */
public String LOSS_STRING = "*";
/** the left parentheses for enumerating cols/rows. */
public String LEFT_PARENTHESES = "(";
/** the right parentheses for enumerating cols/rows. */
public String RIGHT_PARENTHESES = ")";
/** the column names. */
protected String[] m_ColNames = null;
/** the row names. */
protected String[] m_RowNames = null;
/** whether a column is hidden. */
protected boolean[] m_ColHidden = null;
/** whether a row is hidden. */
protected boolean[] m_RowHidden = null;
/** the significance. */
protected int[][] m_Significance = null;
/** the values. */
protected double[][] m_Mean = null;
/** the standard deviation. */
protected double[][] m_StdDev = null;
/** the counts for the different datasets. */
protected double[] m_Counts = null;
/** the standard mean precision. */
protected int m_MeanPrec;
/** the standard std. deviation precision. */
protected int m_StdDevPrec;
/** whether std. deviations are printed as well. */
protected boolean m_ShowStdDev;
/** whether the average for each column should be printed. */
protected boolean m_ShowAverage;
/** whether the names or numbers are output as column declarations. */
protected boolean m_PrintColNames;
/** whether the names or numbers are output as row declarations. */
protected boolean m_PrintRowNames;
/**
* whether a "(x)" is printed before each column name with "x" as the index.
*/
protected boolean m_EnumerateColNames;
/** whether a "(x)" is printed before each row name with "x" as the index. */
protected boolean m_EnumerateRowNames;
/** the size of the names of the columns. */
protected int m_ColNameWidth;
/** the size of the names of the rows. */
protected int m_RowNameWidth;
/** the size of the mean columns. */
protected int m_MeanWidth;
/** the size of the std dev columns. */
protected int m_StdDevWidth;
/** the size of the significance columns. */
protected int m_SignificanceWidth;
/** the size of the counts. */
protected int m_CountWidth;
/** contains the keys for the header. */
protected Vector<String> m_HeaderKeys = null;
/** contains the values for the header. */
protected Vector<String> m_HeaderValues = null;
/** the non-significant wins. */
protected int[][] m_NonSigWins = null;
/** the significant wins. */
protected int[][] m_Wins = null;
/** the wins in ranking. */
protected int[] m_RankingWins = null;
/** the losses in ranking. */
protected int[] m_RankingLosses = null;
/** the difference between wins and losses. */
protected int[] m_RankingDiff = null;
/** the ordering of the rows. */
protected int[] m_RowOrder = null;
/** the ordering of the columns. */
protected int[] m_ColOrder = null;
/** whether to remove the filter name from the dataset name. */
protected boolean m_RemoveFilterName = false;
/**
* initializes the matrix as 1x1 matrix.
*/
public ResultMatrix() {
this(1, 1);
}
/**
* initializes the matrix with the given dimensions.
*
* @param cols the number of columns
* @param rows the number of rows
*/
public ResultMatrix(int cols, int rows) {
setSize(cols, rows);
clear();
}
/**
* initializes the matrix with the values from the given matrix.
*
* @param matrix the matrix to get the values from
*/
public ResultMatrix(ResultMatrix matrix) {
assign(matrix);
}
/**
* Returns a string describing the matrix.
*
* @return a description suitable for displaying in the experimenter gui
*/
public abstract String globalInfo();
/**
 * Returns an enumeration of all the available options.
*
* @return an enumeration of all available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> result;
result = new Vector<Option>();
result.addElement(new Option(
"\tThe number of decimals after the decimal point for the mean.\n"
+ "\t(default: " + getDefaultMeanPrec() + ")", "mean-prec", 1,
"-mean-prec <int>"));
result.addElement(new Option(
"\tThe number of decimals after the decimal point for the mean.\n"
+ "\t(default: " + getDefaultStdDevPrec() + ")", "stddev-prec", 1,
"-stddev-prec <int>"));
result.addElement(new Option(
"\tThe maximum width for the column names (0 = optimal).\n"
+ "\t(default: " + getDefaultColNameWidth() + ")", "col-name-width", 1,
"-col-name-width <int>"));
result.addElement(new Option(
"\tThe maximum width for the row names (0 = optimal).\n" + "\t(default: "
+ getDefaultRowNameWidth() + ")", "row-name-width", 1,
"-row-name-width <int>"));
result.addElement(new Option("\tThe width of the mean (0 = optimal).\n"
+ "\t(default: " + getDefaultMeanWidth() + ")", "mean-width", 1,
"-mean-width <int>"));
result.addElement(new Option(
"\tThe width of the standard deviation (0 = optimal).\n" + "\t(default: "
+ getDefaultStdDevWidth() + ")", "stddev-width", 1,
"-stddev-width <int>"));
result.addElement(new Option(
"\tThe width of the significance indicator (0 = optimal).\n"
+ "\t(default: " + getDefaultSignificanceWidth() + ")", "sig-width", 1,
"-sig-width <int>"));
result.addElement(new Option("\tThe width of the counts (0 = optimal).\n"
+ "\t(default: " + getDefaultCountWidth() + ")", "count-width", 1,
"-count-width <int>"));
result.addElement(new Option(
"\tWhether to display the standard deviation column.\n"
+ "\t(default: no)", "show-stddev", 0, "-show-stddev"));
result.addElement(new Option("\tWhether to show the row with averages.\n"
+ "\t(default: no)", "show-avg", 0, "-show-avg"));
result.addElement(new Option(
"\tWhether to remove the classname package prefixes from the\n"
+ "\tfilter names in datasets.\n" + "\t(default: no)", "remove-filter",
0, "-remove-filter"));
result.addElement(new Option(
"\tWhether to output column names or just numbers representing them.\n"
+ "\t(default: no)", "print-col-names", 0, "-print-col-names"));
result.addElement(new Option(
"\tWhether to output row names or just numbers representing them.\n"
+ "\t(default: no)", "print-row-names", 0, "-print-row-names"));
result.addElement(new Option(
"\tWhether to enumerate the column names (prefixing them with \n"
+ "\t'(x)', with 'x' being the index).\n" + "\t(default: no)",
"enum-col-names", 0, "-enum-col-names"));
result.addElement(new Option(
"\tWhether to enumerate the row names (prefixing them with \n"
+ "\t'(x)', with 'x' being the index).\n" + "\t(default: no)",
"enum-row-names", 0, "-enum-row-names"));
return result.elements();
}
/**
* Sets the OptionHandler's options using the given list. All options will be
* set (or reset) during this call (i.e. incremental setting of options is not
* possible).
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String tmpStr;
tmpStr = Utils.getOption("mean-prec", options);
if (tmpStr.length() > 0) {
setMeanPrec(Integer.parseInt(tmpStr));
} else {
setMeanPrec(getDefaultMeanPrec());
}
tmpStr = Utils.getOption("stddev-prec", options);
if (tmpStr.length() > 0) {
setStdDevPrec(Integer.parseInt(tmpStr));
} else {
setStdDevPrec(getDefaultStdDevPrec());
}
tmpStr = Utils.getOption("col-name-width", options);
if (tmpStr.length() > 0) {
setColNameWidth(Integer.parseInt(tmpStr));
} else {
setColNameWidth(getDefaultColNameWidth());
}
tmpStr = Utils.getOption("row-name-width", options);
if (tmpStr.length() > 0) {
setRowNameWidth(Integer.parseInt(tmpStr));
} else {
setRowNameWidth(getDefaultRowNameWidth());
}
tmpStr = Utils.getOption("mean-width", options);
if (tmpStr.length() > 0) {
setMeanWidth(Integer.parseInt(tmpStr));
} else {
setMeanWidth(getDefaultMeanWidth());
}
tmpStr = Utils.getOption("stddev-width", options);
if (tmpStr.length() > 0) {
setStdDevWidth(Integer.parseInt(tmpStr));
} else {
setStdDevWidth(getDefaultStdDevWidth());
}
tmpStr = Utils.getOption("sig-width", options);
if (tmpStr.length() > 0) {
setSignificanceWidth(Integer.parseInt(tmpStr));
} else {
setSignificanceWidth(getDefaultSignificanceWidth());
}
tmpStr = Utils.getOption("count-width", options);
if (tmpStr.length() > 0) {
setCountWidth(Integer.parseInt(tmpStr));
} else {
setCountWidth(getDefaultCountWidth());
}
setShowStdDev(Utils.getFlag("show-stddev", options));
setShowAverage(Utils.getFlag("show-avg", options));
setRemoveFilterName(Utils.getFlag("remove-filter", options));
setEnumerateColNames(Utils.getFlag("enum-col-names", options));
setEnumerateRowNames(Utils.getFlag("enum-row-names", options));
setPrintColNames(Utils.getFlag("print-col-names", options));
setPrintRowNames(Utils.getFlag("print-row-names", options));
}
/**
* Gets the current option settings for the OptionHandler.
*
* @return the list of current option settings as an array of strings
*/
@Override
public String[] getOptions() {
Vector<String> result;
result = new Vector<String>();
result.add("-mean-prec");
result.add("" + getMeanPrec());
result.add("-stddev-prec");
result.add("" + getStdDevPrec());
result.add("-col-name-width");
result.add("" + getColNameWidth());
result.add("-row-name-width");
result.add("" + getRowNameWidth());
result.add("-mean-width");
result.add("" + getMeanWidth());
result.add("-stddev-width");
result.add("" + getStdDevWidth());
result.add("-sig-width");
result.add("" + getSignificanceWidth());
result.add("-count-width");
result.add("" + getCountWidth());
if (getShowStdDev()) {
result.add("-show-stddev");
}
if (getShowAverage()) {
result.add("-show-avg");
}
if (getRemoveFilterName()) {
result.add("-remove-filter");
}
if (getPrintColNames()) {
result.add("-print-col-names");
}
if (getPrintRowNames()) {
result.add("-print-row-names");
}
if (getEnumerateColNames()) {
result.add("-enum-col-names");
}
if (getEnumerateRowNames()) {
result.add("-enum-row-names");
}
return result.toArray(new String[result.size()]);
}
/**
* returns the name of the output format.
*
* @return the display name
*/
public abstract String getDisplayName();
/**
* acquires the data from the given matrix.
*
* @param matrix the matrix to get the data from
*/
@SuppressWarnings("unchecked")
public void assign(ResultMatrix matrix) {
int i;
int n;
setSize(matrix.getColCount(), matrix.getRowCount());
// output parameters
TIE_STRING = matrix.TIE_STRING;
WIN_STRING = matrix.WIN_STRING;
LOSS_STRING = matrix.LOSS_STRING;
LEFT_PARENTHESES = matrix.LEFT_PARENTHESES;
RIGHT_PARENTHESES = matrix.RIGHT_PARENTHESES;
m_MeanPrec = matrix.m_MeanPrec;
m_StdDevPrec = matrix.m_StdDevPrec;
m_ShowStdDev = matrix.m_ShowStdDev;
m_ShowAverage = matrix.m_ShowAverage;
m_PrintColNames = matrix.m_PrintColNames;
m_PrintRowNames = matrix.m_PrintRowNames;
m_EnumerateColNames = matrix.m_EnumerateColNames;
m_EnumerateRowNames = matrix.m_EnumerateRowNames;
m_RowNameWidth = matrix.m_RowNameWidth;
m_MeanWidth = matrix.m_MeanWidth;
m_StdDevWidth = matrix.m_StdDevWidth;
m_SignificanceWidth = matrix.m_SignificanceWidth;
m_CountWidth = matrix.m_CountWidth;
m_RemoveFilterName = matrix.m_RemoveFilterName;
// header
m_HeaderKeys = (Vector<String>) matrix.m_HeaderKeys.clone();
m_HeaderValues = (Vector<String>) matrix.m_HeaderValues.clone();
// matrix
for (i = 0; i < matrix.m_Mean.length; i++) {
for (n = 0; n < matrix.m_Mean[i].length; n++) {
m_Mean[i][n] = matrix.m_Mean[i][n];
m_StdDev[i][n] = matrix.m_StdDev[i][n];
m_Significance[i][n] = matrix.m_Significance[i][n];
}
}
for (i = 0; i < matrix.m_ColNames.length; i++) {
m_ColNames[i] = matrix.m_ColNames[i];
m_ColHidden[i] = matrix.m_ColHidden[i];
}
for (i = 0; i < matrix.m_RowNames.length; i++) {
m_RowNames[i] = matrix.m_RowNames[i];
m_RowHidden[i] = matrix.m_RowHidden[i];
}
for (i = 0; i < matrix.m_Counts.length; i++) {
m_Counts[i] = matrix.m_Counts[i];
}
// summary
if (matrix.m_NonSigWins != null) {
m_NonSigWins = new int[matrix.m_NonSigWins.length][];
m_Wins = new int[matrix.m_NonSigWins.length][];
for (i = 0; i < matrix.m_NonSigWins.length; i++) {
m_NonSigWins[i] = new int[matrix.m_NonSigWins[i].length];
m_Wins[i] = new int[matrix.m_NonSigWins[i].length];
for (n = 0; n < matrix.m_NonSigWins[i].length; n++) {
m_NonSigWins[i][n] = matrix.m_NonSigWins[i][n];
m_Wins[i][n] = matrix.m_Wins[i][n];
}
}
}
// ranking
if (matrix.m_RankingWins != null) {
m_RankingWins = new int[matrix.m_RankingWins.length];
m_RankingLosses = new int[matrix.m_RankingWins.length];
m_RankingDiff = new int[matrix.m_RankingWins.length];
for (i = 0; i < matrix.m_RankingWins.length; i++) {
m_RankingWins[i] = matrix.m_RankingWins[i];
m_RankingLosses[i] = matrix.m_RankingLosses[i];
m_RankingDiff[i] = matrix.m_RankingDiff[i];
}
}
}
/**
* removes the stored data and the ordering, but retains the dimensions of the
* matrix.
*/
public void clear() {
m_MeanPrec = getDefaultMeanPrec();
m_StdDevPrec = getDefaultStdDevPrec();
m_ShowStdDev = getDefaultShowStdDev();
m_ShowAverage = getDefaultShowAverage();
m_RemoveFilterName = getDefaultRemoveFilterName();
m_PrintColNames = getDefaultPrintColNames();
m_PrintRowNames = getDefaultPrintRowNames();
m_EnumerateColNames = getDefaultEnumerateColNames();
m_EnumerateRowNames = getDefaultEnumerateRowNames();
m_RowNameWidth = getDefaultRowNameWidth();
m_ColNameWidth = getDefaultColNameWidth();
m_MeanWidth = getDefaultMeanWidth();
m_StdDevWidth = getDefaultStdDevWidth();
m_SignificanceWidth = getDefaultSignificanceWidth();
m_CountWidth = getDefaultCountWidth();
setSize(getColCount(), getRowCount());
}
/**
* clears the content of the matrix and sets the new size.
*
* @param cols the number of mean columns
* @param rows the number of mean rows
*/
public void setSize(int cols, int rows) {
int i;
int n;
m_ColNames = new String[cols];
m_RowNames = new String[rows];
m_Counts = new double[rows];
m_ColHidden = new boolean[cols];
m_RowHidden = new boolean[rows];
m_Mean = new double[rows][cols];
m_Significance = new int[rows][cols];
m_StdDev = new double[rows][cols];
m_ColOrder = null;
m_RowOrder = null;
// NaN means that there exists no value! -> toArray()
for (i = 0; i < m_Mean.length; i++) {
for (n = 0; n < m_Mean[i].length; n++) {
m_Mean[i][n] = Double.NaN;
}
}
for (i = 0; i < m_ColNames.length; i++) {
m_ColNames[i] = "col" + i;
}
for (i = 0; i < m_RowNames.length; i++) {
m_RowNames[i] = "row" + i;
}
clearHeader();
clearSummary();
clearRanking();
}
/**
* sets the precision for the means.
*
* @param prec the number of decimals
*/
public void setMeanPrec(int prec) {
if (prec >= 0) {
m_MeanPrec = prec;
}
}
/**
* returns the current precision for the means.
*
* @return the number of decimals
*/
public int getMeanPrec() {
return m_MeanPrec;
}
/**
* returns the default precision for the means.
*
* @return the number of decimals
*/
public int getDefaultMeanPrec() {
return 2;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* experimenter gui
*/
public String meanPrecTipText() {
return "The number of decimals after the decimal point for the mean.";
}
/**
* sets the precision for the standard deviation.
*
* @param prec the number of decimals
*/
public void setStdDevPrec(int prec) {
if (prec >= 0) {
m_StdDevPrec = prec;
}
}
/**
* returns the current standard deviation precision.
*
* @return the number of decimals
*/
public int getStdDevPrec() {
return m_StdDevPrec;
}
/**
* returns the default standard deviation precision.
*
* @return the number of decimals
*/
public int getDefaultStdDevPrec() {
return 2;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* experimenter gui
*/
public String stdDevPrecTipText() {
return "The number of decimals after the decimal point for the standard deviation.";
}
/**
* sets the width for the column names (0 = optimal).
*
* @param width the width
*/
public void setColNameWidth(int width) {
if (width >= 0) {
m_ColNameWidth = width;
}
}
/**
* returns the current width for the column names.
*
* @return the width
*/
public int getColNameWidth() {
return m_ColNameWidth;
}
/**
* returns the default width for the column names.
*
* @return the width
*/
public int getDefaultColNameWidth() {
return 0;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* experimenter gui
*/
public String colNameWidthTipText() {
return "The maximum width of the column names (0 = optimal).";
}
/**
* sets the width for the row names (0 = optimal).
*
* @param width the width
*/
public void setRowNameWidth(int width) {
if (width >= 0) {
m_RowNameWidth = width;
}
}
/**
* returns the current width for the row names.
*
* @return the width
*/
public int getRowNameWidth() {
return m_RowNameWidth;
}
/**
* returns the default width for the row names.
*
* @return the width
*/
public int getDefaultRowNameWidth() {
return 0;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* experimenter gui
*/
public String rowNameWidthTipText() {
return "The maximum width for the row names (0 = optimal).";
}
/**
* sets the width for the mean (0 = optimal).
*
* @param width the width
*/
public void setMeanWidth(int width) {
if (width >= 0) {
m_MeanWidth = width;
}
}
/**
* returns the current width for the mean.
*
* @return the width
*/
public int getMeanWidth() {
return m_MeanWidth;
}
/**
* returns the default width for the mean.
*
* @return the width
*/
public int getDefaultMeanWidth() {
return 0;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* experimenter gui
*/
public String meanWidthTipText() {
return "The width of the mean (0 = optimal).";
}
/**
* sets the width for the std dev (0 = optimal).
*
* @param width the width
*/
public void setStdDevWidth(int width) {
if (width >= 0) {
m_StdDevWidth = width;
}
}
/**
* returns the current width for the std dev.
*
* @return the width
*/
public int getStdDevWidth() {
return m_StdDevWidth;
}
/**
* returns the default width for the std dev.
*
* @return the width
*/
public int getDefaultStdDevWidth() {
return 0;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* experimenter gui
*/
public String stdDevWidthTipText() {
return "The width of the standard deviation (0 = optimal).";
}
/**
* sets the width for the significance (0 = optimal).
*
* @param width the width
*/
public void setSignificanceWidth(int width) {
if (width >= 0) {
m_SignificanceWidth = width;
}
}
/**
* returns the current width for the significance.
*
* @return the width
*/
public int getSignificanceWidth() {
return m_SignificanceWidth;
}
/**
* returns the default width for the significance.
*
* @return the width
*/
public int getDefaultSignificanceWidth() {
return 0;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* experimenter gui
*/
public String significanceWidthTipText() {
return "The width of the significance indicator (0 = optimal).";
}
/**
* sets the width for the counts (0 = optimal).
*
* @param width the width
*/
public void setCountWidth(int width) {
if (width >= 0) {
m_CountWidth = width;
}
}
/**
* returns the current width for the counts.
*
* @return the width
*/
public int getCountWidth() {
return m_CountWidth;
}
/**
* returns the default width for the counts.
*
* @return the width
*/
public int getDefaultCountWidth() {
return 0;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* experimenter gui
*/
public String countWidthTipText() {
return "The width of the counts (0 = optimal).";
}
/**
* sets whether to display the std deviations or not.
*
* @param show if true then the std deviations are displayed
*/
public void setShowStdDev(boolean show) {
m_ShowStdDev = show;
}
/**
* returns whether std deviations are displayed or not.
*
* @return true if the std deviations are displayed
*/
public boolean getShowStdDev() {
return m_ShowStdDev;
}
/**
* returns the default of whether std deviations are displayed or not.
*
* @return true if the std deviations are displayed
*/
public boolean getDefaultShowStdDev() {
return false;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* experimenter gui
*/
public String showStdDevTipText() {
return "Whether to display the standard deviation column.";
}
/**
* sets whether to display the average per column or not.
*
* @param show if true then the average is displayed
*/
public void setShowAverage(boolean show) {
m_ShowAverage = show;
}
/**
* returns whether average per column is displayed or not.
*
* @return true if the average is displayed
*/
public boolean getShowAverage() {
return m_ShowAverage;
}
/**
* returns the default of whether average per column is displayed or not.
*
* @return true if the average is displayed
*/
public boolean getDefaultShowAverage() {
return false;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* experimenter gui
*/
public String showAverageTipText() {
return "Whether to show the row with averages.";
}
/**
* sets whether to remove the filter classname from the dataset name.
*
* @param remove if true then the filter classnames are shortened
*/
public void setRemoveFilterName(boolean remove) {
m_RemoveFilterName = remove;
}
/**
* returns whether the filter classname is removed from the dataset name.
*
* @return true if the filter classnames are shortened
*/
public boolean getRemoveFilterName() {
return m_RemoveFilterName;
}
/**
* returns the default of whether the filter classname is removed from the
* dataset name.
*
* @return true if the filter classnames are shortened
*/
public boolean getDefaultRemoveFilterName() {
return false;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* experimenter gui
*/
public String removeFilterNameTipText() {
return "Whether to remove the classname package prefixes from the filter names in datasets.";
}
/**
 * sets whether the column names are printed instead of just numbers.
 * deactivating this automatically sets m_EnumerateColNames to TRUE.
*
* @param print if true then the names are printed instead of numbers
* @see #setEnumerateColNames(boolean)
*/
public void setPrintColNames(boolean print) {
m_PrintColNames = print;
if (!print) {
setEnumerateColNames(true);
}
}
/**
* returns whether column names or numbers instead are printed.
*
* @return true if names instead of numbers are printed
*/
public boolean getPrintColNames() {
return m_PrintColNames;
}
/**
* returns the default of whether column names or numbers instead are printed.
*
* @return true if names instead of numbers are printed
*/
public boolean getDefaultPrintColNames() {
return true;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* experimenter gui
*/
public String printColNamesTipText() {
return "Whether to output column names or just numbers representing them.";
}
/**
 * sets whether the row names are printed instead of just numbers.
 * deactivating this automatically sets m_EnumerateRowNames to TRUE.
*
* @param print if true then names instead of numbers are printed
* @see #setEnumerateRowNames(boolean)
*/
public void setPrintRowNames(boolean print) {
m_PrintRowNames = print;
if (!print) {
setEnumerateRowNames(true);
}
}
/**
* returns whether row names or numbers instead are printed.
*
* @return true if names instead of numbers are printed
*/
public boolean getPrintRowNames() {
return m_PrintRowNames;
}
/**
* returns the default of whether row names or numbers instead are printed.
*
* @return true if names instead of numbers are printed
*/
public boolean getDefaultPrintRowNames() {
return true;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* experimenter gui
*/
public String printRowNamesTipText() {
return "Whether to output row names or just numbers representing them.";
}
/**
* sets whether the column names are prefixed with "(x)" where "x" is the
* index.
*
* @param enumerate if true then the names are prefixed
*/
public void setEnumerateColNames(boolean enumerate) {
m_EnumerateColNames = enumerate;
}
/**
* returns whether column names are prefixed with the index.
*
* @return true if the names are prefixed
*/
public boolean getEnumerateColNames() {
return m_EnumerateColNames;
}
/**
* returns the default of whether column names are prefixed with the index.
*
* @return true if the names are prefixed
*/
public boolean getDefaultEnumerateColNames() {
return true;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* experimenter gui
*/
public String enumerateColNamesTipText() {
return "Whether to enumerate the column names (prefixing them with '(x)', with 'x' being the index).";
}
/**
 * sets whether the row names are prefixed with the index.
*
* @param enumerate if true then the names will be prefixed
*/
public void setEnumerateRowNames(boolean enumerate) {
m_EnumerateRowNames = enumerate;
}
/**
 * returns whether row names are prefixed with the index.
*
* @return true if the names are prefixed
*/
public boolean getEnumerateRowNames() {
return m_EnumerateRowNames;
}
/**
 * returns the default of whether row names are prefixed with the index.
*
* @return true if the names are prefixed
*/
public boolean getDefaultEnumerateRowNames() {
return false;
}
/**
* Returns the tip text for this property.
*
* @return tip text for this property suitable for displaying in the
* experimenter gui
*/
public String enumerateRowNamesTipText() {
return "Whether to enumerate the row names (prefixing them with '(x)', with 'x' being the index).";
}
/**
* returns the number of columns.
*
* @return the number of columns
*/
public int getColCount() {
return m_ColNames.length;
}
/**
* returns the number of visible columns.
*
* @return the number of columns
*/
public int getVisibleColCount() {
int cols;
int i;
cols = 0;
for (i = 0; i < getColCount(); i++) {
if (!getColHidden(i)) {
cols++;
}
}
return cols;
}
/**
* returns the number of rows.
*
* @return the number of rows
*/
public int getRowCount() {
return m_RowNames.length;
}
/**
* returns the number of visible rows.
*
* @return the number of rows
*/
public int getVisibleRowCount() {
int rows;
int i;
rows = 0;
for (i = 0; i < getRowCount(); i++) {
if (!getRowHidden(i)) {
rows++;
}
}
return rows;
}
/**
* sets the name of the column (if the index is valid).
*
* @param index the index of the column
* @param name the name of the column
*/
public void setColName(int index, String name) {
if ((index >= 0) && (index < getColCount())) {
m_ColNames[index] = name;
}
}
/**
 * returns the name of the column, if the index is valid, otherwise null. if
* getPrintColNames() is FALSE then an empty string is returned or if
* getEnumerateColNames() is TRUE then the 1-based index surrounded by
* parentheses.
*
* @param index the index of the column
* @return the name of the column
* @see #setPrintColNames(boolean)
* @see #getPrintColNames()
* @see #setEnumerateColNames(boolean)
* @see #getEnumerateColNames()
*/
public String getColName(int index) {
String result;
result = null;
if ((index >= 0) && (index < getColCount())) {
if (getPrintColNames()) {
result = m_ColNames[index];
} else {
result = "";
}
if (getEnumerateColNames()) {
result = LEFT_PARENTHESES + Integer.toString(index + 1)
+ RIGHT_PARENTHESES + " " + result;
result = result.trim();
}
}
return result;
}
/**
* sets the name of the row (if the index is valid).
*
* @param index the index of the row
* @param name the name of the row
*/
public void setRowName(int index, String name) {
if ((index >= 0) && (index < getRowCount())) {
m_RowNames[index] = name;
}
}
/**
* returns the name of the row, if the index is valid, otherwise null. if
* getPrintRowNames() is FALSE then an empty string is returned or if
* getEnumerateRowNames() is TRUE then the 1-based index surrounded by
* parentheses.
*
* @param index the index of the row
* @return the name of the row
* @see #setPrintRowNames(boolean)
* @see #getPrintRowNames()
* @see #setEnumerateRowNames(boolean)
* @see #getEnumerateRowNames()
*/
public String getRowName(int index) {
String result;
result = null;
if ((index >= 0) && (index < getRowCount())) {
if (getPrintRowNames()) {
result = m_RowNames[index];
} else {
result = "";
}
if (getEnumerateRowNames()) {
result = LEFT_PARENTHESES + Integer.toString(index + 1)
+ RIGHT_PARENTHESES + " " + result;
result = result.trim();
}
}
return result;
}
/**
* sets the hidden status of the column (if the index is valid).
*
* @param index the index of the column
* @param hidden the hidden status of the column
*/
public void setColHidden(int index, boolean hidden) {
if ((index >= 0) && (index < getColCount())) {
m_ColHidden[index] = hidden;
}
}
/**
* returns the hidden status of the column, if the index is valid, otherwise
* false.
*
* @param index the index of the column
* @return true if hidden
*/
public boolean getColHidden(int index) {
if ((index >= 0) && (index < getColCount())) {
return m_ColHidden[index];
} else {
return false;
}
}
/**
* sets the hidden status of the row (if the index is valid).
*
* @param index the index of the row
* @param hidden the hidden status of the row
*/
public void setRowHidden(int index, boolean hidden) {
if ((index >= 0) && (index < getRowCount())) {
m_RowHidden[index] = hidden;
}
}
/**
* returns the hidden status of the row, if the index is valid, otherwise
* false.
*
* @param index the index of the row
* @return true if hidden
*/
public boolean getRowHidden(int index) {
if ((index >= 0) && (index < getRowCount())) {
return m_RowHidden[index];
} else {
return false;
}
}
/**
* sets the count for the row (if the index is valid).
*
* @param index the index of the row
* @param count the count for the row
*/
public void setCount(int index, double count) {
if ((index >= 0) && (index < getRowCount())) {
m_Counts[index] = count;
}
}
/**
* returns the count for the row. if the index is invalid then 0.
*
* @param index the index of the row
* @return the count for the row
*/
public double getCount(int index) {
if ((index >= 0) && (index < getRowCount())) {
return m_Counts[index];
} else {
return 0;
}
}
/**
* sets the mean at the given position (if the position is valid).
*
* @param col the column of the mean
* @param row the row of the mean
* @param value the value of the mean
*/
public void setMean(int col, int row, double value) {
if ((col >= 0) && (col < getColCount()) && (row >= 0)
&& (row < getRowCount())) {
m_Mean[row][col] = value;
}
}
/**
* returns the mean at the given position, if the position is valid, otherwise
* 0.
*
* @param col the column index
* @param row the row index
* @return the mean
*/
public double getMean(int col, int row) {
if ((col >= 0) && (col < getColCount()) && (row >= 0)
&& (row < getRowCount())) {
return m_Mean[row][col];
} else {
return 0;
}
}
/**
 * returns the average of the means in the given column, if the column index
 * is valid, otherwise 0.
*
* @param col the column index
* @return the average
*/
public double getAverage(int col) {
int i;
double avg;
int count;
if ((col >= 0) && (col < getColCount())) {
avg = 0;
count = 0;
for (i = 0; i < getRowCount(); i++) {
if (!Double.isNaN(getMean(col, i))) {
avg += getMean(col, i);
count++;
}
}
return avg / count;
} else {
return 0;
}
}
/**
* sets the std deviation at the given position (if the position is valid).
*
* @param col the column of the std. deviation
* @param row the row of the std deviation
* @param value the value of the std deviation
*/
public void setStdDev(int col, int row, double value) {
if ((col >= 0) && (col < getColCount()) && (row >= 0)
&& (row < getRowCount())) {
m_StdDev[row][col] = value;
}
}
/**
* returns the std deviation at the given position, if the position is valid,
* otherwise 0.
*
* @param col the column index
* @param row the row index
* @return the std deviation
*/
public double getStdDev(int col, int row) {
if ((col >= 0) && (col < getColCount()) && (row >= 0)
&& (row < getRowCount())) {
return m_StdDev[row][col];
} else {
return 0;
}
}
/**
* sets the significance at the given position (if the position is valid).
*
* @param col the column of the significance
* @param row the row of the significance
* @param value the value of the significance
*/
public void setSignificance(int col, int row, int value) {
if ((col >= 0) && (col < getColCount()) && (row >= 0)
&& (row < getRowCount())) {
m_Significance[row][col] = value;
}
}
/**
* returns the significance at the given position, if the position is valid,
 * otherwise SIGNIFICANCE_TIE.
*
* @param col the column index
* @param row the row index
* @return the indicator
*/
public int getSignificance(int col, int row) {
if ((col >= 0) && (col < getColCount()) && (row >= 0)
&& (row < getRowCount())) {
return m_Significance[row][col];
} else {
return SIGNIFICANCE_TIE;
}
}
/**
* counts the occurrences of the given significance type in the given column.
*
 * @param col the column to gather the information from
* @param type the significance type, WIN/TIE/LOSS
* @return the count
*/
public int getSignificanceCount(int col, int type) {
int result;
int i;
result = 0;
if ((col >= 0) && (col < getColCount())) {
for (i = 0; i < getRowCount(); i++) {
if (getRowHidden(i)) {
continue;
}
// no value?
if (Double.isNaN(getMean(col, i))) {
continue;
}
if (getSignificance(col, i) == type) {
result++;
}
}
}
return result;
}
/**
* sets the ordering of the rows, null means default.
*
* @param order the new order of the rows
*/
public void setRowOrder(int[] order) {
int i;
// default order?
if (order == null) {
m_RowOrder = null;
} else {
if (order.length == getRowCount()) {
m_RowOrder = new int[order.length];
for (i = 0; i < order.length; i++) {
m_RowOrder[i] = order[i];
}
} else {
System.err.println("setRowOrder: length does not match ("
+ order.length + " <> " + getRowCount() + ") - ignored!");
}
}
}
/**
* returns the current order of the rows, null means the default order.
*
* @return the current order of the rows
*/
public int[] getRowOrder() {
return m_RowOrder;
}
/**
* returns the displayed index of the given row, depending on the order of
* rows, returns -1 if index out of bounds.
*
* @param index the row to get the displayed index for
* @return the real index of the row
*/
public int getDisplayRow(int index) {
if ((index >= 0) && (index < getRowCount())) {
if (getRowOrder() == null) {
return index;
} else {
return getRowOrder()[index];
}
} else {
return -1;
}
}
/**
* sets the ordering of the columns, null means default.
*
* @param order the new order of the columns
*/
public void setColOrder(int[] order) {
int i;
// default order?
if (order == null) {
m_ColOrder = null;
} else {
if (order.length == getColCount()) {
m_ColOrder = new int[order.length];
for (i = 0; i < order.length; i++) {
m_ColOrder[i] = order[i];
}
} else {
System.err.println("setColOrder: length does not match ("
+ order.length + " <> " + getColCount() + ") - ignored!");
}
}
}
/**
* returns the current order of the columns, null means the default order.
*
* @return the current order of the columns
*/
public int[] getColOrder() {
return m_ColOrder;
}
/**
* returns the displayed index of the given col, depending on the order of
* columns, returns -1 if index out of bounds.
*
* @param index the column to get the displayed index for
* @return the real index of the column
*/
public int getDisplayCol(int index) {
if ((index >= 0) && (index < getColCount())) {
if (getColOrder() == null) {
return index;
} else {
return getColOrder()[index];
}
} else {
return -1;
}
}
/**
* returns the given number as string rounded to the given number of decimals.
* additional necessary 0's are added.
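   * For example, a value of 3.5 with a precision of 3 is returned as "3.500".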
*
* @param d the number to format
* @param prec the number of decimals after the point
* @return the formatted number
*/
protected String doubleToString(double d, int prec) {
String result;
int currentPrec;
int i;
result = Utils.doubleToString(d, prec);
if (prec <= 0 || Double.isInfinite(d) || Double.isNaN(d)) {
return result;
}
// decimal point?
if (result.indexOf(".") == -1) {
result += ".";
}
// precision so far?
currentPrec = result.length() - result.indexOf(".") - 1;
for (i = currentPrec; i < prec; i++) {
result += "0";
}
return result;
}
/**
* trims the given string down to the given length if longer, otherwise leaves
* it unchanged. a length of "0" leaves the string always unchanged.
*
* @param s the string to trim (if too long)
* @param length the max. length (0 means infinity)
* @return the trimmed string
*/
protected String trimString(String s, int length) {
if ((length > 0) && (s.length() > length)) {
return s.substring(0, length);
} else {
return s;
}
}
/**
* pads the given string on the right until it reaches the given length, if
* longer cuts it down. if length is 0 then nothing is done.
*
* @param s the string to pad
* @param length the max. length of the string
* @return the padded string
*/
protected String padString(String s, int length) {
return padString(s, length, false);
}
/**
* pads the given string until it reaches the given length, if longer cuts it
* down. if length is 0 then nothing is done.
*
* @param s the string to pad
* @param length the max. length of the string
* @param left whether to pad left or right
* @return the padded string
*/
protected String padString(String s, int length, boolean left) {
String result;
int i;
result = s;
// pad with blanks
for (i = s.length(); i < length; i++) {
if (left) {
result = " " + result;
} else {
result = result + " ";
}
}
// too long?
if ((length > 0) && (result.length() > length)) {
result = result.substring(0, length);
}
return result;
}
/**
* returns the length of the longest cell in the given column.
*
* @param data the data to base the calculation on
* @param col the column to check
* @return the maximum length
*/
protected int getColSize(String[][] data, int col) {
return getColSize(data, col, false, false);
}
/**
* returns the length of the longest cell in the given column.
*
* @param data the data to base the calculation on
* @param col the column to check
* @param skipFirst whether to skip the first row
* @param skipLast whether to skip the last row
* @return the maximum length
*/
protected int getColSize(String[][] data, int col, boolean skipFirst,
boolean skipLast) {
int result;
int i;
result = 0;
if ((col >= 0) && (col < data[0].length)) {
for (i = 0; i < data.length; i++) {
// skip first?
if ((i == 0) && (skipFirst)) {
continue;
}
// skip last?
if ((i == data.length - 1) && (skipLast)) {
continue;
}
if (data[i][col].length() > result) {
result = data[i][col].length();
}
}
}
return result;
}
/**
* removes the filter classname from the given string if it should be removed,
* otherwise leaves the string alone.
*
* @param s the string to process
* @return the processed string
* @see #getRemoveFilterName()
*/
protected String removeFilterName(String s) {
if (getRemoveFilterName()) {
return s.replaceAll("-weka\\.filters\\..*", "")
.replaceAll("-unsupervised\\..*", "")
.replaceAll("-supervised\\..*", "");
} else {
return s;
}
}
/**
* returns a 2-dimensional array with the prepared data. includes the column
* and row names. hidden cols/rows are already excluded. <br>
* first row: column names<br>
* last row: wins/ties/losses<br>
* first col: row names<br>
*
* @return the generated array
*/
protected String[][] toArray() {
int i;
int n;
int ii;
int nn;
int x;
int y;
String[][] result;
String[][] tmpResult;
int cols;
int rows;
boolean valueExists;
// determine visible cols/rows
rows = getVisibleRowCount();
if (getShowAverage()) {
rows++;
}
cols = getVisibleColCount();
if (getShowStdDev()) {
cols = cols * 3; // mean + stddev + sign.
} else {
cols = cols * 2; // mean + sign.
}
result = new String[rows + 2][cols + 1];
// col names
result[0][0] = trimString("Dataset", getRowNameWidth());
x = 1;
for (ii = 0; ii < getColCount(); ii++) {
i = getDisplayCol(ii);
if (getColHidden(i)) {
continue;
}
result[0][x] = trimString(removeFilterName(getColName(i)),
getColNameWidth());
x++;
// std dev
if (getShowStdDev()) {
result[0][x] = "";
x++;
}
// sign.
result[0][x] = "";
x++;
}
// row names
y = 1;
for (ii = 0; ii < getRowCount(); ii++) {
i = getDisplayRow(ii);
if (!getRowHidden(i)) {
result[y][0] = trimString(removeFilterName(getRowName(i)),
getRowNameWidth());
y++;
}
}
// fill in mean/std dev
y = 1;
for (ii = 0; ii < getRowCount(); ii++) {
i = getDisplayRow(ii);
if (getRowHidden(i)) {
continue;
}
x = 1;
for (nn = 0; nn < getColCount(); nn++) {
n = getDisplayCol(nn);
if (getColHidden(n)) {
continue;
}
// do we have a value in the matrix?
valueExists = (!Double.isNaN(getMean(n, i)));
// mean
if (!valueExists) {
result[y][x] = "";
} else {
result[y][x] = doubleToString(getMean(n, i), getMeanPrec());
}
x++;
// stddev
if (getShowStdDev()) {
if (!valueExists) {
result[y][x] = "";
} else if (Double.isInfinite(getStdDev(n, i))) {
result[y][x] = "Inf";
} else {
result[y][x] = doubleToString(getStdDev(n, i), getStdDevPrec());
}
x++;
}
// significance
if (!valueExists) {
result[y][x] = "";
} else {
switch (getSignificance(n, i)) {
case SIGNIFICANCE_TIE:
result[y][x] = TIE_STRING;
break;
case SIGNIFICANCE_WIN:
result[y][x] = WIN_STRING;
break;
case SIGNIFICANCE_LOSS:
result[y][x] = LOSS_STRING;
break;
}
}
x++;
}
y++;
}
// the average
if (getShowAverage()) {
y = result.length - 2;
x = 0;
result[y][0] = "Average";
x++;
for (ii = 0; ii < getColCount(); ii++) {
i = getDisplayCol(ii);
if (getColHidden(i)) {
continue;
}
// mean-average
result[y][x] = doubleToString(getAverage(i), getMeanPrec());
x++;
// std dev.
if (getShowStdDev()) {
result[y][x] = "";
x++;
}
// significance
result[y][x] = "";
x++;
}
}
// wins/ties/losses
y = result.length - 1;
x = 0;
result[y][0] = LEFT_PARENTHESES + WIN_STRING + "/" + TIE_STRING + "/"
+ LOSS_STRING + RIGHT_PARENTHESES;
x++;
for (ii = 0; ii < getColCount(); ii++) {
i = getDisplayCol(ii);
if (getColHidden(i)) {
continue;
}
// mean
result[y][x] = "";
x++;
// std dev.
if (getShowStdDev()) {
result[y][x] = "";
x++;
}
// significance
result[y][x] = LEFT_PARENTHESES
+ getSignificanceCount(i, SIGNIFICANCE_WIN) + "/"
+ getSignificanceCount(i, SIGNIFICANCE_TIE) + "/"
+ getSignificanceCount(i, SIGNIFICANCE_LOSS) + RIGHT_PARENTHESES;
x++;
}
// base column has no significance -> remove these columns
tmpResult = new String[result.length][result[0].length - 1];
x = 0;
for (i = 0; i < result[0].length; i++) {
// significance
if (((i == 3) && (getShowStdDev())) || ((i == 2) && (!getShowStdDev()))) {
continue;
}
for (n = 0; n < result.length; n++) {
tmpResult[n][x] = result[n][i];
}
x++;
}
result = tmpResult;
return result;
}
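  // Layout sketch of the array returned by toArray() (illustrative only; two
  // result sets, two datasets, std. deviations hidden, averages enabled, and the
  // default '(' ')' parentheses and 'v'/' '/'*' win/tie/loss strings assumed):
  //
  //   { "Dataset", "(1) base", "(2) other", ""        }   // column names
  //   { "data-1",  "0.85",     "0.87",      "v"       }   // means + significance
  //   { "data-2",  "0.90",     "0.88",      "*"       }
  //   { "Average", "0.88",     "0.88",      ""        }   // only with show-average
  //   { "(v/ /*)", "",         "",          "(1/0/1)" }   // wins/ties/losses
  //
  // The significance column of the base result set is dropped in the final step
  // above, which is why the base column has no trailing indicator column.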
/**
   * returns true if the index (in the array produced by toArray()) points at
   * the column holding the row names.
   *
   * @param index the column index
   * @return true if the index represents the row name column
*/
protected boolean isRowName(int index) {
return (index == 0);
}
/**
   * returns true if the index (in the array produced by toArray())
* contains a mean.
*
* @param index the column index
* @return true if mean column
*/
protected boolean isMean(int index) {
index--; // dataset
if (index == 0) {
return true; // base column
} else {
index--; // base column
if (index < 0) {
return false;
}
if (getShowStdDev()) {
return (index % 3 == 1);
} else {
return (index % 2 == 0);
}
}
}
/**
   * returns true if the row index (in the array produced by toArray())
* contains the average row.
*
* @param rowIndex the row index
* @return true if average row
*/
protected boolean isAverage(int rowIndex) {
if (getShowAverage()) {
return (getVisibleRowCount() + 1 == rowIndex);
} else {
return false;
}
}
/**
   * returns true if the index (in the array produced by toArray())
* contains a std deviation.
*
* @param index the column index
* @return true if std dev column
*/
protected boolean isStdDev(int index) {
index--; // dataset
index--; // base column
if (getShowStdDev()) {
if (index == 0) {
return true; // stddev of base column
} else {
index--; // stddev of base column
if (index < 0) {
return false;
}
return (index % 3 == 1);
}
} else {
return false;
}
}
/**
   * returns true if the index (in the array produced by toArray())
* contains a significance column.
*
* @param index the column index
* @return true if significance column
*/
protected boolean isSignificance(int index) {
index--; // dataset
index--; // base column
if (getShowStdDev()) {
index--; // stddev of base column
if (index < 0) {
return false;
}
return (index % 3 == 2);
} else {
if (index < 0) {
return false;
}
return (index % 2 == 1);
}
}
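  // Column-index cheat sheet for the is*(int) helpers above (illustrative,
  // referring to the final toArray() layout, i.e. after the base significance
  // column has been removed):
  //
  //   without std. deviations: 0=dataset, 1=base mean, 2=mean, 3=sig., 4=mean, 5=sig., ...
  //   with std. deviations:    0=dataset, 1=base mean, 2=base stddev,
  //                            3=mean, 4=stddev, 5=sig., 6=mean, 7=stddev, 8=sig., ...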
/**
* returns the matrix as a string.
*
* @return the matrix as string
*/
public abstract String toStringMatrix();
/**
* returns the matrix as a string.
*
* @return the matrix as string
* @see #toStringMatrix()
*/
@Override
public String toString() {
return toStringMatrix();
}
/**
* removes all the header information.
*/
public void clearHeader() {
m_HeaderKeys = new Vector<String>();
m_HeaderValues = new Vector<String>();
}
/**
* adds the key-value pair to the header.
*
* @param key the name of the header value
* @param value the value of the header value
*/
public void addHeader(String key, String value) {
int pos;
pos = m_HeaderKeys.indexOf(key);
if (pos > -1) {
m_HeaderValues.set(pos, value);
} else {
m_HeaderKeys.add(key);
m_HeaderValues.add(value);
}
}
/**
   * returns the value associated with the given key, or null if it cannot be
   * found.
*
* @param key the key to retrieve the value for
* @return the associated value
*/
public String getHeader(String key) {
int pos;
pos = m_HeaderKeys.indexOf(key);
    if (pos == -1) {
      return null;
    } else {
      return m_HeaderValues.get(pos);
}
}
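  // Minimal usage sketch for the header facilities (illustrative; the key/value
  // strings are made up):
  //
  //   matrix.addHeader("Tester", "PairedCorrectedTTester");
  //   matrix.addHeader("Tester", "PairedTTester");   // overwrites the first value
  //   String tester  = matrix.getHeader("Tester");   // -> "PairedTTester"
  //   String missing = matrix.getHeader("no-such");  // -> null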
/**
* returns an enumeration of the header keys.
*
* @return all stored keys
*/
public Enumeration<String> headerKeys() {
return m_HeaderKeys.elements();
}
/**
* returns the header of the matrix as a string.
*
* @return the header as string
* @see #m_HeaderKeys
* @see #m_HeaderValues
*/
public abstract String toStringHeader();
/**
   * returns a key for all the column names, for better readability if the
   * names got cut off.
*
* @return the key
*/
public abstract String toStringKey();
/**
* clears the current summary data.
*/
public void clearSummary() {
m_NonSigWins = null;
m_Wins = null;
}
/**
* sets the non-significant and significant wins of the resultsets.
*
* @param nonSigWins the non-significant wins
* @param wins the significant wins
*/
public void setSummary(int[][] nonSigWins, int[][] wins) {
int i;
int n;
m_NonSigWins = new int[nonSigWins.length][nonSigWins[0].length];
m_Wins = new int[wins.length][wins[0].length];
for (i = 0; i < m_NonSigWins.length; i++) {
for (n = 0; n < m_NonSigWins[i].length; n++) {
m_NonSigWins[i][n] = nonSigWins[i][n];
m_Wins[i][n] = wins[i][n];
}
}
}
/**
* returns the character representation of the given column.
*
* @param col the column index
* @return the title of the column
*/
protected String getSummaryTitle(int col) {
return "" + (char) ('a' + col % 26);
}
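  // Example (illustrative): column 0 -> "a", column 1 -> "b", ..., column 25 -> "z";
  // column 26 wraps around to "a" again because of the modulo 26.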
/**
* returns the summary as string.
*
* @return the summary
*/
public abstract String toStringSummary();
/**
* clears the currently stored ranking data.
*/
public void clearRanking() {
m_RankingWins = null;
m_RankingLosses = null;
m_RankingDiff = null;
}
/**
* sets the ranking data based on the wins.
*
* @param wins the wins
*/
public void setRanking(int[][] wins) {
int i;
int j;
m_RankingWins = new int[wins.length];
m_RankingLosses = new int[wins.length];
m_RankingDiff = new int[wins.length];
for (i = 0; i < wins.length; i++) {
for (j = 0; j < wins[i].length; j++) {
m_RankingWins[j] += wins[i][j];
m_RankingDiff[j] += wins[i][j];
m_RankingLosses[i] += wins[i][j];
m_RankingDiff[i] -= wins[i][j];
}
}
}
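  // Illustrative sketch, assuming wins[i][j] counts the datasets on which result
  // set j significantly beats result set i (the same convention as the summary):
  //
  //   int[][] wins = { { 0, 2 },     // set 1 beats set 0 on 2 datasets
  //                    { 1, 0 } };   // set 0 beats set 1 on 1 dataset
  //   matrix.setRanking(wins);
  //   // m_RankingWins   == { 1, 2 }   total significant wins per result set
  //   // m_RankingLosses == { 2, 1 }   total significant losses per result set
  //   // m_RankingDiff   == { -1, 1 }  wins minus losses, used to sort the ranking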
/**
* returns the ranking in a string representation.
*
* @return the ranking
*/
public abstract String toStringRanking();
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/ResultMatrixCSV.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ResultMatrixCSV.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
<!-- globalinfo-start -->
* Generates the matrix in CSV ('comma-separated values') format.
* <p/>
<!-- globalinfo-end -->
*
<!-- options-start -->
* Valid options are: <p/>
*
* <pre> -mean-prec <int>
* The number of decimals after the decimal point for the mean.
* (default: 2)</pre>
*
* <pre> -stddev-prec <int>
 * The number of decimals after the decimal point for the standard deviation.
* (default: 2)</pre>
*
* <pre> -col-name-width <int>
* The maximum width for the column names (0 = optimal).
* (default: 0)</pre>
*
* <pre> -row-name-width <int>
* The maximum width for the row names (0 = optimal).
* (default: 0)</pre>
*
* <pre> -mean-width <int>
* The width of the mean (0 = optimal).
* (default: 0)</pre>
*
* <pre> -stddev-width <int>
* The width of the standard deviation (0 = optimal).
* (default: 0)</pre>
*
* <pre> -sig-width <int>
* The width of the significance indicator (0 = optimal).
* (default: 0)</pre>
*
* <pre> -count-width <int>
* The width of the counts (0 = optimal).
* (default: 0)</pre>
*
* <pre> -show-stddev
* Whether to display the standard deviation column.
* (default: no)</pre>
*
* <pre> -show-avg
* Whether to show the row with averages.
* (default: no)</pre>
*
* <pre> -remove-filter
* Whether to remove the classname package prefixes from the
* filter names in datasets.
* (default: no)</pre>
*
* <pre> -print-col-names
* Whether to output column names or just numbers representing them.
* (default: no)</pre>
*
* <pre> -print-row-names
* Whether to output row names or just numbers representing them.
* (default: no)</pre>
*
* <pre> -enum-col-names
* Whether to enumerate the column names (prefixing them with
* '(x)', with 'x' being the index).
* (default: no)</pre>
*
* <pre> -enum-row-names
* Whether to enumerate the row names (prefixing them with
* '(x)', with 'x' being the index).
* (default: no)</pre>
*
<!-- options-end -->
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class ResultMatrixCSV
extends ResultMatrix {
/** for serialization. */
private static final long serialVersionUID = -171838863135042743L;
/**
* initializes the matrix as 1x1 matrix.
*/
public ResultMatrixCSV() {
this(1, 1);
}
/**
* initializes the matrix with the given dimensions.
*
* @param cols the number of columns
* @param rows the number of rows
*/
public ResultMatrixCSV(int cols, int rows) {
super(cols, rows);
}
/**
* initializes the matrix with the values from the given matrix.
*
* @param matrix the matrix to get the values from
*/
public ResultMatrixCSV(ResultMatrix matrix) {
super(matrix);
}
/**
* Returns a string describing the matrix.
*
* @return a description suitable for
* displaying in the experimenter gui
*/
public String globalInfo() {
return "Generates the matrix in CSV ('comma-separated values') format.";
}
/**
* returns the name of the output format.
*
* @return the display name
*/
public String getDisplayName() {
return "CSV";
}
/**
* removes the stored data but retains the dimensions of the matrix.
*/
public void clear() {
super.clear();
LEFT_PARENTHESES = "[";
RIGHT_PARENTHESES = "]";
}
/**
* returns the default width for the row names.
*
* @return the width
*/
public int getDefaultRowNameWidth() {
return 25;
}
/**
   * returns the default of whether column names are printed instead of just numbers.
*
* @return true if names instead of numbers are printed
*/
public boolean getDefaultPrintColNames() {
return false;
}
/**
* returns the default of whether column names are prefixed with the index.
*
* @return true if the names are prefixed
*/
public boolean getDefaultEnumerateColNames() {
return true;
}
/**
* returns the header of the matrix as a string.
*
* @return the header
* @see #m_HeaderKeys
* @see #m_HeaderValues
*/
public String toStringHeader() {
return new ResultMatrixPlainText(this).toStringHeader();
}
/**
* returns the matrix in CSV format.
*
* @return the matrix as string
*/
public String toStringMatrix() {
StringBuffer result;
String[][] cells;
int i;
int n;
result = new StringBuffer();
cells = toArray();
for (i = 0; i < cells.length; i++) {
for (n = 0; n < cells[i].length; n++) {
if (n > 0)
result.append(",");
result.append(Utils.quote(cells[i][n]));
}
result.append("\n");
}
return result.toString();
}
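  // Note (illustrative): every cell is passed through Utils.quote() before being
  // joined with ',', so cells containing commas, spaces or quote characters end
  // up wrapped in single quotes. A hypothetical header row could therefore look
  // roughly like:
  //
  //   Dataset,'(1) rules.ZeroR','(2) trees.J48',''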
/**
* returns a key for all the col names, for better readability if
* the names got cut off.
*
* @return the key
*/
public String toStringKey() {
String result;
int i;
result = "Key,\n";
for (i = 0; i < getColCount(); i++) {
if (getColHidden(i))
continue;
result += LEFT_PARENTHESES + (i+1) + RIGHT_PARENTHESES
+ "," + Utils.quote(removeFilterName(m_ColNames[i])) + "\n";
}
return result;
}
/**
* returns the summary as string.
*
* @return the summary
*/
public String toStringSummary() {
String result;
String titles;
int i;
int j;
String line;
if (m_NonSigWins == null)
return "-summary data not set-";
result = "";
titles = "";
for (i = 0; i < getColCount(); i++) {
if (getColHidden(i))
continue;
if (!titles.equals(""))
titles += ",";
titles += getSummaryTitle(i);
}
result += titles + ",'(No. of datasets where [col] >> [row])'\n";
for (i = 0; i < getColCount(); i++) {
if (getColHidden(i))
continue;
line = "";
for (j = 0; j < getColCount(); j++) {
if (getColHidden(j))
continue;
if (!line.equals(""))
line += ",";
if (j == i)
line += "-";
else
line += m_NonSigWins[i][j]
+ " (" + m_Wins[i][j] + ")";
}
result += line + "," + getSummaryTitle(i) + " = " + removeFilterName(m_ColNames[i]) + '\n';
}
return result;
}
/**
* returns the ranking in a string representation.
*
* @return the ranking
*/
public String toStringRanking() {
String result;
int[] ranking;
int i;
int curr;
if (m_RankingWins == null)
return "-ranking data not set-";
result = ">-<,>,<,Resultset\n";
ranking = Utils.sort(m_RankingDiff);
for (i = getColCount() - 1; i >= 0; i--) {
curr = ranking[i];
if (getColHidden(curr))
continue;
result += m_RankingDiff[curr] + ","
+ m_RankingWins[curr] + ","
+ m_RankingLosses[curr] + ","
+ removeFilterName(m_ColNames[curr]) + "\n";
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* for testing only.
*
* @param args ignored
*/
public static void main(String[] args) {
ResultMatrix matrix;
int i;
int n;
matrix = new ResultMatrixCSV(3, 3);
// set header
matrix.addHeader("header1", "value1");
matrix.addHeader("header2", "value2");
matrix.addHeader("header2", "value3");
// set values
for (i = 0; i < matrix.getRowCount(); i++) {
for (n = 0; n < matrix.getColCount(); n++) {
matrix.setMean(n, i, (i+1)*n);
matrix.setStdDev(n, i, ((double) (i+1)*n) / 100);
if (i == n) {
if (i % 2 == 1)
matrix.setSignificance(n, i, SIGNIFICANCE_WIN);
else
matrix.setSignificance(n, i, SIGNIFICANCE_LOSS);
}
}
}
System.out.println("\n\n--> " + matrix.getDisplayName());
System.out.println("\n1. complete\n");
System.out.println(matrix.toStringHeader() + "\n");
System.out.println(matrix.toStringMatrix() + "\n");
System.out.println(matrix.toStringKey());
System.out.println("\n2. complete with std deviations\n");
matrix.setShowStdDev(true);
System.out.println(matrix.toStringMatrix());
System.out.println("\n3. cols numbered\n");
matrix.setPrintColNames(false);
System.out.println(matrix.toStringMatrix());
System.out.println("\n4. second col missing\n");
matrix.setColHidden(1, true);
System.out.println(matrix.toStringMatrix());
System.out.println("\n5. last row missing, rows numbered too\n");
matrix.setRowHidden(2, true);
matrix.setPrintRowNames(false);
System.out.println(matrix.toStringMatrix());
System.out.println("\n6. mean prec to 3\n");
matrix.setMeanPrec(3);
matrix.setPrintRowNames(false);
System.out.println(matrix.toStringMatrix());
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/ResultMatrixGnuPlot.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ResultMatrixGnuPlot.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.Version;
/**
<!-- globalinfo-start -->
* Generates output for a data and script file for GnuPlot.
* <p/>
<!-- globalinfo-end -->
*
<!-- options-start -->
* Valid options are: <p/>
*
* <pre> -mean-prec <int>
* The number of decimals after the decimal point for the mean.
* (default: 2)</pre>
*
* <pre> -stddev-prec <int>
 * The number of decimals after the decimal point for the standard deviation.
* (default: 2)</pre>
*
* <pre> -col-name-width <int>
* The maximum width for the column names (0 = optimal).
* (default: 50)</pre>
*
* <pre> -row-name-width <int>
* The maximum width for the row names (0 = optimal).
* (default: 50)</pre>
*
* <pre> -mean-width <int>
* The width of the mean (0 = optimal).
* (default: 0)</pre>
*
* <pre> -stddev-width <int>
* The width of the standard deviation (0 = optimal).
* (default: 0)</pre>
*
* <pre> -sig-width <int>
* The width of the significance indicator (0 = optimal).
* (default: 0)</pre>
*
* <pre> -count-width <int>
* The width of the counts (0 = optimal).
* (default: 0)</pre>
*
* <pre> -show-stddev
* Whether to display the standard deviation column.
* (default: no)</pre>
*
* <pre> -show-avg
* Whether to show the row with averages.
* (default: no)</pre>
*
* <pre> -remove-filter
* Whether to remove the classname package prefixes from the
* filter names in datasets.
* (default: no)</pre>
*
* <pre> -print-col-names
* Whether to output column names or just numbers representing them.
* (default: no)</pre>
*
* <pre> -print-row-names
* Whether to output row names or just numbers representing them.
* (default: no)</pre>
*
* <pre> -enum-col-names
* Whether to enumerate the column names (prefixing them with
* '(x)', with 'x' being the index).
* (default: no)</pre>
*
* <pre> -enum-row-names
* Whether to enumerate the row names (prefixing them with
* '(x)', with 'x' being the index).
* (default: no)</pre>
*
<!-- options-end -->
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class ResultMatrixGnuPlot
extends ResultMatrix {
/** for serialization. */
private static final long serialVersionUID = -234648254944790097L;
/**
* initializes the matrix as 1x1 matrix.
*/
public ResultMatrixGnuPlot() {
this(1, 1);
}
/**
* initializes the matrix with the given dimensions.
*
* @param cols the number of columns
* @param rows the number of rows
*/
public ResultMatrixGnuPlot(int cols, int rows) {
super(cols, rows);
}
/**
* initializes the matrix with the values from the given matrix.
*
* @param matrix the matrix to get the values from
*/
public ResultMatrixGnuPlot(ResultMatrix matrix) {
super(matrix);
}
/**
* Returns a string describing the matrix.
*
* @return a description suitable for
* displaying in the experimenter gui
*/
public String globalInfo() {
return "Generates output for a data and script file for GnuPlot.";
}
/**
* returns the name of the output format.
*
* @return the display name
*/
public String getDisplayName() {
return "GNUPlot";
}
/**
* removes the stored data but retains the dimensions of the matrix.
*/
public void clear() {
super.clear();
LEFT_PARENTHESES = "";
RIGHT_PARENTHESES = "";
}
/**
* returns the default width for the row names.
*
* @return the width
*/
public int getDefaultRowNameWidth() {
return 50;
}
/**
* returns the default width for the column names.
*
* @return the width
*/
public int getDefaultColNameWidth() {
return 50;
}
/**
* returns the default of whether column names are prefixed with the index.
*
* @return true if the names are prefixed
*/
public boolean getDefaultEnumerateColNames() {
return false;
}
/**
* returns the default of whether row names are prefixed with the index.
*
* @return true if the names are prefixed
*/
public boolean getDefaultEnumerateRowNames() {
return false;
}
/**
* returns the header of the matrix as a string.
*
* @return the header
* @see #m_HeaderKeys
* @see #m_HeaderValues
*/
public String toStringHeader() {
return new ResultMatrixPlainText(this).toStringHeader();
}
/**
   * returns the matrix as GnuPlot data and script output.
*
* @return the matrix
*/
public String toStringMatrix() {
StringBuffer result;
String[][] cells;
int i;
int n;
String line;
String title;
String generated;
result = new StringBuffer();
cells = toArray();
// generation comment
generated = "# generated by WEKA " + Version.VERSION + "\n";
// data
result.append("\n");
result.append("##################\n");
result.append("# file: plot.dat #\n");
result.append("##################\n");
result.append(generated);
result.append("# contains the data for the plot\n");
// key for x-axis
result.append("\n");
result.append("# key for the x-axis\n");
for (i = 1; i < cells.length - 1; i++)
result.append("# " + i + " - " + cells[i][0] + "\n");
// the data itself
result.append("\n");
result.append("# data for the plot\n");
for (i = 1; i < cells.length - 1; i++) {
result.append(Integer.toString(i));
for (n = 1; n < cells[i].length; n++) {
if (isSignificance(n))
continue;
result.append(" ");
result.append(Utils.quote(cells[i][n]));
}
result.append("\n");
}
result.append("#######\n");
result.append("# end #\n");
result.append("#######\n");
// script
result.append("\n");
result.append("##################\n");
result.append("# file: plot.scr #\n");
result.append("##################\n");
result.append(generated);
result.append("# script to plot the data\n");
result.append("\n");
result.append("# display it in a window:\n");
result.append("set terminal x11\n");
result.append("set output\n");
result.append("\n");
result.append("# to display all data rows:\n");
result.append("set xrange [0:" + ((cells.length - 2) + 1) + "]\n");
result.append("\n");
result.append("# axis labels, e.g.:\n");
result.append("#set xlabel \"Datasets\"\n");
result.append("#set ylabel \"Accuracy in %\"\n");
result.append("\n");
result.append("# the plot commands\n");
n = 1;
i = 0;
while (i < cells[0].length - 1) {
i++;
if (isSignificance(i))
continue;
n++;
// plot
if (i == 1)
line = "plot";
else
line = "replot";
line += " \"plot.dat\"";
// title
title = "title \"" + cells[0][i] + "\"";
// columns
line += " using 1:" + n;
if (getShowStdDev()) {
n++;
i++;
// errorbars
line += ":" + n;
}
// options
line += " with";
if (getShowStdDev())
line += " yerrorbars";
else
line += " lines";
line += " " + title;
result.append(line + "\n");
}
result.append("\n");
result.append("# generate ps:\n");
result.append("#set terminal postscript\n");
result.append("#set output \"plot.ps\"\n");
result.append("#replot\n");
result.append("\n");
result.append("# generate png:\n");
result.append("#set terminal png size 800,600\n");
result.append("#set output \"plot.png\"\n");
result.append("#replot\n");
result.append("\n");
result.append("# wait for user to hit <Return>\n");
result.append("pause -1\n");
result.append("#######\n");
result.append("# end #\n");
result.append("#######\n");
return result.toString();
}
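  // Usage sketch (illustrative, assuming gnuplot is installed): the generated
  // text contains two labelled blocks, "plot.dat" and "plot.scr". Saving each
  // block to a file with that name and running
  //
  //   gnuplot plot.scr
  //
  // should display one curve per result set in an X11 window; the commented-out
  // "set terminal postscript"/"set terminal png" lines at the end redirect the
  // plot to a file instead.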
/**
   * returns a key for all the column names, for better readability if
   * the names got cut off.
*
* @return the key
*/
public String toStringKey() {
return new ResultMatrixPlainText(this).toStringKey();
}
/**
* returns the summary as string.
*
* @return the summary
*/
public String toStringSummary() {
return new ResultMatrixPlainText(this).toStringSummary();
}
/**
* returns the ranking in a string representation.
*
* @return the ranking
*/
public String toStringRanking() {
return new ResultMatrixPlainText(this).toStringRanking();
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* for testing only.
*
* @param args ignored
*/
public static void main(String[] args) {
ResultMatrix matrix;
int i;
int n;
matrix = new ResultMatrixGnuPlot(3, 3);
// set header
matrix.addHeader("header1", "value1");
matrix.addHeader("header2", "value2");
matrix.addHeader("header2", "value3");
// set values
for (i = 0; i < matrix.getRowCount(); i++) {
for (n = 0; n < matrix.getColCount(); n++) {
matrix.setMean(n, i, (i+1)*n);
matrix.setStdDev(n, i, ((double) (i+1)*n) / 100);
if (i == n) {
if (i % 2 == 1)
matrix.setSignificance(n, i, SIGNIFICANCE_WIN);
else
matrix.setSignificance(n, i, SIGNIFICANCE_LOSS);
}
}
}
System.out.println("\n\n--> " + matrix.getDisplayName());
System.out.println("\n1. complete\n");
System.out.println(matrix.toStringHeader() + "\n");
System.out.println(matrix.toStringMatrix() + "\n");
System.out.println(matrix.toStringKey());
System.out.println("\n2. complete with std deviations\n");
matrix.setShowStdDev(true);
System.out.println(matrix.toStringMatrix());
System.out.println("\n3. cols numbered\n");
matrix.setPrintColNames(false);
System.out.println(matrix.toStringMatrix());
System.out.println("\n4. second col missing\n");
matrix.setColHidden(1, true);
System.out.println(matrix.toStringMatrix());
System.out.println("\n5. last row missing, rows numbered too\n");
matrix.setRowHidden(2, true);
matrix.setPrintRowNames(false);
System.out.println(matrix.toStringMatrix());
System.out.println("\n6. mean prec to 3\n");
matrix.setMeanPrec(3);
matrix.setPrintRowNames(false);
System.out.println(matrix.toStringMatrix());
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/ResultMatrixHTML.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ResultMatrixHTML.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
<!-- globalinfo-start -->
* Generates the matrix output as HTML.
* <p/>
<!-- globalinfo-end -->
*
<!-- options-start -->
* Valid options are: <p/>
*
* <pre> -mean-prec <int>
* The number of decimals after the decimal point for the mean.
* (default: 2)</pre>
*
* <pre> -stddev-prec <int>
 * The number of decimals after the decimal point for the standard deviation.
* (default: 2)</pre>
*
* <pre> -col-name-width <int>
* The maximum width for the column names (0 = optimal).
* (default: 0)</pre>
*
* <pre> -row-name-width <int>
* The maximum width for the row names (0 = optimal).
* (default: 25)</pre>
*
* <pre> -mean-width <int>
* The width of the mean (0 = optimal).
* (default: 0)</pre>
*
* <pre> -stddev-width <int>
* The width of the standard deviation (0 = optimal).
* (default: 0)</pre>
*
* <pre> -sig-width <int>
* The width of the significance indicator (0 = optimal).
* (default: 0)</pre>
*
* <pre> -count-width <int>
* The width of the counts (0 = optimal).
* (default: 0)</pre>
*
* <pre> -show-stddev
* Whether to display the standard deviation column.
* (default: no)</pre>
*
* <pre> -show-avg
* Whether to show the row with averages.
* (default: no)</pre>
*
* <pre> -remove-filter
* Whether to remove the classname package prefixes from the
* filter names in datasets.
* (default: no)</pre>
*
* <pre> -print-col-names
* Whether to output column names or just numbers representing them.
* (default: no)</pre>
*
* <pre> -print-row-names
* Whether to output row names or just numbers representing them.
* (default: no)</pre>
*
* <pre> -enum-col-names
* Whether to enumerate the column names (prefixing them with
* '(x)', with 'x' being the index).
* (default: no)</pre>
*
* <pre> -enum-row-names
* Whether to enumerate the row names (prefixing them with
* '(x)', with 'x' being the index).
* (default: no)</pre>
*
<!-- options-end -->
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class ResultMatrixHTML
extends ResultMatrix {
/** for serialization. */
private static final long serialVersionUID = 6672380422544799990L;
/**
* initializes the matrix as 1x1 matrix.
*/
public ResultMatrixHTML() {
this(1, 1);
}
/**
* initializes the matrix with the given dimensions.
*
* @param cols the number of columns
* @param rows the number of rows
*/
public ResultMatrixHTML(int cols, int rows) {
super(cols, rows);
}
/**
* initializes the matrix with the values from the given matrix.
*
* @param matrix the matrix to get the values from
*/
public ResultMatrixHTML(ResultMatrix matrix) {
super(matrix);
}
/**
* Returns a string describing the matrix.
*
* @return a description suitable for
* displaying in the experimenter gui
*/
public String globalInfo() {
return "Generates the matrix output as HTML.";
}
/**
* returns the name of the output format.
*
* @return the display name
*/
public String getDisplayName() {
return "HTML";
}
/**
* returns the default width for the row names.
*
* @return the width
*/
public int getDefaultRowNameWidth() {
return 25;
}
/**
   * returns the default of whether column names are printed instead of just numbers.
*
* @return true if names instead of numbers are printed
*/
public boolean getDefaultPrintColNames() {
return false;
}
/**
* returns the default of whether column names are prefixed with the index.
*
* @return true if the names are prefixed
*/
public boolean getDefaultEnumerateColNames() {
return true;
}
/**
* returns the header of the matrix as a string.
*
* @return the header
* @see #m_HeaderKeys
* @see #m_HeaderValues
*/
public String toStringHeader() {
return new ResultMatrixPlainText(this).toStringHeader();
}
/**
* returns the matrix in an HTML table.
*
* @return the matrix
*/
public String toStringMatrix() {
StringBuffer result;
String[][] cells;
int i;
int n;
int cols;
result = new StringBuffer();
cells = toArray();
result.append("<table border=\"1\" cellpadding=\"3\" cellspacing=\"0\">\n");
// headings
result.append(" <tr>");
for (n = 0; n < cells[0].length; n++) {
if (isRowName(n)) {
result.append("<td><b>" + cells[0][n] + "</b></td>");
}
else if (isMean(n)) {
if (n == 1)
cols = 1;
else
cols = 2;
if (getShowStdDev())
cols++;
result.append("<td align=\"center\" colspan=\"" + cols + "\">");
result.append("<b>" + cells[0][n] + "</b>");
result.append("</td>");
}
}
result.append("</tr>\n");
// data
for (i = 1; i < cells.length; i++) {
result.append(" <tr>");
for (n = 0; n < cells[i].length; n++) {
if (isRowName(n))
result.append("<td>");
else if (isMean(n) || isStdDev(n))
result.append("<td align=\"right\">");
else if (isSignificance(n))
result.append("<td align=\"center\">");
else
result.append("<td>");
// content
if (cells[i][n].trim().equals(""))
result.append(" ");
else if (isStdDev(n))
result.append("± " + cells[i][n]);
else
result.append(cells[i][n]);
result.append("</td>");
}
result.append("</tr>\n");
}
result.append("</table>\n");
return result.toString();
}
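  // Note (illustrative): the returned string is only a <table> fragment, not a
  // complete HTML document. A minimal, hypothetical way to view it in a browser
  // (exception handling omitted):
  //
  //   String page = "<html><body>" + matrix.toStringMatrix() + "</body></html>";
  //   java.nio.file.Files.write(java.nio.file.Paths.get("matrix.html"),
  //       page.getBytes(java.nio.charset.StandardCharsets.UTF_8));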
/**
   * returns a key for all the column names, for better readability if
   * the names got cut off.
*
* @return the key
*/
public String toStringKey() {
String result;
int i;
result = "<table border=\"1\" cellpadding=\"3\" cellspacing=\"0\">\n"
+ " <tr><td colspan=\"2\"><b>Key</b></td></tr>\n";
for (i = 0; i < getColCount(); i++) {
if (getColHidden(i))
continue;
result += " <tr>"
+ "<td><b>(" + (i+1) + ")</b></td>"
+ "<td>" + removeFilterName(m_ColNames[i]) + "</td>"
+ "</tr>\n";
}
result += "</table>\n";
return result;
}
/**
* returns the summary as string.
*
* @return the summary
*/
public String toStringSummary() {
String result;
String titles;
int resultsetLength;
int i;
int j;
String content;
if (m_NonSigWins == null)
return "-summary data not set-";
result = "<table border=\"1\" cellpadding=\"3\" cellspacing=\"0\">\n";
titles = " <tr>";
resultsetLength = 1 + Math.max((int)(Math.log(getColCount())/Math.log(10)),
(int)(Math.log(getRowCount())/Math.log(10)));
for (i = 0; i < getColCount(); i++) {
if (getColHidden(i))
continue;
titles += "<td align=\"center\"><b>" + getSummaryTitle(i) + "</b></td>";
}
result += titles
+ "<td><b>(No. of datasets where [col] >> [row])</b></td></tr>\n";
for (i = 0; i < getColCount(); i++) {
if (getColHidden(i))
continue;
result += " <tr>";
for (j = 0; j < getColCount(); j++) {
if (getColHidden(j))
continue;
if (j == i)
content = Utils.padLeft("-", resultsetLength * 2 + 3);
else
content = Utils.padLeft("" + m_NonSigWins[i][j]
+ " (" + m_Wins[i][j] + ")",
resultsetLength * 2 + 3);
result += "<td>" + content.replaceAll(" ", " ") + "</td>";
}
result += "<td><b>" + getSummaryTitle(i) + "</b> = " + removeFilterName(m_ColNames[i]) + "</td></tr>\n";
}
result += "</table>\n";
return result;
}
/**
* returns the ranking in a string representation.
*
* @return the ranking
*/
public String toStringRanking() {
String result;
int[] ranking;
int i;
int curr;
if (m_RankingWins == null)
return "-ranking data not set-";
result = "<table border=\"1\" cellpadding=\"3\" cellspacing=\"0\">\n";
result += " <tr>"
+ "<td align=\"center\"><b>>-<</b></td>"
+ "<td align=\"center\"><b>></b></td>"
+ "<td align=\"center\"><b><</b></td>"
+ "<td><b>Resultset</b></td>"
+ "</tr>\n";
ranking = Utils.sort(m_RankingDiff);
for (i = getColCount() - 1; i >= 0; i--) {
curr = ranking[i];
if (getColHidden(curr))
continue;
result += " <tr>"
+ "<td align=\"right\">" + m_RankingDiff[curr] + "</td>"
+ "<td align=\"right\">" + m_RankingWins[curr] + "</td>"
+ "<td align=\"right\">" + m_RankingLosses[curr] + "</td>"
+ "<td>" + removeFilterName(m_ColNames[curr]) + "</td>"
+ "<tr>\n";
}
result += "</table>\n";
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* for testing only.
*
* @param args ignored
*/
public static void main(String[] args) {
ResultMatrix matrix;
int i;
int n;
matrix = new ResultMatrixHTML(3, 3);
// set header
matrix.addHeader("header1", "value1");
matrix.addHeader("header2", "value2");
matrix.addHeader("header2", "value3");
// set values
for (i = 0; i < matrix.getRowCount(); i++) {
for (n = 0; n < matrix.getColCount(); n++) {
matrix.setMean(n, i, (i+1)*n);
matrix.setStdDev(n, i, ((double) (i+1)*n) / 100);
if (i == n) {
if (i % 2 == 1)
matrix.setSignificance(n, i, SIGNIFICANCE_WIN);
else
matrix.setSignificance(n, i, SIGNIFICANCE_LOSS);
}
}
}
System.out.println("\n\n--> " + matrix.getDisplayName());
System.out.println("\n1. complete\n");
System.out.println(matrix.toStringHeader() + "\n");
System.out.println(matrix.toStringMatrix() + "\n");
System.out.println(matrix.toStringKey());
System.out.println("\n2. complete with std deviations\n");
matrix.setShowStdDev(true);
System.out.println(matrix.toStringMatrix());
System.out.println("\n3. cols numbered\n");
matrix.setPrintColNames(false);
System.out.println(matrix.toStringMatrix());
System.out.println("\n4. second col missing\n");
matrix.setColHidden(1, true);
System.out.println(matrix.toStringMatrix());
System.out.println("\n5. last row missing, rows numbered too\n");
matrix.setRowHidden(2, true);
matrix.setPrintRowNames(false);
System.out.println(matrix.toStringMatrix());
System.out.println("\n6. mean prec to 3\n");
matrix.setMeanPrec(3);
matrix.setPrintRowNames(false);
System.out.println(matrix.toStringMatrix());
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/ResultMatrixLatex.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ResultMatrixLatex.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
<!-- globalinfo-start -->
* Generates the matrix output in LaTeX-syntax.
* <p/>
<!-- globalinfo-end -->
*
<!-- options-start -->
* Valid options are: <p/>
*
* <pre> -mean-prec <int>
* The number of decimals after the decimal point for the mean.
* (default: 2)</pre>
*
* <pre> -stddev-prec <int>
 * The number of decimals after the decimal point for the standard deviation.
* (default: 2)</pre>
*
* <pre> -col-name-width <int>
* The maximum width for the column names (0 = optimal).
* (default: 0)</pre>
*
* <pre> -row-name-width <int>
* The maximum width for the row names (0 = optimal).
* (default: 0)</pre>
*
* <pre> -mean-width <int>
* The width of the mean (0 = optimal).
* (default: 0)</pre>
*
* <pre> -stddev-width <int>
* The width of the standard deviation (0 = optimal).
* (default: 0)</pre>
*
* <pre> -sig-width <int>
* The width of the significance indicator (0 = optimal).
* (default: 0)</pre>
*
* <pre> -count-width <int>
* The width of the counts (0 = optimal).
* (default: 0)</pre>
*
* <pre> -show-stddev
* Whether to display the standard deviation column.
* (default: no)</pre>
*
* <pre> -show-avg
* Whether to show the row with averages.
* (default: no)</pre>
*
* <pre> -remove-filter
* Whether to remove the classname package prefixes from the
* filter names in datasets.
* (default: no)</pre>
*
* <pre> -print-col-names
* Whether to output column names or just numbers representing them.
* (default: no)</pre>
*
* <pre> -print-row-names
* Whether to output row names or just numbers representing them.
* (default: no)</pre>
*
* <pre> -enum-col-names
* Whether to enumerate the column names (prefixing them with
* '(x)', with 'x' being the index).
* (default: no)</pre>
*
* <pre> -enum-row-names
* Whether to enumerate the row names (prefixing them with
* '(x)', with 'x' being the index).
* (default: no)</pre>
*
<!-- options-end -->
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class ResultMatrixLatex
extends ResultMatrix {
/** for serialization. */
private static final long serialVersionUID = 777690788447600978L;
/**
* initializes the matrix as 1x1 matrix.
*/
public ResultMatrixLatex() {
this(1, 1);
}
/**
* initializes the matrix with the given dimensions.
*
* @param cols the number of columns
* @param rows the number of rows
*/
public ResultMatrixLatex(int cols, int rows) {
super(cols, rows);
}
/**
* initializes the matrix with the values from the given matrix.
*
* @param matrix the matrix to get the values from
*/
public ResultMatrixLatex(ResultMatrix matrix) {
super(matrix);
}
/**
* Returns a string describing the matrix.
*
* @return a description suitable for
* displaying in the experimenter gui
*/
public String globalInfo() {
return "Generates the matrix output in LaTeX-syntax.";
}
/**
* returns the name of the output format.
*
* @return the display name
*/
public String getDisplayName() {
return "LaTeX";
}
/**
* removes the stored data but retains the dimensions of the matrix.
*/
public void clear() {
super.clear();
TIE_STRING = " ";
WIN_STRING = "$\\circ$";
LOSS_STRING = "$\\bullet$";
}
/**
   * returns the default of whether column names are printed instead of just numbers.
*
* @return true if names instead of numbers are printed
*/
public boolean getDefaultPrintColNames() {
return false;
}
/**
* returns the default of whether column names are prefixed with the index.
*
* @return true if the names are prefixed
*/
public boolean getDefaultEnumerateColNames() {
return true;
}
/**
* returns the header of the matrix as a string.
*
* @return the header
* @see #m_HeaderKeys
* @see #m_HeaderValues
*/
public String toStringHeader() {
return new ResultMatrixPlainText(this).toStringHeader();
}
/**
* returns the matrix as latex table.
*
* @return the matrix
*/
public String toStringMatrix() {
StringBuffer result;
String[][] cells;
int i;
int j;
int n;
int size;
result = new StringBuffer();
cells = toArray();
result.append( "\\begin{table}[thb]\n\\caption{\\label{labelname}"
+ "Table Caption}\n");
if (!getShowStdDev())
result.append("\\footnotesize\n");
else
result.append("\\scriptsize\n");
// output the column alignment characters
// one for the dataset name and one for the comparison column
if (!getShowStdDev()) {
result.append( "{\\centering \\begin{tabular}{"
+ "l" // dataset
+ "" // separator
+ "r" // mean
);
} else {
// dataset, mean, std dev
result.append( "{\\centering \\begin{tabular}{"
+ "l" // dataset
+ "" // separator
+ "r" // mean
+ "@{\\hspace{0cm}}" // separator
+ "c" // +/-
+ "@{\\hspace{0cm}}" // separator
+ "r" // stddev
);
}
for (j = 1; j < getColCount(); j++) {
if (getColHidden(j))
continue;
if (!getShowStdDev())
result.append( "r" // mean
+ "@{\\hspace{0.1cm}}" // separator
+ "c" // significance
);
else
result.append( "r" // mean
+ "@{\\hspace{0cm}}" // separator
+ "c" // +/-
+ "@{\\hspace{0cm}}" // separator
+ "r" // stddev
+ "@{\\hspace{0.1cm}}" // separator
+ "c" // significance
);
}
result.append("}\n\\\\\n\\hline\n");
if (!getShowStdDev())
result.append("Dataset & " + cells[0][1]);
else
result.append("Dataset & \\multicolumn{3}{c}{" + cells[0][1] + "}");
// now do the column names (numbers)
for (j = 2; j < cells[0].length; j++) {
if (!isMean(j))
continue;
if (!getShowStdDev())
result.append("& " + cells[0][j] + " & ");
else
result.append("& \\multicolumn{4}{c}{" + cells[0][j] + "} ");
}
result.append("\\\\\n\\hline\n");
// change "_" to "-" in names
for (i = 1; i < cells.length; i++)
cells[i][0] = cells[i][0].replace('_', '-');
// pad numbers
for (n = 1; n < cells[0].length; n++) {
size = getColSize(cells, n);
for (i = 1; i < cells.length; i++)
cells[i][n] = padString(cells[i][n], size, true);
}
// output data (w/o wins/ties/losses)
for (i = 1; i < cells.length - 1; i++) {
if (isAverage(i))
result.append("\\hline\n");
for (n = 0; n < cells[0].length; n++) {
if (n == 0) {
result.append(padString(cells[i][n], getRowNameWidth()));
}
else {
if (getShowStdDev()) {
if (isMean(n - 1)) {
if (!cells[i][n].trim().equals(""))
result.append(" & $\\pm$ & ");
else
result.append(" & & ");
}
else
result.append(" & ");
}
else {
result.append(" & ");
}
result.append(cells[i][n]);
}
}
result.append("\\\\\n");
}
result.append("\\hline\n\\multicolumn{" + cells[0].length + "}{c}{$\\circ$, $\\bullet$"
+" statistically significant improvement or degradation}"
+"\\\\\n\\end{tabular} ");
if (!getShowStdDev())
result.append("\\footnotesize ");
else
result.append("\\scriptsize ");
result.append("\\par}\n\\end{table}"
+"\n");
return result.toString();
}
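  // Note (illustrative): the emitted table carries the placeholder caption
  // "Table Caption" and label "labelname" used above, which are meant to be
  // edited by hand. The win/loss markers are the plain math symbols $\circ$ and
  // $\bullet$ installed in clear(), so no extra LaTeX packages should be needed
  // for them.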
/**
   * returns a key for all the column names, for better readability if
   * the names got cut off.
*
* @return the key
*/
public String toStringKey() {
String result;
int i;
result = "\\begin{table}[thb]\n\\caption{\\label{labelname}"
+ "Table Caption (Key)}\n";
result += "\\scriptsize\n";
result += "{\\centering\n";
result += "\\begin{tabular}{cl}\\\\\n";
for (i = 0; i < getColCount(); i++) {
if (getColHidden(i))
continue;
result += LEFT_PARENTHESES + (i+1) + RIGHT_PARENTHESES
+ " & " + removeFilterName(m_ColNames[i]).replace('_', '-')
.replaceAll("\\\\", "\\\\textbackslash")
+ " \\\\\n";
}
result += "\\end{tabular}\n";
result += "}\n";
result += "\\end{table}\n";
return result;
}
/**
* returns the summary as string.
*
* @return the summary
*/
public String toStringSummary() {
int resultsetLength;
String result;
String titles;
int i;
int j;
if (m_NonSigWins == null)
return "-summary data not set-";
resultsetLength = 1 + Math.max((int)(Math.log(getColCount())/Math.log(10)),
(int)(Math.log(getRowCount())/Math.log(10)));
result = "";
titles = "";
result += "{\\centering\n";
result += "\\begin{table}[thb]\n\\caption{\\label{labelname}"
+"Table Caption}\n";
result += "\\footnotesize\n";
result += "\\begin{tabular}{l";
for (i = 0; i < getColCount(); i++) {
if (getColHidden(i))
continue;
titles += " &";
result += "c";
titles += ' ' + Utils.padLeft("" + getSummaryTitle(i),
resultsetLength * 2 + 3);
}
result += "}\\\\\n\\hline\n";
result += titles + " \\\\\n\\hline\n";
for (i = 0; i < getColCount(); i++) {
if (getColHidden(i))
continue;
for (j = 0; j < getColCount(); j++) {
if (getColHidden(j))
continue;
if (j == 0)
result += (char)((int)'a' + i % 26);
if (j == i)
result += " & - ";
else
result += "& " + m_NonSigWins[i][j] + " (" + m_Wins[i][j] + ") ";
}
result += "\\\\\n";
}
result += "\\hline\n\\end{tabular} \\footnotesize \\par\n\\end{table}}";
return result;
}
/**
* returns the ranking in a string representation.
*
* @return the ranking
*/
public String toStringRanking() {
int biggest;
int width;
String result;
int[] ranking;
int i;
int curr;
if (m_RankingWins == null)
return "-ranking data not set-";
biggest = Math.max(m_RankingWins[Utils.maxIndex(m_RankingWins)],
m_RankingLosses[Utils.maxIndex(m_RankingLosses)]);
width = Math.max(2 + (int)(Math.log(biggest) / Math.log(10)),
">-<".length());
result = "\\begin{table}[thb]\n\\caption{\\label{labelname}Table Caption"
+ "}\n\\footnotesize\n{\\centering \\begin{tabular}{rlll}\\\\\n\\hline\n";
result += "Resultset & Wins$-$ & Wins & Losses \\\\\n& Losses & & "
+ "\\\\\n\\hline\n";
ranking = Utils.sort(m_RankingDiff);
for (i = getColCount() - 1; i >= 0; i--) {
curr = ranking[i];
if (getColHidden(curr))
continue;
result += "(" + (curr + 1) + ") & "
+ Utils.padLeft("" + m_RankingDiff[curr], width)
+ " & " + Utils.padLeft("" + m_RankingWins[curr], width)
+ " & " + Utils.padLeft("" + m_RankingLosses[curr], width)
+ "\\\\\n";
}
result += "\\hline\n\\end{tabular} \\footnotesize \\par}\n\\end{table}";
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* for testing only.
*
* @param args ignored
*/
public static void main(String[] args) {
ResultMatrix matrix;
int i;
int n;
matrix = new ResultMatrixLatex(3, 3);
// set header
matrix.addHeader("header1", "value1");
matrix.addHeader("header2", "value2");
matrix.addHeader("header2", "value3");
// set values
for (i = 0; i < matrix.getRowCount(); i++) {
for (n = 0; n < matrix.getColCount(); n++) {
matrix.setMean(n, i, (i+1)*n);
matrix.setStdDev(n, i, ((double) (i+1)*n) / 100);
if (i == n) {
if (i % 2 == 1)
matrix.setSignificance(n, i, SIGNIFICANCE_WIN);
else
matrix.setSignificance(n, i, SIGNIFICANCE_LOSS);
}
}
}
System.out.println("\n\n--> " + matrix.getDisplayName());
System.out.println("\n1. complete\n");
System.out.println(matrix.toStringHeader() + "\n");
System.out.println(matrix.toStringMatrix() + "\n");
System.out.println(matrix.toStringKey());
System.out.println("\n2. complete with std deviations\n");
matrix.setShowStdDev(true);
System.out.println(matrix.toStringMatrix());
System.out.println("\n3. cols numbered\n");
matrix.setPrintColNames(false);
System.out.println(matrix.toStringMatrix());
System.out.println("\n4. second col missing\n");
matrix.setColHidden(1, true);
System.out.println(matrix.toStringMatrix());
System.out.println("\n5. last row missing, rows numbered too\n");
matrix.setRowHidden(2, true);
matrix.setPrintRowNames(false);
System.out.println(matrix.toStringMatrix());
System.out.println("\n6. mean prec to 3\n");
matrix.setMeanPrec(3);
matrix.setPrintRowNames(false);
System.out.println(matrix.toStringMatrix());
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/ResultMatrixPlainText.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ResultMatrixPlainText.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* <!-- globalinfo-start --> Generates the output as plain text (for fixed width
* fonts).
* <p/>
* <!-- globalinfo-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -mean-prec <int>
* The number of decimals after the decimal point for the mean.
* (default: 2)
* </pre>
*
* <pre>
* -stddev-prec <int>
 * The number of decimals after the decimal point for the standard deviation.
* (default: 2)
* </pre>
*
* <pre>
* -col-name-width <int>
* The maximum width for the column names (0 = optimal).
* (default: 0)
* </pre>
*
* <pre>
* -row-name-width <int>
* The maximum width for the row names (0 = optimal).
* (default: 25)
* </pre>
*
* <pre>
* -mean-width <int>
* The width of the mean (0 = optimal).
* (default: 0)
* </pre>
*
* <pre>
* -stddev-width <int>
* The width of the standard deviation (0 = optimal).
* (default: 0)
* </pre>
*
* <pre>
* -sig-width <int>
* The width of the significance indicator (0 = optimal).
* (default: 0)
* </pre>
*
* <pre>
* -count-width <int>
* The width of the counts (0 = optimal).
* (default: 5)
* </pre>
*
* <pre>
* -show-stddev
* Whether to display the standard deviation column.
* (default: no)
* </pre>
*
* <pre>
* -show-avg
* Whether to show the row with averages.
* (default: no)
* </pre>
*
* <pre>
* -remove-filter
* Whether to remove the classname package prefixes from the
* filter names in datasets.
* (default: no)
* </pre>
*
* <pre>
* -print-col-names
* Whether to output column names or just numbers representing them.
* (default: no)
* </pre>
*
* <pre>
* -print-row-names
* Whether to output row names or just numbers representing them.
* (default: no)
* </pre>
*
* <pre>
* -enum-col-names
* Whether to enumerate the column names (prefixing them with
* '(x)', with 'x' being the index).
* (default: no)
* </pre>
*
* <pre>
* -enum-row-names
* Whether to enumerate the row names (prefixing them with
* '(x)', with 'x' being the index).
* (default: no)
* </pre>
*
* <!-- options-end -->
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class ResultMatrixPlainText extends ResultMatrix {
/** for serialization. */
private static final long serialVersionUID = 1502934525382357937L;
/**
* initializes the matrix as 1x1 matrix.
*/
public ResultMatrixPlainText() {
this(1, 1);
}
/**
* initializes the matrix with the given dimensions.
*
* @param cols the number of columns
* @param rows the number of rows
*/
public ResultMatrixPlainText(int cols, int rows) {
super(cols, rows);
}
/**
* initializes the matrix with the values from the given matrix.
*
* @param matrix the matrix to get the values from
*/
public ResultMatrixPlainText(ResultMatrix matrix) {
super(matrix);
}
/**
* Returns a string describing the matrix.
*
* @return a description suitable for displaying in the experimenter gui
*/
@Override
public String globalInfo() {
return "Generates the output as plain text (for fixed width fonts).";
}
/**
* returns the name of the output format.
*
* @return the display name
*/
@Override
public String getDisplayName() {
return "Plain Text";
}
/**
* returns the default width for the row names.
*
* @return the width
*/
@Override
public int getDefaultRowNameWidth() {
return 25;
}
/**
* returns the default width for the counts.
*
* @return the width
*/
@Override
public int getDefaultCountWidth() {
return 5;
}
/**
* returns the header of the matrix as a string.
*
* @return the header
* @see #m_HeaderKeys
* @see #m_HeaderValues
*/
@Override
public String toStringHeader() {
int i;
int size;
String[][] data;
String result;
result = "";
// fill in data
data = new String[m_HeaderKeys.size()][2];
for (i = 0; i < m_HeaderKeys.size(); i++) {
data[i][0] = m_HeaderKeys.get(i).toString() + ":";
data[i][1] = m_HeaderValues.get(i).toString();
}
// pad
size = getColSize(data, 0);
for (i = 0; i < data.length; i++) {
data[i][0] = padString(data[i][0], size);
}
// build result
for (i = 0; i < data.length; i++) {
result += data[i][0] + " " + data[i][1] + "\n";
}
return result;
}
/**
* returns the matrix as plain text.
*
* @return the matrix
*/
@Override
public String toStringMatrix() {
StringBuffer result;
String[][] cells;
int i;
int j;
int n;
int k;
int size;
String line;
int indexBase;
StringBuffer head;
StringBuffer body;
StringBuffer foot;
int[] startMeans;
int[] startSigs;
int maxLength;
result = new StringBuffer();
head = new StringBuffer();
body = new StringBuffer();
foot = new StringBuffer();
cells = toArray();
startMeans = new int[getColCount()];
startSigs = new int[getColCount() - 1];
maxLength = 0;
// pad numbers
for (n = 1; n < cells[0].length; n++) {
size = getColSize(cells, n, true, true);
for (i = 1; i < cells.length - 1; i++) {
cells[i][n] = padString(cells[i][n], size, true);
}
}
// index of base column in array
indexBase = 1;
if (getShowStdDev()) {
indexBase++;
}
// output data (without "(v/ /*)")
j = 0;
k = 0;
for (i = 1; i < cells.length - 1; i++) {
if (isAverage(i)) {
body.append(padString("", maxLength).replaceAll(".", "-") + "\n");
}
line = "";
for (n = 0; n < cells[0].length; n++) {
// record starts
if (i == 1) {
if (isMean(n)) {
startMeans[j] = line.length();
j++;
}
if (isSignificance(n)) {
startSigs[k] = line.length();
k++;
}
}
if (n == 0) {
line += padString(cells[i][n], getRowNameWidth());
if (!isAverage(i)) {
line += padString(
"(" + Utils.doubleToString(getCount(getDisplayRow(i - 1)), 0)
+ ")", getCountWidth(), true);
} else {
line += padString("", getCountWidth(), true);
}
} else {
// additional space before means
if (isMean(n)) {
line += " ";
}
// print cell
if (getShowStdDev()) {
if (isMean(n - 1)) {
if (!cells[i][n].trim().equals("")) {
line += "(" + cells[i][n] + ")";
} else {
line += " " + cells[i][n] + " ";
}
} else {
line += " " + cells[i][n];
}
} else {
line += " " + cells[i][n];
}
}
// add separator after base column
if (n == indexBase) {
line += " |";
}
}
// record overall length
if (i == 1) {
maxLength = line.length();
}
body.append(line + "\n");
}
// column names
line = padString(cells[0][0], startMeans[0]);
i = -1;
for (n = 1; n < cells[0].length; n++) {
if (isMean(n)) {
i++;
if (i == 0) {
line = padString(line, startMeans[i] - getCountWidth());
} else if (i == 1) {
line = padString(line, startMeans[i] - " |".length());
} else if (i > 1) {
line = padString(line, startMeans[i]);
}
if (i == 1) {
line += " |";
}
line += " " + cells[0][n];
}
}
line = padString(line, maxLength);
head.append(line + "\n");
head.append(line.replaceAll(".", "-") + "\n");
body.append(line.replaceAll(".", "-") + "\n");
// output wins/losses/ties
if (getColCount() > 1) {
line = padString(cells[cells.length - 1][0], startMeans[1] - 2, true)
+ " |";
i = 0;
for (n = 1; n < cells[cells.length - 1].length; n++) {
if (isSignificance(n)) {
line = padString(line,
startSigs[i] + 1 - cells[cells.length - 1][n].length());
line += " " + cells[cells.length - 1][n];
i++;
}
}
line = padString(line, maxLength);
} else {
line = padString(cells[cells.length - 1][0], line.length() - 2) + " |";
}
foot.append(line + "\n");
// assemble output
result.append(head.toString());
result.append(body.toString());
result.append(foot.toString());
return result.toString();
}
/**
   * returns a key for all the column names, for better readability if the
   * names got cut off.
*
* @return the key
*/
@Override
public String toStringKey() {
String result;
int i;
result = "Key:\n";
for (i = 0; i < getColCount(); i++) {
if (getColHidden(i)) {
continue;
}
result += LEFT_PARENTHESES + (i + 1) + RIGHT_PARENTHESES + " "
+ removeFilterName(m_ColNames[i]) + "\n";
}
return result;
}
/**
* returns the summary as string.
*
* @return the summary
*/
@Override
public String toStringSummary() {
String result;
String titles;
int resultsetLength;
int i;
int j;
if (m_NonSigWins == null) {
return "-summary data not set-";
}
result = "";
titles = "";
resultsetLength = 1 + Math.max(
(int) (Math.log(getColCount()) / Math.log(10)),
(int) (Math.log(getRowCount()) / Math.log(10)));
for (i = 0; i < getColCount(); i++) {
if (getColHidden(i)) {
continue;
}
titles += " "
+ Utils.padLeft("" + getSummaryTitle(i), resultsetLength * 2 + 3);
}
result += titles + " (No. of datasets where [col] >> [row])\n";
for (i = 0; i < getColCount(); i++) {
if (getColHidden(i)) {
continue;
}
for (j = 0; j < getColCount(); j++) {
if (getColHidden(j)) {
continue;
}
result += " ";
if (j == i) {
result += Utils.padLeft("-", resultsetLength * 2 + 3);
} else {
result += Utils.padLeft("" + m_NonSigWins[i][j] + " (" + m_Wins[i][j]
+ ")", resultsetLength * 2 + 3);
}
}
result += " | " + getSummaryTitle(i) + " = " + getColName(i) + '\n';
}
return result;
}
/**
* returns the ranking in a string representation.
*
* @return the ranking
*/
@Override
public String toStringRanking() {
int biggest;
int width;
String result;
int[] ranking;
int i;
int curr;
if (m_RankingWins == null) {
return "-ranking data not set-";
}
biggest = Math.max(m_RankingWins[Utils.maxIndex(m_RankingWins)],
m_RankingLosses[Utils.maxIndex(m_RankingLosses)]);
width = Math.max(2 + (int) (Math.log(biggest) / Math.log(10)),
">-<".length());
result = Utils.padLeft(">-<", width) + ' ' + Utils.padLeft(">", width)
+ ' ' + Utils.padLeft("<", width) + " Resultset\n";
ranking = Utils.sort(m_RankingDiff);
for (i = getColCount() - 1; i >= 0; i--) {
curr = ranking[i];
if (getColHidden(curr)) {
continue;
}
result += Utils.padLeft("" + m_RankingDiff[curr], width) + ' '
+ Utils.padLeft("" + m_RankingWins[curr], width) + ' '
+ Utils.padLeft("" + m_RankingLosses[curr], width) + ' '
+ removeFilterName(m_ColNames[curr]) + '\n';
}
return result;
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* for testing only.
*
* @param args ignored
*/
public static void main(String[] args) {
ResultMatrix matrix;
int i;
int n;
matrix = new ResultMatrixPlainText(3, 3);
// set header
matrix.addHeader("header1", "value1");
matrix.addHeader("header2", "value2");
matrix.addHeader("header2", "value3");
// set values
for (i = 0; i < matrix.getRowCount(); i++) {
for (n = 0; n < matrix.getColCount(); n++) {
matrix.setMean(n, i, (i + 1) * n);
matrix.setStdDev(n, i, ((double) (i + 1) * n) / 100);
if (i == n) {
if (i % 2 == 1) {
matrix.setSignificance(n, i, SIGNIFICANCE_WIN);
} else {
matrix.setSignificance(n, i, SIGNIFICANCE_LOSS);
}
}
}
}
System.out.println("\n\n--> " + matrix.getDisplayName());
System.out.println("\n1. complete\n");
System.out.println(matrix.toStringHeader() + "\n");
System.out.println(matrix.toStringMatrix() + "\n");
System.out.println(matrix.toStringKey());
System.out.println("\n2. complete with std deviations\n");
matrix.setShowStdDev(true);
System.out.println(matrix.toStringMatrix());
System.out.println("\n3. cols numbered\n");
matrix.setPrintColNames(false);
System.out.println(matrix.toStringMatrix());
System.out.println("\n4. second col missing\n");
matrix.setColHidden(1, true);
System.out.println(matrix.toStringMatrix());
System.out.println("\n5. last row missing, rows numbered too\n");
matrix.setRowHidden(2, true);
matrix.setPrintRowNames(false);
System.out.println(matrix.toStringMatrix());
System.out.println("\n6. mean prec to 3\n");
matrix.setMeanPrec(3);
matrix.setPrintRowNames(false);
System.out.println(matrix.toStringMatrix());
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/ResultMatrixSignificance.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ResultMatrixSignificance.java
* Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import weka.core.RevisionUtils;
/**
<!-- globalinfo-start -->
* Only outputs the significance indicators. Can be used for spotting patterns.
* <p/>
<!-- globalinfo-end -->
*
<!-- options-start -->
* Valid options are: <p/>
*
* <pre> -mean-prec <int>
* The number of decimals after the decimal point for the mean.
* (default: 2)</pre>
*
* <pre> -stddev-prec <int>
 * The number of decimals after the decimal point for the std. deviation.
* (default: 2)</pre>
*
* <pre> -col-name-width <int>
* The maximum width for the column names (0 = optimal).
* (default: 0)</pre>
*
* <pre> -row-name-width <int>
* The maximum width for the row names (0 = optimal).
* (default: 40)</pre>
*
* <pre> -mean-width <int>
* The width of the mean (0 = optimal).
* (default: 0)</pre>
*
* <pre> -stddev-width <int>
* The width of the standard deviation (0 = optimal).
* (default: 0)</pre>
*
* <pre> -sig-width <int>
* The width of the significance indicator (0 = optimal).
* (default: 0)</pre>
*
* <pre> -count-width <int>
* The width of the counts (0 = optimal).
* (default: 0)</pre>
*
* <pre> -show-stddev
* Whether to display the standard deviation column.
* (default: no)</pre>
*
* <pre> -show-avg
* Whether to show the row with averages.
* (default: no)</pre>
*
* <pre> -remove-filter
* Whether to remove the classname package prefixes from the
* filter names in datasets.
* (default: no)</pre>
*
* <pre> -print-col-names
* Whether to output column names or just numbers representing them.
* (default: no)</pre>
*
* <pre> -print-row-names
* Whether to output row names or just numbers representing them.
* (default: no)</pre>
*
* <pre> -enum-col-names
* Whether to enumerate the column names (prefixing them with
* '(x)', with 'x' being the index).
* (default: no)</pre>
*
* <pre> -enum-row-names
* Whether to enumerate the row names (prefixing them with
* '(x)', with 'x' being the index).
* (default: no)</pre>
*
<!-- options-end -->
*
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision$
*/
public class ResultMatrixSignificance
extends ResultMatrix {
/** for serialization. */
private static final long serialVersionUID = -1280545644109764206L;
/**
* initializes the matrix as 1x1 matrix.
*/
public ResultMatrixSignificance() {
this(1, 1);
}
/**
* initializes the matrix with the given dimensions.
*
* @param cols the number of columns
* @param rows the number of rows
*/
public ResultMatrixSignificance(int cols, int rows) {
super(cols, rows);
}
/**
* initializes the matrix with the values from the given matrix.
*
* @param matrix the matrix to get the values from
*/
public ResultMatrixSignificance(ResultMatrix matrix) {
super(matrix);
}
/**
* Returns a string describing the matrix.
*
* @return a description suitable for
* displaying in the experimenter gui
*/
public String globalInfo() {
return "Only outputs the significance indicators. Can be used for spotting patterns.";
}
/**
* returns the name of the output format.
*
* @return the display name
*/
public String getDisplayName() {
return "Significance only";
}
/**
   * returns the default for whether column names (rather than numbers) are printed.
*
* @return true if names instead of numbers are printed
*/
public boolean getDefaultPrintColNames() {
return false;
}
/**
* returns the default width for the row names.
*
* @return the width
*/
public int getDefaultRowNameWidth() {
return 40;
}
/**
* returns the default of whether std deviations are displayed or not.
*
* @return true if the std deviations are displayed
*/
public boolean getDefaultShowStdDev() {
return false;
}
/**
* sets whether to display the std deviations or not - always false!
*
* @param show ignored
*/
public void setShowStdDev(boolean show) {
// ignore
}
/**
* returns the matrix as plain text.
*
* @return the matrix
*/
public String toStringMatrix() {
StringBuffer result;
String[][] cells;
int i;
int n;
int nameWidth;
String line;
String colStr;
int rows;
result = new StringBuffer();
cells = toArray();
// pad names
nameWidth = getColSize(cells, 0);
for (i = 0; i < cells.length - 1; i++)
cells[i][0] = padString(cells[i][0], nameWidth);
// determine number of displayed rows
rows = cells.length - 1;
if (getShowAverage())
rows--;
for (i = 0; i < rows; i++) {
line = "";
colStr = "";
for (n = 0; n < cells[i].length; n++) {
// the header of the column
if (isMean(n) || isRowName(n))
colStr = cells[0][n];
if ( (n > 1) && (!isSignificance(n)) )
continue;
// padding between cols
if (n > 0)
line += " ";
// padding for "(" below dataset line
if ( (i > 0) && (n > 1) )
line += " ";
if (i == 0) {
line += colStr;
}
else {
if (n == 0) {
line += cells[i][n];
}
else if (n == 1) {
line += colStr.replaceAll(".", " "); // base column has no significance!
}
else {
line += cells[i][n];
// add blanks dep. on length of #
line += colStr.replaceAll(".", " ").substring(2);
}
}
}
result.append(line + "\n");
// separator line
if (i == 0)
result.append(line.replaceAll(".", "-") + "\n");
}
return result.toString();
}
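  // -------------------------------------------------------------------
  // Note (added for clarity, not part of the original source): the loop
  // in toStringMatrix() keeps only the row name (column 0) and the
  // significance columns; the base column (n == 1) and all other
  // mean/stddev columns are rendered as blanks of matching width, so
  // each printed data row is just the dataset name followed by one
  // aligned significance indicator per compared scheme.
  // -------------------------------------------------------------------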
/**
* returns the header of the matrix as a string.
*
* @return the header
* @see #m_HeaderKeys
* @see #m_HeaderValues
*/
public String toStringHeader() {
return new ResultMatrixPlainText(this).toStringHeader();
}
/**
   * returns a key for all the column names, for better readability if
* the names got cut off.
*
* @return the key
*/
public String toStringKey() {
return new ResultMatrixPlainText(this).toStringKey();
}
/**
* returns the summary as string.
*
* @return the summary
*/
public String toStringSummary() {
return new ResultMatrixPlainText(this).toStringSummary();
}
/**
* returns the ranking in a string representation.
*
* @return the ranking
*/
public String toStringRanking() {
return new ResultMatrixPlainText(this).toStringRanking();
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
* for testing only.
*
* @param args ignored
*/
public static void main(String[] args) {
ResultMatrix matrix;
int i;
int n;
matrix = new ResultMatrixSignificance(3, 3);
// set header
matrix.addHeader("header1", "value1");
matrix.addHeader("header2", "value2");
matrix.addHeader("header2", "value3");
// set values
for (i = 0; i < matrix.getRowCount(); i++) {
for (n = 0; n < matrix.getColCount(); n++) {
matrix.setMean(n, i, (i+1)*n);
matrix.setStdDev(n, i, ((double) (i+1)*n) / 100);
if (i == n) {
if (i % 2 == 1)
matrix.setSignificance(n, i, SIGNIFICANCE_WIN);
else
matrix.setSignificance(n, i, SIGNIFICANCE_LOSS);
}
}
}
System.out.println("\n\n--> " + matrix.getDisplayName());
System.out.println("\n1. complete\n");
System.out.println(matrix.toStringHeader() + "\n");
System.out.println(matrix.toStringMatrix() + "\n");
System.out.println(matrix.toStringKey());
System.out.println("\n2. complete with std deviations\n");
matrix.setShowStdDev(true);
System.out.println(matrix.toStringMatrix());
System.out.println("\n3. cols numbered\n");
matrix.setPrintColNames(false);
System.out.println(matrix.toStringMatrix());
System.out.println("\n4. second col missing\n");
matrix.setColHidden(1, true);
System.out.println(matrix.toStringMatrix());
System.out.println("\n5. last row missing, rows numbered too\n");
matrix.setRowHidden(2, true);
matrix.setPrintRowNames(false);
System.out.println(matrix.toStringMatrix());
System.out.println("\n6. mean prec to 3\n");
matrix.setMeanPrec(3);
matrix.setPrintRowNames(false);
System.out.println(matrix.toStringMatrix());
}
}
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/ResultProducer.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* ResultProducer.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.Serializable;
import weka.core.Instances;
/**
* This interface defines the methods required for an object
* that produces results for different randomizations of a dataset. <p>
*
* Possible implementations of ResultProducer: <br>
* <ul>
* <li>Random test/train splits
* <li>CrossValidation splits
* <li>LearningCurve splits (multiple results per run?)
* <li>Averaging results of other result producers
* </ul>
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public interface ResultProducer extends Serializable {
/**
* Sets the dataset that results will be obtained for.
*
* @param instances a value of type 'Instances'.
*/
void setInstances(Instances instances);
/**
* Sets the object to send results of each run to.
*
* @param listener a value of type 'ResultListener'
*/
void setResultListener(ResultListener listener);
/**
* Sets a list of method names for additional measures to look for
* in SplitEvaluators.
* @param additionalMeasures a list of method names
*/
void setAdditionalMeasures(String [] additionalMeasures);
/**
* Prepare to generate results. The ResultProducer should call
* preProcess(this) on the ResultListener it is to send results to.
*
* @exception Exception if an error occurs during preprocessing.
*/
void preProcess() throws Exception;
/**
* Perform any postprocessing. When this method is called, it indicates
* that no more requests to generate results for the current experiment
   * will be sent. The ResultProducer should call
   * postProcess(this) on the ResultListener it is to send results to.
*
* @exception Exception if an error occurs
*/
void postProcess() throws Exception;
/**
* Gets the results for a specified run number. Different run
* numbers correspond to different randomizations of the data. Results
* produced should be sent to the current ResultListener, but only
* if the ResultListener says the result is required (it may already
* have that result). A single run may produce multiple results.
*
* @param run the run number to generate results for.
* @exception Exception if a problem occurs while getting the results
*/
void doRun(int run) throws Exception;
/**
* Gets the keys for a specified run number. Different run
* numbers correspond to different randomizations of the data. Keys
* produced should be sent to the current ResultListener
*
* @param run the run number to get keys for.
* @exception Exception if a problem occurs while getting the keys
*/
void doRunKeys(int run) throws Exception;
/**
* Gets the names of each of the key columns produced for a single run.
* The names should not contain spaces (use '_' instead for easy
* translation.)
*
* @return an array containing the name of each key column
* @exception Exception if the key names could not be determined (perhaps
* because of a problem from a nested sub-resultproducer)
*/
String [] getKeyNames() throws Exception;
/**
* Gets the data types of each of the key columns produced for a single run.
*
* @return an array containing objects of the type of each key column. The
* objects should be Strings, or Doubles.
* @exception Exception if the key types could not be determined (perhaps
* because of a problem from a nested sub-resultproducer)
*/
Object [] getKeyTypes() throws Exception;
/**
* Gets the names of each of the result columns produced for a single run.
* The names should not contain spaces (use '_' instead for easy
* translation.)
*
* @return an array containing the name of each result column
* @exception Exception if the result names could not be determined (perhaps
* because of a problem from a nested sub-resultproducer)
*/
String [] getResultNames() throws Exception;
/**
* Gets the data types of each of the result columns produced for a
* single run.
*
* @return an array containing objects of the type of each result column.
* The objects should be Strings, or Doubles.
* @exception Exception if the result types could not be determined (perhaps
* because of a problem from a nested sub-resultproducer)
*/
Object [] getResultTypes() throws Exception;
/**
* Gets a description of the internal settings of the result
* producer, sufficient for distinguishing a ResultProducer
* instance from another with different settings (ignoring
* those settings set through this interface). For example,
* a cross-validation ResultProducer may have a setting for the
* number of folds. For a given state, the results produced should
* be compatible. Typically if a ResultProducer is an OptionHandler,
* this string will represent those command line arguments required
* to set the ResultProducer to that state.
*
* @return the description of the ResultProducer state, or null
* if no state is defined
*/
String getCompatibilityState();
} // ResultProducer
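// ---------------------------------------------------------------------
// Illustrative sketch (added for clarity, not part of the original
// source): a minimal ResultProducer that emits one key/result pair per
// run. It assumes the standard weka.experiment.ResultListener callbacks
// preProcess(rp), postProcess(rp), isResultRequired(rp, key) and
// acceptResult(rp, key, result); the class name and the produced
// columns are purely hypothetical.
// ---------------------------------------------------------------------
class MinimalResultProducer implements ResultProducer {

  private static final long serialVersionUID = 1L;

  /** the dataset results are produced for */
  protected Instances m_Instances;

  /** the listener produced results are sent to */
  protected ResultListener m_ResultListener;

  public void setInstances(Instances instances) {
    m_Instances = instances;
  }

  public void setResultListener(ResultListener listener) {
    m_ResultListener = listener;
  }

  public void setAdditionalMeasures(String[] additionalMeasures) {
    // no additional measures supported
  }

  public void preProcess() throws Exception {
    // announce ourselves to the listener, as required by the contract
    m_ResultListener.preProcess(this);
  }

  public void postProcess() throws Exception {
    // signal that no more results will be generated
    m_ResultListener.postProcess(this);
  }

  public void doRun(int run) throws Exception {
    Object[] key = new Object[] { m_Instances.relationName(), "" + run };
    // only send the result if the listener doesn't already have it
    if (m_ResultListener.isResultRequired(this, key)) {
      Object[] result = new Object[] { new Double(m_Instances.numInstances()) };
      m_ResultListener.acceptResult(this, key, result);
    }
  }

  public void doRunKeys(int run) throws Exception {
    Object[] key = new Object[] { m_Instances.relationName(), "" + run };
    // a null result indicates that only the key is being submitted
    m_ResultListener.acceptResult(this, key, null);
  }

  public String[] getKeyNames() {
    return new String[] { "Dataset", "Run" };
  }

  public Object[] getKeyTypes() {
    return new Object[] { "", "" };
  }

  public String[] getResultNames() {
    return new String[] { "Num_instances" };
  }

  public Object[] getResultTypes() {
    return new Object[] { new Double(0) };
  }

  public String getCompatibilityState() {
    // no settings beyond those set through the interface
    return "";
  }
}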
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/SplitEvaluator.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* SplitEvaluator.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.Serializable;
import weka.core.Instances;
/**
* Interface to objects able to generate a fixed set of results for
* a particular split of a dataset. The set of results should contain
 * fields related to any settings of the SplitEvaluator (not including
 * the dataset name): for example, one field for the classifier used to
 * get the results, another for the classifier options, etc. <p>
*
* Possible implementations of SplitEvaluator: <br>
* <ul>
* <li>StdClassification results
* <li>StdRegression results
* </ul>
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public interface SplitEvaluator extends Serializable {
/**
* Sets a list of method names for additional measures to look for
* in SplitEvaluators.
* @param additionalMeasures a list of method names
*/
void setAdditionalMeasures(String [] additionalMeasures);
/**
* Gets the names of each of the key columns produced for a single run.
* The names should not contain spaces (use '_' instead for easy
* translation.) The number of key fields must be constant for a given
* SplitEvaluator.
*
* @return an array containing the name of each key column
*/
String [] getKeyNames();
/**
* Gets the data types of each of the key columns produced for a single run.
* The number of key fields must be constant
* for a given SplitEvaluator.
*
* @return an array containing objects of the type of each key column. The
* objects should be Strings, or Doubles.
*/
Object [] getKeyTypes();
/**
* Gets the names of each of the result columns produced for a single run.
* The names should not contain spaces (use '_' instead for easy
* translation.) The number of result fields must be constant
* for a given SplitEvaluator.
*
* @return an array containing the name of each result column
*/
String [] getResultNames();
/**
* Gets the data types of each of the result columns produced for a
* single run. The number of result fields must be constant
* for a given SplitEvaluator.
*
* @return an array containing objects of the type of each result column.
* The objects should be Strings, or Doubles.
*/
Object [] getResultTypes();
/**
   * Gets the key describing the current SplitEvaluator. For example,
   * this may contain the name of the classifier used for classifier
* predictive evaluation. The number of key fields must be constant
* for a given SplitEvaluator.
*
   * @return an array of objects describing the current SplitEvaluator
*/
Object [] getKey();
/**
* Gets the results for the supplied train and test datasets.
*
* @param train the training Instances.
* @param test the testing Instances.
* @return the results stored in an array. The objects stored in
* the array may be Strings, Doubles, or null (for the missing value).
* @exception Exception if a problem occurs while getting the results
*/
Object [] getResult(Instances train, Instances test) throws Exception;
/**
   * Returns the raw output for the most recent call to getResult. Useful
   * for debugging SplitEvaluators.
   *
   * @return the raw output corresponding to the most recent call
   * to getResult
*/
String getRawResultOutput();
} // SplitEvaluator
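// ---------------------------------------------------------------------
// Illustrative sketch (added for clarity, not part of the original
// source): a trivial SplitEvaluator that reports only the sizes of the
// train and test splits. The class name and the reported columns are
// purely hypothetical.
// ---------------------------------------------------------------------
class MinimalSplitEvaluator implements SplitEvaluator {

  private static final long serialVersionUID = 1L;

  /** raw output of the most recent getResult() call */
  protected String m_RawResults = "";

  public void setAdditionalMeasures(String[] additionalMeasures) {
    // no additional measures supported
  }

  public String[] getKeyNames() {
    return new String[] { "Evaluator" };
  }

  public Object[] getKeyTypes() {
    return new Object[] { "" };
  }

  public String[] getResultNames() {
    return new String[] { "Num_train", "Num_test" };
  }

  public Object[] getResultTypes() {
    return new Object[] { new Double(0), new Double(0) };
  }

  public Object[] getKey() {
    // a constant key: this evaluator has no settings of its own
    return new Object[] { "MinimalSplitEvaluator" };
  }

  public Object[] getResult(Instances train, Instances test) throws Exception {
    Object[] result = new Object[] {
      new Double(train.numInstances()), new Double(test.numInstances()) };
    m_RawResults = "train=" + train.numInstances()
      + ", test=" + test.numInstances();
    return result;
  }

  public String getRawResultOutput() {
    return m_RawResults;
  }
}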
|
0
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka
|
java-sources/ai/libs/thirdparty/interruptible-weka/0.1.6/weka/experiment/Stats.java
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Stats.java
* Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
*
*/
package weka.experiment;
import java.io.Serializable;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
/**
* A class to store simple statistics.<p>
*
* Upon initialization the variables take the following values:<p>
* <code>
* {@link #count} = {@link #sum} = {@link #sumSq} = 0 <br />
* {@link #mean} = {@link #stdDev} = {@link #min} = {@link #max} = Double.NaN
* </code><p>
* This is called the initial state. <p>
*
 * To signal that a Stats object has been provided with values that hint
 * that something is wrong with either the data or the algorithm used, there
 * is also the invalid state, in which the variables take the following values: <p>
* <code>
* {@link #count} = {@link #sum} = {@link #sumSq} = {@link #mean} =
* {@link #stdDev} = {@link #min} = {@link #max} = Double.NaN
 * </code><p>
* Once a Stats object goes into the invalid state it can't change its state
* anymore. <p>
*
* A Stats object assumes that only values are subtracted (by using the
* {@link #subtract(double)} or {@link #subtract(double, double)} methods)
* that have previously been added (by using the {@link #add(double)} or
* {@link #add(double, double)} methods) and the weights must be the same
* too.<br />
* Otherwise the Stats object's fields' values are implementation defined.<p>
*
* If the implementation detects a problem then the Stats object goes into the
* invalid state.<p>
*
* The fields {@link #count}, {@link #sum}, {@link #sumSq}, {@link #min} and
* {@link #max} are always updated whereas the field {@link #mean} and
* {@link #stdDev} are only guaranteed to be updated after a call to
* {@link #calculateDerived()}.<p>
*
* For the fields {@link #min} and {@link #max} the following rules apply:<p>
* <code>
* min(values_added \ values_subtracted) >= {@link #min} >= min(values_added)<br>
* max(values_added \ values_subtracted) <= {@link #max} <= max(values_added)
* </code><p>
* Where \ is the set difference.<p>
*
* For the field {@link #stdDev} the following rules apply:<p>
* <ol>
* <li>If count <= 1 then
* {@link #stdDev}=Double.NaN.</li>
* <li>Otherwise {@link #stdDev} >= 0 and it should take on the value by best
* effort of the implementation.</li>
* </ol>
*
* For the methods {@link #add(double)}, {@link #add(double, double)},
* {@link #subtract(double)} and {@link #subtract(double, double)} the following
* rules apply:<p>
*
* <ol>
* <li>if weight < 0 then {@link #subtract(double, double)} is used instead of
* {@link #add(double, double)} with weight = -weight and vice versa.</li>
* <li>if weight = +-inf or weight = NaN then the Stats object goes into the
* invalid state.</li>
* <li>if value = +-inf or value = NaN then the Stats object goes into the
* invalid state.</li>
* <li>if weight = 0 then the value gets ignored.</li>
* <li>Otherwise the fields get updated by the implementation's best effort.</li>
* </ol>
*
* For {@link #count} the following rules apply<p>
*
* <ol>
* <li>If {@link #count} goes below zero then all fields are set to
* <code>Double.NaN</code> except the {@link #count} field which gets tracked
* normally.</li>
* <li>If {@link #count} = 0 then the Stats object goes into the initial state.
* </li>
* <li>If {@link #count} > 0 for the first time, then the Stats object goes into
* initial state and gets updated with the corresponding value and weight.
* </li>
* </ol>
*
* @author Len Trigg (trigg@cs.waikato.ac.nz)
* @version $Revision$
*/
public class Stats
implements Serializable, RevisionHandler {
/** for serialization */
private static final long serialVersionUID = -8610544539090024102L;
/** The number of values seen */
public double count = 0;
/** The sum of values seen */
public double sum = 0;
/** The sum of values squared seen */
public double sumSq = 0;
/** The std deviation of values at the last calculateDerived() call */
public double stdDev = Double.NaN;
/** The mean of values, or Double.NaN if no values seen */
public double mean = Double.NaN;
/** The minimum value seen, or Double.NaN if no values seen */
public double min = Double.NaN;
/** The maximum value seen, or Double.NaN if no values seen */
public double max = Double.NaN;
/** an important factor to calculate the standard deviation incrementally */
private double stdDevFactor = 0;
private void reset() {
count = 0;
sum = 0;
sumSq = 0;
stdDev = Double.NaN;
mean = Double.NaN;
min = Double.NaN;
max = Double.NaN;
stdDevFactor = 0;
}
private void negativeCount() {
sum = Double.NaN;
sumSq = Double.NaN;
stdDev = Double.NaN;
mean = Double.NaN;
min = Double.NaN;
max = Double.NaN;
}
private void goInvalid() {
count = Double.NaN;
negativeCount();
}
private boolean isInvalid() {
return Double.isNaN(count);
}
/**
* Adds a value to the observed values<p>
*
* It's equivalent to <code>add(value, 1)</code><p>
*
* @param value the observed value
*/
public void add(double value) {
add(value, 1);
}
/**
* Adds a weighted value to the observed values
*
* @param value the observed value
* @param weight the weight of the observed value
*/
public void add(double value, double weight) {
// treat as subtract
if (weight < 0) {
subtract(value, -weight);
return;
}
// don't leave invalid state
if (isInvalid())
return;
// go invalid
if (Double.isInfinite(weight) || Double.isNaN(weight) ||
Double.isInfinite(value) || Double.isNaN(value)) {
goInvalid();
return;
}
// ignore
if (weight == 0)
return;
double newCount = count + weight;
if (count < 0 && (newCount > 0 || Utils.eq(newCount, 0))) {
reset();
return;
}
count = newCount;
if (count < 0) {
return;
}
double weightedValue = value*weight;
sum += weightedValue;
sumSq += value * weightedValue;
if (Double.isNaN(mean)) {
// For the first value the mean can suffer from loss of precision
// so we treat it separately and make sure the calculation stays accurate
mean = value;
stdDevFactor = 0;
} else {
double delta = weight*(value - mean);
mean += delta/count;
stdDevFactor += delta*(value - mean);
}
if (Double.isNaN(min)) {
min = max = value;
} else if (value < min) {
min = value;
} else if (value > max) {
max = value;
}
}
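  // Note (added for clarity, not part of the original source): the update
  // in add() above is essentially a weighted variant of Welford's online
  // algorithm:
  //   mean'         = mean + w*(x - mean) / count'
  //   stdDevFactor' = stdDevFactor + w*(x - mean)*(x - mean')
  // so that calculateDerived() can later obtain the sample standard
  // deviation as sqrt(stdDevFactor / (count - 1)) without a second pass.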
/**
   * Removes a value from the observed values (no checking is done
* that the value being removed was actually added).<p>
*
* It's equivalent to <code>subtract(value, 1)</code><p>
*
* @param value the observed value
*/
public void subtract(double value) {
subtract(value, 1);
}
/**
* Subtracts a weighted value from the observed values
*
* @param value the observed value
* @param weight the weight of the observed value
*/
public void subtract(double value, double weight) {
// treat as add
if (weight < 0) {
add(value, -weight);
return;
}
// don't leave invalid state
if (isInvalid())
return;
// go invalid
if (Double.isInfinite(weight) || Double.isNaN(weight) ||
Double.isInfinite(value) || Double.isNaN(value)) {
goInvalid();
return;
}
// ignore
if (weight == 0)
return;
count -= weight;
if (Utils.eq(count, 0)) {
reset();
return;
} else if (count < 0) {
negativeCount();
return;
}
double weightedValue = value*weight;
sum -= weightedValue;
sumSq -= value * weightedValue;
double delta = weight*(value - mean);
mean -= delta/count;
stdDevFactor -= delta*(value - mean);
}
/**
* Tells the object to calculate any statistics that don't have their
* values automatically updated during add. Currently updates the standard deviation.
*/
public void calculateDerived() {
if (count <= 1) {
stdDev = Double.NaN;
return;
}
stdDev = stdDevFactor/(count - 1);
if (stdDev < 0) {
stdDev = 0;
return;
}
stdDev = Math.sqrt(stdDev);
}
/**
* Returns a string summarising the stats so far.
*
* @return the summary string
*/
public String toString() {
return
"Count " + Utils.doubleToString(count, 8) + '\n'
+ "Min " + Utils.doubleToString(min, 8) + '\n'
+ "Max " + Utils.doubleToString(max, 8) + '\n'
+ "Sum " + Utils.doubleToString(sum, 8) + '\n'
+ "SumSq " + Utils.doubleToString(sumSq, 8) + '\n'
+ "Mean " + Utils.doubleToString(mean, 8) + '\n'
+ "StdDev " + Utils.doubleToString(stdDev, 8) + '\n';
}
/**
* Returns the revision string.
*
* @return the revision
*/
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
/**
   * Tests the Stats object from the command line.
   * Reads lines from stdin, expecting one value per line.
*
* @param args ignored.
*/
public static void main(String [] args) {
try {
Stats ps = new Stats();
java.io.LineNumberReader r = new java.io.LineNumberReader(
new java.io.InputStreamReader(System.in));
String line;
while ((line = r.readLine()) != null) {
line = line.trim();
if (line.equals("") || line.startsWith("@") || line.startsWith("%")) {
continue;
}
java.util.StringTokenizer s
= new java.util.StringTokenizer(line, " ,\t\n\r\f");
int count = 0;
double v1 = 0;
while (s.hasMoreTokens()) {
double val = (new Double(s.nextToken())).doubleValue();
if (count == 0) {
v1 = val;
} else {
System.err.println("MSG: Too many values in line \""
+ line + "\", skipped.");
break;
}
count++;
}
if (count == 1) {
ps.add(v1);
}
}
ps.calculateDerived();
System.err.println(ps);
} catch (Exception ex) {
ex.printStackTrace();
System.err.println(ex.getMessage());
}
}
} // Stats
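// ---------------------------------------------------------------------
// Illustrative usage sketch (added for clarity, not part of the original
// source): exercises the add/subtract/calculateDerived contract described
// in the class comment above. The class name is hypothetical.
// ---------------------------------------------------------------------
class StatsUsageExample {
  public static void main(String[] args) {
    Stats s = new Stats();
    s.add(1.0);             // unweighted observation
    s.add(3.0, 2.0);        // weighted observation
    s.subtract(1.0);        // only previously added values may be removed
    s.calculateDerived();   // derived stats are only guaranteed after this call
    System.out.println("count=" + s.count + " mean=" + s.mean
      + " stdDev=" + s.stdDev);
  }
}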
|