repo
stringlengths
1
191
file
stringlengths
23
351
code
stringlengths
0
5.32M
file_length
int64
0
5.32M
avg_line_length
float64
0
2.9k
max_line_length
int64
0
288k
extension_type
stringclasses
1 value
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/SMO.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SMO.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import java.io.Serializable; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.functions.supportVector.Kernel; import weka.classifiers.functions.supportVector.PolyKernel; import weka.classifiers.functions.supportVector.SMOset; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.DenseInstance; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import weka.filters.unsupervised.attribute.Standardize; /** <!-- globalinfo-start --> * Implements John 
Platt's sequential minimal optimization algorithm for training a support vector classifier.<br/> * <br/> * This implementation globally replaces all missing values and transforms nominal attributes into binary ones. It also normalizes all attributes by default. (In that case the coefficients in the output are based on the normalized data, not the original data --- this is important for interpreting the classifier.)<br/> * <br/> * Multi-class problems are solved using pairwise classification (1-vs-1 and if logistic models are built pairwise coupling according to Hastie and Tibshirani, 1998).<br/> * <br/> * To obtain proper probability estimates, use the option that fits logistic regression models to the outputs of the support vector machine. In the multi-class case the predicted probabilities are coupled using Hastie and Tibshirani's pairwise coupling method.<br/> * <br/> * Note: for improved speed normalization should be turned off when operating on SparseInstances.<br/> * <br/> * For more information on the SMO algorithm, see<br/> * <br/> * J. Platt: Fast Training of Support Vector Machines using Sequential Minimal Optimization. In B. Schoelkopf and C. Burges and A. Smola, editors, Advances in Kernel Methods - Support Vector Learning, 1998.<br/> * <br/> * S.S. Keerthi, S.K. Shevade, C. Bhattacharyya, K.R.K. Murthy (2001). Improvements to Platt's SMO Algorithm for SVM Classifier Design. Neural Computation. 13(3):637-649.<br/> * <br/> * Trevor Hastie, Robert Tibshirani: Classification by Pairwise Coupling. In: Advances in Neural Information Processing Systems, 1998. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;incollection{Platt1998, * author = {J. Platt}, * booktitle = {Advances in Kernel Methods - Support Vector Learning}, * editor = {B. Schoelkopf and C. Burges and A. 
Smola}, * publisher = {MIT Press}, * title = {Fast Training of Support Vector Machines using Sequential Minimal Optimization}, * year = {1998}, * URL = {http://research.microsoft.com/\~jplatt/smo.html}, * PS = {http://research.microsoft.com/\~jplatt/smo-book.ps.gz}, * PDF = {http://research.microsoft.com/\~jplatt/smo-book.pdf} * } * * &#64;article{Keerthi2001, * author = {S.S. Keerthi and S.K. Shevade and C. Bhattacharyya and K.R.K. Murthy}, * journal = {Neural Computation}, * number = {3}, * pages = {637-649}, * title = {Improvements to Platt's SMO Algorithm for SVM Classifier Design}, * volume = {13}, * year = {2001}, * PS = {http://guppy.mpe.nus.edu.sg/\~mpessk/svm/smo_mod_nc.ps.gz} * } * * &#64;inproceedings{Hastie1998, * author = {Trevor Hastie and Robert Tibshirani}, * booktitle = {Advances in Neural Information Processing Systems}, * editor = {Michael I. Jordan and Michael J. Kearns and Sara A. Solla}, * publisher = {MIT Press}, * title = {Classification by Pairwise Coupling}, * volume = {10}, * year = {1998}, * PS = {http://www-stat.stanford.edu/\~hastie/Papers/2class.ps} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -no-checks * Turns off all checks - use with caution! * Turning them off assumes that data is purely numeric, doesn't * contain any missing values, and has a nominal class. Turning them * off also means that no header information will be stored if the * machine is linear. Finally, it also assumes that no instance has * a weight equal to 0. * (default: checks on)</pre> * * <pre> -C &lt;double&gt; * The complexity constant C. (default 1)</pre> * * <pre> -N * Whether to 0=normalize/1=standardize/2=neither. (default 0=normalize)</pre> * * <pre> -L &lt;double&gt; * The tolerance parameter. (default 1.0e-3)</pre> * * <pre> -P &lt;double&gt; * The epsilon for round-off error. 
(default 1.0e-12)</pre> * * <pre> -M * Fit logistic models to SVM outputs. </pre> * * <pre> -V &lt;double&gt; * The number of folds for the internal * cross-validation. (default -1, use training data)</pre> * * <pre> -W &lt;double&gt; * The random number seed. (default 1)</pre> * * <pre> -K &lt;classname and parameters&gt; * The Kernel to use. * (default: weka.classifiers.functions.supportVector.PolyKernel)</pre> * * <pre> * Options specific to kernel weka.classifiers.functions.supportVector.PolyKernel: * </pre> * * <pre> -D * Enables debugging output (if available) to be printed. * (default: off)</pre> * * <pre> -no-checks * Turns off all checks - use with caution! * (default: checks on)</pre> * * <pre> -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007)</pre> * * <pre> -E &lt;num&gt; * The Exponent to use. * (default: 1.0)</pre> * * <pre> -L * Use lower-order terms. * (default: no)</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Shane Legg (shane@intelligenesis.net) (sparse vector code) * @author Stuart Inglis (stuart@reeltwo.com) (sparse vector code) * @version $Revision: 8034 $ */ public class SMO extends AbstractClassifier implements WeightedInstancesHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -6585883636378691736L; /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Implements John Platt's sequential minimal optimization " + "algorithm for training a support vector classifier.\n\n" + "This implementation globally replaces all missing values and " + "transforms nominal attributes into binary ones. It also " + "normalizes all attributes by default. 
(In that case the coefficients " + "in the output are based on the normalized data, not the " + "original data --- this is important for interpreting the classifier.)\n\n" + "Multi-class problems are solved using pairwise classification " + "(1-vs-1 and if logistic models are built pairwise coupling " + "according to Hastie and Tibshirani, 1998).\n\n" + "To obtain proper probability estimates, use the option that fits " + "logistic regression models to the outputs of the support vector " + "machine. In the multi-class case the predicted probabilities " + "are coupled using Hastie and Tibshirani's pairwise coupling " + "method.\n\n" + "Note: for improved speed normalization should be turned off when " + "operating on SparseInstances.\n\n" + "For more information on the SMO algorithm, see\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.INCOLLECTION); result.setValue(Field.AUTHOR, "J. Platt"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "Fast Training of Support Vector Machines using Sequential Minimal Optimization"); result.setValue(Field.BOOKTITLE, "Advances in Kernel Methods - Support Vector Learning"); result.setValue(Field.EDITOR, "B. Schoelkopf and C. Burges and A. Smola"); result.setValue(Field.PUBLISHER, "MIT Press"); result.setValue(Field.URL, "http://research.microsoft.com/~jplatt/smo.html"); result.setValue(Field.PDF, "http://research.microsoft.com/~jplatt/smo-book.pdf"); result.setValue(Field.PS, "http://research.microsoft.com/~jplatt/smo-book.ps.gz"); additional = result.add(Type.ARTICLE); additional.setValue(Field.AUTHOR, "S.S. 
Keerthi and S.K. Shevade and C. Bhattacharyya and K.R.K. Murthy"); additional.setValue(Field.YEAR, "2001"); additional.setValue(Field.TITLE, "Improvements to Platt's SMO Algorithm for SVM Classifier Design"); additional.setValue(Field.JOURNAL, "Neural Computation"); additional.setValue(Field.VOLUME, "13"); additional.setValue(Field.NUMBER, "3"); additional.setValue(Field.PAGES, "637-649"); additional.setValue(Field.PS, "http://guppy.mpe.nus.edu.sg/~mpessk/svm/smo_mod_nc.ps.gz"); additional = result.add(Type.INPROCEEDINGS); additional.setValue(Field.AUTHOR, "Trevor Hastie and Robert Tibshirani"); additional.setValue(Field.YEAR, "1998"); additional.setValue(Field.TITLE, "Classification by Pairwise Coupling"); additional.setValue(Field.BOOKTITLE, "Advances in Neural Information Processing Systems"); additional.setValue(Field.VOLUME, "10"); additional.setValue(Field.PUBLISHER, "MIT Press"); additional.setValue(Field.EDITOR, "Michael I. Jordan and Michael J. Kearns and Sara A. Solla"); additional.setValue(Field.PS, "http://www-stat.stanford.edu/~hastie/Papers/2class.ps"); return result; } /** * Class for building a binary support vector machine. */ public class BinarySMO implements Serializable { /** for serialization */ static final long serialVersionUID = -8246163625699362456L; /** The Lagrange multipliers. */ protected double[] m_alpha; /** The thresholds. */ protected double m_b, m_bLow, m_bUp; /** The indices for m_bLow and m_bUp */ protected int m_iLow, m_iUp; /** The training data. */ protected Instances m_data; /** Weight vector for linear machine. */ protected double[] m_weights; /** Variables to hold weight vector in sparse form. (To reduce storage requirements.) */ protected double[] m_sparseWeights; protected int[] m_sparseIndices; /** Kernel to use **/ protected Kernel m_kernel; /** The transformed class values. */ protected double[] m_class; /** The current set of errors for all non-bound examples. 
*/
    protected double[] m_errors;

    /* The five different index sets maintained by Keerthi et al.'s algorithm. */
    /** {i: 0 < m_alpha[i] < C} */
    protected SMOset m_I0;
    /** {i: m_class[i] = 1, m_alpha[i] = 0} */
    protected SMOset m_I1;
    /** {i: m_class[i] = -1, m_alpha[i] =C} */
    protected SMOset m_I2;
    /** {i: m_class[i] = 1, m_alpha[i] = C} */
    protected SMOset m_I3;
    /** {i: m_class[i] = -1, m_alpha[i] = 0} */
    protected SMOset m_I4;

    /** The set of support vectors */
    protected SMOset m_supportVectors; // {i: 0 < m_alpha[i]}

    /** Stores logistic regression model for probability estimate */
    protected Logistic m_logistic = null;

    /** Stores the weight of the training instances */
    protected double m_sumOfWeights = 0;

    /**
     * Fits logistic regression model to SVM outputs analogue
     * to John Platt's method, for turning raw SVM outputs into
     * probability estimates.
     *
     * @param insts the set of training instances
     * @param cl1 the first class' index
     * @param cl2 the second class' index
     * @param numFolds the number of folds for cross-validation; if &lt;= 0
     *        the training data itself is used for the fit (cheaper but biased)
     * @param random for randomizing the data
     * @throws Exception if the sigmoid can't be fit successfully
     */
    protected void fitLogistic(Instances insts, int cl1, int cl2,
                               int numFolds, Random random)
      throws Exception {

      // Create header of instances object: one numeric attribute holding
      // the raw SVM output plus a binary class attribute (cl1 vs. cl2)
      FastVector atts = new FastVector(2);
      atts.addElement(new Attribute("pred"));
      FastVector attVals = new FastVector(2);
      attVals.addElement(insts.classAttribute().value(cl1));
      attVals.addElement(insts.classAttribute().value(cl2));
      atts.addElement(new Attribute("class", attVals));
      Instances data = new Instances("data", atts, insts.numInstances());
      data.setClassIndex(1);

      // Collect data for fitting the logistic model
      if (numFolds <= 0) {

        // Use training data directly
        for (int j = 0; j < insts.numInstances(); j++) {
          Instance inst = insts.instance(j);
          double[] vals = new double[2];
          vals[0] = SVMOutput(-1, inst);
          if (inst.classValue() == cl2) {
            vals[1] = 1;
          }
          data.add(new DenseInstance(inst.weight(), vals));
        }
      } else {

        // Check whether number of folds too large
        if (numFolds > insts.numInstances()) {
          numFolds = insts.numInstances();
        }

        // Make copy of instances because we will shuffle them around
        insts = new Instances(insts);

        // Perform stratified numFolds-fold cross-validation to collect
        // (approximately) unbiased predictions for the sigmoid fit
        insts.randomize(random);
        insts.stratify(numFolds);
        for (int i = 0; i < numFolds; i++) {
          Instances train = insts.trainCV(numFolds, i, random);
          /* SerializedObject so = new SerializedObject(this);
             BinarySMO smo = (BinarySMO)so.getObject(); */
          BinarySMO smo = new BinarySMO();
          smo.setKernel(Kernel.makeCopy(SMO.this.m_kernel));
          smo.buildClassifier(train, cl1, cl2, false, -1, -1);
          Instances test = insts.testCV(numFolds, i);
          for (int j = 0; j < test.numInstances(); j++) {
            double[] vals = new double[2];
            vals[0] = smo.SVMOutput(-1, test.instance(j));
            if (test.instance(j).classValue() == cl2) {
              vals[1] = 1;
            }
            data.add(new DenseInstance(test.instance(j).weight(), vals));
          }
        }
      }

      // Build logistic regression model on the collected (output, class)
      // pairs.
      // NOTE(review): an iteration cap is imposed here so the sigmoid fit
      // cannot run away; the original in-line note claimed a limit of 1000
      // iterations but the code actually caps at 100 -- confirm which value
      // is intended. This only affects the probability estimates, not the
      // raw SVM predictions.
      m_logistic = new Logistic();
      m_logistic.setMaxIts(100);
      m_logistic.buildClassifier(data);
    }

    /**
     * sets the kernel to use
     *
     * @param value the kernel to use
     */
    public void setKernel(Kernel value) {
      m_kernel = value;
    }

    /**
     * Returns the kernel to use
     *
     * @return the current kernel
     */
    public Kernel getKernel() {
      return m_kernel;
    }

    /**
     * Method for building the binary classifier.
* * @param insts the set of training instances * @param cl1 the first class' index * @param cl2 the second class' index * @param fitLogistic true if logistic model is to be fit * @param numFolds number of folds for internal cross-validation * @param randomSeed random number generator for cross-validation * @throws Exception if the classifier can't be built successfully */ protected void buildClassifier(Instances insts, int cl1, int cl2, boolean fitLogistic, int numFolds, int randomSeed) throws Exception { // Initialize some variables m_bUp = -1; m_bLow = 1; m_b = 0; m_alpha = null; m_data = null; m_weights = null; m_errors = null; m_logistic = null; m_I0 = null; m_I1 = null; m_I2 = null; m_I3 = null; m_I4 = null; m_sparseWeights = null; m_sparseIndices = null; // Store the sum of weights m_sumOfWeights = insts.sumOfWeights(); // Set class values m_class = new double[insts.numInstances()]; m_iUp = -1; m_iLow = -1; for (int i = 0; i < m_class.length; i++) { if ((int) insts.instance(i).classValue() == cl1) { m_class[i] = -1; m_iLow = i; } else if ((int) insts.instance(i).classValue() == cl2) { m_class[i] = 1; m_iUp = i; } else { throw new Exception ("This should never happen!"); } } // Check whether one or both classes are missing if ((m_iUp == -1) || (m_iLow == -1)) { if (m_iUp != -1) { m_b = -1; } else if (m_iLow != -1) { m_b = 1; } else { m_class = null; return; } if (m_KernelIsLinear) { m_sparseWeights = new double[0]; m_sparseIndices = new int[0]; m_class = null; } else { m_supportVectors = new SMOset(0); m_alpha = new double[0]; m_class = new double[0]; } // Fit sigmoid if requested if (fitLogistic) { fitLogistic(insts, cl1, cl2, numFolds, new Random(randomSeed)); } return; } // Set the reference to the data m_data = insts; // If machine is linear, reserve space for weights if (m_KernelIsLinear) { m_weights = new double[m_data.numAttributes()]; } else { m_weights = null; } // Initialize alpha array to zero m_alpha = new double[m_data.numInstances()]; // 
Initialize sets m_supportVectors = new SMOset(m_data.numInstances()); m_I0 = new SMOset(m_data.numInstances()); m_I1 = new SMOset(m_data.numInstances()); m_I2 = new SMOset(m_data.numInstances()); m_I3 = new SMOset(m_data.numInstances()); m_I4 = new SMOset(m_data.numInstances()); // Clean out some instance variables m_sparseWeights = null; m_sparseIndices = null; // init kernel m_kernel.buildKernel(m_data); // Initialize error cache m_errors = new double[m_data.numInstances()]; m_errors[m_iLow] = 1; m_errors[m_iUp] = -1; // Build up I1 and I4 for (int i = 0; i < m_class.length; i++ ) { if (m_class[i] == 1) { m_I1.insert(i); } else { m_I4.insert(i); } } // Loop to find all the support vectors int numChanged = 0; boolean examineAll = true; while ((numChanged > 0) || examineAll) { numChanged = 0; if (examineAll) { for (int i = 0; i < m_alpha.length; i++) { if (examineExample(i)) { numChanged++; } } } else { // This code implements Modification 1 from Keerthi et al.'s paper for (int i = 0; i < m_alpha.length; i++) { if ((m_alpha[i] > 0) && (m_alpha[i] < m_C * m_data.instance(i).weight())) { if (examineExample(i)) { numChanged++; } // Is optimality on unbound vectors obtained? 
if (m_bUp > m_bLow - 2 * m_tol) { numChanged = 0; break; } } } //This is the code for Modification 2 from Keerthi et al.'s paper /*boolean innerLoopSuccess = true; numChanged = 0; while ((m_bUp < m_bLow - 2 * m_tol) && (innerLoopSuccess == true)) { innerLoopSuccess = takeStep(m_iUp, m_iLow, m_errors[m_iLow]); }*/ } if (examineAll) { examineAll = false; } else if (numChanged == 0) { examineAll = true; } } // Set threshold m_b = (m_bLow + m_bUp) / 2.0; // Save memory m_kernel.clean(); m_errors = null; m_I0 = m_I1 = m_I2 = m_I3 = m_I4 = null; // If machine is linear, delete training data // and store weight vector in sparse format if (m_KernelIsLinear) { // We don't need to store the set of support vectors m_supportVectors = null; // We don't need to store the class values either m_class = null; // Clean out training data if (!m_checksTurnedOff) { m_data = new Instances(m_data, 0); } else { m_data = null; } // Convert weight vector double[] sparseWeights = new double[m_weights.length]; int[] sparseIndices = new int[m_weights.length]; int counter = 0; for (int i = 0; i < m_weights.length; i++) { if (m_weights[i] != 0.0) { sparseWeights[counter] = m_weights[i]; sparseIndices[counter] = i; counter++; } } m_sparseWeights = new double[counter]; m_sparseIndices = new int[counter]; System.arraycopy(sparseWeights, 0, m_sparseWeights, 0, counter); System.arraycopy(sparseIndices, 0, m_sparseIndices, 0, counter); // Clean out weight vector m_weights = null; // We don't need the alphas in the linear case m_alpha = null; } // Fit sigmoid if requested if (fitLogistic) { fitLogistic(insts, cl1, cl2, numFolds, new Random(randomSeed)); } } /** * Computes SVM output for given instance. * * @param index the instance for which output is to be computed * @param inst the instance * @return the output of the SVM for the given instance * @throws Exception in case of an error */ public double SVMOutput(int index, Instance inst) throws Exception { double result = 0; // Is the machine linear? 
if (m_KernelIsLinear) { // Is weight vector stored in sparse format? if (m_sparseWeights == null) { int n1 = inst.numValues(); for (int p = 0; p < n1; p++) { if (inst.index(p) != m_classIndex) { result += m_weights[inst.index(p)] * inst.valueSparse(p); } } } else { int n1 = inst.numValues(); int n2 = m_sparseWeights.length; for (int p1 = 0, p2 = 0; p1 < n1 && p2 < n2;) { int ind1 = inst.index(p1); int ind2 = m_sparseIndices[p2]; if (ind1 == ind2) { if (ind1 != m_classIndex) { result += inst.valueSparse(p1) * m_sparseWeights[p2]; } p1++; p2++; } else if (ind1 > ind2) { p2++; } else { p1++; } } } } else { for (int i = m_supportVectors.getNext(-1); i != -1; i = m_supportVectors.getNext(i)) { result += m_class[i] * m_alpha[i] * m_kernel.eval(index, i, inst); } } result -= m_b; return result; } /** * Prints out the classifier. * * @return a description of the classifier as a string */ public String toString() { StringBuffer text = new StringBuffer(); int printed = 0; if ((m_alpha == null) && (m_sparseWeights == null)) { return "BinarySMO: No model built yet.\n"; } try { text.append("BinarySMO\n\n"); // If machine linear, print weight vector if (m_KernelIsLinear) { text.append("Machine linear: showing attribute weights, "); text.append("not support vectors.\n\n"); // We can assume that the weight vector is stored in sparse // format because the classifier has been built for (int i = 0; i < m_sparseWeights.length; i++) { if (m_sparseIndices[i] != (int)m_classIndex) { if (printed > 0) { text.append(" + "); } else { text.append(" "); } text.append(Utils.doubleToString(m_sparseWeights[i], 12, 4) + " * "); if (m_filterType == FILTER_STANDARDIZE) { text.append("(standardized) "); } else if (m_filterType == FILTER_NORMALIZE) { text.append("(normalized) "); } if (!m_checksTurnedOff) { text.append(m_data.attribute(m_sparseIndices[i]).name()+"\n"); } else { text.append("attribute with index " + m_sparseIndices[i] +"\n"); } printed++; } } } else { for (int i = 0; i < 
m_alpha.length; i++) { if (m_supportVectors.contains(i)) { double val = m_alpha[i]; if (m_class[i] == 1) { if (printed > 0) { text.append(" + "); } } else { text.append(" - "); } text.append(Utils.doubleToString(val, 12, 4) + " * <"); for (int j = 0; j < m_data.numAttributes(); j++) { if (j != m_data.classIndex()) { text.append(m_data.instance(i).toString(j)); } if (j != m_data.numAttributes() - 1) { text.append(" "); } } text.append("> * X]\n"); printed++; } } } if (m_b > 0) { text.append(" - " + Utils.doubleToString(m_b, 12, 4)); } else { text.append(" + " + Utils.doubleToString(-m_b, 12, 4)); } if (!m_KernelIsLinear) { text.append("\n\nNumber of support vectors: " + m_supportVectors.numElements()); } int numEval = 0; int numCacheHits = -1; if (m_kernel != null) { numEval = m_kernel.numEvals(); numCacheHits = m_kernel.numCacheHits(); } text.append("\n\nNumber of kernel evaluations: " + numEval); if (numCacheHits >= 0 && numEval > 0) { double hitRatio = 1 - numEval*1.0/(numCacheHits+numEval); text.append(" (" + Utils.doubleToString(hitRatio*100, 7, 3).trim() + "% cached)"); } } catch (Exception e) { e.printStackTrace(); return "Can't print BinarySMO classifier."; } return text.toString(); } /** * Examines instance. * * @param i2 index of instance to examine * @return true if examination was successfull * @throws Exception if something goes wrong */ protected boolean examineExample(int i2) throws Exception { double y2, F2; int i1 = -1; y2 = m_class[i2]; if (m_I0.contains(i2)) { F2 = m_errors[i2]; } else { F2 = SVMOutput(i2, m_data.instance(i2)) + m_b - y2; m_errors[i2] = F2; // Update thresholds if ((m_I1.contains(i2) || m_I2.contains(i2)) && (F2 < m_bUp)) { m_bUp = F2; m_iUp = i2; } else if ((m_I3.contains(i2) || m_I4.contains(i2)) && (F2 > m_bLow)) { m_bLow = F2; m_iLow = i2; } } // Check optimality using current bLow and bUp and, if // violated, find an index i1 to do joint optimization // with i2... 
boolean optimal = true;

      // i2 can violate optimality w.r.t. bLow only if it is in I0, I1 or I2
      if (m_I0.contains(i2) || m_I1.contains(i2) || m_I2.contains(i2)) {
        if (m_bLow - F2 > 2 * m_tol) {
          optimal = false;
          i1 = m_iLow;
        }
      }

      // ... and w.r.t. bUp only if it is in I0, I3 or I4
      if (m_I0.contains(i2) || m_I3.contains(i2) || m_I4.contains(i2)) {
        if (F2 - m_bUp > 2 * m_tol) {
          optimal = false;
          i1 = m_iUp;
        }
      }
      if (optimal) {
        return false;
      }

      // For i2 unbound choose the better i1...
      if (m_I0.contains(i2)) {
        if (m_bLow - F2 > F2 - m_bUp) {
          i1 = m_iLow;
        } else {
          i1 = m_iUp;
        }
      }
      if (i1 == -1) {
        throw new Exception("This should never happen!");
      }
      return takeStep(i1, i2, F2);
    }

    /**
     * Method solving for the Lagrange multipliers for
     * two instances (the inner optimization step of SMO).
     *
     * @param i1 index of the first instance
     * @param i2 index of the second instance
     * @param F2 the cached error value for the second instance
     * @return true if multipliers could be found
     * @throws Exception if something goes wrong
     */
    protected boolean takeStep(int i1, int i2, double F2) throws Exception {

      double alph1, alph2, y1, y2, F1, s, L, H, k11, k12, k22, eta,
        a1, a2, f1, f2, v1, v2, Lobj, Hobj;

      // Per-instance upper bounds on the multipliers (instance weights scale C)
      double C1 = m_C * m_data.instance(i1).weight();
      double C2 = m_C * m_data.instance(i2).weight();

      // Don't do anything if the two instances are the same
      if (i1 == i2) {
        return false;
      }

      // Initialize variables
      alph1 = m_alpha[i1];
      alph2 = m_alpha[i2];
      y1 = m_class[i1];
      y2 = m_class[i2];
      F1 = m_errors[i1];
      s = y1 * y2;

      // Find the constraints on a2 (the box-clipping bounds L and H)
      if (y1 != y2) {
        L = Math.max(0, alph2 - alph1);
        H = Math.min(C2, C1 + alph2 - alph1);
      } else {
        L = Math.max(0, alph1 + alph2 - C1);
        H = Math.min(C2, alph1 + alph2);
      }
      if (L >= H) {
        return false;
      }

      // Compute second derivative of objective function
      k11 = m_kernel.eval(i1, i1, m_data.instance(i1));
      k12 = m_kernel.eval(i1, i2, m_data.instance(i1));
      k22 = m_kernel.eval(i2, i2, m_data.instance(i2));
      eta = 2 * k12 - k11 - k22;

      // Check if second derivative is negative
      if (eta < 0) {

        // Compute unconstrained maximum
        a2 = alph2 - y2 * (F1 - F2) / eta;

        // Compute constrained maximum
        if (a2 < L) {
          a2 = L;
        } else if (a2 > H) {
          a2 = H;
        }
      } else {

        // Look at endpoints of diagonal: evaluate the objective at both
        // clip bounds and move a2 to whichever endpoint is better
        f1 = SVMOutput(i1, m_data.instance(i1));
        f2 = SVMOutput(i2, m_data.instance(i2));
        v1 = f1 + m_b - y1 * alph1 * k11 - y2 * alph2 * k12;
        v2 = f2 + m_b - y1 * alph1 * k12 - y2 * alph2 * k22;
        double gamma = alph1 + s * alph2;
        Lobj = (gamma - s * L) + L -
          0.5 * k11 * (gamma - s * L) * (gamma - s * L) -
          0.5 * k22 * L * L -
          s * k12 * (gamma - s * L) * L -
          y1 * (gamma - s * L) * v1 -
          y2 * L * v2;
        Hobj = (gamma - s * H) + H -
          0.5 * k11 * (gamma - s * H) * (gamma - s * H) -
          0.5 * k22 * H * H -
          s * k12 * (gamma - s * H) * H -
          y1 * (gamma - s * H) * v1 -
          y2 * H * v2;
        if (Lobj > Hobj + m_eps) {
          a2 = L;
        } else if (Lobj < Hobj - m_eps) {
          a2 = H;
        } else {
          a2 = alph2;
        }
      }

      // Insufficient progress: bail out without touching any state
      if (Math.abs(a2 - alph2) < m_eps * (a2 + alph2 + m_eps)) {
        return false;
      }

      // To prevent precision problems (snap a2 onto the box boundary)
      if (a2 > C2 - m_Del * C2) {
        a2 = C2;
      } else if (a2 <= m_Del * C2) {
        a2 = 0;
      }

      // Recompute a1 from the equality constraint
      a1 = alph1 + s * (alph2 - a2);

      // To prevent precision problems (snap a1 onto the box boundary)
      if (a1 > C1 - m_Del * C1) {
        a1 = C1;
      } else if (a1 <= m_Del * C1) {
        a1 = 0;
      }

      // Update sets: membership in the support-vector set and in I0..I4 is
      // fully determined by the new multiplier value and the class sign
      if (a1 > 0) {
        m_supportVectors.insert(i1);
      } else {
        m_supportVectors.delete(i1);
      }
      if ((a1 > 0) && (a1 < C1)) {
        m_I0.insert(i1);
      } else {
        m_I0.delete(i1);
      }
      if ((y1 == 1) && (a1 == 0)) {
        m_I1.insert(i1);
      } else {
        m_I1.delete(i1);
      }
      if ((y1 == -1) && (a1 == C1)) {
        m_I2.insert(i1);
      } else {
        m_I2.delete(i1);
      }
      if ((y1 == 1) && (a1 == C1)) {
        m_I3.insert(i1);
      } else {
        m_I3.delete(i1);
      }
      if ((y1 == -1) && (a1 == 0)) {
        m_I4.insert(i1);
      } else {
        m_I4.delete(i1);
      }
      if (a2 > 0) {
        m_supportVectors.insert(i2);
      } else {
        m_supportVectors.delete(i2);
      }
      if ((a2 > 0) && (a2 < C2)) {
        m_I0.insert(i2);
      } else {
        m_I0.delete(i2);
      }
      if ((y2 == 1) && (a2 == 0)) {
        m_I1.insert(i2);
      } else {
        m_I1.delete(i2);
      }
      if ((y2 == -1) && (a2 == C2)) {
        m_I2.insert(i2);
      } else {
        m_I2.delete(i2);
      }
      if ((y2 == 1) && (a2 == C2)) {
        m_I3.insert(i2);
      } else {
        m_I3.delete(i2);
      }
      if ((y2 == -1) && (a2 == 0)) {
        m_I4.insert(i2);
      } else {
        m_I4.delete(i2);
      }

      // Update weight vector to reflect change a1 and a2, if linear SVM
      if (m_KernelIsLinear) {
        Instance inst1 = m_data.instance(i1);
        for (int p1 = 0; p1 < inst1.numValues(); p1++) {
          if (inst1.index(p1) != m_data.classIndex()) {
            m_weights[inst1.index(p1)] +=
              y1 * (a1 - alph1) * inst1.valueSparse(p1);
          }
        }
        Instance inst2 = m_data.instance(i2);
        for (int p2 = 0; p2 < inst2.numValues(); p2++) {
          if (inst2.index(p2) != m_data.classIndex()) {
            m_weights[inst2.index(p2)] +=
              y2 * (a2 - alph2) * inst2.valueSparse(p2);
          }
        }
      }

      // Update error cache using new Lagrange multipliers
      for (int j = m_I0.getNext(-1); j != -1; j = m_I0.getNext(j)) {
        if ((j != i1) && (j != i2)) {
          m_errors[j] +=
            y1 * (a1 - alph1) * m_kernel.eval(i1, j, m_data.instance(i1)) +
            y2 * (a2 - alph2) * m_kernel.eval(i2, j, m_data.instance(i2));
        }
      }

      // Update error cache for i1 and i2
      m_errors[i1] += y1 * (a1 - alph1) * k11 + y2 * (a2 - alph2) * k12;
      m_errors[i2] += y1 * (a1 - alph1) * k12 + y2 * (a2 - alph2) * k22;

      // Update array with Lagrange multipliers
      m_alpha[i1] = a1;
      m_alpha[i2] = a2;

      // Update thresholds: recompute bLow/bUp over the unbound set I0 ...
      m_bLow = -Double.MAX_VALUE;
      m_bUp = Double.MAX_VALUE;
      m_iLow = -1;
      m_iUp = -1;
      for (int j = m_I0.getNext(-1); j != -1; j = m_I0.getNext(j)) {
        if (m_errors[j] < m_bUp) {
          m_bUp = m_errors[j];
          m_iUp = j;
        }
        if (m_errors[j] > m_bLow) {
          m_bLow = m_errors[j];
          m_iLow = j;
        }
      }

      // ... and also consider i1/i2 themselves when they left I0
      if (!m_I0.contains(i1)) {
        if (m_I3.contains(i1) || m_I4.contains(i1)) {
          if (m_errors[i1] > m_bLow) {
            m_bLow = m_errors[i1];
            m_iLow = i1;
          }
        } else {
          if (m_errors[i1] < m_bUp) {
            m_bUp = m_errors[i1];
            m_iUp = i1;
          }
        }
      }
      if (!m_I0.contains(i2)) {
        if (m_I3.contains(i2) || m_I4.contains(i2)) {
          if (m_errors[i2] > m_bLow) {
            m_bLow = m_errors[i2];
            m_iLow = i2;
          }
        } else {
          if (m_errors[i2] < m_bUp) {
            m_bUp = m_errors[i2];
            m_iUp = i2;
          }
        }
      }
      if ((m_iLow == -1) || (m_iUp == -1)) {
        throw new Exception("This should never happen!");
      }

      // Made some progress.
      return true;
    }

    /**
     * Quick and dirty check whether the quadratic programming problem is solved.
*
     * @throws Exception if checking fails
     */
    protected void checkClassifier() throws Exception {

      // Equality constraint: sum_i y(i) * alpha(i) should be (close to) zero
      double sum = 0;
      for (int i = 0; i < m_alpha.length; i++) {
        if (m_alpha[i] > 0) {
          sum += m_class[i] * m_alpha[i];
        }
      }
      System.err.println("Sum of y(i) * alpha(i): " + sum);

      // Report (to stderr only) any violated KKT condition; this is a
      // diagnostic aid and does not alter the model
      for (int i = 0; i < m_alpha.length; i++) {
        double output = SVMOutput(i, m_data.instance(i));
        if (Utils.eq(m_alpha[i], 0)) {
          if (Utils.sm(m_class[i] * output, 1)) {
            System.err.println("KKT condition 1 violated: " + m_class[i] * output);
          }
        }
        if (Utils.gr(m_alpha[i], 0) &&
            Utils.sm(m_alpha[i], m_C * m_data.instance(i).weight())) {
          if (!Utils.eq(m_class[i] * output, 1)) {
            System.err.println("KKT condition 2 violated: " + m_class[i] * output);
          }
        }
        if (Utils.eq(m_alpha[i], m_C * m_data.instance(i).weight())) {
          if (Utils.gr(m_class[i] * output, 1)) {
            System.err.println("KKT condition 3 violated: " + m_class[i] * output);
          }
        }
      }
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 8034 $");
    }
  }

  /** filter: Normalize training data */
  public static final int FILTER_NORMALIZE = 0;
  /** filter: Standardize training data */
  public static final int FILTER_STANDARDIZE = 1;
  /** filter: No normalization/standardization */
  public static final int FILTER_NONE = 2;

  /** The filter to apply to the training data */
  public static final Tag [] TAGS_FILTER = {
    new Tag(FILTER_NORMALIZE, "Normalize training data"),
    new Tag(FILTER_STANDARDIZE, "Standardize training data"),
    new Tag(FILTER_NONE, "No normalization/standardization"),
  };

  /** The binary classifier(s) */
  protected BinarySMO[][] m_classifiers = null;

  /** The complexity parameter. */
  protected double m_C = 1.0;

  /** Epsilon for rounding. */
  protected double m_eps = 1.0e-12;

  /** Tolerance for accuracy of result. */
  protected double m_tol = 1.0e-3;

  /** Whether to normalize/standardize/neither */
  protected int m_filterType = FILTER_NORMALIZE;

  /** The filter used to make attributes numeric.
*/ protected NominalToBinary m_NominalToBinary; /** The filter used to standardize/normalize all values. */ protected Filter m_Filter = null; /** The filter used to get rid of missing values. */ protected ReplaceMissingValues m_Missing; /** The class index from the training data */ protected int m_classIndex = -1; /** The class attribute */ protected Attribute m_classAttribute; /** whether the kernel is a linear one */ protected boolean m_KernelIsLinear = false; /** Turn off all checks and conversions? Turning them off assumes that data is purely numeric, doesn't contain any missing values, and has a nominal class. Turning them off also means that no header information will be stored if the machine is linear. Finally, it also assumes that no instance has a weight equal to 0.*/ protected boolean m_checksTurnedOff; /** Precision constant for updating sets */ protected static double m_Del = 1000 * Double.MIN_VALUE; /** Whether logistic models are to be fit */ protected boolean m_fitLogisticModels = false; /** The number of folds for the internal cross-validation */ protected int m_numFolds = -1; /** The random number seed */ protected int m_randomSeed = 1; /** the kernel to use */ protected Kernel m_kernel = new PolyKernel(); /** * Turns off checks for missing values, etc. Use with caution. */ public void turnChecksOff() { m_checksTurnedOff = true; } /** * Turns on checks for missing values, etc. */ public void turnChecksOn() { m_checksTurnedOff = false; } /** * Returns default capabilities of the classifier. 
 *
 * @return the capabilities of this classifier
 */
public Capabilities getCapabilities() {
  // Capabilities start from the kernel's own capabilities, since the
  // kernel determines what attribute types can be handled at all.
  Capabilities result = getKernel().getCapabilities();
  result.setOwner(this);

  // attribute
  result.enableAllAttributeDependencies();
  // with NominalToBinary we can also handle nominal attributes, but only
  // if the kernel can handle numeric attributes
  if (result.handles(Capability.NUMERIC_ATTRIBUTES))
    result.enable(Capability.NOMINAL_ATTRIBUTES);
  result.enable(Capability.MISSING_VALUES);

  // class
  result.disableAllClasses();
  result.disableAllClassDependencies();
  result.enable(Capability.NOMINAL_CLASS);
  result.enable(Capability.MISSING_CLASS_VALUES);

  return result;
}

/**
 * Method for building the classifier. Implements a one-against-one
 * wrapper for multi-class problems.
 *
 * @param insts the set of training instances
 * @throws Exception if the classifier can't be built successfully
 */
public void buildClassifier(Instances insts) throws Exception {

  if (!m_checksTurnedOff) {
    // can classifier handle the data?
    getCapabilities().testWithFail(insts);

    // remove instances with missing class
    insts = new Instances(insts);
    insts.deleteWithMissingClass();

    /* Removes all the instances with weight equal to 0.
       MUST be done since condition (8) of Keerthi's paper
       is made with the assertion Ci > 0 (See equation (3a).
    */
    Instances data = new Instances(insts, insts.numInstances());
    for (int i = 0; i < insts.numInstances(); i++) {
      if (insts.instance(i).weight() > 0)
        data.add(insts.instance(i));
    }
    if (data.numInstances() == 0) {
      throw new Exception("No training instances left after removing " +
                          "instances with weight 0!");
    }
    insts = data;
  }

  // Replace missing values (skipped entirely when checks are off; the
  // caller then guarantees there are none).
  if (!m_checksTurnedOff) {
    m_Missing = new ReplaceMissingValues();
    m_Missing.setInputFormat(insts);
    insts = Filter.useFilter(insts, m_Missing);
  } else {
    m_Missing = null;
  }

  // Convert nominal attributes to binary ones, but only when the kernel
  // needs numeric input and the data actually contains nominal attributes.
  if (getCapabilities().handles(Capability.NUMERIC_ATTRIBUTES)) {
    boolean onlyNumeric = true;
    if (!m_checksTurnedOff) {
      for (int i = 0; i < insts.numAttributes(); i++) {
        if (i != insts.classIndex()) {
          if (!insts.attribute(i).isNumeric()) {
            onlyNumeric = false;
            break;
          }
        }
      }
    }

    if (!onlyNumeric) {
      m_NominalToBinary = new NominalToBinary();
      m_NominalToBinary.setInputFormat(insts);
      insts = Filter.useFilter(insts, m_NominalToBinary);
    } else {
      m_NominalToBinary = null;
    }
  } else {
    m_NominalToBinary = null;
  }

  // Normalize / standardize / leave untouched, per m_filterType.
  if (m_filterType == FILTER_STANDARDIZE) {
    m_Filter = new Standardize();
    m_Filter.setInputFormat(insts);
    insts = Filter.useFilter(insts, m_Filter);
  } else if (m_filterType == FILTER_NORMALIZE) {
    m_Filter = new Normalize();
    m_Filter.setInputFormat(insts);
    insts = Filter.useFilter(insts, m_Filter);
  } else {
    m_Filter = null;
  }

  m_classIndex = insts.classIndex();
  m_classAttribute = insts.classAttribute();
  // Linear machine <=> polynomial kernel with exponent exactly 1.0.
  m_KernelIsLinear = (m_kernel instanceof PolyKernel) &&
    (((PolyKernel) m_kernel).getExponent() == 1.0);

  // Generate subsets representing each class
  Instances[] subsets = new Instances[insts.numClasses()];
  for (int i = 0; i < insts.numClasses(); i++) {
    subsets[i] = new Instances(insts, insts.numInstances());
  }
  for (int j = 0; j < insts.numInstances(); j++) {
    Instance inst = insts.instance(j);
    subsets[(int)inst.classValue()].add(inst);
  }
  for (int i = 0; i < insts.numClasses(); i++) {
    subsets[i].compactify();
  }

  // Build the binary classifiers: one BinarySMO per class pair (i, j),
  // trained only on the instances of those two classes.
  Random rand = new Random(m_randomSeed);
  m_classifiers = new BinarySMO[insts.numClasses()][insts.numClasses()];
  for (int i = 0; i < insts.numClasses(); i++) {
    for (int j = i + 1; j < insts.numClasses(); j++) {
      m_classifiers[i][j] = new BinarySMO();
      m_classifiers[i][j].setKernel(Kernel.makeCopy(getKernel()));
      Instances data = new Instances(insts, insts.numInstances());
      for (int k = 0; k < subsets[i].numInstances(); k++) {
        data.add(subsets[i].instance(k));
      }
      for (int k = 0; k < subsets[j].numInstances(); k++) {
        data.add(subsets[j].instance(k));
      }
      data.compactify();
      data.randomize(rand);
      m_classifiers[i][j].buildClassifier(data, i, j,
                                          m_fitLogisticModels,
                                          m_numFolds, m_randomSeed);
    }
  }
}

/**
 * Estimates class probabilities for given instance. Without logistic
 * models these are normalized pairwise votes; with logistic models the
 * calibrated outputs are combined via pairwise coupling.
 *
 * @param inst the instance to compute the probabilities for
 * @throws Exception in case of an error
 */
public double[] distributionForInstance(Instance inst) throws Exception {

  // Filter instance through the same filters that were fit at training time.
  if (!m_checksTurnedOff) {
    m_Missing.input(inst);
    m_Missing.batchFinished();
    inst = m_Missing.output();
  }

  if (m_NominalToBinary != null) {
    m_NominalToBinary.input(inst);
    m_NominalToBinary.batchFinished();
    inst = m_NominalToBinary.output();
  }

  if (m_Filter != null) {
    m_Filter.input(inst);
    m_Filter.batchFinished();
    inst = m_Filter.output();
  }

  if (!m_fitLogisticModels) {
    // Simple voting: each pairwise machine votes for one of its two
    // classes; votes are then normalized to a distribution.
    double[] result = new double[inst.numClasses()];
    for (int i = 0; i < inst.numClasses(); i++) {
      for (int j = i + 1; j < inst.numClasses(); j++) {
        // Skip pairwise machines that were never trained (no model state).
        if ((m_classifiers[i][j].m_alpha != null) ||
            (m_classifiers[i][j].m_sparseWeights != null)) {
          double output = m_classifiers[i][j].SVMOutput(-1, inst);
          if (output > 0) {
            result[j] += 1;
          } else {
            result[i] += 1;
          }
        }
      }
    }
    // NOTE(review): if no pairwise machine was trained, result is all
    // zeros and normalize will fail — presumably unreachable after a
    // successful buildClassifier; confirm.
    Utils.normalize(result);
    return result;
  } else {

    // We only need to do pairwise coupling if there are more
    // then two classes.
    if (inst.numClasses() == 2) {
      double[] newInst = new double[2];
      newInst[0] = m_classifiers[0][1].SVMOutput(-1, inst);
      newInst[1] = Utils.missingValue();
      return m_classifiers[0][1].m_logistic.
        distributionForInstance(new DenseInstance(1, newInst));
    }
    double[][] r = new double[inst.numClasses()][inst.numClasses()];
    double[][] n = new double[inst.numClasses()][inst.numClasses()];
    for (int i = 0; i < inst.numClasses(); i++) {
      for (int j = i + 1; j < inst.numClasses(); j++) {
        if ((m_classifiers[i][j].m_alpha != null) ||
            (m_classifiers[i][j].m_sparseWeights != null)) {
          double[] newInst = new double[2];
          newInst[0] = m_classifiers[i][j].SVMOutput(-1, inst);
          newInst[1] = Utils.missingValue();
          // r[i][j]: calibrated probability for class i from pair (i, j);
          // n[i][j]: weight of the pair's training data.
          r[i][j] = m_classifiers[i][j].m_logistic.
            distributionForInstance(new DenseInstance(1, newInst))[0];
          n[i][j] = m_classifiers[i][j].m_sumOfWeights;
        }
      }
    }
    return weka.classifiers.meta.MultiClassClassifier.pairwiseCoupling(n, r);
  }
}

/**
 * Returns an array of votes for the given instance.
 * @param inst the instance
 * @return array of votex
 * @throws Exception if something goes wrong
 */
public int[] obtainVotes(Instance inst) throws Exception {

  // Filter instance (same pipeline as distributionForInstance).
  if (!m_checksTurnedOff) {
    m_Missing.input(inst);
    m_Missing.batchFinished();
    inst = m_Missing.output();
  }

  if (m_NominalToBinary != null) {
    m_NominalToBinary.input(inst);
    m_NominalToBinary.batchFinished();
    inst = m_NominalToBinary.output();
  }

  if (m_Filter != null) {
    m_Filter.input(inst);
    m_Filter.batchFinished();
    inst = m_Filter.output();
  }

  int[] votes = new int[inst.numClasses()];
  for (int i = 0; i < inst.numClasses(); i++) {
    for (int j = i + 1; j < inst.numClasses(); j++) {
      double output = m_classifiers[i][j].SVMOutput(-1, inst);
      if (output > 0) {
        votes[j] += 1;
      } else {
        votes[i] += 1;
      }
    }
  }
  return votes;
}

/**
 * Returns the weights in sparse format.
 */
public double [][][] sparseWeights() {

  int numValues = m_classAttribute.numValues();
  double [][][] sparseWeights = new double[numValues][numValues][];

  for (int i = 0; i < numValues; i++) {
    for (int j = i + 1; j < numValues; j++) {
      sparseWeights[i][j] = m_classifiers[i][j].m_sparseWeights;
    }
  }

  return sparseWeights;
}

/**
 * Returns the indices in sparse format.
*/ public int [][][] sparseIndices() { int numValues = m_classAttribute.numValues(); int [][][] sparseIndices = new int[numValues][numValues][]; for (int i = 0; i < numValues; i++) { for (int j = i + 1; j < numValues; j++) { sparseIndices[i][j] = m_classifiers[i][j].m_sparseIndices; } } return sparseIndices; } /** * Returns the bias of each binary SMO. */ public double [][] bias() { int numValues = m_classAttribute.numValues(); double [][] bias = new double[numValues][numValues]; for (int i = 0; i < numValues; i++) { for (int j = i + 1; j < numValues; j++) { bias[i][j] = m_classifiers[i][j].m_b; } } return bias; } /* * Returns the number of values of the class attribute. */ public int numClassAttributeValues() { return m_classAttribute.numValues(); } /* * Returns the names of the class attributes. */ public String [] classAttributeNames() { int numValues = m_classAttribute.numValues(); String [] classAttributeNames = new String[numValues]; for (int i = 0; i < numValues; i++) { classAttributeNames[i] = m_classAttribute.value(i); } return classAttributeNames; } /** * Returns the attribute names. */ public String [][][] attributeNames() { int numValues = m_classAttribute.numValues(); String [][][] attributeNames = new String[numValues][numValues][]; for (int i = 0; i < numValues; i++) { for (int j = i + 1; j < numValues; j++) { // int numAttributes = m_classifiers[i][j].m_data.numAttributes(); int numAttributes = m_classifiers[i][j].m_sparseIndices.length; String [] attrNames = new String[numAttributes]; for (int k = 0; k < numAttributes; k++) { attrNames[k] = m_classifiers[i][j]. m_data.attribute(m_classifiers[i][j].m_sparseIndices[k]).name(); } attributeNames[i][j] = attrNames; } } return attributeNames; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector result = new Vector(); Enumeration enm = super.listOptions(); while (enm.hasMoreElements()) result.addElement(enm.nextElement()); result.addElement(new Option( "\tTurns off all checks - use with caution!\n" + "\tTurning them off assumes that data is purely numeric, doesn't\n" + "\tcontain any missing values, and has a nominal class. Turning them\n" + "\toff also means that no header information will be stored if the\n" + "\tmachine is linear. Finally, it also assumes that no instance has\n" + "\ta weight equal to 0.\n" + "\t(default: checks on)", "no-checks", 0, "-no-checks")); result.addElement(new Option( "\tThe complexity constant C. (default 1)", "C", 1, "-C <double>")); result.addElement(new Option( "\tWhether to 0=normalize/1=standardize/2=neither. " + "(default 0=normalize)", "N", 1, "-N")); result.addElement(new Option( "\tThe tolerance parameter. " + "(default 1.0e-3)", "L", 1, "-L <double>")); result.addElement(new Option( "\tThe epsilon for round-off error. " + "(default 1.0e-12)", "P", 1, "-P <double>")); result.addElement(new Option( "\tFit logistic models to SVM outputs. ", "M", 0, "-M")); result.addElement(new Option( "\tThe number of folds for the internal\n" + "\tcross-validation. " + "(default -1, use training data)", "V", 1, "-V <double>")); result.addElement(new Option( "\tThe random number seed. " + "(default 1)", "W", 1, "-W <double>")); result.addElement(new Option( "\tThe Kernel to use.\n" + "\t(default: weka.classifiers.functions.supportVector.PolyKernel)", "K", 1, "-K <classname and parameters>")); result.addElement(new Option( "", "", 0, "\nOptions specific to kernel " + getKernel().getClass().getName() + ":")); enm = ((OptionHandler) getKernel()).listOptions(); while (enm.hasMoreElements()) result.addElement(enm.nextElement()); return result.elements(); } /** * Parses a given list of options. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -no-checks * Turns off all checks - use with caution! * Turning them off assumes that data is purely numeric, doesn't * contain any missing values, and has a nominal class. Turning them * off also means that no header information will be stored if the * machine is linear. Finally, it also assumes that no instance has * a weight equal to 0. * (default: checks on)</pre> * * <pre> -C &lt;double&gt; * The complexity constant C. (default 1)</pre> * * <pre> -N * Whether to 0=normalize/1=standardize/2=neither. (default 0=normalize)</pre> * * <pre> -L &lt;double&gt; * The tolerance parameter. (default 1.0e-3)</pre> * * <pre> -P &lt;double&gt; * The epsilon for round-off error. (default 1.0e-12)</pre> * * <pre> -M * Fit logistic models to SVM outputs. </pre> * * <pre> -V &lt;double&gt; * The number of folds for the internal * cross-validation. (default -1, use training data)</pre> * * <pre> -W &lt;double&gt; * The random number seed. (default 1)</pre> * * <pre> -K &lt;classname and parameters&gt; * The Kernel to use. * (default: weka.classifiers.functions.supportVector.PolyKernel)</pre> * * <pre> * Options specific to kernel weka.classifiers.functions.supportVector.PolyKernel: * </pre> * * <pre> -D * Enables debugging output (if available) to be printed. * (default: off)</pre> * * <pre> -no-checks * Turns off all checks - use with caution! * (default: checks on)</pre> * * <pre> -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007)</pre> * * <pre> -E &lt;num&gt; * The Exponent to use. * (default: 1.0)</pre> * * <pre> -L * Use lower-order terms. 
* (default: no)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; String[] tmpOptions; setChecksTurnedOff(Utils.getFlag("no-checks", options)); tmpStr = Utils.getOption('C', options); if (tmpStr.length() != 0) setC(Double.parseDouble(tmpStr)); else setC(1.0); tmpStr = Utils.getOption('L', options); if (tmpStr.length() != 0) setToleranceParameter(Double.parseDouble(tmpStr)); else setToleranceParameter(1.0e-3); tmpStr = Utils.getOption('P', options); if (tmpStr.length() != 0) setEpsilon(Double.parseDouble(tmpStr)); else setEpsilon(1.0e-12); tmpStr = Utils.getOption('N', options); if (tmpStr.length() != 0) setFilterType(new SelectedTag(Integer.parseInt(tmpStr), TAGS_FILTER)); else setFilterType(new SelectedTag(FILTER_NORMALIZE, TAGS_FILTER)); setBuildLogisticModels(Utils.getFlag('M', options)); tmpStr = Utils.getOption('V', options); if (tmpStr.length() != 0) setNumFolds(Integer.parseInt(tmpStr)); else setNumFolds(-1); tmpStr = Utils.getOption('W', options); if (tmpStr.length() != 0) setRandomSeed(Integer.parseInt(tmpStr)); else setRandomSeed(1); tmpStr = Utils.getOption('K', options); tmpOptions = Utils.splitOptions(tmpStr); if (tmpOptions.length != 0) { tmpStr = tmpOptions[0]; tmpOptions[0] = ""; setKernel(Kernel.forName(tmpStr, tmpOptions)); } super.setOptions(options); } /** * Gets the current settings of the classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { int i; Vector result; String[] options; result = new Vector(); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); if (getChecksTurnedOff()) result.add("-no-checks"); result.add("-C"); result.add("" + getC()); result.add("-L"); result.add("" + getToleranceParameter()); result.add("-P"); result.add("" + getEpsilon()); result.add("-N"); result.add("" + m_filterType); if (getBuildLogisticModels()) result.add("-M"); result.add("-V"); result.add("" + getNumFolds()); result.add("-W"); result.add("" + getRandomSeed()); result.add("-K"); result.add("" + getKernel().getClass().getName() + " " + Utils.joinOptions(getKernel().getOptions())); return (String[]) result.toArray(new String[result.size()]); } /** * Disables or enables the checks (which could be time-consuming). Use with * caution! * * @param value if true turns off all checks */ public void setChecksTurnedOff(boolean value) { if (value) turnChecksOff(); else turnChecksOn(); } /** * Returns whether the checks are turned off or not. 
 *
 * @return true if the checks are turned off
 */
public boolean getChecksTurnedOff() {
  return m_checksTurnedOff;
}

/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String checksTurnedOffTipText() {
  return "Turns time-consuming checks off - use with caution.";
}

/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String kernelTipText() {
  return "The kernel to use.";
}

/**
 * sets the kernel to use
 *
 * @param value the kernel to use
 */
public void setKernel(Kernel value) {
  m_kernel = value;
}

/**
 * Returns the kernel to use
 *
 * @return the current kernel
 */
public Kernel getKernel() {
  return m_kernel;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String cTipText() {
  return "The complexity parameter C.";
}

/**
 * Get the value of C.
 *
 * @return Value of C.
 */
public double getC() {
  return m_C;
}

/**
 * Set the value of C.
 *
 * @param v Value to assign to C.
 */
public void setC(double v) {
  m_C = v;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String toleranceParameterTipText() {
  return "The tolerance parameter (shouldn't be changed).";
}

/**
 * Get the value of tolerance parameter.
 * @return Value of tolerance parameter.
 */
public double getToleranceParameter() {
  return m_tol;
}

/**
 * Set the value of tolerance parameter.
 * @param v Value to assign to tolerance parameter.
 */
public void setToleranceParameter(double v) {
  m_tol = v;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String epsilonTipText() {
  return "The epsilon for round-off error (shouldn't be changed).";
}

/**
 * Get the value of epsilon.
 * @return Value of epsilon.
 */
public double getEpsilon() {
  return m_eps;
}

/**
 * Set the value of epsilon.
 * @param v Value to assign to epsilon.
 */
public void setEpsilon(double v) {
  m_eps = v;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String filterTypeTipText() {
  return "Determines how/if the data will be transformed.";
}

/**
 * Gets how the training data will be transformed. Will be one of
 * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE.
 *
 * @return the filtering mode
 */
public SelectedTag getFilterType() {
  return new SelectedTag(m_filterType, TAGS_FILTER);
}

/**
 * Sets how the training data will be transformed. Should be one of
 * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE.
 *
 * @param newType the new filtering mode
 */
public void setFilterType(SelectedTag newType) {
  // Silently ignores tags from a foreign tag set.
  if (newType.getTags() == TAGS_FILTER) {
    m_filterType = newType.getSelectedTag().getID();
  }
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String buildLogisticModelsTipText() {
  return "Whether to fit logistic models to the outputs (for proper "
    + "probability estimates).";
}

/**
 * Get the value of buildLogisticModels.
 *
 * @return Value of buildLogisticModels.
 */
public boolean getBuildLogisticModels() {
  return m_fitLogisticModels;
}

/**
 * Set the value of buildLogisticModels.
 *
 * @param newbuildLogisticModels Value to assign to buildLogisticModels.
 */
public void setBuildLogisticModels(boolean newbuildLogisticModels) {
  m_fitLogisticModels = newbuildLogisticModels;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String numFoldsTipText() {
  return "The number of folds for cross-validation used to generate "
    + "training data for logistic models (-1 means use training data).";
}

/**
 * Get the value of numFolds.
 *
 * @return Value of numFolds.
 */
public int getNumFolds() {
  return m_numFolds;
}

/**
 * Set the value of numFolds.
 *
 * @param newnumFolds Value to assign to numFolds.
 */
public void setNumFolds(int newnumFolds) {
  m_numFolds = newnumFolds;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String randomSeedTipText() {
  return "Random number seed for the cross-validation.";
}

/**
 * Get the value of randomSeed.
 *
 * @return Value of randomSeed.
 */
public int getRandomSeed() {
  return m_randomSeed;
}

/**
 * Set the value of randomSeed.
 *
 * @param newrandomSeed Value to assign to randomSeed.
 */
public void setRandomSeed(int newrandomSeed) {
  m_randomSeed = newrandomSeed;
}

/**
 * Prints out the classifier.
* * @return a description of the classifier as a string */ public String toString() { StringBuffer text = new StringBuffer(); if ((m_classAttribute == null)) { return "SMO: No model built yet."; } try { text.append("SMO\n\n"); text.append("Kernel used:\n " + m_kernel.toString() + "\n\n"); for (int i = 0; i < m_classAttribute.numValues(); i++) { for (int j = i + 1; j < m_classAttribute.numValues(); j++) { text.append("Classifier for classes: " + m_classAttribute.value(i) + ", " + m_classAttribute.value(j) + "\n\n"); text.append(m_classifiers[i][j]); if (m_fitLogisticModels) { text.append("\n\n"); if ( m_classifiers[i][j].m_logistic == null) { text.append("No logistic model has been fit.\n"); } else { text.append(m_classifiers[i][j].m_logistic); } } text.append("\n\n"); } } } catch (Exception e) { return "Can't print SMO classifier."; } return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method for testing this class. */ public static void main(String[] argv) { runClassifier(new SMO(), argv); } }
62,792
28.260485
319
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/SMOreg.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SMOreg.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.functions.supportVector.Kernel; import weka.classifiers.functions.supportVector.PolyKernel; import weka.classifiers.functions.supportVector.RegOptimizer; import weka.classifiers.functions.supportVector.RegSMOImproved; import weka.core.AdditionalMeasureProducer; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import weka.filters.unsupervised.attribute.Standardize; /** <!-- globalinfo-start --> * SMOreg implements the support vector 
machine for regression. The parameters can be learned using various algorithms. The algorithm is selected by setting the RegOptimizer. The most popular algorithm (RegSMOImproved) is due to Shevade, Keerthi et al and this is the default RegOptimizer.<br/> * <br/> * For more information see:<br/> * <br/> * S.K. Shevade, S.S. Keerthi, C. Bhattacharyya, K.R.K. Murthy: Improvements to the SMO Algorithm for SVM Regression. In: IEEE Transactions on Neural Networks, 1999.<br/> * <br/> * A.J. Smola, B. Schoelkopf (1998). A tutorial on support vector regression. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Shevade1999, * author = {S.K. Shevade and S.S. Keerthi and C. Bhattacharyya and K.R.K. Murthy}, * booktitle = {IEEE Transactions on Neural Networks}, * title = {Improvements to the SMO Algorithm for SVM Regression}, * year = {1999}, * PS = {http://guppy.mpe.nus.edu.sg/\~mpessk/svm/ieee_smo_reg.ps.gz} * } * * &#64;techreport{Smola1998, * author = {A.J. Smola and B. Schoelkopf}, * note = {NeuroCOLT2 Technical Report NC2-TR-1998-030}, * title = {A tutorial on support vector regression}, * year = {1998} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -C &lt;double&gt; * The complexity constant C. * (default 1)</pre> * * <pre> -N * Whether to 0=normalize/1=standardize/2=neither. * (default 0=normalize)</pre> * * <pre> -I &lt;classname and parameters&gt; * Optimizer class used for solving quadratic optimization problem * (default weka.classifiers.functions.supportVector.RegSMOImproved)</pre> * * <pre> -K &lt;classname and parameters&gt; * The Kernel to use. * (default: weka.classifiers.functions.supportVector.PolyKernel)</pre> * * <pre> * Options specific to optimizer ('-I') weka.classifiers.functions.supportVector.RegSMOImproved: * </pre> * * <pre> -T &lt;double&gt; * The tolerance parameter for checking the stopping criterion. 
* (default 0.001)</pre> * * <pre> -V * Use variant 1 of the algorithm when true, otherwise use variant 2. * (default true)</pre> * * <pre> -P &lt;double&gt; * The epsilon for round-off error. * (default 1.0e-12)</pre> * * <pre> -L &lt;double&gt; * The epsilon parameter in epsilon-insensitive loss function. * (default 1.0e-3)</pre> * * <pre> -W &lt;double&gt; * The random number seed. * (default 1)</pre> * * <pre> * Options specific to kernel ('-K') weka.classifiers.functions.supportVector.PolyKernel: * </pre> * * <pre> -D * Enables debugging output (if available) to be printed. * (default: off)</pre> * * <pre> -no-checks * Turns off all checks - use with caution! * (default: checks on)</pre> * * <pre> -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007)</pre> * * <pre> -E &lt;num&gt; * The Exponent to use. * (default: 1.0)</pre> * * <pre> -L * Use lower-order terms. * (default: no)</pre> * <!-- options-end --> * * @author Remco Bouckaert (remco@cs.waikato.ac.nz,rrb@xm.co.nz) * @version $Revision: 8123 $ */ public class SMOreg extends AbstractClassifier implements WeightedInstancesHandler, AdditionalMeasureProducer, TechnicalInformationHandler { /** for serialization */ private static final long serialVersionUID = -7149606251113102827L; /** The filter to apply to the training data: Normalzie */ public static final int FILTER_NORMALIZE = 0; /** The filter to apply to the training data: Standardize */ public static final int FILTER_STANDARDIZE = 1; /** The filter to apply to the training data: None */ public static final int FILTER_NONE = 2; /** The filter to apply to the training data */ public static final Tag[] TAGS_FILTER = { new Tag(FILTER_NORMALIZE, "Normalize training data"), new Tag(FILTER_STANDARDIZE, "Standardize training data"), new Tag(FILTER_NONE, "No normalization/standardization"), }; /** Whether to normalize/standardize/neither */ protected int m_filterType = FILTER_NORMALIZE; /** The 
filter used to make attributes numeric. */
protected NominalToBinary m_NominalToBinary;

/** The filter used to standardize/normalize all values. */
protected Filter m_Filter = null;

/** The filter used to get rid of missing values. */
protected ReplaceMissingValues m_Missing;

/** Only numeric attributes in the dataset? If so, less need to filter */
protected boolean m_onlyNumeric;

/** capacity parameter **/
protected double m_C = 1.0;

/** coefficients used by normalization filter for doing its linear transformation
 * so that result = svmoutput * m_x1 + m_x0 **/
protected double m_x1 = 1.0;
protected double m_x0 = 0.0;

/** contains the algorithm used for learning **/
protected RegOptimizer m_optimizer = new RegSMOImproved();

/** the configured kernel */
protected Kernel m_kernel = new PolyKernel();

/**
 * Returns a string describing classifier
 *
 * @return a description suitable for
 * displaying in the explorer/experimenter gui
 */
public String globalInfo() {
  return
      "SMOreg implements the support vector machine for regression. "
    + "The parameters can be learned using various algorithms. The "
    + "algorithm is selected by setting the RegOptimizer. The most "
    + "popular algorithm ("
    + RegSMOImproved.class.getName().replaceAll(".*\\.", "")
    + ") is due to Shevade, Keerthi "
    + "et al and this is the default RegOptimizer.\n\n"
    + "For more information see:\n\n"
    + getTechnicalInformation().toString();
}

/**
 * Returns an instance of a TechnicalInformation object, containing
 * detailed information about the technical background of this class,
 * e.g., paper reference or book this class is based on.
 *
 * @return the technical information about this class
 */
public TechnicalInformation getTechnicalInformation() {
  TechnicalInformation result;
  TechnicalInformation additional;

  // Primary reference: the improved SMO regression algorithm.
  result = new TechnicalInformation(Type.INPROCEEDINGS);
  result.setValue(Field.AUTHOR, "S.K. Shevade and S.S. Keerthi and C. Bhattacharyya and K.R.K. Murthy");
  result.setValue(Field.TITLE, "Improvements to the SMO Algorithm for SVM Regression");
  result.setValue(Field.BOOKTITLE, "IEEE Transactions on Neural Networks");
  result.setValue(Field.YEAR, "1999");
  result.setValue(Field.PS, "http://guppy.mpe.nus.edu.sg/~mpessk/svm/ieee_smo_reg.ps.gz");

  // Secondary reference: general SVR tutorial.
  additional = result.add(Type.TECHREPORT);
  additional.setValue(Field.AUTHOR, "A.J. Smola and B. Schoelkopf");
  additional.setValue(Field.TITLE, "A tutorial on support vector regression");
  additional.setValue(Field.NOTE, "NeuroCOLT2 Technical Report NC2-TR-1998-030");
  additional.setValue(Field.YEAR, "1998");

  return result;
}

/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
public Enumeration listOptions() {
  Enumeration enm;
  Vector result = new Vector();

  result.addElement(new Option(
      "\tThe complexity constant C.\n"
      + "\t(default 1)",
      "C", 1, "-C <double>"));

  result.addElement(new Option(
      "\tWhether to 0=normalize/1=standardize/2=neither.\n"
      + "\t(default 0=normalize)",
      "N", 1, "-N"));

  result.addElement(new Option(
      "\tOptimizer class used for solving quadratic optimization problem\n"
      + "\t(default " + RegSMOImproved.class.getName() + ")",
      "I", 1, "-I <classname and parameters>"));

  result.addElement(new Option(
      "\tThe Kernel to use.\n"
      + "\t(default: weka.classifiers.functions.supportVector.PolyKernel)",
      "K", 1, "-K <classname and parameters>"));

  // Append the options of the currently configured optimizer.
  result.addElement(new Option(
      "", "", 0, "\nOptions specific to optimizer ('-I') "
      + getRegOptimizer().getClass().getName() + ":"));
  enm = ((OptionHandler) getRegOptimizer()).listOptions();
  while (enm.hasMoreElements())
    result.addElement(enm.nextElement());

  // Append the options of the currently configured kernel.
  result.addElement(new Option(
      "", "", 0, "\nOptions specific to kernel ('-K') "
      + getKernel().getClass().getName() + ":"));
  enm = ((OptionHandler) getKernel()).listOptions();
  while (enm.hasMoreElements())
    result.addElement(enm.nextElement());

  return result.elements();
}

/**
 * Parses a given list of options.
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -C &lt;double&gt; * The complexity constant C. * (default 1)</pre> * * <pre> -N * Whether to 0=normalize/1=standardize/2=neither. * (default 0=normalize)</pre> * * <pre> -I &lt;classname and parameters&gt; * Optimizer class used for solving quadratic optimization problem * (default weka.classifiers.functions.supportVector.RegSMOImproved)</pre> * * <pre> -K &lt;classname and parameters&gt; * The Kernel to use. * (default: weka.classifiers.functions.supportVector.PolyKernel)</pre> * * <pre> * Options specific to optimizer ('-I') weka.classifiers.functions.supportVector.RegSMOImproved: * </pre> * * <pre> -T &lt;double&gt; * The tolerance parameter for checking the stopping criterion. * (default 0.001)</pre> * * <pre> -V * Use variant 1 of the algorithm when true, otherwise use variant 2. * (default true)</pre> * * <pre> -P &lt;double&gt; * The epsilon for round-off error. * (default 1.0e-12)</pre> * * <pre> -L &lt;double&gt; * The epsilon parameter in epsilon-insensitive loss function. * (default 1.0e-3)</pre> * * <pre> -W &lt;double&gt; * The random number seed. * (default 1)</pre> * * <pre> * Options specific to kernel ('-K') weka.classifiers.functions.supportVector.PolyKernel: * </pre> * * <pre> -D * Enables debugging output (if available) to be printed. * (default: off)</pre> * * <pre> -no-checks * Turns off all checks - use with caution! * (default: checks on)</pre> * * <pre> -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007)</pre> * * <pre> -E &lt;num&gt; * The Exponent to use. * (default: 1.0)</pre> * * <pre> -L * Use lower-order terms. 
* (default: no)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; String[] tmpOptions; tmpStr = Utils.getOption('C', options); if (tmpStr.length() != 0) { setC(Double.parseDouble(tmpStr)); } else { setC(1.0); } String nString = Utils.getOption('N', options); if (nString.length() != 0) { setFilterType(new SelectedTag(Integer.parseInt(nString), TAGS_FILTER)); } else { setFilterType(new SelectedTag(FILTER_NORMALIZE, TAGS_FILTER)); } tmpStr = Utils.getOption('I', options); tmpOptions = Utils.splitOptions(tmpStr); if (tmpOptions.length != 0) { tmpStr = tmpOptions[0]; tmpOptions[0] = ""; setRegOptimizer( (RegOptimizer) Utils.forName(RegOptimizer.class, tmpStr, tmpOptions)); } else { setRegOptimizer(new RegSMOImproved()); } tmpStr = Utils.getOption('K', options); tmpOptions = Utils.splitOptions(tmpStr); if (tmpOptions.length != 0) { tmpStr = tmpOptions[0]; tmpOptions[0] = ""; setKernel(Kernel.forName(tmpStr, tmpOptions)); } else { setKernel(new PolyKernel()); } } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { int i; Vector result; String[] options; result = new Vector(); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); result.add("-C"); result.add("" + getC()); result.add("-N"); result.add("" + m_filterType); result.add("-I"); result.add("" + getRegOptimizer().getClass().getName() + " " + Utils.joinOptions(getRegOptimizer().getOptions())); result.add("-K"); result.add("" + getKernel().getClass().getName() + " " + Utils.joinOptions(getKernel().getOptions())); return (String[]) result.toArray(new String[result.size()]); } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = getKernel().getCapabilities(); result.setOwner(this); // attribute result.enableAllAttributeDependencies(); // with NominalToBinary we can also handle nominal attributes, but only // if the kernel can handle numeric attributes if (result.handles(Capability.NUMERIC_ATTRIBUTES)) result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Method for building the classifier. * * @param instances the set of training instances * @throws Exception if the classifier can't be built successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); // Removes all the instances with weight equal to 0. // MUST be done since condition (8) of Keerthi's paper // is made with the assertion Ci > 0 (See equation (3a). 
Instances data = new Instances(instances, 0); for (int i = 0; i < instances.numInstances(); i++) { if (instances.instance(i).weight() > 0) { data.add(instances.instance(i)); } } if (data.numInstances() == 0) { throw new Exception("No training instances left after removing " + "instance with either a weight null or a missing class!"); } instances = data; m_onlyNumeric = true; for (int i = 0; i < instances.numAttributes(); i++) { if (i != instances.classIndex()) { if (!instances.attribute(i).isNumeric()) { m_onlyNumeric = false; break; } } } m_Missing = new ReplaceMissingValues(); m_Missing.setInputFormat(instances); instances = Filter.useFilter(instances, m_Missing); if (getCapabilities().handles(Capability.NUMERIC_ATTRIBUTES)) { if (!m_onlyNumeric) { m_NominalToBinary = new NominalToBinary(); m_NominalToBinary.setInputFormat(instances); instances = Filter.useFilter(instances, m_NominalToBinary); } else { m_NominalToBinary = null; } } else { m_NominalToBinary = null; } // retrieve two different class values used to determine filter transformation double y0 = instances.instance(0).classValue(); int index = 1; while (index < instances.numInstances() && instances.instance(index).classValue() == y0) { index++; } if (index == instances.numInstances()) { // degenerate case, all class values are equal // we don't want to deal with this, too much hassle throw new Exception("All class values are the same. 
At least two class values should be different"); } double y1 = instances.instance(index).classValue(); // apply filters if (m_filterType == FILTER_STANDARDIZE) { m_Filter = new Standardize(); ((Standardize)m_Filter).setIgnoreClass(true); m_Filter.setInputFormat(instances); instances = Filter.useFilter(instances, m_Filter); } else if (m_filterType == FILTER_NORMALIZE) { m_Filter = new Normalize(); ((Normalize)m_Filter).setIgnoreClass(true); m_Filter.setInputFormat(instances); instances = Filter.useFilter(instances, m_Filter); } else { m_Filter = null; } if (m_Filter != null) { double z0 = instances.instance(0).classValue(); double z1 = instances.instance(index).classValue(); m_x1 = (y0-y1) / (z0 - z1); // no division by zero, since y0 != y1 guaranteed => z0 != z1 ??? m_x0 = (y0 - m_x1 * z0); // = y1 - m_x1 * z1 } else { m_x1 = 1.0; m_x0 = 0.0; } m_optimizer.setSMOReg(this); m_optimizer.buildClassifier(instances); } /** * Classifies the given instance using the linear regression function. 
* * @param instance the test instance * @return the classification * @throws Exception if classification can't be done successfully */ public double classifyInstance(Instance instance) throws Exception { // Filter instance m_Missing.input(instance); m_Missing.batchFinished(); instance = m_Missing.output(); if (!m_onlyNumeric && m_NominalToBinary != null) { m_NominalToBinary.input(instance); m_NominalToBinary.batchFinished(); instance = m_NominalToBinary.output(); } if (m_Filter != null) { m_Filter.input(instance); m_Filter.batchFinished(); instance = m_Filter.output(); } double result = m_optimizer.SVMOutput(instance); return result * m_x1 + m_x0; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String regOptimizerTipText() { return "The learning algorithm."; } /** * sets the learning algorithm * * @param regOptimizer the learning algorithm */ public void setRegOptimizer(RegOptimizer regOptimizer) { m_optimizer = regOptimizer; } /** * returns the learning algorithm * * @return the learning algorithm */ public RegOptimizer getRegOptimizer() { return m_optimizer; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String kernelTipText() { return "The kernel to use."; } /** * sets the kernel to use * * @param value the kernel to use */ public void setKernel(Kernel value) { m_kernel = value; } /** * Returns the kernel to use * * @return the current kernel */ public Kernel getKernel() { return m_kernel; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String cTipText() { return "The complexity parameter C."; } /** * Get the value of C. * * @return Value of C. */ public double getC() { return m_C; } /** * Set the value of C. * * @param v Value to assign to C. 
*/ public void setC(double v) { m_C = v; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String filterTypeTipText() { return "Determines how/if the data will be transformed."; } /** * Gets how the training data will be transformed. Will be one of * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE. * * @return the filtering mode */ public SelectedTag getFilterType() { return new SelectedTag(m_filterType, TAGS_FILTER); } /** * Sets how the training data will be transformed. Should be one of * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE. * * @param newType the new filtering mode */ public void setFilterType(SelectedTag newType) { if (newType.getTags() == TAGS_FILTER) { m_filterType = newType.getSelectedTag().getID(); } } /** * Prints out the classifier. * * @return a description of the classifier as a string */ public String toString() { StringBuffer text = new StringBuffer(); if (m_optimizer == null || !m_optimizer.modelBuilt()) { return "SMOreg: No model built yet."; } try { text.append(m_optimizer.toString()); } catch (Exception e) { return "Can't print SMVreg classifier."; } return text.toString(); } /** * Returns an enumeration of the measure names. Additional measures * must follow the naming convention of starting with "measure", eg. 
* double measureBlah() * * @return an enumeration of the measure names */ public Enumeration enumerateMeasures() { Vector result = new Vector(); result.addElement("measureKernelEvaluations"); result.addElement("measureCacheHits"); return result.elements(); } /** * Returns the value of the named measure * @param measureName the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException if the named measure is not supported */ public double getMeasure(String measureName) { if (measureName.equalsIgnoreCase("measureKernelEvaluations")) return measureKernelEvaluations(); else if (measureName.equalsIgnoreCase("measureCacheHits")) return measureCacheHits(); else throw new IllegalArgumentException("Measure '" + measureName + "' is not supported!"); } /** * number of kernel evaluations used in learing * * @return the number of kernel evaluations */ protected double measureKernelEvaluations() { if (m_optimizer != null) { return m_optimizer.getKernelEvaluations(); } else { return 0; } } /** * number of kernel cache hits used during learing * * @return the number of kernel cache hits */ protected double measureCacheHits() { if (m_optimizer != null) { return m_optimizer.getCacheHits(); } else { return 0; } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8123 $"); } /** * Main method for running this classifier. * * @param args the commandline options */ public static void main(String[] args) { runClassifier(new SMOreg(), args); } }
24,823
29.273171
294
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/SPegasos.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * SPegasos.java
 * Copyright (C) 2009 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.functions;

import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.UpdateableClassifier;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Tag;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.NominalToBinary;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;
import weka.filters.unsupervised.attribute.Normalize;

/**
 * Implements the stochastic variant of the Pegasos (Primal Estimated
 * sub-GrAdient SOlver for SVM) method of Shalev-Shwartz et al. (2007).
 * This implementation globally replaces all missing values and transforms
 * nominal attributes into binary ones. It also normalizes all attributes,
 * so the coefficients in the output are based on the normalized data.
 * <p/>
 * For more information, see:<br/>
 * S. Shalev-Shwartz, Y. Singer, N. Srebro: Pegasos: Primal Estimated
 * sub-GrAdient SOlver for SVM. In: 24th International Conference on
 * Machine Learning, 807-814, 2007.
 * <p/>
 *
 * Valid options are: <p/>
 *
 * <pre> -F
 *  Set the loss function to minimize. 0 = hinge loss (SVM),
 *  1 = log loss (logistic regression). (default = 0)</pre>
 *
 * <pre> -L &lt;double&gt;
 *  The lambda regularization constant (default = 0.0001)</pre>
 *
 * <pre> -E &lt;integer&gt;
 *  The number of epochs to perform (batch learning only, default = 500)</pre>
 *
 * <pre> -N
 *  Don't normalize the data</pre>
 *
 * <pre> -M
 *  Don't replace missing values</pre>
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: 6580 $
 */
public class SPegasos
  extends AbstractClassifier
  implements TechnicalInformationHandler, UpdateableClassifier,
             OptionHandler {

  /** For serialization */
  private static final long serialVersionUID = -3732968666673530290L;

  /** Identifier for the hinge loss (SVM). */
  protected static final int HINGE = 0;

  /** Identifier for the log loss (logistic regression). */
  protected static final int LOGLOSS = 1;

  /** Loss functions to choose from */
  public static final Tag[] TAGS_SELECTION = {
    new Tag(HINGE, "Hinge loss (SVM)"),
    new Tag(LOGLOSS, "Log loss (logistic regression)")
  };

  /** The current loss function to minimize */
  protected int m_loss = HINGE;

  /** Replace missing values */
  protected ReplaceMissingValues m_replaceMissing;

  /** Convert nominal attributes to numerically coded binary ones */
  protected NominalToBinary m_nominalToBinary;

  /** Normalize the training data */
  protected Normalize m_normalize;

  /** The regularization parameter */
  protected double m_lambda = 0.0001;

  /** Stores the weights (+ bias in the last element) */
  protected double[] m_weights;

  /** Holds the current iteration number */
  protected double m_t;

  /**
   * The number of epochs to perform (batch learning). Total iterations is
   * m_epochs * num instances
   */
  protected int m_epochs = 500;

  /**
   * Turn off normalization of the input data. This option gets
   * forced for incremental training.
   */
  protected boolean m_dontNormalize = false;

  /**
   * Turn off global replacement of missing values. Missing values
   * will be ignored instead. This option gets forced for
   * incremental training.
   */
  protected boolean m_dontReplaceMissing = false;

  /** Holds the header of the training data */
  protected Instances m_data;

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class: the underlying model is a two-class linear separator
    result.enable(Capability.BINARY_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    // instances
    result.setMinimumNumberInstances(0);

    return result;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String lambdaTipText() {
    return "The regularization constant. (default = 0.0001)";
  }

  /**
   * Set the value of lambda to use
   *
   * @param lambda the value of lambda to use
   */
  public void setLambda(double lambda) {
    m_lambda = lambda;
  }

  /**
   * Get the current value of lambda
   *
   * @return the current value of lambda
   */
  public double getLambda() {
    return m_lambda;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String epochsTipText() {
    return "The number of epochs to perform (batch learning). "
      + "The total number of iterations is epochs * num"
      + " instances.";
  }

  /**
   * Set the number of epochs to use
   *
   * @param e the number of epochs to use
   */
  public void setEpochs(int e) {
    m_epochs = e;
  }

  /**
   * Get current number of epochs
   *
   * @return the current number of epochs
   */
  public int getEpochs() {
    return m_epochs;
  }

  /**
   * Turn normalization off/on.
   *
   * @param m true if normalization is to be disabled.
   */
  public void setDontNormalize(boolean m) {
    m_dontNormalize = m;
  }

  /**
   * Get whether normalization has been turned off.
   *
   * @return true if normalization has been disabled.
   */
  public boolean getDontNormalize() {
    return m_dontNormalize;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String dontNormalizeTipText() {
    return "Turn normalization off";
  }

  /**
   * Turn global replacement of missing values off/on. If turned off,
   * then missing values are effectively ignored.
   *
   * @param m true if global replacement of missing values is to be
   * turned off.
   */
  public void setDontReplaceMissing(boolean m) {
    m_dontReplaceMissing = m;
  }

  /**
   * Get whether global replacement of missing values has been
   * disabled.
   *
   * @return true if global replacement of missing values has been turned
   * off
   */
  public boolean getDontReplaceMissing() {
    return m_dontReplaceMissing;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String dontReplaceMissingTipText() {
    return "Turn off global replacement of missing values";
  }

  /**
   * Set the loss function to use.
   *
   * @param function the loss function to use.
   */
  public void setLossFunction(SelectedTag function) {
    if (function.getTags() == TAGS_SELECTION) {
      m_loss = function.getSelectedTag().getID();
    }
  }

  /**
   * Get the current loss function.
   *
   * @return the current loss function.
   */
  public SelectedTag getLossFunction() {
    return new SelectedTag(m_loss, TAGS_SELECTION);
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String lossFunctionTipText() {
    return "The loss function to use. Hinge loss (SVM) "
      + "or log loss (logistic regression).";
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration<Option> listOptions() {
    Vector<Option> newVector = new Vector<Option>();

    newVector.add(new Option("\tSet the loss function to minimize. 0 = "
        + "hinge loss (SVM), 1 = log loss (logistic regression).\n"
        + "\t(default = 0)", "F", 1, "-F"));
    newVector.add(new Option("\tThe lambda regularization constant "
        + "(default = 0.0001)", "L", 1, "-L <double>"));
    newVector.add(new Option("\tThe number of epochs to perform ("
        + "batch learning only, default = 500)", "E", 1, "-E <integer>"));
    newVector.add(new Option("\tDon't normalize the data", "N", 0, "-N"));
    newVector.add(new Option("\tDon't replace missing values", "M", 0, "-M"));

    return newVector.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   * Valid options are: <p/>
   *
   * <pre> -F
   *  Set the loss function to minimize. 0 = hinge loss (SVM),
   *  1 = log loss (logistic regression). (default = 0)</pre>
   *
   * <pre> -L &lt;double&gt;
   *  The lambda regularization constant (default = 0.0001)</pre>
   *
   * <pre> -E &lt;integer&gt;
   *  The number of epochs to perform (batch learning only, default = 500)</pre>
   *
   * <pre> -N
   *  Don't normalize the data</pre>
   *
   * <pre> -M
   *  Don't replace missing values</pre>
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    // any previously learned model is invalidated by an option change
    reset();

    String lossString = Utils.getOption('F', options);
    if (lossString.length() != 0) {
      setLossFunction(new SelectedTag(Integer.parseInt(lossString),
          TAGS_SELECTION));
    } else {
      setLossFunction(new SelectedTag(HINGE, TAGS_SELECTION));
    }

    String lambdaString = Utils.getOption('L', options);
    if (lambdaString.length() > 0) {
      setLambda(Double.parseDouble(lambdaString));
    }

    String epochsString = Utils.getOption('E', options);
    if (epochsString.length() > 0) {
      setEpochs(Integer.parseInt(epochsString));
    }

    setDontNormalize(Utils.getFlag('N', options));
    setDontReplaceMissing(Utils.getFlag('M', options));
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    ArrayList<String> options = new ArrayList<String>();

    options.add("-F");
    options.add("" + getLossFunction().getSelectedTag().getID());
    options.add("-L");
    options.add("" + getLambda());
    options.add("-E");
    options.add("" + getEpochs());
    if (getDontNormalize()) {
      options.add("-N");
    }
    if (getDontReplaceMissing()) {
      options.add("-M");
    }

    // zero-length array: toArray then allocates exactly the right size
    // (a new String[1] would leave a trailing null if the list were empty)
    return options.toArray(new String[0]);
  }

  /**
   * Returns a string describing classifier
   *
   * @return a description suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "Implements the stochastic variant of the Pegasos"
      + " (Primal Estimated sub-GrAdient SOlver for SVM)"
      + " method of Shalev-Shwartz et al. (2007). This implementation"
      + " globally replaces all missing values and transforms nominal"
      + " attributes into binary ones. It also normalizes all attributes,"
      + " so the coefficients in the output are based on the normalized"
      + " data. For more information, see\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR,
        "S. Shalev-Shwartz and Y. Singer and N. Srebro");
    result.setValue(Field.YEAR, "2007");
    result.setValue(Field.TITLE, "Pegasos: Primal Estimated sub-GrAdient "
        + "SOlver for SVM");
    result.setValue(Field.BOOKTITLE, "24th International Conference on Machine"
        + "Learning");
    result.setValue(Field.PAGES, "807-814");

    return result;
  }

  /**
   * Reset the classifier. Discards the learned weights and restarts the
   * iteration counter (at 2, so the first learning rate 1/(lambda*t) is
   * finite and the first scale factor 1 - 1/t is non-zero).
   */
  public void reset() {
    m_t = 2;
    m_weights = null;
  }

  /**
   * Method for building the classifier.
   *
   * @param data the set of training instances.
   * @throws Exception if the classifier can't be built successfully.
   */
  public void buildClassifier(Instances data) throws Exception {
    reset();

    // can classifier handle the data?
    getCapabilities().testWithFail(data);

    data = new Instances(data);
    data.deleteWithMissingClass();

    if (data.numInstances() > 0 && !m_dontReplaceMissing) {
      m_replaceMissing = new ReplaceMissingValues();
      m_replaceMissing.setInputFormat(data);
      data = Filter.useFilter(data, m_replaceMissing);
    }

    // check for only numeric attributes
    boolean onlyNumeric = true;
    for (int i = 0; i < data.numAttributes(); i++) {
      if (i != data.classIndex()) {
        if (!data.attribute(i).isNumeric()) {
          onlyNumeric = false;
          break;
        }
      }
    }

    if (!onlyNumeric) {
      m_nominalToBinary = new NominalToBinary();
      m_nominalToBinary.setInputFormat(data);
      data = Filter.useFilter(data, m_nominalToBinary);
    }

    if (!m_dontNormalize && data.numInstances() > 0) {
      m_normalize = new Normalize();
      m_normalize.setInputFormat(data);
      data = Filter.useFilter(data, m_normalize);
    }

    // one weight per attribute plus the bias in the last slot
    m_weights = new double[data.numAttributes() + 1];
    m_data = new Instances(data, 0);

    if (data.numInstances() > 0) {
      train(data);
    }
  }

  /**
   * Performs the batch training loop: m_epochs passes over the data,
   * feeding each instance to the incremental update.
   *
   * @param data the (already filtered) training data
   * @throws Exception if an update fails
   */
  private void train(Instances data) throws Exception {
    for (int e = 0; e < m_epochs; e++) {
      for (int i = 0; i < data.numInstances(); i++) {
        updateClassifier(data.instance(i));
      }
    }
  }

  /**
   * Computes the inner product of a (possibly sparse) instance with the
   * weight vector, skipping the class attribute and missing values. The
   * bias element (last slot of weights) is NOT included.
   *
   * @param inst1 the instance
   * @param weights the weight vector (bias in the last element)
   * @param classIndex the index of the class attribute to skip
   * @return the dot product
   */
  protected static double dotProd(Instance inst1, double[] weights,
      int classIndex) {
    double result = 0;

    int n1 = inst1.numValues();
    int n2 = weights.length - 1;

    // merge-style walk over the sparse indices of the instance and the
    // dense indices of the weight vector
    for (int p1 = 0, p2 = 0; p1 < n1 && p2 < n2;) {
      int ind1 = inst1.index(p1);
      int ind2 = p2;
      if (ind1 == ind2) {
        if (ind1 != classIndex && !inst1.isMissingSparse(p1)) {
          result += inst1.valueSparse(p1) * weights[p2];
        }
        p1++;
        p2++;
      } else if (ind1 > ind2) {
        p2++;
      } else {
        p1++;
      }
    }
    return (result);
  }

  /**
   * Derivative of the loss at margin z: for hinge loss a subgradient
   * (1 inside the margin, 0 outside), for log loss the logistic term
   * computed in a numerically stable form for both signs of z.
   *
   * @param z the margin y * (w.x + b)
   * @return the (sub)gradient magnitude of the loss
   */
  protected double dloss(double z) {
    if (m_loss == HINGE) {
      return (z < 1) ? 1 : 0;
    }

    // log loss
    if (z < 0) {
      return 1.0 / (Math.exp(z) + 1.0);
    } else {
      double t = Math.exp(-z);
      return t / (t + 1);
    }
  }

  /**
   * Updates the classifier with the given instance.
   *
   * @param instance the new training instance to include in the model
   * @exception Exception if the instance could not be incorporated in
   * the model.
   */
  public void updateClassifier(Instance instance) throws Exception {
    if (!instance.classIsMissing()) {

      double learningRate = 1.0 / (m_lambda * m_t);
      //double scale = 1.0 - learningRate * m_lambda;
      double scale = 1.0 - 1.0 / m_t;
      // map the two class indices {0, 1} to targets {-1, +1}
      double y = (instance.classValue() == 0) ? -1 : 1;
      double wx = dotProd(instance, m_weights, instance.classIndex());
      double z = y * (wx + m_weights[m_weights.length - 1]);

      // regularization shrink of all non-class, non-bias weights
      for (int j = 0; j < m_weights.length - 1; j++) {
        if (j != instance.classIndex()) {
          m_weights[j] *= scale;
        }
      }

      // gradient step: always for log loss, only inside the margin for hinge
      if (m_loss == LOGLOSS || (z < 1)) {
        double loss = dloss(z);
        int n1 = instance.numValues();
        for (int p1 = 0; p1 < n1; p1++) {
          int indS = instance.index(p1);
          if (indS != instance.classIndex() && !instance.isMissingSparse(p1)) {
            double m = learningRate * loss * (instance.valueSparse(p1) * y);
            m_weights[indS] += m;
          }
        }

        // update the bias
        m_weights[m_weights.length - 1] += learningRate * loss * y;
      }

      // project the weight vector back onto the ball of radius
      // 1/sqrt(lambda) (Pegasos projection step)
      double norm = 0;
      for (int k = 0; k < m_weights.length - 1; k++) {
        if (k != instance.classIndex()) {
          norm += (m_weights[k] * m_weights[k]);
        }
      }

      double scale2 = Math.min(1.0, (1.0 / (m_lambda * norm)));
      if (scale2 < 1.0) {
        scale2 = Math.sqrt(scale2);
        for (int j = 0; j < m_weights.length - 1; j++) {
          if (j != instance.classIndex()) {
            m_weights[j] *= scale2;
          }
        }
      }

      m_t++;
    }
  }

  /**
   * Computes the distribution for a given instance
   *
   * @param inst the instance for which distribution is computed
   * @return the distribution
   * @throws Exception if the distribution can't be computed successfully
   */
  public double[] distributionForInstance(Instance inst) throws Exception {
    double[] result = new double[2];

    // apply the same filters used at training time
    if (m_replaceMissing != null) {
      m_replaceMissing.input(inst);
      inst = m_replaceMissing.output();
    }

    if (m_nominalToBinary != null) {
      m_nominalToBinary.input(inst);
      inst = m_nominalToBinary.output();
    }

    if (m_normalize != null) {
      m_normalize.input(inst);
      inst = m_normalize.output();
    }

    double wx = dotProd(inst, m_weights, inst.classIndex());// * m_wScale;
    double z = (wx + m_weights[m_weights.length - 1]);
    //System.out.print("" + z + ": ");
    // System.out.println(1.0 / (1.0 + Math.exp(-z)));
    if (z <= 0) {
      //  z = 0;
      if (m_loss == LOGLOSS) {
        result[0] = 1.0 / (1.0 + Math.exp(z));
        result[1] = 1.0 - result[0];
      } else {
        result[0] = 1;
      }
    } else {
      if (m_loss == LOGLOSS) {
        result[1] = 1.0 / (1.0 + Math.exp(-z));
        result[0] = 1.0 - result[1];
      } else {
        result[1] = 1;
      }
    }
    return result;
  }

  /**
   * Prints out the classifier.
   *
   * @return a description of the classifier as a string
   */
  public String toString() {
    if (m_weights == null) {
      return "SPegasos: No model built yet.\n";
    }
    StringBuffer buff = new StringBuffer();
    buff.append("Loss function: ");
    if (m_loss == HINGE) {
      buff.append("Hinge loss (SVM)\n\n");
    } else {
      buff.append("Log loss (logistic regression)\n\n");
    }
    int printed = 0;

    for (int i = 0; i < m_weights.length - 1; i++) {
      if (i != m_data.classIndex()) {
        if (printed > 0) {
          buff.append(" + ");
        } else {
          buff.append(" ");
        }

        buff.append(Utils.doubleToString(m_weights[i], 12, 4) + " "
            + ((m_normalize != null) ? "(normalized) " : "")
            + m_data.attribute(i).name() + "\n");

        printed++;
      }
    }

    if (m_weights[m_weights.length - 1] > 0) {
      buff.append(" + "
          + Utils.doubleToString(m_weights[m_weights.length - 1], 12, 4));
    } else {
      buff.append(" - "
          + Utils.doubleToString(-m_weights[m_weights.length - 1], 12, 4));
    }

    return buff.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 6580 $");
  }

  /**
   * Main method for testing this class.
   */
  public static void main(String[] args) {
    runClassifier(new SPegasos(), args);
  }
}
20,903
27.674897
381
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/SimpleLinearRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SimpleLinearRegression.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import weka.classifiers.AbstractClassifier; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** <!-- globalinfo-start --> * Learns a simple linear regression model. Picks the attribute that results in the lowest squared error. Missing values are not allowed. Can only deal with numeric attributes. 
* <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class SimpleLinearRegression extends AbstractClassifier implements WeightedInstancesHandler { /** for serialization */ static final long serialVersionUID = 1679336022895414137L; /** The chosen attribute */ private Attribute m_attribute; /** The index of the chosen attribute */ private int m_attributeIndex; /** The slope */ private double m_slope; /** The intercept */ private double m_intercept; /** If true, suppress error message if no useful attribute was found*/ private boolean m_suppressErrorMessage = false; /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Learns a simple linear regression model. " +"Picks the attribute that results in the lowest squared error. " +"Missing values are not allowed. Can only deal with numeric attributes."; } /** * Generate a prediction for the supplied instance. * * @param inst the instance to predict. * @return the prediction * @throws Exception if an error occurs */ public double classifyInstance(Instance inst) throws Exception { if (m_attribute == null) { return m_intercept; } else { if (inst.isMissing(m_attribute.index())) { throw new Exception("SimpleLinearRegression: No missing values!"); } return m_intercept + m_slope * inst.value(m_attribute.index()); } } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); // class result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Builds a simple linear regression model given the supplied training data. * * @param insts the training data. * @throws Exception if an error occurs */ public void buildClassifier(Instances insts) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(insts); // remove instances with missing class insts = new Instances(insts); insts.deleteWithMissingClass(); // Compute mean of target value double yMean = insts.meanOrMode(insts.classIndex()); // Choose best attribute double minMsq = Double.MAX_VALUE; m_attribute = null; int chosen = -1; double chosenSlope = Double.NaN; double chosenIntercept = Double.NaN; for (int i = 0; i < insts.numAttributes(); i++) { if (i != insts.classIndex()) { m_attribute = insts.attribute(i); // Compute slope and intercept double xMean = insts.meanOrMode(i); double sumWeightedXDiffSquared = 0; double sumWeightedYDiffSquared = 0; m_slope = 0; for (int j = 0; j < insts.numInstances(); j++) { Instance inst = insts.instance(j); if (!inst.isMissing(i) && !inst.classIsMissing()) { double xDiff = inst.value(i) - xMean; double yDiff = inst.classValue() - yMean; double weightedXDiff = inst.weight() * xDiff; double weightedYDiff = inst.weight() * yDiff; m_slope += weightedXDiff * yDiff; sumWeightedXDiffSquared += weightedXDiff * xDiff; sumWeightedYDiffSquared += weightedYDiff * yDiff; } } // Skip attribute if not useful if (sumWeightedXDiffSquared == 0) { continue; } double numerator = m_slope; m_slope /= sumWeightedXDiffSquared; m_intercept = yMean - m_slope * xMean; // Compute sum of squared errors double 
msq = sumWeightedYDiffSquared - m_slope * numerator; // Check whether this is the best attribute if (msq < minMsq) { minMsq = msq; chosen = i; chosenSlope = m_slope; chosenIntercept = m_intercept; } } } // Set parameters if (chosen == -1) { if (!m_suppressErrorMessage) System.err.println("----- no useful attribute found"); m_attribute = null; m_attributeIndex = 0; m_slope = 0; m_intercept = yMean; } else { m_attribute = insts.attribute(chosen); m_attributeIndex = chosen; m_slope = chosenSlope; m_intercept = chosenIntercept; } } /** * Returns true if a usable attribute was found. * * @return true if a usable attribute was found. */ public boolean foundUsefulAttribute(){ return (m_attribute != null); } /** * Returns the index of the attribute used in the regression. * * @return the index of the attribute. */ public int getAttributeIndex(){ return m_attributeIndex; } /** * Returns the slope of the function. * * @return the slope. */ public double getSlope(){ return m_slope; } /** * Returns the intercept of the function. * * @return the intercept. */ public double getIntercept(){ return m_intercept; } /** * Turn off the error message that is reported when no useful attribute is found. * * @param s if set to true turns off the error message */ public void setSuppressErrorMessage(boolean s){ m_suppressErrorMessage = s; } /** * Returns a description of this classifier as a string * * @return a description of the classifier. */ public String toString() { StringBuffer text = new StringBuffer(); if (m_attribute == null) { text.append("Predicting constant " + m_intercept); } else { text.append("Linear regression on " + m_attribute.name() + "\n\n"); text.append(Utils.doubleToString(m_slope,2) + " * " + m_attribute.name()); if (m_intercept > 0) { text.append(" + " + Utils.doubleToString(m_intercept, 2)); } else { text.append(" - " + Utils.doubleToString((-m_intercept), 2)); } } text.append("\n"); return text.toString(); } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method for testing this class * * @param argv options */ public static void main(String [] argv){ runClassifier(new SimpleLinearRegression(), argv); } }
8,073
26.556314
176
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/SimpleLogistic.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * SimpleLogistic.java
 * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.functions;

import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.AbstractClassifier;
import weka.classifiers.trees.lmt.LogisticBase;
import weka.core.AdditionalMeasureProducer;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.NominalToBinary;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;

/**
 * Classifier for building linear logistic regression models. LogitBoost with
 * simple regression functions as base learners is used for fitting the
 * logistic models. The optimal number of LogitBoost iterations to perform is
 * cross-validated, which leads to automatic attribute selection.
 *
 * For more information see: Niels Landwehr, Mark Hall, Eibe Frank (2005),
 * "Logistic Model Trees", Machine Learning 95(1-2):161-205; and Marc Sumner,
 * Eibe Frank, Mark Hall, "Speeding up Logistic Model Tree Induction",
 * 9th European Conference on Principles and Practice of Knowledge Discovery
 * in Databases, 675-683, 2005.
 *
 * Valid options are:
 *   -I &lt;iterations&gt;  fixed number of LogitBoost iterations
 *   -S                     use stopping criterion on training set (instead of
 *                          cross-validation)
 *   -P                     use error on probabilities (rmse) instead of
 *                          misclassification error for stopping criterion
 *   -M &lt;iterations&gt;  maximum number of boosting iterations
 *   -H &lt;iterations&gt;  heuristic for greedy early stopping of LogitBoost
 *                          (default 50, 0 disables)
 *   -W &lt;beta&gt;        beta for weight trimming in LogitBoost (0 = none)
 *   -A                     use the AIC to choose the best iteration (instead
 *                          of CV or training error)
 *
 * @author Niels Landwehr
 * @author Marc Sumner
 * @version $Revision: 8034 $
 */
public class SimpleLogistic extends AbstractClassifier
  implements OptionHandler, AdditionalMeasureProducer,
             WeightedInstancesHandler, TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 7397710626304705059L;

  /** The actual logistic regression model */
  protected LogisticBase m_boostedModel;

  /** Filter for converting nominal attributes to binary ones */
  protected NominalToBinary m_NominalToBinary = null;

  /** Filter for replacing missing values */
  protected ReplaceMissingValues m_ReplaceMissingValues = null;

  /** If non-negative, use this as fixed number of LogitBoost iterations */
  protected int m_numBoostingIterations;

  /** Maximum number of iterations for LogitBoost */
  protected int m_maxBoostingIterations = 500;

  /** Parameter for the heuristic for early stopping of LogitBoost */
  protected int m_heuristicStop = 50;

  /** If true, cross-validate number of LogitBoost iterations */
  protected boolean m_useCrossValidation;

  /** If true, minimize error on probabilities instead of misclassification error */
  protected boolean m_errorOnProbabilities;

  /**
   * Threshold for trimming weights. Instances with a weight lower than this
   * (as a percentage of total weights) are not included in the regression fit.
   */
  protected double m_weightTrimBeta = 0;

  /** If true, the AIC is used to choose the best iteration */
  private boolean m_useAIC = false;

  /**
   * Constructor for creating SimpleLogistic object with standard options.
   */
  public SimpleLogistic() {
    m_numBoostingIterations = 0;
    m_useCrossValidation = true;
    m_errorOnProbabilities = false;
    m_weightTrimBeta = 0;
    m_useAIC = false;
  }

  /**
   * Constructor for creating SimpleLogistic object.
   *
   * @param numBoostingIterations if non-negative, use this as fixed number of
   * iterations for LogitBoost
   * @param useCrossValidation cross-validate number of LogitBoost iterations.
   * @param errorOnProbabilities minimize error on probabilities instead of
   * misclassification error
   */
  public SimpleLogistic(int numBoostingIterations, boolean useCrossValidation,
                        boolean errorOnProbabilities) {
    m_numBoostingIterations = numBoostingIterations;
    m_useCrossValidation = useCrossValidation;
    m_errorOnProbabilities = errorOnProbabilities;
    m_weightTrimBeta = 0;
    m_useAIC = false;
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * Builds the logistic regression using LogitBoost.
   *
   * @param data the training data
   * @throws Exception if something goes wrong
   */
  public void buildClassifier(Instances data) throws Exception {
    // can classifier handle the data?
    getCapabilities().testWithFail(data);

    // remove instances with missing class
    data = new Instances(data);
    data.deleteWithMissingClass();

    // replace missing values
    m_ReplaceMissingValues = new ReplaceMissingValues();
    m_ReplaceMissingValues.setInputFormat(data);
    data = Filter.useFilter(data, m_ReplaceMissingValues);

    // convert nominal attributes
    m_NominalToBinary = new NominalToBinary();
    m_NominalToBinary.setInputFormat(data);
    data = Filter.useFilter(data, m_NominalToBinary);

    // create actual logistic model
    m_boostedModel = new LogisticBase(m_numBoostingIterations,
        m_useCrossValidation, m_errorOnProbabilities);
    m_boostedModel.setMaxIterations(m_maxBoostingIterations);
    m_boostedModel.setHeuristicStop(m_heuristicStop);
    m_boostedModel.setWeightTrimBeta(m_weightTrimBeta);
    m_boostedModel.setUseAIC(m_useAIC);

    // build logistic model
    m_boostedModel.buildClassifier(data);
  }

  /**
   * Returns class probabilities for an instance.
   *
   * @param inst the instance to compute the probabilities for
   * @return the probabilities
   * @throws Exception if distribution can't be computed successfully
   */
  public double[] distributionForInstance(Instance inst) throws Exception {
    // replace missing values / convert nominal atts using the training filters
    m_ReplaceMissingValues.input(inst);
    inst = m_ReplaceMissingValues.output();
    m_NominalToBinary.input(inst);
    inst = m_NominalToBinary.output();

    // obtain probs from logistic model
    return m_boostedModel.distributionForInstance(inst);
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector<Option> newVector = new Vector<Option>();

    newVector.addElement(new Option(
        "\tSet fixed number of iterations for LogitBoost",
        "I", 1, "-I <iterations>"));
    newVector.addElement(new Option(
        "\tUse stopping criterion on training set (instead of\n"
        + "\tcross-validation)",
        "S", 0, "-S"));
    newVector.addElement(new Option(
        "\tUse error on probabilities (rmse) instead of\n"
        + "\tmisclassification error for stopping criterion",
        "P", 0, "-P"));
    newVector.addElement(new Option(
        "\tSet maximum number of boosting iterations",
        "M", 1, "-M <iterations>"));
    newVector.addElement(new Option(
        "\tSet parameter for heuristic for early stopping of\n"
        + "\tLogitBoost.\n"
        + "\tIf enabled, the minimum is selected greedily, stopping\n"
        + "\tif the current minimum has not changed for iter iterations.\n"
        + "\tBy default, heuristic is enabled with value 50. Set to\n"
        + "\tzero to disable heuristic.",
        "H", 1, "-H <iterations>"));
    newVector.addElement(new Option(
        "\tSet beta for weight trimming for LogitBoost. Set to 0 for no weight trimming.\n",
        "W", 1, "-W <beta>"));
    newVector.addElement(new Option(
        "\tThe AIC is used to choose the best iteration (instead of CV or training error).\n",
        "A", 0, "-A"));

    return newVector.elements();
  }

  /**
   * Parses a given list of options. See the class javadoc for the list of
   * valid options.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String optionString = Utils.getOption('I', options);
    if (optionString.length() != 0) {
      setNumBoostingIterations(Integer.parseInt(optionString));
    }

    setUseCrossValidation(!Utils.getFlag('S', options));
    setErrorOnProbabilities(Utils.getFlag('P', options));

    optionString = Utils.getOption('M', options);
    if (optionString.length() != 0) {
      setMaxBoostingIterations(Integer.parseInt(optionString));
    }

    optionString = Utils.getOption('H', options);
    if (optionString.length() != 0) {
      setHeuristicStop(Integer.parseInt(optionString));
    }

    optionString = Utils.getOption('W', options);
    if (optionString.length() != 0) {
      setWeightTrimBeta(Double.parseDouble(optionString));
    }

    setUseAIC(Utils.getFlag('A', options));

    Utils.checkForRemainingOptions(options);
  }

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] options = new String[11];
    int current = 0;

    options[current++] = "-I";
    options[current++] = "" + getNumBoostingIterations();

    if (!getUseCrossValidation()) {
      options[current++] = "-S";
    }

    if (getErrorOnProbabilities()) {
      options[current++] = "-P";
    }

    options[current++] = "-M";
    options[current++] = "" + getMaxBoostingIterations();

    options[current++] = "-H";
    options[current++] = "" + getHeuristicStop();

    options[current++] = "-W";
    options[current++] = "" + getWeightTrimBeta();

    if (getUseAIC()) {
      options[current++] = "-A";
    }

    // pad remaining slots with empty strings, as Weka's option handling expects
    while (current < options.length) {
      options[current++] = "";
    }
    return options;
  }

  /**
   * Get the value of numBoostingIterations.
   *
   * @return the number of boosting iterations
   */
  public int getNumBoostingIterations() {
    return m_numBoostingIterations;
  }

  /**
   * Get the value of useCrossValidation.
   *
   * @return true if cross-validation is used
   */
  public boolean getUseCrossValidation() {
    return m_useCrossValidation;
  }

  /**
   * Get the value of errorOnProbabilities.
   *
   * @return If true, use minimize error on probabilities instead of
   * misclassification error
   */
  public boolean getErrorOnProbabilities() {
    return m_errorOnProbabilities;
  }

  /**
   * Get the value of maxBoostingIterations.
   *
   * @return the maximum number of boosting iterations
   */
  public int getMaxBoostingIterations() {
    return m_maxBoostingIterations;
  }

  /**
   * Get the value of heuristicStop.
   *
   * @return the value of heuristicStop
   */
  public int getHeuristicStop() {
    return m_heuristicStop;
  }

  /**
   * Get the value of weightTrimBeta.
   *
   * @return the value of weightTrimBeta
   */
  public double getWeightTrimBeta() {
    return m_weightTrimBeta;
  }

  /**
   * Get the value of useAIC.
   *
   * @return Value of useAIC.
   */
  public boolean getUseAIC() {
    return m_useAIC;
  }

  /**
   * Set the value of numBoostingIterations.
   *
   * @param n the number of boosting iterations
   */
  public void setNumBoostingIterations(int n) {
    m_numBoostingIterations = n;
  }

  /**
   * Set the value of useCrossValidation.
   *
   * @param l whether to use cross-validation
   */
  public void setUseCrossValidation(boolean l) {
    m_useCrossValidation = l;
  }

  /**
   * Set the value of errorOnProbabilities.
   *
   * @param l If true, use minimize error on probabilities instead of
   * misclassification error
   */
  public void setErrorOnProbabilities(boolean l) {
    m_errorOnProbabilities = l;
  }

  /**
   * Set the value of maxBoostingIterations.
   *
   * @param n the maximum number of boosting iterations
   */
  public void setMaxBoostingIterations(int n) {
    m_maxBoostingIterations = n;
  }

  /**
   * Set the value of heuristicStop.
   *
   * @param n the value of heuristicStop (0 disables the heuristic, i.e. it is
   * replaced by the maximum number of boosting iterations)
   */
  public void setHeuristicStop(int n) {
    if (n == 0) {
      m_heuristicStop = m_maxBoostingIterations;
    } else {
      m_heuristicStop = n;
    }
  }

  /**
   * Set the value of weightTrimBeta.
   *
   * @param n the value of weightTrimBeta
   */
  public void setWeightTrimBeta(double n) {
    m_weightTrimBeta = n;
  }

  /**
   * Set the value of useAIC.
   *
   * @param c Value to assign to useAIC.
   */
  public void setUseAIC(boolean c) {
    m_useAIC = c;
  }

  /**
   * Get the number of LogitBoost iterations performed (= the number of
   * regression functions fit by LogitBoost).
   *
   * @return the number of LogitBoost iterations performed
   */
  public int getNumRegressions() {
    return m_boostedModel.getNumRegressions();
  }

  /**
   * Returns a description of the logistic model (attributes/coefficients).
   *
   * @return the model as string
   */
  public String toString() {
    if (m_boostedModel == null) {
      return "No model built";
    }
    return "SimpleLogistic:\n" + m_boostedModel.toString();
  }

  /**
   * Returns the fraction of all attributes in the data that are used in the
   * logistic model (in percent). An attribute is used in the model if it is
   * used in any of the models for the different classes.
   *
   * @return percentage of attributes used in the model
   */
  public double measureAttributesUsed() {
    return m_boostedModel.percentAttributesUsed();
  }

  /**
   * Returns an enumeration of the additional measure names.
   *
   * @return an enumeration of the measure names
   */
  public Enumeration enumerateMeasures() {
    // exactly two measures are exposed
    Vector<String> newVector = new Vector<String>(2);
    newVector.addElement("measureAttributesUsed");
    newVector.addElement("measureNumIterations");
    return newVector.elements();
  }

  /**
   * Returns the value of the named measure.
   *
   * @param additionalMeasureName the name of the measure to query for its value
   * @return the value of the named measure
   * @throws IllegalArgumentException if the named measure is not supported
   */
  public double getMeasure(String additionalMeasureName) {
    if (additionalMeasureName.compareToIgnoreCase("measureAttributesUsed") == 0) {
      return measureAttributesUsed();
    } else if (additionalMeasureName.compareToIgnoreCase("measureNumIterations") == 0) {
      return getNumRegressions();
    } else {
      throw new IllegalArgumentException(additionalMeasureName
          + " not supported (SimpleLogistic)");
    }
  }

  /**
   * Returns a string describing classifier.
   *
   * @return a description suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "Classifier for building linear logistic regression models. LogitBoost with simple regression "
      + "functions as base learners is used for fitting the logistic models. The optimal number of LogitBoost "
      + "iterations to perform is cross-validated, which leads to automatic attribute selection. "
      + "For more information see:\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;
    TechnicalInformation additional;

    result = new TechnicalInformation(Type.ARTICLE);
    result.setValue(Field.AUTHOR, "Niels Landwehr and Mark Hall and Eibe Frank");
    result.setValue(Field.TITLE, "Logistic Model Trees");
    result.setValue(Field.BOOKTITLE, "Machine Learning");
    result.setValue(Field.YEAR, "2005");
    result.setValue(Field.VOLUME, "95");
    result.setValue(Field.PAGES, "161-205");
    result.setValue(Field.NUMBER, "1-2");

    additional = result.add(Type.INPROCEEDINGS);
    additional.setValue(Field.AUTHOR, "Marc Sumner and Eibe Frank and Mark Hall");
    additional.setValue(Field.TITLE, "Speeding up Logistic Model Tree Induction");
    additional.setValue(Field.BOOKTITLE,
        "9th European Conference on Principles and Practice of Knowledge Discovery in Databases");
    additional.setValue(Field.YEAR, "2005");
    additional.setValue(Field.PAGES, "675-683");
    additional.setValue(Field.PUBLISHER, "Springer");

    return result;
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String numBoostingIterationsTipText() {
    return "Set fixed number of iterations for LogitBoost. If >= 0, this sets the number of LogitBoost iterations "
      + "to perform. If < 0, the number is cross-validated or a stopping criterion on the training set is used "
      + "(depending on the value of useCrossValidation).";
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String useCrossValidationTipText() {
    return "Sets whether the number of LogitBoost iterations is to be cross-validated or the stopping criterion "
      + "on the training set should be used. If not set (and no fixed number of iterations was given), "
      + "the number of LogitBoost iterations is used that minimizes the error on the training set "
      + "(misclassification error or error on probabilities depending on errorOnProbabilities).";
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String errorOnProbabilitiesTipText() {
    return "Use error on the probabilties as error measure when determining the best number of LogitBoost iterations. "
      + "If set, the number of LogitBoost iterations is chosen that minimizes the root mean squared error "
      + "(either on the training set or in the cross-validation, depending on useCrossValidation).";
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String maxBoostingIterationsTipText() {
    return "Sets the maximum number of iterations for LogitBoost. Default value is 500, for very small/large "
      + "datasets a lower/higher value might be preferable.";
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String heuristicStopTipText() {
    return "If heuristicStop > 0, the heuristic for greedy stopping while cross-validating the number of "
      + "LogitBoost iterations is enabled. This means LogitBoost is stopped if no new error minimum "
      + "has been reached in the last heuristicStop iterations. It is recommended to use this heuristic, "
      + "it gives a large speed-up especially on small datasets. The default value is 50.";
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String weightTrimBetaTipText() {
    return "Set the beta value used for weight trimming in LogitBoost. "
      + "Only instances carrying (1 - beta)% of the weight from previous iteration "
      + "are used in the next iteration. Set to 0 for no weight trimming. "
      + "The default value is 0.";
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String useAICTipText() {
    return "The AIC is used to determine when to stop LogitBoost iterations "
      + "(instead of cross-validation or training error).";
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Main method for testing this class
   *
   * @param argv commandline options
   */
  public static void main(String[] argv) {
    runClassifier(new SimpleLogistic(), argv);
  }
}
24,690
32.186828
315
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/VotedPerceptron.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * VotedPerceptron.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.ReplaceMissingValues; /** <!-- globalinfo-start --> * Implementation of the voted perceptron algorithm by Freund and Schapire. Globally replaces all missing values, and transforms nominal attributes into binary ones.<br/> * <br/> * For more information, see:<br/> * <br/> * Y. Freund, R. E. Schapire: Large margin classification using the perceptron algorithm. In: 11th Annual Conference on Computational Learning Theory, New York, NY, 209-217, 1998. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Freund1998, * address = {New York, NY}, * author = {Y. Freund and R. E. Schapire}, * booktitle = {11th Annual Conference on Computational Learning Theory}, * pages = {209-217}, * publisher = {ACM Press}, * title = {Large margin classification using the perceptron algorithm}, * year = {1998} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -I &lt;int&gt; * The number of iterations to be performed. * (default 1)</pre> * * <pre> -E &lt;double&gt; * The exponent for the polynomial kernel. * (default 1)</pre> * * <pre> -S &lt;int&gt; * The seed for the random number generation. * (default 1)</pre> * * <pre> -M &lt;int&gt; * The maximum number of alterations allowed. * (default 10000)</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class VotedPerceptron extends AbstractClassifier implements OptionHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -1072429260104568698L; /** The maximum number of alterations to the perceptron */ private int m_MaxK = 10000; /** The number of iterations */ private int m_NumIterations = 1; /** The exponent */ private double m_Exponent = 1.0; /** The actual number of alterations */ private int m_K = 0; /** The training instances added to the perceptron */ private int[] m_Additions = null; /** Addition or subtraction? */ private boolean[] m_IsAddition = null; /** The weights for each perceptron */ private int[] m_Weights = null; /** The training instances */ private Instances m_Train = null; /** Seed used for shuffling the dataset */ private int m_Seed = 1; /** The filter used to make attributes numeric. */ private NominalToBinary m_NominalToBinary; /** The filter used to get rid of missing values. 
*/
  private ReplaceMissingValues m_ReplaceMissingValues;

  /**
   * Returns a string describing this classifier.
   *
   * @return a description of the classifier suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {

    return "Implementation of the voted perceptron algorithm by Freund and "
      + "Schapire. Globally replaces all missing values, and transforms "
      + "nominal attributes into binary ones.\n\n"
      + "For more information, see:\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "Y. Freund and R. E. Schapire");
    result.setValue(Field.TITLE, "Large margin classification using the perceptron algorithm");
    result.setValue(Field.BOOKTITLE, "11th Annual Conference on Computational Learning Theory");
    result.setValue(Field.YEAR, "1998");
    result.setValue(Field.PAGES, "209-217");
    result.setValue(Field.PUBLISHER, "ACM Press");
    result.setValue(Field.ADDRESS, "New York, NY");

    return result;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {

    // parameterized Vector instead of the raw type
    Vector<Option> newVector = new Vector<Option>(4);

    newVector.addElement(new Option("\tThe number of iterations to be performed.\n"
                                    + "\t(default 1)",
                                    "I", 1, "-I <int>"));
    newVector.addElement(new Option("\tThe exponent for the polynomial kernel.\n"
                                    + "\t(default 1)",
                                    "E", 1, "-E <double>"));
    newVector.addElement(new Option("\tThe seed for the random number generation.\n"
                                    + "\t(default 1)",
                                    "S", 1, "-S <int>"));
    newVector.addElement(new Option("\tThe maximum number of alterations allowed.\n"
                                    + "\t(default 10000)",
                                    "M", 1, "-M <int>"));

    return newVector.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -I &lt;int&gt;
   *  The number of iterations to be performed.
   *  (default 1)</pre>
   *
   * <pre> -E &lt;double&gt;
   *  The exponent for the polynomial kernel.
   *  (default 1)</pre>
   *
   * <pre> -S &lt;int&gt;
   *  The seed for the random number generation.
   *  (default 1)</pre>
   *
   * <pre> -M &lt;int&gt;
   *  The maximum number of alterations allowed.
   *  (default 10000)</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    String iterationsString = Utils.getOption('I', options);
    if (iterationsString.length() != 0) {
      m_NumIterations = Integer.parseInt(iterationsString);
    } else {
      m_NumIterations = 1;
    }
    String exponentsString = Utils.getOption('E', options);
    if (exponentsString.length() != 0) {
      // Double.parseDouble avoids the deprecated Double(String) constructor
      m_Exponent = Double.parseDouble(exponentsString);
    } else {
      m_Exponent = 1.0;
    }
    String seedString = Utils.getOption('S', options);
    if (seedString.length() != 0) {
      m_Seed = Integer.parseInt(seedString);
    } else {
      m_Seed = 1;
    }
    String alterationsString = Utils.getOption('M', options);
    if (alterationsString.length() != 0) {
      m_MaxK = Integer.parseInt(alterationsString);
    } else {
      m_MaxK = 10000;
    }
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {

    String[] options = new String [8];
    int current = 0;

    options[current++] = "-I"; options[current++] = "" + m_NumIterations;
    options[current++] = "-E"; options[current++] = "" + m_Exponent;
    options[current++] = "-S"; options[current++] = "" + m_Seed;
    options[current++] = "-M"; options[current++] = "" + m_MaxK;

    // pad the remainder with empty strings, as expected by option handling
    while (current < options.length) {
      options[current++] = "";
    }
    return options;
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class
    result.enable(Capability.BINARY_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    // instances
    result.setMinimumNumberInstances(0);

    return result;
  }

  /**
   * Builds the ensemble of perceptrons.
   *
   * @param insts the data to train the classifier with
   * @throws Exception if something goes wrong during building
   */
  public void buildClassifier(Instances insts) throws Exception {

    // can classifier handle the data?
    getCapabilities().testWithFail(insts);

    // remove instances with missing class
    insts = new Instances(insts);
    insts.deleteWithMissingClass();

    // Filter data: replace missing values, then binarize nominal attributes
    m_Train = new Instances(insts);
    m_ReplaceMissingValues = new ReplaceMissingValues();
    m_ReplaceMissingValues.setInputFormat(m_Train);
    m_Train = Filter.useFilter(m_Train, m_ReplaceMissingValues);

    m_NominalToBinary = new NominalToBinary();
    m_NominalToBinary.setInputFormat(m_Train);
    m_Train = Filter.useFilter(m_Train, m_NominalToBinary);

    /** Randomize training data */
    m_Train.randomize(new Random(m_Seed));

    /** Make space to store perceptrons */
    m_Additions = new int[m_MaxK + 1];
    m_IsAddition = new boolean[m_MaxK + 1];
    m_Weights = new int[m_MaxK + 1];

    /** Compute perceptrons: on a mistake a new perceptron is started
        (indexed by the mistaken instance), otherwise the current
        perceptron's survival count (vote weight) is incremented. */
    m_K = 0;
  out:
    for (int it = 0; it < m_NumIterations; it++) {
      for (int i = 0; i < m_Train.numInstances(); i++) {
	Instance inst = m_Train.instance(i);
	if (!inst.classIsMissing()) {
	  int prediction = makePrediction(m_K, inst);
	  int classValue = (int) inst.classValue();
	  if (prediction == classValue) {
	    m_Weights[m_K]++;
	  } else {
	    m_IsAddition[m_K] = (classValue == 1);
	    m_Additions[m_K] = i;
	    m_K++;
	    m_Weights[m_K]++;
	  }
	  if (m_K == m_MaxK) {
	    break out;
	  }
	}
      }
    }
  }

  /**
   * Outputs the distribution for the given output.
   *
   * Pipes output of SVM through sigmoid function.
   * @param inst the instance for which distribution is to be computed
   * @return the distribution
   * @throws Exception if something goes wrong
   */
  public double[] distributionForInstance(Instance inst) throws Exception {

    // Filter instance through the same filters used at training time
    m_ReplaceMissingValues.input(inst);
    m_ReplaceMissingValues.batchFinished();
    inst = m_ReplaceMissingValues.output();

    m_NominalToBinary.input(inst);
    m_NominalToBinary.batchFinished();
    inst = m_NominalToBinary.output();

    // Accumulate the weighted vote: each stored perceptron i contributes
    // its survival count m_Weights[i], signed by the prediction implied
    // by the running kernel sum (sumSoFar) of all prior additions/removals.
    double output = 0, sumSoFar = 0;
    if (m_K > 0) {
      for (int i = 0; i <= m_K; i++) {
	if (sumSoFar < 0) {
	  output -= m_Weights[i];
	} else {
	  output += m_Weights[i];
	}
	if (m_IsAddition[i]) {
	  sumSoFar += innerProduct(m_Train.instance(m_Additions[i]), inst);
	} else {
	  sumSoFar -= innerProduct(m_Train.instance(m_Additions[i]), inst);
	}
      }
    }
    double[] result = new double[2];
    // squash the raw vote through a sigmoid to get a probability
    result[1] = 1 / (1 + Math.exp(-output));
    result[0] = 1 - result[1];

    return result;
  }

  /**
   * Returns textual description of classifier.
   *
   * @return the model as string
   */
  public String toString() {

    return "VotedPerceptron: Number of perceptrons=" + m_K;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String maxKTipText() {
    return "The maximum number of alterations to the perceptron.";
  }

  /**
   * Get the value of maxK.
   *
   * @return Value of maxK.
   */
  public int getMaxK() {

    return m_MaxK;
  }

  /**
   * Set the value of maxK.
   *
   * @param v  Value to assign to maxK.
   */
  public void setMaxK(int v) {

    m_MaxK = v;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String numIterationsTipText() {
    return "Number of iterations to be performed.";
  }

  /**
   * Get the value of NumIterations.
   *
   * @return Value of NumIterations.
   */
  public int getNumIterations() {

    return m_NumIterations;
  }

  /**
   * Set the value of NumIterations.
   *
   * @param v  Value to assign to NumIterations.
   */
  public void setNumIterations(int v) {

    m_NumIterations = v;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String exponentTipText() {
    return "Exponent for the polynomial kernel.";
  }

  /**
   * Get the value of exponent.
   *
   * @return Value of exponent.
   */
  public double getExponent() {

    return m_Exponent;
  }

  /**
   * Set the value of exponent.
   *
   * @param v  Value to assign to exponent.
   */
  public void setExponent(double v) {

    m_Exponent = v;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String seedTipText() {
    return "Seed for the random number generator.";
  }

  /**
   * Get the value of Seed.
   *
   * @return Value of Seed.
   */
  public int getSeed() {

    return m_Seed;
  }

  /**
   * Set the value of Seed.
   *
   * @param v  Value to assign to Seed.
   */
  public void setSeed(int v) {

    m_Seed = v;
  }

  /**
   * Computes the inner product of two instances, exploiting sparsity by
   * walking both index arrays in lockstep; the class attribute is skipped.
   * A bias term of 1.0 is added, and the result is raised to m_Exponent
   * (polynomial kernel) when the exponent differs from 1.
   *
   * @param i1 first instance
   * @param i2 second instance
   * @return the inner product
   * @throws Exception if computation fails
   */
  private double innerProduct(Instance i1, Instance i2) throws Exception {

    // we can do a fast dot product
    double result = 0;
    int n1 = i1.numValues();
    int n2 = i2.numValues();
    int classIndex = m_Train.classIndex();
    for (int p1 = 0, p2 = 0; p1 < n1 && p2 < n2;) {
      int ind1 = i1.index(p1);
      int ind2 = i2.index(p2);
      if (ind1 == ind2) {
	if (ind1 != classIndex) {
	  result += i1.valueSparse(p1) * i2.valueSparse(p2);
	}
	p1++;
	p2++;
      } else if (ind1 > ind2) {
	p2++;
      } else {
	p1++;
      }
    }
    result += 1.0;

    if (m_Exponent != 1) {
      return Math.pow(result, m_Exponent);
    } else {
      return result;
    }
  }

  /**
   * Compute a prediction from a perceptron: the signed sum of kernel
   * evaluations against the first k stored additions/removals.
   *
   * @param k the number of stored alterations to use
   * @param inst the instance to make a prediction for
   * @return the prediction (0 or 1)
   * @throws Exception if computation fails
   */
  private int makePrediction(int k, Instance inst) throws Exception {

    double result = 0;
    for (int i = 0; i < k; i++) {
      if (m_IsAddition[i]) {
	result += innerProduct(m_Train.instance(m_Additions[i]), inst);
      } else {
	result -= innerProduct(m_Train.instance(m_Additions[i]), inst);
      }
    }
    if (result < 0) {
      return 0;
    } else {
      return 1;
    }
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Main method.
   *
   * @param argv the commandline options
   */
  public static void main(String[] argv) {
    runClassifier(new VotedPerceptron(), argv);
  }
}
16,272
25.942053
179
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/Winnow.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Winnow.java * Copyright (C) 2002 J. Lindgren * */ package weka.classifiers.functions; import weka.classifiers.Classifier; import weka.classifiers.UpdateableClassifier; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * Implements Winnow and Balanced Winnow algorithms by Littlestone.<br/> * <br/> * For more information, see<br/> * <br/> * N. Littlestone (1988). Learning quickly when irrelevant attributes are abound: A new linear threshold algorithm. Machine Learning. 2:285-318.<br/> * <br/> * N. Littlestone (1989). Mistake bounds and logarithmic linear-threshold learning algorithms. 
University of California, Santa Cruz.<br/> * <br/> * Does classification for problems with nominal attributes (which it converts into binary attributes). * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;article{Littlestone1988, * author = {N. Littlestone}, * journal = {Machine Learning}, * pages = {285-318}, * title = {Learning quickly when irrelevant attributes are abound: A new linear threshold algorithm}, * volume = {2}, * year = {1988} * } * * &#64;techreport{Littlestone1989, * address = {University of California, Santa Cruz}, * author = {N. Littlestone}, * institution = {University of California}, * note = {Technical Report UCSC-CRL-89-11}, * title = {Mistake bounds and logarithmic linear-threshold learning algorithms}, * year = {1989} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -L * Use the baLanced version * (default false)</pre> * * <pre> -I &lt;int&gt; * The number of iterations to be performed. * (default 1)</pre> * * <pre> -A &lt;double&gt; * Promotion coefficient alpha. * (default 2.0)</pre> * * <pre> -B &lt;double&gt; * Demotion coefficient beta. * (default 0.5)</pre> * * <pre> -H &lt;double&gt; * Prediction threshold. * (default -1.0 == number of attributes)</pre> * * <pre> -W &lt;double&gt; * Starting weights. * (default 2.0)</pre> * * <pre> -S &lt;int&gt; * Default random seed. * (default 1)</pre> * <!-- options-end --> * * @author J. Lindgren (jtlindgr at cs.helsinki.fi) * @version $Revision: 5523 $ */ public class Winnow extends AbstractClassifier implements UpdateableClassifier, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 3543770107994321324L; /** Use the balanced variant? 
**/ protected boolean m_Balanced; /** The number of iterations **/ protected int m_numIterations = 1; /** The promotion coefficient **/ protected double m_Alpha = 2.0; /** The demotion coefficient **/ protected double m_Beta = 0.5; /** Prediction threshold, <0 == numAttributes **/ protected double m_Threshold = -1.0; /** Random seed used for shuffling the dataset, -1 == disable **/ protected int m_Seed = 1; /** Accumulated mistake count (for statistics) **/ protected int m_Mistakes; /** Starting weights for the prediction vector(s) **/ protected double m_defaultWeight = 2.0; /** The weight vector for prediction (pos) */ private double[] m_predPosVector = null; /** The weight vector for prediction (neg) */ private double[] m_predNegVector = null; /** The true threshold used for prediction **/ private double m_actualThreshold; /** The training instances */ private Instances m_Train = null; /** The filter used to make attributes numeric. */ private NominalToBinary m_NominalToBinary; /** The filter used to get rid of missing values. */ private ReplaceMissingValues m_ReplaceMissingValues; /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Implements Winnow and Balanced Winnow algorithms by " + "Littlestone.\n\n" + "For more information, see\n\n" + getTechnicalInformation().toString() + "\n\n" + "Does classification for problems with nominal attributes " + "(which it converts into binary attributes)."; } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "N. 
Littlestone"); result.setValue(Field.YEAR, "1988"); result.setValue(Field.TITLE, "Learning quickly when irrelevant attributes are abound: A new linear threshold algorithm"); result.setValue(Field.JOURNAL, "Machine Learning"); result.setValue(Field.VOLUME, "2"); result.setValue(Field.PAGES, "285-318"); additional = result.add(Type.TECHREPORT); additional.setValue(Field.AUTHOR, "N. Littlestone"); additional.setValue(Field.YEAR, "1989"); additional.setValue(Field.TITLE, "Mistake bounds and logarithmic linear-threshold learning algorithms"); additional.setValue(Field.INSTITUTION, "University of California"); additional.setValue(Field.ADDRESS, "University of California, Santa Cruz"); additional.setValue(Field.NOTE, "Technical Report UCSC-CRL-89-11"); return result; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Vector newVector = new Vector(7); newVector.addElement(new Option("\tUse the baLanced version\n" + "\t(default false)", "L", 0, "-L")); newVector.addElement(new Option("\tThe number of iterations to be performed.\n" + "\t(default 1)", "I", 1, "-I <int>")); newVector.addElement(new Option("\tPromotion coefficient alpha.\n" + "\t(default 2.0)", "A", 1, "-A <double>")); newVector.addElement(new Option("\tDemotion coefficient beta.\n" + "\t(default 0.5)", "B", 1, "-B <double>")); newVector.addElement(new Option("\tPrediction threshold.\n" + "\t(default -1.0 == number of attributes)", "H", 1, "-H <double>")); newVector.addElement(new Option("\tStarting weights.\n" + "\t(default 2.0)", "W", 1, "-W <double>")); newVector.addElement(new Option("\tDefault random seed.\n" + "\t(default 1)", "S", 1, "-S <int>")); return newVector.elements(); } /** * Parses a given list of options.<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -L * Use the baLanced version * (default false)</pre> * * <pre> -I &lt;int&gt; * The number of iterations to be performed. 
* (default 1)</pre> * * <pre> -A &lt;double&gt; * Promotion coefficient alpha. * (default 2.0)</pre> * * <pre> -B &lt;double&gt; * Demotion coefficient beta. * (default 0.5)</pre> * * <pre> -H &lt;double&gt; * Prediction threshold. * (default -1.0 == number of attributes)</pre> * * <pre> -W &lt;double&gt; * Starting weights. * (default 2.0)</pre> * * <pre> -S &lt;int&gt; * Default random seed. * (default 1)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { m_Balanced = Utils.getFlag('L', options); String iterationsString = Utils.getOption('I', options); if (iterationsString.length() != 0) { m_numIterations = Integer.parseInt(iterationsString); } String alphaString = Utils.getOption('A', options); if (alphaString.length() != 0) { m_Alpha = (new Double(alphaString)).doubleValue(); } String betaString = Utils.getOption('B', options); if (betaString.length() != 0) { m_Beta = (new Double(betaString)).doubleValue(); } String tString = Utils.getOption('H', options); if (tString.length() != 0) { m_Threshold = (new Double(tString)).doubleValue(); } String wString = Utils.getOption('W', options); if (wString.length() != 0) { m_defaultWeight = (new Double(wString)).doubleValue(); } String rString = Utils.getOption('S', options); if (rString.length() != 0) { m_Seed = Integer.parseInt(rString); } } /** * Gets the current settings of the classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { String[] options = new String [20]; int current = 0; if(m_Balanced) { options[current++] = "-L"; } options[current++] = "-I"; options[current++] = "" + m_numIterations; options[current++] = "-A"; options[current++] = "" + m_Alpha; options[current++] = "-B"; options[current++] = "" + m_Beta; options[current++] = "-H"; options[current++] = "" + m_Threshold; options[current++] = "-W"; options[current++] = "" + m_defaultWeight; options[current++] = "-S"; options[current++] = "" + m_Seed; while (current < options.length) { options[current++] = ""; } return options; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.BINARY_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Builds the classifier * * @param insts the data to train the classifier with * @throws Exception if something goes wrong during building */ public void buildClassifier(Instances insts) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(insts); // remove instances with missing class insts = new Instances(insts); insts.deleteWithMissingClass(); // Filter data m_Train = new Instances(insts); m_ReplaceMissingValues = new ReplaceMissingValues(); m_ReplaceMissingValues.setInputFormat(m_Train); m_Train = Filter.useFilter(m_Train, m_ReplaceMissingValues); m_NominalToBinary = new NominalToBinary(); m_NominalToBinary.setInputFormat(m_Train); m_Train = Filter.useFilter(m_Train, m_NominalToBinary); /** Randomize training data */ if(m_Seed != -1) { m_Train.randomize(new Random(m_Seed)); } /** Make space to store weights */ m_predPosVector = new double[m_Train.numAttributes()]; if(m_Balanced) { m_predNegVector = new double[m_Train.numAttributes()]; } /** Initialize the weights to starting values **/ for(int i = 0; i < m_Train.numAttributes(); i++) m_predPosVector[i] = m_defaultWeight; if(m_Balanced) { for(int i = 0; i < m_Train.numAttributes(); i++) { m_predNegVector[i] = m_defaultWeight; } } /** Set actual prediction threshold **/ if(m_Threshold<0) { m_actualThreshold = (double)m_Train.numAttributes()-1; } else { m_actualThreshold = m_Threshold; } m_Mistakes=0; /** Compute the weight vectors **/ if(m_Balanced) { for (int it = 0; it < m_numIterations; it++) { for (int i = 0; i < m_Train.numInstances(); i++) { actualUpdateClassifierBalanced(m_Train.instance(i)); } } } else { for (int it = 0; it < m_numIterations; it++) { for (int i = 0; i < m_Train.numInstances(); i++) { actualUpdateClassifier(m_Train.instance(i)); } } } } /** * Updates the classifier with a new learning example * * @param instance the instance to update the classifier with * @throws Exception if something goes wrong */ public void updateClassifier(Instance instance) throws Exception { m_ReplaceMissingValues.input(instance); m_ReplaceMissingValues.batchFinished(); Instance filtered = m_ReplaceMissingValues.output(); m_NominalToBinary.input(filtered); m_NominalToBinary.batchFinished(); filtered = 
m_NominalToBinary.output(); if(m_Balanced) { actualUpdateClassifierBalanced(filtered); } else { actualUpdateClassifier(filtered); } } /** * Actual update routine for prefiltered instances * * @param inst the instance to update the classifier with * @throws Exception if something goes wrong */ private void actualUpdateClassifier(Instance inst) throws Exception { double posmultiplier; if (!inst.classIsMissing()) { double prediction = makePrediction(inst); if (prediction != inst.classValue()) { m_Mistakes++; if(prediction == 0) { /* false neg: promote */ posmultiplier=m_Alpha; } else { /* false pos: demote */ posmultiplier=m_Beta; } int n1 = inst.numValues(); int classIndex = m_Train.classIndex(); for(int l = 0 ; l < n1 ; l++) { if(inst.index(l) != classIndex && inst.valueSparse(l)==1) { m_predPosVector[inst.index(l)]*=posmultiplier; } } //Utils.normalize(m_predPosVector); } } else { System.out.println("CLASS MISSING"); } } /** * Actual update routine (balanced) for prefiltered instances * * @param inst the instance to update the classifier with * @throws Exception if something goes wrong */ private void actualUpdateClassifierBalanced(Instance inst) throws Exception { double posmultiplier,negmultiplier; if (!inst.classIsMissing()) { double prediction = makePredictionBalanced(inst); if (prediction != inst.classValue()) { m_Mistakes++; if(prediction == 0) { /* false neg: promote positive, demote negative*/ posmultiplier=m_Alpha; negmultiplier=m_Beta; } else { /* false pos: demote positive, promote negative */ posmultiplier=m_Beta; negmultiplier=m_Alpha; } int n1 = inst.numValues(); int classIndex = m_Train.classIndex(); for(int l = 0 ; l < n1 ; l++) { if(inst.index(l) != classIndex && inst.valueSparse(l)==1) { m_predPosVector[inst.index(l)]*=posmultiplier; m_predNegVector[inst.index(l)]*=negmultiplier; } } //Utils.normalize(m_predPosVector); //Utils.normalize(m_predNegVector); } } else { System.out.println("CLASS MISSING"); } } /** * Outputs the prediction for the given 
instance. * * @param inst the instance for which prediction is to be computed * @return the prediction * @throws Exception if something goes wrong */ public double classifyInstance(Instance inst) throws Exception { m_ReplaceMissingValues.input(inst); m_ReplaceMissingValues.batchFinished(); Instance filtered = m_ReplaceMissingValues.output(); m_NominalToBinary.input(filtered); m_NominalToBinary.batchFinished(); filtered = m_NominalToBinary.output(); if(m_Balanced) { return(makePredictionBalanced(filtered)); } else { return(makePrediction(filtered)); } } /** * Compute the actual prediction for prefiltered instance * * @param inst the instance for which prediction is to be computed * @return the prediction * @throws Exception if something goes wrong */ private double makePrediction(Instance inst) throws Exception { double total = 0; int n1 = inst.numValues(); int classIndex = m_Train.classIndex(); for(int i=0;i<n1;i++) { if(inst.index(i) != classIndex && inst.valueSparse(i)==1) { total+=m_predPosVector[inst.index(i)]; } } if(total > m_actualThreshold) { return(1); } else { return(0); } } /** * Compute our prediction (Balanced) for prefiltered instance * * @param inst the instance for which prediction is to be computed * @return the prediction * @throws Exception if something goes wrong */ private double makePredictionBalanced(Instance inst) throws Exception { double total=0; int n1 = inst.numValues(); int classIndex = m_Train.classIndex(); for(int i=0;i<n1;i++) { if(inst.index(i) != classIndex && inst.valueSparse(i)==1) { total+=(m_predPosVector[inst.index(i)]-m_predNegVector[inst.index(i)]); } } if(total > m_actualThreshold) { return(1); } else { return(0); } } /** * Returns textual description of the classifier. 
* * @return textual description of the classifier */ public String toString() { if(m_predPosVector==null) return("Winnow: No model built yet."); String result = "Winnow\n\nAttribute weights\n\n"; int classIndex = m_Train.classIndex(); if(!m_Balanced) { for( int i = 0 ; i < m_Train.numAttributes(); i++) { if(i!=classIndex) result += "w" + i + " " + m_predPosVector[i] + "\n"; } } else { for( int i = 0 ; i < m_Train.numAttributes(); i++) { if(i!=classIndex) { result += "w" + i + " p " + m_predPosVector[i]; result += " n " + m_predNegVector[i]; double wdiff=m_predPosVector[i]-m_predNegVector[i]; result += " d " + wdiff + "\n"; } } } result += "\nCumulated mistake count: " + m_Mistakes + "\n\n"; return(result); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String balancedTipText() { return "Whether to use the balanced version of the algorithm."; } /** * Get the value of Balanced. * * @return Value of Balanced. */ public boolean getBalanced() { return m_Balanced; } /** * Set the value of Balanced. * * @param b Value to assign to Balanced. */ public void setBalanced(boolean b) { m_Balanced = b; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String alphaTipText() { return "Promotion coefficient alpha."; } /** * Get the value of Alpha. * * @return Value of Alpha. */ public double getAlpha() { return(m_Alpha); } /** * Set the value of Alpha. * * @param a Value to assign to Alpha. */ public void setAlpha(double a) { m_Alpha = a; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String betaTipText() { return "Demotion coefficient beta."; } /** * Get the value of Beta. * * @return Value of Beta. 
*/ public double getBeta() { return(m_Beta); } /** * Set the value of Beta. * * @param b Value to assign to Beta. */ public void setBeta(double b) { m_Beta = b; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String thresholdTipText() { return "Prediction threshold (-1 means: set to number of attributes)."; } /** * Get the value of Threshold. * * @return Value of Threshold. */ public double getThreshold() { return m_Threshold; } /** * Set the value of Threshold. * * @param t Value to assign to Threshold. */ public void setThreshold(double t) { m_Threshold = t; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String defaultWeightTipText() { return "Initial value of weights/coefficients."; } /** * Get the value of defaultWeight. * * @return Value of defaultWeight. */ public double getDefaultWeight() { return m_defaultWeight; } /** * Set the value of defaultWeight. * * @param w Value to assign to defaultWeight. */ public void setDefaultWeight(double w) { m_defaultWeight = w; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numIterationsTipText() { return "The number of iterations to be performed."; } /** * Get the value of numIterations. * * @return Value of numIterations. */ public int getNumIterations() { return m_numIterations; } /** * Set the value of numIterations. * * @param v Value to assign to numIterations. 
*/ public void setNumIterations(int v) { m_numIterations = v; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String seedTipText() { return "Random number seed used for data shuffling (-1 means no " + "randomization)."; } /** * Get the value of Seed. * * @return Value of Seed. */ public int getSeed() { return m_Seed; } /** * Set the value of Seed. * * @param v Value to assign to Seed. */ public void setSeed(int v) { m_Seed = v; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5523 $"); } /** * Main method. * * @param argv the commandline options */ public static void main(String[] argv) { runClassifier(new Winnow(), argv); } }
23,407
25.936709
149
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/neural/LinearUnit.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    LinearUnit.java
 *    Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers.functions.neural;

import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

/**
 * This can be used by the
 * neuralnode to perform all it's computations (as a Linear unit).
 *
 * @author Malcolm Ware (mfw4@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public class LinearUnit
  implements NeuralMethod, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 8572152807755673630L;

  /**
   * This function calculates what the output value should be.
   * For a linear unit this is simply the bias (weight 0) plus the
   * weighted sum of the input connections' outputs.
   *
   * @param node The node to calculate the value for.
   * @return The value.
   */
  public double outputValue(NeuralNode node) {

    double[] w = node.getWeights();
    NeuralConnection[] ins = node.getInputs();
    int numInputs = node.getNumInputs();

    // w[0] is the bias; input i pairs with weight w[i + 1]
    double sum = w[0];
    for (int i = 0; i < numInputs; i++) {
      sum += ins[i].outputValue(true) * w[i + 1];
    }
    return sum;
  }

  /**
   * This function calculates what the error value should be: the sum,
   * over all outgoing connections, of the downstream error scaled by
   * the weight on that connection.
   *
   * @param node The node to calculate the error for.
   * @return The error.
   */
  public double errorValue(NeuralNode node) {

    NeuralConnection[] outs = node.getOutputs();
    int[] outNums = node.getOutputNums();
    int numOutputs = node.getNumOutputs();

    double err = 0;
    for (int i = 0; i < numOutputs; i++) {
      err += outs[i].errorValue(true) * outs[i].weightValue(outNums[i]);
    }
    return err;
  }

  /**
   * This function will calculate what the change in weights should be
   * and also update them, using gradient descent with momentum:
   * delta = learn * error * input + momentum * previousDelta.
   *
   * @param node The node to update the weights for.
   * @param learn The learning rate to use.
   * @param momentum The momentum to use.
   */
  public void updateWeights(NeuralNode node, double learn, double momentum) {

    NeuralConnection[] ins = node.getInputs();
    double[] deltas = node.getChangeInWeights();
    double[] w = node.getWeights();

    // error term is shared by every weight on this node
    double scaledError = learn * node.errorValue(false);

    // bias weight first (its "input" is implicitly 1)
    double delta = scaledError + momentum * deltas[0];
    w[0] += delta;
    deltas[0] = delta;

    // then one weight per input connection
    int last = node.getNumInputs() + 1;
    for (int i = 1; i < last; i++) {
      delta = scaledError * ins[i - 1].outputValue(false)
	+ momentum * deltas[i];
      w[i] += delta;
      deltas[i] = delta;
    }
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
3,374
28.094828
77
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/neural/NeuralConnection.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * NeuralConnection.java
 * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers.functions.neural;

import java.awt.Color;
import java.awt.Graphics;
import java.io.Serializable;

import weka.core.RevisionHandler;

/**
 * Abstract unit in a NeuralNetwork. Manages the wiring between units
 * (parallel input/output arrays plus the connection numbers used at the
 * far end of each line), the cached output/error values, and the
 * display coordinates used by the network GUI.
 *
 * @author Malcolm Ware (mfw4@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public abstract class NeuralConnection implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -286208828571059163L;

  // bitwise flags for the types of unit.

  /** This unit is not connected to any others. */
  public static final int UNCONNECTED = 0;

  /** This unit is a pure input unit. */
  public static final int PURE_INPUT = 1;

  /** This unit is a pure output unit. */
  public static final int PURE_OUTPUT = 2;

  /** This unit is an input unit. */
  public static final int INPUT = 4;

  /** This unit is an output unit. */
  public static final int OUTPUT = 8;

  /** This flag is set once the unit has a connection. */
  public static final int CONNECTED = 16;

  ///// The difference between pure and not is that pure is used to feed
  ///// the neural network the attribute values and the errors on the outputs.
  ///// Beyond that they do no calculations, and have certain restrictions
  ///// on the connections they can make.

  /** The list of inputs to this unit. */
  protected NeuralConnection[] m_inputList;

  /** The list of outputs from this unit. */
  protected NeuralConnection[] m_outputList;

  /** The numbering for the connections at the other end of the input lines. */
  protected int[] m_inputNums;

  /** The numbering for the connections at the other end of the out lines. */
  protected int[] m_outputNums;

  /** The number of inputs. */
  protected int m_numInputs;

  /** The number of outputs. */
  protected int m_numOutputs;

  /** The output value for this unit, NaN if not calculated. */
  protected double m_unitValue;

  /** The error value for this unit, NaN if not calculated. */
  protected double m_unitError;

  /** True if the weights have already been updated. */
  protected boolean m_weightsUpdated;

  /** The string that uniquely (provided naming is done properly) identifies
   * this unit. */
  protected String m_id;

  /** The type of unit this is (bitwise OR of the flags above). */
  protected int m_type;

  /** The x coord of this unit purely for displaying purposes. */
  protected double m_x;

  /** The y coord of this unit purely for displaying purposes. */
  protected double m_y;

  /**
   * Constructs the unit with the basic connection information prepared for
   * use: empty connection arrays, NaN cached values, and type UNCONNECTED.
   *
   * @param id the unique id of the unit
   */
  public NeuralConnection(String id) {
    m_id = id;
    m_inputList = new NeuralConnection[0];
    m_outputList = new NeuralConnection[0];
    m_inputNums = new int[0];
    m_outputNums = new int[0];
    m_numInputs = 0;
    m_numOutputs = 0;
    m_unitValue = Double.NaN;
    m_unitError = Double.NaN;
    m_weightsUpdated = false;
    m_x = 0;
    m_y = 0;
    m_type = UNCONNECTED;
  }

  /**
   * @return The identity string of this unit.
   */
  public String getId() {
    return m_id;
  }

  /**
   * @return The type of this unit (bitwise flags).
   */
  public int getType() {
    return m_type;
  }

  /**
   * @param t The new type of this unit.
   */
  public void setType(int t) {
    m_type = t;
  }

  /**
   * Call this to reset the unit for another run.
   * It is expected that this unit will call the reset functions of all
   * input units to it. It is also expected that this will not be done
   * if the unit has already been reset (or at least appears to be).
   */
  public abstract void reset();

  /**
   * Call this to get the output value of this unit.
   * @param calculate True if the value should be calculated if it hasn't been
   * already.
   * @return The output value, or NaN, if the value has not been calculated.
   */
  public abstract double outputValue(boolean calculate);

  /**
   * Call this to get the error value of this unit.
   * @param calculate True if the value should be calculated if it hasn't been
   * already.
   * @return The error value, or NaN, if the value has not been calculated.
   */
  public abstract double errorValue(boolean calculate);

  /**
   * Call this to have the connection save the current
   * weights.
   */
  public abstract void saveWeights();

  /**
   * Call this to have the connection restore from the saved
   * weights.
   */
  public abstract void restoreWeights();

  /**
   * Call this to get the weight value on a particular connection.
   * @param n The connection number to get the weight for, -1 if the threshold
   * weight should be returned.
   * @return This function will default to return 1. If overridden, it should
   * return the value for the specified connection or if -1 then it should
   * return the threshold value. If no value exists for the specified
   * connection, NaN will be returned.
   */
  public double weightValue(int n) {
    return 1;
  }

  /**
   * Call this function to update the weight values at this unit.
   * After the weights have been updated at this unit, all the
   * input connections will then be called from this to have their
   * weights updated.
   * @param l The learning Rate to use.
   * @param m The momentum to use.
   */
  public void updateWeights(double l, double m) {
    // the action the subclasses should perform is up to them
    // but if they override they should make a call to this to
    // call the method for all their inputs.
    // m_weightsUpdated guards against updating the same unit twice
    // when it feeds multiple downstream units.
    if (!m_weightsUpdated) {
      for (int noa = 0; noa < m_numInputs; noa++) {
        m_inputList[noa].updateWeights(l, m);
      }
      m_weightsUpdated = true;
    }
  }

  /**
   * Use this to get easy access to the inputs.
   * It is not advised to change the entries in this list
   * (use the connecting and disconnecting functions to do that)
   * @return The inputs list.
   */
  public NeuralConnection[] getInputs() {
    return m_inputList;
  }

  /**
   * Use this to get easy access to the outputs.
   * It is not advised to change the entries in this list
   * (use the connecting and disconnecting functions to do that)
   * @return The outputs list.
   */
  public NeuralConnection[] getOutputs() {
    return m_outputList;
  }

  /**
   * Use this to get easy access to the input numbers.
   * It is not advised to change the entries in this list
   * (use the connecting and disconnecting functions to do that)
   * @return The input nums list.
   */
  public int[] getInputNums() {
    return m_inputNums;
  }

  /**
   * Use this to get easy access to the output numbers.
   * It is not advised to change the entries in this list
   * (use the connecting and disconnecting functions to do that)
   * @return The outputs list.
   */
  public int[] getOutputNums() {
    return m_outputNums;
  }

  /**
   * @return the x coord.
   */
  public double getX() {
    return m_x;
  }

  /**
   * @return the y coord.
   */
  public double getY() {
    return m_y;
  }

  /**
   * @param x The new value for it's x pos.
   */
  public void setX(double x) {
    m_x = x;
  }

  /**
   * @param y The new value for it's y pos.
   */
  public void setY(double y) {
    m_y = y;
  }

  /**
   * Call this function to determine if the point at x,y is on the unit.
   * The unit is treated as a 21x21 pixel box centred on its scaled coords.
   * @param g The graphics context for font size info.
   * @param x The x coord.
   * @param y The y coord.
   * @param w The width of the display.
   * @param h The height of the display.
   * @return True if the point is on the unit, false otherwise.
   */
  public boolean onUnit(Graphics g, int x, int y, int w, int h) {
    int m = (int)(m_x * w);
    int c = (int)(m_y * h);
    if (x > m + 10 || x < m - 10 || y > c + 10 || y < c - 10) {
      return false;
    }
    return true;
  }

  /**
   * Call this function to draw the node.
   * Output units are drawn orange, all others red, with a gray centre.
   * @param g The graphics context.
   * @param w The width of the drawing area.
   * @param h The height of the drawing area.
   */
  public void drawNode(Graphics g, int w, int h) {
    if ((m_type & OUTPUT) == OUTPUT) {
      g.setColor(Color.orange);
    }
    else {
      g.setColor(Color.red);
    }
    g.fillOval((int)(m_x * w) - 9, (int)(m_y * h) - 9, 19, 19);
    g.setColor(Color.gray);
    g.fillOval((int)(m_x * w) - 5, (int)(m_y * h) - 5, 11, 11);
  }

  /**
   * Call this function to draw the node highlighted.
   * @param g The graphics context.
   * @param w The width of the drawing area.
   * @param h The height of the drawing area.
   */
  public void drawHighlight(Graphics g, int w, int h) {
    drawNode(g, w, h);
    g.setColor(Color.yellow);
    g.fillOval((int)(m_x * w) - 5, (int)(m_y * h) - 5, 11, 11);
  }

  /**
   * Call this function to draw the nodes input connections.
   * @param g The graphics context.
   * @param w The width of the drawing area.
   * @param h The height of the drawing area.
   */
  public void drawInputLines(Graphics g, int w, int h) {
    g.setColor(Color.black);

    int px = (int)(m_x * w);
    int py = (int)(m_y * h);
    for (int noa = 0; noa < m_numInputs; noa++) {
      g.drawLine((int)(m_inputList[noa].getX() * w)
                 , (int)(m_inputList[noa].getY() * h)
                 , px, py);
    }
  }

  /**
   * Call this function to draw the nodes output connections.
   * @param g The graphics context.
   * @param w The width of the drawing area.
   * @param h The height of the drawing area.
   */
  public void drawOutputLines(Graphics g, int w, int h) {
    g.setColor(Color.black);

    int px = (int)(m_x * w);
    int py = (int)(m_y * h);
    for (int noa = 0; noa < m_numOutputs; noa++) {
      g.drawLine(px, py
                 , (int)(m_outputList[noa].getX() * w)
                 , (int)(m_outputList[noa].getY() * h));
    }
  }

  /**
   * This will connect the specified unit to be an input to this unit.
   * Rejects duplicate connections to the same unit; grows the arrays
   * on demand.
   * @param i The unit.
   * @param n It's connection number for this connection.
   * @return True if the connection was made, false otherwise.
   */
  protected boolean connectInput(NeuralConnection i, int n) {
    for (int noa = 0; noa < m_numInputs; noa++) {
      if (i == m_inputList[noa]) {
        return false;
      }
    }

    if (m_numInputs >= m_inputList.length) {
      // then allocate more space to it.
      allocateInputs();
    }
    m_inputList[m_numInputs] = i;
    m_inputNums[m_numInputs] = n;
    m_numInputs++;
    return true;
  }

  /**
   * This will allocate more space for input connection information
   * if the arrays for this have been filled up. Grows by 15 slots at a time.
   */
  protected void allocateInputs() {
    NeuralConnection[] temp1 = new NeuralConnection[m_inputList.length + 15];
    int[] temp2 = new int[m_inputNums.length + 15];

    for (int noa = 0; noa < m_numInputs; noa++) {
      temp1[noa] = m_inputList[noa];
      temp2[noa] = m_inputNums[noa];
    }
    m_inputList = temp1;
    m_inputNums = temp2;
  }

  /**
   * This will connect the specified unit to be an output to this unit.
   * Rejects duplicate connections to the same unit; grows the arrays
   * on demand.
   * @param o The unit.
   * @param n It's connection number for this connection.
   * @return True if the connection was made, false otherwise.
   */
  protected boolean connectOutput(NeuralConnection o, int n) {
    for (int noa = 0; noa < m_numOutputs; noa++) {
      if (o == m_outputList[noa]) {
        return false;
      }
    }

    if (m_numOutputs >= m_outputList.length) {
      // then allocate more space to it.
      allocateOutputs();
    }
    m_outputList[m_numOutputs] = o;
    m_outputNums[m_numOutputs] = n;
    m_numOutputs++;
    return true;
  }

  /**
   * Allocates more space for output connection information
   * if the arrays have been filled up. Grows by 15 slots at a time.
   */
  protected void allocateOutputs() {
    NeuralConnection[] temp1 = new NeuralConnection[m_outputList.length + 15];
    int[] temp2 = new int[m_outputNums.length + 15];

    for (int noa = 0; noa < m_numOutputs; noa++) {
      temp1[noa] = m_outputList[noa];
      temp2[noa] = m_outputNums[noa];
    }
    m_outputList = temp1;
    m_outputNums = temp2;
  }

  /**
   * This will disconnect the input with the specific connection number
   * From this node (only on this end however).
   * Compacts the arrays and renumbers the far end of every shifted line.
   * @param i The unit to disconnect.
   * @param n The connection number at the other end, -1 if all the connections
   * to this unit should be severed.
   * @return True if the connection was removed, false if the connection was
   * not found.
   */
  protected boolean disconnectInput(NeuralConnection i, int n) {
    int loc = -1;
    boolean removed = false;
    do {
      loc = -1;
      for (int noa = 0; noa < m_numInputs; noa++) {
        if (i == m_inputList[noa] && (n == -1 || n == m_inputNums[noa])) {
          loc = noa;
          break;
        }
      }

      if (loc >= 0) {
        for (int noa = loc+1; noa < m_numInputs; noa++) {
          m_inputList[noa-1] = m_inputList[noa];
          m_inputNums[noa-1] = m_inputNums[noa];
          // set the other end to have the right connection number.
          m_inputList[noa-1].changeOutputNum(m_inputNums[noa-1], noa-1);
        }
        m_numInputs--;
        removed = true;
      }
    } while (n == -1 && loc != -1);  // with n == -1, keep going until no match remains

    return removed;
  }

  /**
   * This function will remove all the inputs to this unit.
   * In doing so it will also terminate the connections at the other end.
   */
  public void removeAllInputs() {
    for (int noa = 0; noa < m_numInputs; noa++) {
      // this command will simply remove any connections this node has
      // with the other in 1 go, rather than separately.
      m_inputList[noa].disconnectOutput(this, -1);
    }

    // now reset the inputs.
    m_inputList = new NeuralConnection[0];
    setType(getType() & (~INPUT));
    if (getNumOutputs() == 0) {
      setType(getType() & (~CONNECTED));
    }
    m_inputNums = new int[0];
    m_numInputs = 0;
  }

  /**
   * Changes the connection value information for one of the connections.
   * @param n The connection number to change.
   * @param v The value to change it to.
   */
  protected void changeInputNum(int n, int v) {
    if (n >= m_numInputs || n < 0) {
      return;
    }

    m_inputNums[n] = v;
  }

  /**
   * This will disconnect the output with the specific connection number
   * From this node (only on this end however).
   * Compacts the arrays and renumbers the far end of every shifted line.
   * @param o The unit to disconnect.
   * @param n The connection number at the other end, -1 if all the connections
   * to this unit should be severed.
   * @return True if the connection was removed, false if the connection was
   * not found.
   */
  protected boolean disconnectOutput(NeuralConnection o, int n) {
    int loc = -1;
    boolean removed = false;
    do {
      loc = -1;
      for (int noa = 0; noa < m_numOutputs; noa++) {
        if (o == m_outputList[noa] && (n == -1 || n == m_outputNums[noa])) {
          loc =noa;
          break;
        }
      }

      if (loc >= 0) {
        for (int noa = loc+1; noa < m_numOutputs; noa++) {
          m_outputList[noa-1] = m_outputList[noa];
          m_outputNums[noa-1] = m_outputNums[noa];

          // set the other end to have the right connection number
          m_outputList[noa-1].changeInputNum(m_outputNums[noa-1], noa-1);
        }
        m_numOutputs--;
        removed = true;
      }
    } while (n == -1 && loc != -1);  // with n == -1, keep going until no match remains

    return removed;
  }

  /**
   * This function will remove all outputs to this unit.
   * In doing so it will also terminate the connections at the other end.
   */
  public void removeAllOutputs() {
    for (int noa = 0; noa < m_numOutputs; noa++) {
      // this command will simply remove any connections this node has
      // with the other in 1 go, rather than separately.
      m_outputList[noa].disconnectInput(this, -1);
    }

    // now reset the inputs.
    m_outputList = new NeuralConnection[0];
    m_outputNums = new int[0];
    setType(getType() & (~OUTPUT));
    if (getNumInputs() == 0) {
      setType(getType() & (~CONNECTED));
    }
    m_numOutputs = 0;
  }

  /**
   * Changes the connection value information for one of the connections.
   * @param n The connection number to change.
   * @param v The value to change it to.
   */
  protected void changeOutputNum(int n, int v) {
    if (n >= m_numOutputs || n < 0) {
      return;
    }

    m_outputNums[n] = v;
  }

  /**
   * @return The number of input connections.
   */
  public int getNumInputs() {
    return m_numInputs;
  }

  /**
   * @return The number of output connections.
   */
  public int getNumOutputs() {
    return m_numOutputs;
  }

  /**
   * Connects two units together. Enforces the structural rules for
   * pure input/output units and keeps the type flags of both ends in sync.
   * @param s The source unit.
   * @param t The target unit.
   * @return True if the units were connected, false otherwise.
   */
  public static boolean connect(NeuralConnection s, NeuralConnection t) {
    if (s == null || t == null) {
      return false;
    }
    // this ensures that there is no existing connection between these
    // two units already. This will also cause the current weight there to be
    // lost
    disconnect(s, t);
    if (s == t) {
      return false;
    }
    if ((t.getType() & PURE_INPUT) == PURE_INPUT) {
      return false; // target is an input node.
    }
    if ((s.getType() & PURE_OUTPUT) == PURE_OUTPUT) {
      return false; // source is an output node
    }
    if ((s.getType() & PURE_INPUT) == PURE_INPUT
        && (t.getType() & PURE_OUTPUT) == PURE_OUTPUT) {
      return false; // there is no actual working node in use
    }
    if ((t.getType() & PURE_OUTPUT) == PURE_OUTPUT && t.getNumInputs() > 0) {
      return false; // more than 1 node is trying to feed a particular output
    }
    if ((t.getType() & PURE_OUTPUT) == PURE_OUTPUT
        && (s.getType() & OUTPUT) == OUTPUT) {
      return false; // an output node already feeding out a final answer
    }

    if (!s.connectOutput(t, t.getNumInputs())) {
      return false;
    }
    if (!t.connectInput(s, s.getNumOutputs() - 1)) {
      // roll back the half-made connection on the source end.
      s.disconnectOutput(t, t.getNumInputs());
      return false;
    }

    // now amend the type.
    if ((s.getType() & PURE_INPUT) == PURE_INPUT) {
      t.setType(t.getType() | INPUT);
    }
    else if ((t.getType() & PURE_OUTPUT) == PURE_OUTPUT) {
      s.setType(s.getType() | OUTPUT);
    }
    t.setType(t.getType() | CONNECTED);
    s.setType(s.getType() | CONNECTED);
    return true;
  }

  /**
   * Disconnects two units, clearing the INPUT/OUTPUT/CONNECTED flags on
   * either end when they no longer apply.
   * @param s The source unit.
   * @param t The target unit.
   * @return True if the units were disconnected, false if they weren't
   * (probably due to there being no connection).
   */
  public static boolean disconnect(NeuralConnection s, NeuralConnection t) {
    if (s == null || t == null) {
      return false;
    }
    boolean stat1 = s.disconnectOutput(t, -1);
    boolean stat2 = t.disconnectInput(s, -1);
    if (stat1 && stat2) {
      if ((s.getType() & PURE_INPUT) == PURE_INPUT) {
        t.setType(t.getType() & (~INPUT));
      }
      else if ((t.getType() & (PURE_OUTPUT)) == PURE_OUTPUT) {
        s.setType(s.getType() & (~OUTPUT));
      }
      if (s.getNumInputs() == 0 && s.getNumOutputs() == 0) {
        s.setType(s.getType() & (~CONNECTED));
      }
      if (t.getNumInputs() == 0 && t.getNumOutputs() == 0) {
        t.setType(t.getType() & (~CONNECTED));
      }
    }
    return stat1 && stat2;
  }
}
19,966
25.801342
79
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/neural/NeuralMethod.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * NeuralMethod.java
 * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers.functions.neural;

import java.io.Serializable;

/**
 * This is an interface used to create classes that can be used by the
 * neuralnode to perform all it's computations: the forward (output),
 * backward (error) and weight-update steps.
 *
 * @author Malcolm Ware (mfw4@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public interface NeuralMethod extends Serializable {

  /**
   * This function calculates what the output value should be.
   * @param node The node to calculate the value for.
   * @return The value.
   */
  double outputValue(NeuralNode node);

  /**
   * This function calculates what the error value should be.
   * @param node The node to calculate the error for.
   * @return The error.
   */
  double errorValue(NeuralNode node);

  /**
   * This function will calculate what the change in weights should be
   * and also update them.
   * @param node The node to update the weights for.
   * @param learn The learning rate to use.
   * @param momentum The momentum to use.
   */
  void updateWeights(NeuralNode node, double learn, double momentum);
}
1,827
29.983051
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/neural/NeuralNode.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * NeuralNode.java
 * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers.functions.neural;

import weka.core.RevisionUtils;

import java.util.Random;

/**
 * This class is used to represent a node in the neuralnet.
 * The weights arrays are 1 longer than the input list: slot 0 holds the
 * threshold (bias) weight, slot i+1 the weight for input connection i.
 * The actual computations are delegated to the pluggable NeuralMethod.
 *
 * @author Malcolm Ware (mfw4@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public class NeuralNode extends NeuralConnection {

  /** for serialization */
  private static final long serialVersionUID = -1085750607680839163L;

  /** The weights for each of the input connections, and the threshold. */
  private double[] m_weights;

  /** The best (lowest error) weights. Only used when validation set is used */
  private double[] m_bestWeights;

  /** The change in the weights (used for the momentum term). */
  private double[] m_changeInWeights;

  /** Source of the small random initial weights. */
  private Random m_random;

  /** Performs the operations for this node. Currently this
   * defines that the node is either a sigmoid or a linear unit. */
  private NeuralMethod m_methods;

  /**
   * Constructs the node with a single threshold weight initialised to a
   * small random value in [-0.05, 0.05).
   *
   * @param id The string name for this node (used to id this node).
   * @param r A random number generator used to generate initial weights.
   * @param m The methods this node should use to update.
   */
  public NeuralNode(String id, Random r, NeuralMethod m) {
    super(id);
    m_weights = new double[1];
    m_bestWeights = new double[1];
    m_changeInWeights = new double[1];

    m_random = r;

    m_weights[0] = m_random.nextDouble() * .1 - .05;
    m_changeInWeights[0] = 0;

    m_methods = m;
  }

  /**
   * Set how this node should operate (note that the neural method has no
   * internal state, so the same object can be used by any number of nodes.
   * @param m The new method.
   */
  public void setMethod(NeuralMethod m) {
    m_methods = m;
  }

  /**
   * @return The method this node uses for its computations.
   */
  public NeuralMethod getMethod() {
    return m_methods;
  }

  /**
   * Call this to get the output value of this unit.
   * The value is cached in m_unitValue until reset() is called.
   * @param calculate True if the value should be calculated if it hasn't been
   * already.
   * @return The output value, or NaN, if the value has not been calculated.
   */
  public double outputValue(boolean calculate) {

    if (Double.isNaN(m_unitValue) && calculate) {
      // then calculate the output value;
      m_unitValue = m_methods.outputValue(this);
    }

    return m_unitValue;
  }

  /**
   * Call this to get the error value of this unit.
   * Only computed once the output value exists; cached until reset().
   * @param calculate True if the value should be calculated if it hasn't been
   * already.
   * @return The error value, or NaN, if the value has not been calculated.
   */
  public double errorValue(boolean calculate) {

    if (!Double.isNaN(m_unitValue) && Double.isNaN(m_unitError) && calculate) {
      // then calculate the error.
      m_unitError = m_methods.errorValue(this);
    }
    return m_unitError;
  }

  /**
   * Call this to reset the value and error for this unit, ready for the next
   * run. This will also call the reset function of all units that are
   * connected as inputs to this one.
   * This is also the time that the update for the listeners will be performed.
   */
  public void reset() {

    if (!Double.isNaN(m_unitValue) || !Double.isNaN(m_unitError)) {
      m_unitValue = Double.NaN;
      m_unitError = Double.NaN;
      m_weightsUpdated = false;
      for (int noa = 0; noa < m_numInputs; noa++) {
        m_inputList[noa].reset();
      }
    }
  }

  /**
   * Call this to have the connection save the current
   * weights.
   */
  public void saveWeights() {
    // copy the current weights
    System.arraycopy(m_weights, 0, m_bestWeights, 0, m_weights.length);

    // tell inputs to save weights
    for (int i = 0; i < m_numInputs; i++) {
      m_inputList[i].saveWeights();
    }
  }

  /**
   * Call this to have the connection restore from the saved
   * weights.
   */
  public void restoreWeights() {
    // copy the saved best weights back into the weights
    System.arraycopy(m_bestWeights, 0, m_weights, 0, m_weights.length);

    // tell inputs to restore weights
    for (int i = 0; i < m_numInputs; i++) {
      m_inputList[i].restoreWeights();
    }
  }

  /**
   * Call this to get the weight value on a particular connection.
   * @param n The connection number to get the weight for, -1 if The threshold
   * weight should be returned.
   * @return The value for the specified connection or if -1 then it should
   * return the threshold value. If no value exists for the specified
   * connection, NaN will be returned.
   */
  public double weightValue(int n) {
    if (n >= m_numInputs || n < -1) {
      return Double.NaN;
    }
    // weights are offset by 1: slot 0 is the threshold.
    return m_weights[n + 1];
  }

  /**
   * call this function to get the weights array.
   * This will also allow the weights to be updated.
   * @return The weights array.
   */
  public double[] getWeights() {
    return m_weights;
  }

  /**
   * call this function to get the change in weights array.
   * This will also allow the change in weights to be updated.
   * @return The change in weights array.
   */
  public double[] getChangeInWeights() {
    return m_changeInWeights;
  }

  /**
   * Call this function to update the weight values at this unit.
   * After the weights have been updated at this unit, All the
   * input connections will then be called from this to have their
   * weights updated.
   * @param l The learning rate to use.
   * @param m The momentum to use.
   */
  public void updateWeights(double l, double m) {

    if (!m_weightsUpdated && !Double.isNaN(m_unitError)) {
      m_methods.updateWeights(this, l, m);

      // note that the super call to update the inputs is done here and
      // not in the m_method updateWeights, because it is not deemed to be
      // required to update the weights at this node (while the error and output
      // value ao need to be recursively calculated)
      super.updateWeights(l, m); // to call all of the inputs.
    }
  }

  /**
   * This will connect the specified unit to be an input to this unit.
   * Also initialises the new connection's weight to a small random value
   * and its change-in-weight to 0.
   * @param i The unit.
   * @param n It's connection number for this connection.
   * @return True if the connection was made, false otherwise.
   */
  protected boolean connectInput(NeuralConnection i, int n) {

    // the function that this overrides can do most of the work.
    if (!super.connectInput(i, n)) {
      return false;
    }

    // note that the weights are shifted 1 forward in the array so
    // it leaves the numinputs aligned on the space the weight needs to go.
    m_weights[m_numInputs] = m_random.nextDouble() * .1 - .05;
    m_changeInWeights[m_numInputs] = 0;

    return true;
  }

  /**
   * This will allocate more space for input connection information
   * if the arrays for this have been filled up.
   * Grows the connection arrays and all three weight arrays by 15 slots,
   * preserving the threshold weight at index 0.
   */
  protected void allocateInputs() {

    NeuralConnection[] temp1 = new NeuralConnection[m_inputList.length + 15];
    int[] temp2 = new int[m_inputNums.length + 15];
    double[] temp4 = new double[m_weights.length + 15];
    double[] temp5 = new double[m_changeInWeights.length + 15];
    double[] temp6 = new double[m_bestWeights.length + 15];

    temp4[0] = m_weights[0];
    temp5[0] = m_changeInWeights[0];
    temp6[0] = m_bestWeights[0];
    for (int noa = 0; noa < m_numInputs; noa++) {
      temp1[noa] = m_inputList[noa];
      temp2[noa] = m_inputNums[noa];
      temp4[noa+1] = m_weights[noa+1];
      temp5[noa+1] = m_changeInWeights[noa+1];
      temp6[noa+1] = m_bestWeights[noa+1];
    }

    m_inputList = temp1;
    m_inputNums = temp2;
    m_weights = temp4;
    m_changeInWeights = temp5;
    m_bestWeights = temp6;
  }

  /**
   * This will disconnect the input with the specific connection number
   * From this node (only on this end however).
   * NOTE(review): this shifts m_weights and m_changeInWeights but not
   * m_bestWeights, so saved best weights become misaligned if a
   * connection is removed after saveWeights() — confirm whether
   * disconnection can ever happen between save/restore in practice.
   * @param i The unit to disconnect.
   * @param n The connection number at the other end, -1 if all the connections
   * to this unit should be severed (not the same as removeAllInputs).
   * @return True if the connection was removed, false if the connection was
   * not found.
   */
  protected boolean disconnectInput(NeuralConnection i, int n) {

    int loc = -1;
    boolean removed = false;
    do {
      loc = -1;
      for (int noa = 0; noa < m_numInputs; noa++) {
        if (i == m_inputList[noa] && (n == -1 || n == m_inputNums[noa])) {
          loc = noa;
          break;
        }
      }

      if (loc >= 0) {
        for (int noa = loc+1; noa < m_numInputs; noa++) {
          m_inputList[noa-1] = m_inputList[noa];
          m_inputNums[noa-1] = m_inputNums[noa];

          m_weights[noa] = m_weights[noa+1];
          m_changeInWeights[noa] = m_changeInWeights[noa+1];

          // set the other end to have the right connection number.
          m_inputList[noa-1].changeOutputNum(m_inputNums[noa-1], noa-1);
        }
        m_numInputs--;
        removed = true;
      }
    } while (n == -1 && loc != -1);

    return removed;
  }

  /**
   * This function will remove all the inputs to this unit.
   * In doing so it will also terminate the connections at the other end.
   * Keeps only the threshold weight and its change-in-weight.
   */
  public void removeAllInputs() {
    super.removeAllInputs();

    // now reset the weights to only hold the threshold slot.
    double temp1 = m_weights[0];
    double temp2 = m_changeInWeights[0];

    m_weights = new double[1];
    m_changeInWeights = new double[1];

    m_weights[0] = temp1;
    m_changeInWeights[0] = temp2;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
10,039
28.616519
79
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/neural/SigmoidUnit.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SigmoidUnit.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.functions.neural; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * This can be used by the * neuralnode to perform all it's computations (as a sigmoid unit). * * @author Malcolm Ware (mfw4@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class SigmoidUnit implements NeuralMethod, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -5162958458177475652L; /** * This function calculates what the output value should be. * @param node The node to calculate the value for. * @return The value. */ public double outputValue(NeuralNode node) { double[] weights = node.getWeights(); NeuralConnection[] inputs = node.getInputs(); double value = weights[0]; for (int noa = 0; noa < node.getNumInputs(); noa++) { value += inputs[noa].outputValue(true) * weights[noa+1]; } //this I got from the Neural Network faq to combat overflow //pretty simple solution really :) if (value < -45) { value = 0; } else if (value > 45) { value = 1; } else { value = 1 / (1 + Math.exp(-value)); } return value; } /** * This function calculates what the error value should be. * @param node The node to calculate the error for. * @return The error. 
*/ public double errorValue(NeuralNode node) { //then calculate the error. NeuralConnection[] outputs = node.getOutputs(); int[] oNums = node.getOutputNums(); double error = 0; for (int noa = 0; noa < node.getNumOutputs(); noa++) { error += outputs[noa].errorValue(true) * outputs[noa].weightValue(oNums[noa]); } double value = node.outputValue(false); error *= value * (1 - value); return error; } /** * This function will calculate what the change in weights should be * and also update them. * @param node The node to update the weights for. * @param learn The learning rate to use. * @param momentum The momentum to use. */ public void updateWeights(NeuralNode node, double learn, double momentum) { NeuralConnection[] inputs = node.getInputs(); double[] cWeights = node.getChangeInWeights(); double[] weights = node.getWeights(); double learnTimesError = 0; learnTimesError = learn * node.errorValue(false); double c = learnTimesError + momentum * cWeights[0]; weights[0] += c; cWeights[0] = c; int stopValue = node.getNumInputs() + 1; for (int noa = 1; noa < stopValue; noa++) { c = learnTimesError * inputs[noa-1].outputValue(false); c += momentum * cWeights[noa]; weights[noa] += c; cWeights[noa] = c; } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
3,709
27.984375
77
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/pace/ChisqMixture.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * ChisqMixture.java
 * Copyright (C) 2002 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.functions.pace;

import weka.core.RevisionUtils;
import weka.core.matrix.DoubleVector;
import weka.core.matrix.Maths;

import java.util.Random;

/**
 * Class for manipulating chi-square mixture distributions. <p/>
 *
 * For more information see: <p/>
 * <!-- technical-plaintext-start -->
 * Wang, Y (2000). A new approach to fitting linear models in high dimensional spaces. Hamilton, New Zealand.<br/>
 * <br/>
 * Wang, Y., Witten, I. H.: Modeling for optimal probability prediction. In: Proceedings of the Nineteenth International Conference in Machine Learning, Sydney, Australia, 650-657, 2002.
 * <!-- technical-plaintext-end -->
 * <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;phdthesis{Wang2000,
 *    address = {Hamilton, New Zealand},
 *    author = {Wang, Y},
 *    school = {Department of Computer Science, University of Waikato},
 *    title = {A new approach to fitting linear models in high dimensional spaces},
 *    year = {2000}
 * }
 *
 * &#64;inproceedings{Wang2002,
 *    address = {Sydney, Australia},
 *    author = {Wang, Y. and Witten, I. H.},
 *    booktitle = {Proceedings of the Nineteenth International Conference in Machine Learning},
 *    pages = {650-657},
 *    title = {Modeling for optimal probability prediction},
 *    year = {2002}
 * }
 * </pre>
 * <p/>
 * <!-- technical-bibtex-end -->
 *
 * @author Yong Wang (yongwang@cs.waikato.ac.nz)
 * @version $Revision: 1.5 $
 */
public class ChisqMixture extends MixtureDistribution {

  /** the separating threshold value */
  protected double separatingThreshold = 0.05;

  /** the trimming threshold */
  protected double trimingThreshold = 0.5;

  /** values below this are folded into the zero support point */
  protected double supportThreshold = 0.5;

  /** cap on support-point count, for computational reasons */
  protected int maxNumSupportPoints = 200;

  /** half-width (on the sqrt scale) of each fitting interval */
  protected int fittingIntervalLength = 3;

  /** lower clamp applied to fitting-interval endpoints */
  protected double fittingIntervalThreshold = 0.5;

  /** Constructs an empty ChisqMixture */
  public ChisqMixture() {}

  /**
   * Gets the separating threshold value. This value is used by the method
   * separable.
   *
   * @return the separating threshold
   */
  public double getSeparatingThreshold() {
    return separatingThreshold;
  }

  /**
   * Sets the separating threshold value
   *
   * @param t the threshold value
   */
  public void setSeparatingThreshold( double t ) {
    separatingThreshold = t;
  }

  /**
   * Gets the trimming threshold value. This value is used by the method trim.
   *
   * @return the trimming threshold
   */
  public double getTrimingThreshold() {
    return trimingThreshold;
  }

  /**
   * Sets the trimming threshold value.
   *
   * @param t the trimming threshold
   */
  public void setTrimingThreshold( double t ){
    trimingThreshold = t;
  }

  /**
   * Return true if a value can be considered for mixture estimation
   * separately from the data indexed between i0 and i1.
   * Works on the square-root scale, where a non-central chi-square is
   * approximately a unit-variance normal, and delegates to NormalMixture.
   *
   * @param data the data supposedly generated from the mixture
   * @param i0 the index of the first element in the group
   * @param i1 the index of the last element in the group
   * @param x the value
   * @return true if the value can be considered
   */
  public boolean separable( DoubleVector data, int i0, int i1, double x ) {
    DoubleVector dataSqrt = data.sqrt();
    double xh = Math.sqrt( x );
    NormalMixture m = new NormalMixture();
    m.setSeparatingThreshold( separatingThreshold );
    return m.separable( dataSqrt, i0, i1, xh );
  }

  /**
   * Constructs the set of support points for mixture estimation.
   * Small observations (and any discarded extra data) are represented by a
   * single support point at zero.
   *
   * @param data the data supposedly generated from the mixture
   * @param ne the number of extra data that are supposedly discarded
   * earlier and not passed into here
   * @return the set of support points
   * @throws IllegalArgumentException if more than maxNumSupportPoints result
   */
  public DoubleVector supportPoints( DoubleVector data, int ne ) {
    DoubleVector sp = new DoubleVector();
    sp.setCapacity( data.size() + 1 );
    if( data.get(0) < supportThreshold || ne != 0 )
      sp.addElement( 0 );
    for( int i = 0; i < data.size(); i++ )
      if( data.get( i ) > supportThreshold )
        sp.addElement( data.get(i) );
    // The following will be fixed later???
    if( sp.size() > maxNumSupportPoints )
      throw new IllegalArgumentException( "Too many support points. " );
    return sp;
  }

  /**
   * Constructs the set of fitting intervals for mixture estimation.
   * For each observation two intervals are produced: one extending below it
   * and one extending above it, with endpoints computed on the sqrt scale
   * and squared back.
   *
   * @param data the data supposedly generated from the mixture
   * @return the set of fitting intervals, one interval per row [left, right]
   */
  public PaceMatrix fittingIntervals( DoubleVector data ) {
    PaceMatrix a = new PaceMatrix( data.size() * 2, 2 );
    DoubleVector v = data.sqrt();
    int count = 0;
    double left, right;
    for( int i = 0; i < data.size(); i++ ) {
      left = v.get(i) - fittingIntervalLength;
      if( left < fittingIntervalThreshold ) left = 0;
      left = left * left;
      right = data.get(i);
      if( right < fittingIntervalThreshold ) right = fittingIntervalThreshold;
      a.set( count, 0, left );
      a.set( count, 1, right );
      count++;
    }
    for( int i = 0; i < data.size(); i++ ) {
      left = data.get(i);
      if( left < fittingIntervalThreshold ) left = 0;
      // NOTE(review): this adds fittingIntervalThreshold where the first loop
      // subtracts fittingIntervalLength — asymmetric; verify against upstream
      // Weka (which uses fittingIntervalLength here). Behavior kept as-is.
      right = v.get(i) + fittingIntervalThreshold;
      right = right * right;
      a.set( count, 0, left );
      a.set( count, 1, right );
      count++;
    }
    a.setRowDimension( count );
    return a;
  }

  /**
   * Constructs the probability matrix for mixture estimation, given a set
   * of support points and a set of intervals. Entry (i, j) is the
   * probability that a chi-square with non-centrality s_j falls in
   * interval i.
   *
   * @param s the set of support points
   * @param intervals the intervals
   * @return the probability matrix
   */
  public PaceMatrix probabilityMatrix(DoubleVector s, PaceMatrix intervals) {
    int ns = s.size();
    int nr = intervals.getRowDimension();
    PaceMatrix p = new PaceMatrix(nr, ns);
    for( int i = 0; i < nr; i++ ) {
      for( int j = 0; j < ns; j++ ) {
        p.set( i, j,
               Maths.pchisq( intervals.get(i, 1), s.get(j) ) -
               Maths.pchisq( intervals.get(i, 0), s.get(j) ) );
      }
    }
    return p;
  }

  /**
   * Returns the pace6 estimate of a single value: the squared posterior-mean
   * of sqrt(non-centrality) under the fitted mixing distribution.
   * Log-densities are shifted by their maximum before exponentiating to
   * avoid underflow.
   *
   * @param x the value
   * @return the pace6 estimate
   */
  public double pace6 ( double x ) {
    if( x > 100 ) return x;   // practical consideration; will modify later
    DoubleVector points = mixingDistribution.getPointValues();
    DoubleVector values = mixingDistribution.getFunctionValues();
    DoubleVector mean = points.sqrt();
    DoubleVector d = Maths.dchisqLog( x, points );
    d.minusEquals( d.max() );
    d = d.map("java.lang.Math", "exp").timesEquals( values );
    double atilde = mean.innerProduct( d ) / d.sum();
    return atilde * atilde;
  }

  /**
   * Returns the pace6 estimate of a vector.
   *
   * @param x the vector
   * @return the pace6 estimate
   */
  public DoubleVector pace6( DoubleVector x ) {
    DoubleVector pred = new DoubleVector( x.size() );
    for(int i = 0; i < x.size(); i++ )
      pred.set(i, pace6(x.get(i)) );
    trim( pred );
    return pred;
  }

  /**
   * Returns the pace2 estimate of a vector: the optimal nested-model
   * estimate, zeroing everything after the index that maximizes the
   * cumulated h/f statistic.
   *
   * @param x the vector
   * @return the pace2 estimate
   */
  public DoubleVector pace2( DoubleVector x ) {
    DoubleVector chf = new DoubleVector( x.size() );
    for(int i = 0; i < x.size(); i++ )
      chf.set( i, hf( x.get(i) ) );
    chf.cumulateInPlace();
    int index = chf.indexOfMax();
    DoubleVector copy = x.copy();
    if( index < x.size()-1 )
      copy.set( index + 1, x.size()-1, 0 );
    trim( copy );
    return copy;
  }

  /**
   * Returns the pace4 estimate of a vector: the optimal subset-selection
   * estimate, zeroing every component whose h value is non-positive.
   *
   * @param x the vector
   * @return the pace4 estimate
   */
  public DoubleVector pace4( DoubleVector x ) {
    DoubleVector h = h( x );
    DoubleVector copy = x.copy();
    for( int i = 0; i < x.size(); i++ )
      if( h.get(i) <= 0 ) copy.set(i, 0);
    trim( copy );
    return copy;
  }

  /**
   * Trims the small values of the estimate
   *
   * @param x the estimate vector
   */
  public void trim( DoubleVector x ) {
    for(int i = 0; i < x.size(); i++ ) {
      if( x.get(i) <= trimingThreshold ) x.set(i, 0);
    }
  }

  /**
   * Computes the value of h(x) / f(x) given the mixture. The
   * implementation avoids overflow by working with log-densities shifted
   * by their common maximum.
   *
   * @param AHat the value
   * @return the value of h(x) / f(x)
   */
  public double hf( double AHat ) {
    DoubleVector points = mixingDistribution.getPointValues();
    DoubleVector values = mixingDistribution.getFunctionValues();
    double x = Math.sqrt( AHat );
    DoubleVector mean = points.sqrt();
    DoubleVector d1 = Maths.dnormLog( x, mean, 1 );
    double d1max = d1.max();
    d1.minusEquals( d1max );
    DoubleVector d2 = Maths.dnormLog( -x, mean, 1 );
    d2.minusEquals( d1max );   // shift both by the same constant so the ratio is unchanged
    d1 = d1.map("java.lang.Math", "exp");
    d1.timesEquals( values );
    d2 = d2.map("java.lang.Math", "exp");
    d2.timesEquals( values );
    return ( ( points.minus(x/2)).innerProduct( d1 ) -
             ( points.plus(x/2)).innerProduct( d2 ) ) /
      (d1.sum() + d2.sum());
  }

  /**
   * Computes the value of h(x) given the mixture.
   *
   * @param AHat the value
   * @return the value of h(x)
   */
  public double h( double AHat ) {
    if( AHat == 0.0 ) return 0.0;
    DoubleVector points = mixingDistribution.getPointValues();
    DoubleVector values = mixingDistribution.getFunctionValues();
    double aHat = Math.sqrt( AHat );
    DoubleVector aStar = points.sqrt();
    DoubleVector d1 = Maths.dnorm( aHat, aStar, 1 ).timesEquals( values );
    DoubleVector d2 = Maths.dnorm( -aHat, aStar, 1 ).timesEquals( values );
    return points.minus(aHat/2).innerProduct( d1 ) -
      points.plus(aHat/2).innerProduct( d2 );
  }

  /**
   * Computes the value of h(x) given the mixture, where x is a vector.
   *
   * @param AHat the vector
   * @return the value of h(x)
   */
  public DoubleVector h( DoubleVector AHat ) {
    DoubleVector h = new DoubleVector( AHat.size() );
    for( int i = 0; i < AHat.size(); i++ )
      h.set( i, h( AHat.get(i) ) );
    return h;
  }

  /**
   * Computes the value of f(x) given the mixture.
   *
   * @param x the value
   * @return the value of f(x)
   */
  public double f( double x ) {
    DoubleVector points = mixingDistribution.getPointValues();
    DoubleVector values = mixingDistribution.getFunctionValues();
    return Maths.dchisq(x, points).timesEquals(values).sum();
  }

  /**
   * Computes the value of f(x) given the mixture, where x is a vector.
   *
   * @param x the vector
   * @return the value of f(x)
   */
  public DoubleVector f( DoubleVector x ) {
    DoubleVector f = new DoubleVector( x.size() );
    for( int i = 0; i < x.size(); i++ )
      // BUGFIX: was f.set(i, h(f.get(i))) — it read the uninitialized result
      // vector and applied h() instead of the density f() to the input.
      f.set( i, f( x.get(i) ) );
    return f;
  }

  /**
   * Converts to a string
   *
   * @return a string representation
   */
  public String toString() {
    return mixingDistribution.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 1.5 $");
  }

  /**
   * Method to test this class
   *
   * @param args the commandline arguments
   */
  public static void main(String args[]) {
    int n1 = 50;
    int n2 = 50;
    double ncp1 = 0;
    double ncp2 = 10;
    double mu1 = Math.sqrt( ncp1 );
    double mu2 = Math.sqrt( ncp2 );
    DoubleVector a = Maths.rnorm( n1, mu1, 1, new Random() );
    a = a.cat( Maths.rnorm(n2, mu2, 1, new Random()) );
    DoubleVector aNormal = a;
    a = a.square();
    a.sort();

    DoubleVector means = (new DoubleVector( n1, mu1 )).cat(new DoubleVector(n2, mu2));

    System.out.println("==========================================================");
    System.out.println("This is to test the estimation of the mixing\n" +
                       "distribution of the mixture of non-central Chi-square\n" +
                       "distributions. The example mixture used is of the form: \n\n" +
                       "   0.5 * Chi^2_1(ncp1) + 0.5 * Chi^2_1(ncp2)\n" );
    System.out.println("It also tests the PACE estimators. Quadratic losses of the\n" +
                       "estimators are given, measuring their performance.");
    System.out.println("==========================================================");
    System.out.println( "ncp1 = " + ncp1 + " ncp2 = " + ncp2 +"\n" );
    System.out.println( a.size() + " observations are: \n\n" + a );
    System.out.println( "\nQuadratic loss of the raw data (i.e., the MLE) = " +
                        aNormal.sum2( means ) );
    System.out.println("==========================================================");

    // find the mixing distribution
    ChisqMixture d = new ChisqMixture();
    d.fit( a, NNMMethod );
    System.out.println( "The estimated mixing distribution is\n" + d );

    DoubleVector pred = d.pace2( a.rev() ).rev();
    System.out.println( "\nThe PACE2 Estimate = \n" + pred );
    System.out.println( "Quadratic loss = " +
                        pred.sqrt().times(aNormal.sign()).sum2( means ) );

    pred = d.pace4( a );
    System.out.println( "\nThe PACE4 Estimate = \n" + pred );
    System.out.println( "Quadratic loss = " +
                        pred.sqrt().times(aNormal.sign()).sum2( means ) );

    pred = d.pace6( a );
    System.out.println( "\nThe PACE6 Estimate = \n" + pred );
    System.out.println( "Quadratic loss = " +
                        pred.sqrt().times(aNormal.sign()).sum2( means ) );
  }
}
14,370
28.44877
186
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/pace/DiscreteFunction.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * ChisqMixture.java * Copyright (C) 2002 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.pace; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.matrix.DoubleVector; import weka.core.matrix.FlexibleDecimalFormat; import weka.core.matrix.IntVector; /** Class for handling discrete functions. <p> * * A discrete function here is one that takes non-zero values over a finite * set of points. <p> * * @author Yong Wang (yongwang@cs.waikato.ac.nz) * @version $Revision: 1.4 $ */ public class DiscreteFunction implements RevisionHandler { protected DoubleVector points; protected DoubleVector values; /** Constructs an empty discrete function */ public DiscreteFunction() { this(null, null); } /** Constructs a discrete function with the point values provides and the * function values are all 1/n. * @param p the point values */ public DiscreteFunction( DoubleVector p ) { this( p, null ); } /** Constructs a discrete function with both the point values and * function values provided. 
* @param p the point values * @param v the function values */ public DiscreteFunction( DoubleVector p, DoubleVector v ) { points = p; values = v; formalize(); } private DiscreteFunction formalize() { if( points == null ) points = new DoubleVector(); if( values == null ) values = new DoubleVector(); if( points.isEmpty() ) { if( ! values.isEmpty() ) throw new IllegalArgumentException("sizes not match"); } else { int n = points.size(); if( values.isEmpty() ) { values = new DoubleVector( n, 1./n ); } else { if( values.size() != n ) throw new IllegalArgumentException("sizes not match"); } } return this; } /** * Normalizes the function values with L1-norm. */ public DiscreteFunction normalize() { if ( ! values.isEmpty() ) { double s = values.sum(); if( s != 0.0 && s != 1.0 ) values.timesEquals( 1. / s ); } return this; } /** * Sorts the point values of the discrete function. */ public void sort() { IntVector index = points.sortWithIndex(); values = values.subvector( index ); } /** * Clones the discrete function */ public Object clone() { DiscreteFunction d = new DiscreteFunction(); d.points = (DoubleVector) points.clone(); d.values = (DoubleVector) values.clone(); return d; } /** * Makes each individual point value unique */ public DiscreteFunction unique() { int count = 0; if( size() < 2 ) return this; for(int i = 1; i <= size() - 1; i++ ) { if( points.get( count ) != points.get( i ) ) { count++; points.set( count, points.get( i ) ); values.set( count, values.get( i ) ); } else { values.set( count, values.get(count) + values.get(i) ); } } points = (DoubleVector) points.subvector(0, count); values = (DoubleVector) values.subvector(0, count); return this; } /** * Returns the size of the point set. 
*/ public int size() { if( points == null ) return 0; return points.size(); } /** * Gets a particular point value * @param i the index */ public double getPointValue( int i ) { return points.get(i); } /** * Gets a particular function value * @param i the index */ public double getFunctionValue( int i ) { return values.get(i); } /** * Sets a particular point value * @param i the index */ public void setPointValue( int i, double p ) { points.set(i, p); } /** * Sets a particular function value * @param i the index */ public void setFunctionValue( int i, double v ) { values.set(i, v); } /** * Gets all point values */ protected DoubleVector getPointValues() { return points; } /** * Gets all function values */ protected DoubleVector getFunctionValues() { return values; } /** * Returns true if it is empty. */ public boolean isEmpty() { if( size() == 0 ) return true; return false; } // public void addPoint( double x, double y ) { // points.addPoint( x ); // values.addPoint( y ); // } /** * Returns the combined of two discrete functions * @param d the second discrete function * @return the combined discrte function */ public DiscreteFunction plus( DiscreteFunction d ) { return ((DiscreteFunction) clone()).plusEquals( d ); } /** * Returns the combined of two discrete functions. The first function is * replaced with the new one. * @param d the second discrete function * @return the combined discrte function */ public DiscreteFunction plusEquals( DiscreteFunction d ) { points = points.cat( d.points ); values = values.cat( d.values ); return this; } /** * All function values are multiplied by a double * @param x the multiplier */ public DiscreteFunction timesEquals( double x ) { values.timesEquals( x ); return this; } /** * Converts the discrete function to string. 
*/ public String toString() { StringBuffer text = new StringBuffer(); FlexibleDecimalFormat nf1 = new FlexibleDecimalFormat( 5 ); nf1.grouping( true ); FlexibleDecimalFormat nf2 = new FlexibleDecimalFormat( 5 ); nf2.grouping( true ); for(int i = 0; i < size(); i++) { nf1.update( points.get(i) ); nf2.update( values.get(i) ); } text.append("\t" + nf1.formatString("Points") + "\t" + nf2.formatString("Values") + "\n\n"); for(int i = 0; i <= size() - 1; i++) { text.append( "\t" + nf1.format( points.get(i) ) + "\t" + nf2.format( values.get(i) ) + "\n" ); } return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.4 $"); } public static void main( String args[] ) { double points[] = {2,1,2,3,3}; double values[] = {3,2,4,1,3}; DiscreteFunction d = new DiscreteFunction( new DoubleVector( points ), new DoubleVector( values )); System.out.println( d ); d.normalize(); System.out.println( "d (after normalize) = \n" + d ); points[1] = 10; System.out.println( "d (after setting [1]) = \n" + d); d.sort(); System.out.println( "d (after sorting) = \n" + d); d.unique(); System.out.println( "d (after unique) = \n" + d ); } }
7,434
23.13961
75
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/pace/MixtureDistribution.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * MixtureDistribution.java * Copyright (C) 2002 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.pace; import weka.core.RevisionHandler; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.matrix.DoubleVector; import weka.core.matrix.IntVector; /** * Abtract class for manipulating mixture distributions. <p> * * REFERENCES <p> * * Wang, Y. (2000). "A new approach to fitting linear models in high * dimensional spaces." PhD Thesis. Department of Computer Science, * University of Waikato, New Zealand. <p> * * Wang, Y. and Witten, I. H. (2002). "Modeling for optimal probability * prediction." Proceedings of ICML'2002. Sydney. 
<p> * * @author Yong Wang (yongwang@cs.waikato.ac.nz) * @version $Revision: 1.5 $ */ public abstract class MixtureDistribution implements TechnicalInformationHandler, RevisionHandler { protected DiscreteFunction mixingDistribution; /** The nonnegative-measure-based method */ public static final int NNMMethod = 1; /** The probability-measure-based method */ public static final int PMMethod = 2; // The CDF-based method // public static final int CDFMethod = 3; // The method based on the Kolmogrov and von Mises measure // public static final int ModifiedCDFMethod = 4; /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.PHDTHESIS); result.setValue(Field.AUTHOR, "Wang, Y"); result.setValue(Field.YEAR, "2000"); result.setValue(Field.TITLE, "A new approach to fitting linear models in high dimensional spaces"); result.setValue(Field.SCHOOL, "Department of Computer Science, University of Waikato"); result.setValue(Field.ADDRESS, "Hamilton, New Zealand"); additional = result.add(Type.INPROCEEDINGS); additional.setValue(Field.AUTHOR, "Wang, Y. and Witten, I. 
H."); additional.setValue(Field.YEAR, "2002"); additional.setValue(Field.TITLE, "Modeling for optimal probability prediction"); additional.setValue(Field.BOOKTITLE, "Proceedings of the Nineteenth International Conference in Machine Learning"); additional.setValue(Field.YEAR, "2002"); additional.setValue(Field.PAGES, "650-657"); additional.setValue(Field.ADDRESS, "Sydney, Australia"); return result; } /** * Gets the mixing distribution * * @return the mixing distribution */ public DiscreteFunction getMixingDistribution() { return mixingDistribution; } /** Sets the mixing distribution * @param d the mixing distribution */ public void setMixingDistribution( DiscreteFunction d ) { mixingDistribution = d; } /** Fits the mixture (or mixing) distribution to the data. The default * method is the nonnegative-measure-based method. * @param data the data, supposedly generated from the mixture model */ public void fit( DoubleVector data ) { fit( data, NNMMethod ); } /** Fits the mixture (or mixing) distribution to the data. * @param data the data supposedly generated from the mixture * @param method the method to be used. Refer to the static final * variables of this class. */ public void fit( DoubleVector data, int method ) { DoubleVector data2 = (DoubleVector) data.clone(); if( data2.unsorted() ) data2.sort(); int n = data2.size(); int start = 0; DoubleVector subset; DiscreteFunction d = new DiscreteFunction(); for( int i = 0; i < n-1; i++ ) { if( separable( data2, start, i, data2.get(i+1) ) && separable( data2, i+1, n-1, data2.get(i) ) ) { subset = (DoubleVector) data2.subvector( start, i ); d.plusEquals( fitForSingleCluster( subset, method ). timesEquals(i - start + 1) ); start = i + 1; } } subset = (DoubleVector) data2.subvector( start, n-1 ); d.plusEquals( fitForSingleCluster( subset, method ). timesEquals(n - start) ); d.sort(); d.normalize(); mixingDistribution = d; } /** * Fits the mixture (or mixing) distribution to the data. 
The data is * not pre-clustered for computational efficiency. * * @param data the data supposedly generated from the mixture * @param method the method to be used. Refer to the static final * variables of this class. * @return the generated distribution */ public DiscreteFunction fitForSingleCluster( DoubleVector data, int method ) { if( data.size() < 2 ) return new DiscreteFunction( data ); DoubleVector sp = supportPoints( data, 0 ); PaceMatrix fi = fittingIntervals( data ); PaceMatrix pm = probabilityMatrix( sp, fi ); PaceMatrix epm = new PaceMatrix( empiricalProbability( data, fi ). timesEquals( 1. / data.size() ) ); IntVector pvt = (IntVector) IntVector.seq(0, sp.size()-1); DoubleVector weights; switch( method ) { case NNMMethod: weights = pm.nnls( epm, pvt ); break; case PMMethod: weights = pm.nnlse1( epm, pvt ); break; default: throw new IllegalArgumentException("unknown method"); } DoubleVector sp2 = new DoubleVector( pvt.size() ); for( int i = 0; i < sp2.size(); i++ ){ sp2.set( i, sp.get(pvt.get(i)) ); } DiscreteFunction d = new DiscreteFunction( sp2, weights ); d.sort(); d.normalize(); return d; } /** * Return true if a value can be considered for mixture estimatino * separately from the data indexed between i0 and i1 * * @param data the data supposedly generated from the mixture * @param i0 the index of the first element in the group * @param i1 the index of the last element in the group * @param x the value * @return true if a value can be considered */ public abstract boolean separable( DoubleVector data, int i0, int i1, double x ); /** * Contructs the set of support points for mixture estimation. * * @param data the data supposedly generated from the mixture * @param ne the number of extra data that are suppposedly discarded * earlier and not passed into here * @return the set of support points */ public abstract DoubleVector supportPoints( DoubleVector data, int ne ); /** * Contructs the set of fitting intervals for mixture estimation. 
* * @param data the data supposedly generated from the mixture * @return the set of fitting intervals */ public abstract PaceMatrix fittingIntervals( DoubleVector data ); /** * Contructs the probability matrix for mixture estimation, given a set * of support points and a set of intervals. * * @param s the set of support points * @param intervals the intervals * @return the probability matrix */ public abstract PaceMatrix probabilityMatrix( DoubleVector s, PaceMatrix intervals ); /** * Computes the empirical probabilities of the data over a set of * intervals. * * @param data the data * @param intervals the intervals * @return the empirical probabilities */ public PaceMatrix empiricalProbability( DoubleVector data, PaceMatrix intervals ) { int n = data.size(); int k = intervals.getRowDimension(); PaceMatrix epm = new PaceMatrix( k, 1, 0 ); double point; for( int j = 0; j < n; j ++ ) { for(int i = 0; i < k; i++ ) { point = 0.0; if( intervals.get(i, 0) == data.get(j) || intervals.get(i, 1) == data.get(j) ) point = 0.5; else if( intervals.get(i, 0) < data.get(j) && intervals.get(i, 1) > data.get(j) ) point = 1.0; epm.setPlus( i, 0, point); } } return epm; } /** * Converts to a string * * @return a string representation */ public String toString() { return "The mixing distribution:\n" + mixingDistribution.toString(); } }
9,125
32.306569
119
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/pace/NormalMixture.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * NormalMixture.java * Copyright (C) 2002 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.pace; import java.util.Random; import weka.core.RevisionUtils; import weka.core.matrix.DoubleVector; import weka.core.matrix.Maths; /** * Class for manipulating normal mixture distributions. <p> * * For more information see: <p/> * <!-- technical-plaintext-start --> * Wang, Y (2000). A new approach to fitting linear models in high dimensional spaces. Hamilton, New Zealand.<br/> * <br/> * Wang, Y., Witten, I. H.: Modeling for optimal probability prediction. In: Proceedings of the Nineteenth International Conference in Machine Learning, Sydney, Australia, 650-657, 2002. <!-- technical-plaintext-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;phdthesis{Wang2000, * address = {Hamilton, New Zealand}, * author = {Wang, Y}, * school = {Department of Computer Science, University of Waikato}, * title = {A new approach to fitting linear models in high dimensional spaces}, * year = {2000} * } * * &#64;inproceedings{Wang2002, * address = {Sydney, Australia}, * author = {Wang, Y. and Witten, I. 
H.}, * booktitle = {Proceedings of the Nineteenth International Conference in Machine Learning}, * pages = {650-657}, * title = {Modeling for optimal probability prediction}, * year = {2002} * } * </pre> * <p/> <!-- technical-bibtex-end --> * * @author Yong Wang (yongwang@cs.waikato.ac.nz) * @version $Revision: 1.5 $ */ public class NormalMixture extends MixtureDistribution { /** the separating threshold */ protected double separatingThreshold = 0.05; /** the triming thresholding */ protected double trimingThreshold = 0.7; protected double fittingIntervalLength = 3; /** * Contructs an empty NormalMixture */ public NormalMixture() {} /** * Gets the separating threshold value. This value is used by the method * separatable * * @return the separating threshold */ public double getSeparatingThreshold(){ return separatingThreshold; } /** * Sets the separating threshold value * * @param t the threshold value */ public void setSeparatingThreshold( double t ){ separatingThreshold = t; } /** * Gets the triming thresholding value. This value is usef by the method * trim. * * @return the triming thresholding */ public double getTrimingThreshold(){ return trimingThreshold; } /** * Sets the triming thresholding value. 
* * @param t the triming thresholding */ public void setTrimingThreshold( double t ){ trimingThreshold = t; } /** * Return true if a value can be considered for mixture estimatino * separately from the data indexed between i0 and i1 * * @param data the data supposedly generated from the mixture * @param i0 the index of the first element in the group * @param i1 the index of the last element in the group * @param x the value * @return true if the value can be considered */ public boolean separable( DoubleVector data, int i0, int i1, double x ) { double p = 0; for( int i = i0; i <= i1; i++ ) { p += Maths.pnorm( - Math.abs(x - data.get(i)) ); } if( p < separatingThreshold ) return true; else return false; } /** * Contructs the set of support points for mixture estimation. * * @param data the data supposedly generated from the mixture * @param ne the number of extra data that are suppposedly discarded * earlier and not passed into here * @return the set of support points */ public DoubleVector supportPoints( DoubleVector data, int ne ) { if( data.size() < 2 ) throw new IllegalArgumentException("data size < 2"); return data.copy(); } /** * Contructs the set of fitting intervals for mixture estimation. * * @param data the data supposedly generated from the mixture * @return the set of fitting intervals */ public PaceMatrix fittingIntervals( DoubleVector data ) { DoubleVector left = data.cat( data.minus( fittingIntervalLength ) ); DoubleVector right = data.plus( fittingIntervalLength ).cat( data ); PaceMatrix a = new PaceMatrix(left.size(), 2); a.setMatrix(0, left.size()-1, 0, left); a.setMatrix(0, right.size()-1, 1, right); return a; } /** * Contructs the probability matrix for mixture estimation, given a set * of support points and a set of intervals. 
* * @param s the set of support points * @param intervals the intervals * @return the probability matrix */ public PaceMatrix probabilityMatrix( DoubleVector s, PaceMatrix intervals ) { int ns = s.size(); int nr = intervals.getRowDimension(); PaceMatrix p = new PaceMatrix(nr, ns); for( int i = 0; i < nr; i++ ) { for( int j = 0; j < ns; j++ ) { p.set( i, j, Maths.pnorm( intervals.get(i, 1), s.get(j), 1 ) - Maths.pnorm( intervals.get(i, 0), s.get(j), 1 ) ); } } return p; } /** * Returns the empirical Bayes estimate of a single value. * * @param x the value * @return the empirical Bayes estimate */ public double empiricalBayesEstimate ( double x ) { if( Math.abs(x) > 10 ) return x; // pratical consideration; modify later DoubleVector d = Maths.dnormLog( x, mixingDistribution.getPointValues(), 1 ); d.minusEquals( d.max() ); d = d.map("java.lang.Math", "exp"); d.timesEquals( mixingDistribution.getFunctionValues() ); return mixingDistribution.getPointValues().innerProduct( d ) / d.sum(); } /** * Returns the empirical Bayes estimate of a vector. * * @param x the vector * @return the empirical Bayes estimate */ public DoubleVector empiricalBayesEstimate( DoubleVector x ) { DoubleVector pred = new DoubleVector( x.size() ); for(int i = 0; i < x.size(); i++ ) pred.set(i, empiricalBayesEstimate(x.get(i)) ); trim( pred ); return pred; } /** * Returns the optimal nested model estimate of a vector. * * @param x the vector * @return the optimal nested model estimate */ public DoubleVector nestedEstimate( DoubleVector x ) { DoubleVector chf = new DoubleVector( x.size() ); for(int i = 0; i < x.size(); i++ ) chf.set( i, hf( x.get(i) ) ); chf.cumulateInPlace(); int index = chf.indexOfMax(); DoubleVector copy = x.copy(); if( index < x.size()-1 ) copy.set( index + 1, x.size()-1, 0 ); trim( copy ); return copy; } /** * Returns the estimate of optimal subset selection. 
* @param x the vector
   *  @return the estimate of optimal subset selection
   */
  public DoubleVector subsetEstimate( DoubleVector x ) {

    // Keep only the components whose h-criterion is positive; zero the rest.
    DoubleVector h = h( x );
    DoubleVector copy = x.copy();
    for( int i = 0; i < x.size(); i++ )
      if( h.get(i) <= 0 ) copy.set(i, 0);
    trim( copy );
    return copy;
  }

  /** 
   *  Trims the small values of the estimate: any component whose magnitude
   *  does not exceed trimingThreshold is set to zero, in place.
   *  
   *  @param x the estimate vector
   */
  public void trim( DoubleVector x ) {
    for(int i = 0; i < x.size(); i++ ) {
      if( Math.abs(x.get(i)) <= trimingThreshold ) x.set(i, 0);
    }
  }

  /** 
   *  Computes the value of h(x) / f(x) given the mixture. The
   *  implementation avoids overflow by working on the log scale and
   *  subtracting the maximum before exponentiating.
   *  
   *  @param x the value
   *  @return the value of h(x) / f(x)
   */
  public double hf( double x ) {
    DoubleVector points = mixingDistribution.getPointValues();
    DoubleVector values = mixingDistribution.getFunctionValues();

    // Unnormalised weights exp(log dnorm - max), scaled by the mixing
    // probabilities; the common scale factor cancels in the ratio below.
    DoubleVector d = Maths.dnormLog( x, points, 1 );

    d.minusEquals( d.max() );

    d = (DoubleVector) d.map("java.lang.Math", "exp");
    d.timesEquals( values );

    // h uses the kernel 2*x*point - x^2; f reduces to d.sum() here.
    return ((DoubleVector) points.times(2*x).minusEquals(x*x))
      .innerProduct( d ) / d.sum();
  }

  /** 
   *  Computes the value of h(x) given the mixture.
   *  
   *  @param x the value
   *  @return the value of h(x)
   */
  public double h( double x ) {
    DoubleVector points = mixingDistribution.getPointValues();
    DoubleVector values = mixingDistribution.getFunctionValues();
    DoubleVector d =
    (DoubleVector) Maths.dnorm( x, points, 1 ).timesEquals( values );
    return ((DoubleVector) points.times(2*x).minusEquals(x*x))
      .innerProduct( d );
  }

  /** 
   *  Computes the value of h(x) given the mixture, where x is a vector.
   *  
   *  @param x the vector
   *  @return the value of h(x), element-wise
   */
  public DoubleVector h( DoubleVector x ) {
    DoubleVector h = new DoubleVector( x.size() );
    for( int i = 0; i < x.size(); i++ )
      h.set( i, h( x.get(i) ) );
    return h;
  }

  /** 
   *  Computes the value of f(x) given the mixture.
* @param x the value
   *  @return the value of f(x)
   */
  public double f( double x ) {
    DoubleVector points = mixingDistribution.getPointValues();
    DoubleVector values = mixingDistribution.getFunctionValues();
    // FIX: previously used Maths.dchisq( x, points ), apparently copied from
    // the chi-square mixture. A mixture of unit-variance normals has density
    // sum_j w_j * dnorm(x; mu_j, 1), consistent with h() and hf() above.
    return Maths.dnorm( x, points, 1 ).timesEquals( values ).sum();
  }

  /** 
   *  Computes the value of f(x) given the mixture, where x is a vector.
   *  
   *  @param x the vector
   *  @return the value of f(x), element-wise
   */
  public DoubleVector f( DoubleVector x ) {
    DoubleVector f = new DoubleVector( x.size() );
    // FIX: previously computed h( f.get(i) ) -- i.e. it read from the
    // freshly-zeroed result vector instead of the input, and called h()
    // rather than f(). Now evaluates the density at each element of x.
    for( int i = 0; i < x.size(); i++ )
      f.set( i, f( x.get(i) ) );
    return f;
  }

  /** 
   *  Converts to a string
   *  
   *  @return a string representation
   */
  public String toString() {
    return mixingDistribution.toString();
  }

  /**
   * Returns the revision string.
   * 
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 1.5 $");
  }

  /** 
   *  Method to test this class 
   *  
   *  @param args the commandline arguments - ignored
   */
  public static void main(String args[]) {
    int n1 = 50;
    int n2 = 50;
    double mu1 = 0;
    double mu2 = 5;
    // Two unit-variance normal samples concatenated into one data vector.
    DoubleVector a = Maths.rnorm( n1, mu1, 1, new Random() );
    a = a.cat( Maths.rnorm( n2, mu2, 1, new Random() ) );
    DoubleVector means = (new DoubleVector( n1, mu1 )).cat(new DoubleVector(n2, mu2));

    System.out.println("==========================================================");
    System.out.println("This is to test the estimation of the mixing\n" +
                       "distribution of the mixture of unit variance normal\n" +
                       "distributions. The example mixture used is of the form: \n\n" +
                       " 0.5 * N(mu1, 1) + 0.5 * N(mu2, 1)\n" );

    System.out.println("It also tests three estimators: the subset\n" +
                       "selector, the nested model selector, and the empirical Bayes\n" +
                       "estimator. Quadratic losses of the estimators are given, \n" +
                       "and are taken as the measure of their performance.");
    System.out.println("==========================================================");

    System.out.println( "mu1 = " + mu1 + " mu2 = " + mu2 +"\n" );

    System.out.println( a.size() + " observations are: \n\n" + a );

    System.out.println( "\nQuadratic loss of the raw data (i.e., the MLE) = " +
                        a.sum2( means ) );
    System.out.println("==========================================================");

    // find the mixing distribution
    NormalMixture d = new NormalMixture();
    d.fit( a, NNMMethod );
    System.out.println( "The estimated mixing distribution is:\n" + d );

    DoubleVector pred = d.nestedEstimate( a.rev() ).rev();
    System.out.println( "\nThe Nested Estimate = \n" + pred );
    System.out.println( "Quadratic loss = " + pred.sum2( means ) );

    pred = d.subsetEstimate( a );
    System.out.println( "\nThe Subset Estimate = \n" + pred );
    System.out.println( "Quadratic loss = " + pred.sum2( means ) );

    pred = d.empiricalBayesEstimate( a );
    System.out.println( "\nThe Empirical Bayes Estimate = \n" + pred );
    System.out.println( "Quadratic loss = " + pred.sum2( means ) );
  }
}
12,753
29.511962
186
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/pace/PaceMatrix.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * PaceMatrix.java
 * Copyright (C) 2002 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.functions.pace;

import weka.core.RevisionUtils;
import weka.core.matrix.DoubleVector;
import weka.core.matrix.FlexibleDecimalFormat;
import weka.core.matrix.IntVector;
import weka.core.matrix.Matrix;
import weka.core.matrix.Maths;

import java.util.Random;
import java.text.DecimalFormat;

/**
 * Class for matrix manipulation used for pace regression. <p>
 *
 * REFERENCES <p>
 * 
 * Wang, Y. (2000). "A new approach to fitting linear models in high
 * dimensional spaces." PhD Thesis. Department of Computer Science,
 * University of Waikato, New Zealand. <p>
 * 
 * Wang, Y. and Witten, I. H. (2002). "Modeling for optimal probability
 * prediction." Proceedings of ICML'2002. Sydney. <p>
 *
 * @author Yong Wang (yongwang@cs.waikato.ac.nz)
 * @version $Revision: 1.6 $
 */
public class PaceMatrix extends Matrix {

  /** for serialization */
  static final long serialVersionUID = 2699925616857843973L;

  /* ------------------------
   *    Constructors
   * ------------------------ */

  /** Construct an m-by-n PACE matrix of zeros. 
      @param m    Number of rows.
      @param n    Number of columns.
  */
  public PaceMatrix( int m, int n ) {
    super( m, n );
  }

  /** Construct an m-by-n constant PACE matrix.
      @param m    Number of rows.
      @param n    Number of columns.
      @param s    Fill the matrix with this scalar value.
  */
  public PaceMatrix( int m, int n, double s ) {
    super( m, n, s );
  }

  /** Construct a PACE matrix from a 2-D array.
      @param A    Two-dimensional array of doubles.
      @throws IllegalArgumentException All rows must have the same length
  */
  public PaceMatrix( double[][] A ) {
    super( A );
  }

  /** Construct a PACE matrix quickly without checking arguments.
      @param A    Two-dimensional array of doubles.
      @param m    Number of rows.
      @param n    Number of columns.
  */
  public PaceMatrix( double[][] A, int m, int n ) {
    super( A, m, n );
  }

  /** Construct a PaceMatrix from a one-dimensional packed array
      @param vals One-dimensional array of doubles, packed by columns (ala
      Fortran).
      @param m    Number of rows.
      @throws IllegalArgumentException Array length must be a multiple of m.
  */
  public PaceMatrix( double vals[], int m ) {
    super( vals, m );
  }

  /** Construct a PaceMatrix with a single column from a DoubleVector
      @param v    DoubleVector
  */
  public PaceMatrix( DoubleVector v ) {
    this( v.size(), 1 );
    setMatrix( 0, v.size()-1, 0, v );
  }

  /** Construct a PaceMatrix from a Matrix.
      NOTE(review): this adopts the array returned by X.getArray() directly
      (no copy), so the new PaceMatrix may share state with X -- confirm
      that getArray() exposes the internal array before relying on isolation.
      @param X    Matrix
  */
  public PaceMatrix( Matrix X ) {
    super( X.getRowDimension(), X.getColumnDimension() );
    A = X.getArray();  // discards the array allocated by super()
  }

  /* ------------------------
   *   Public Methods
   * ------------------------ */

  /** Set the row dimension of the matrix
   *  @param rowDimension the row dimension
   */
  public void setRowDimension( int rowDimension ) {
    m = rowDimension;
  }

  /** Set the column dimension of the matrix
   *  @param columnDimension the column dimension
   */
  public void setColumnDimension( int columnDimension ) {
    n = columnDimension;
  }

  /** 
   * Clone the PaceMatrix object (deep copy of the element array).
   * 
   * @return the clone
   */
  public Object clone () {
    PaceMatrix X = new PaceMatrix(m,n);
    double[][] C = X.getArray();
    for (int i = 0; i < m; i++) {
      for (int j = 0; j < n; j++) {
        C[i][j] = A[i][j];
      }
    }
    return (Object) X;
  }

  /** Add a value to an element and reset the element
   *  @param i the row number of the element
   *  @param j the column number of the element
   *  @param s the double value to be added with
   */
  public void setPlus(int i, int j, double s) {
    A[i][j] += s;
  }

  /** Multiply a value with an element and reset the element
   *  @param i the row number of the element
   *  @param j the column number of the element
   *  @param s the double value to be multiplied with
   */
  public void setTimes(int i, int j, double s) {
    A[i][j] *= s;
  }

  /** Set the submatrix A[i0:i1][j0:j1] with a same value
   *  @param i0 the index of the first element of the column
   *  @param i1 the index of the last element of the column
   *  @param j0 the index of the first column
   *  @param j1 the index of the last column
   *  @param s the value to be set to
   */
  public void setMatrix( int i0, int i1, int j0, int j1, double s ) {
    try {
      for( int i = i0; i <= i1; i++ ) {
        for( int j = j0; j <= j1; j++ ) {
          A[i][j] = s;
        }
      }
    } catch( ArrayIndexOutOfBoundsException e ) {
      // re-thrown with a uniform message
      throw new ArrayIndexOutOfBoundsException( "Index out of bounds" );
    }
  }

  /** Set the submatrix A[i0:i1][j] with the values stored in a
   *  DoubleVector
   *  @param i0 the index of the first element of the column
   *  @param i1 the index of the last element of the column
   *  @param j the index of the column
   *  @param v the vector that stores the values
   */
  public void setMatrix( int i0, int i1, int j, DoubleVector v ) {
    for( int i = i0; i <= i1; i++ ) {
      A[i][j] = v.get(i-i0);
    }
  }

  /** Set the whole matrix from a 1-D array
   *  @param v    1-D array of doubles
   *  @param columnFirst   Whether to fill the column first or the row.
* @throws ArrayIndexOutOfBoundsException Submatrix indices
   */
  public void setMatrix ( double[] v, boolean columnFirst ) {
    try {
      if( v.length != m * n )
        throw new IllegalArgumentException("sizes not match.");
      int i, j, count = 0;
      // NOTE(review): when columnFirst is true the loops below fill
      // row-by-row (i outer, j inner); the flag name and the behaviour look
      // swapped -- confirm against callers before changing.
      if( columnFirst ) {
        for( i = 0; i < m; i++ ) {
          for( j = 0; j < n; j++ ) {
            A[i][j] = v[count];
            count ++;
          }
        }
      }
      else {
        for( j = 0; j < n; j++ ) {
          for( i = 0; i < m; i++ ){
            A[i][j] = v[count];
            count ++;
          }
        }
      }
    } catch( ArrayIndexOutOfBoundsException e ) {
      throw new ArrayIndexOutOfBoundsException( "Submatrix indices" );
    }
  }

  /** Returns the maximum absolute value of all elements
      @return the maximum value
  */
  public double maxAbs () {
    double ma = Math.abs(A[0][0]);
    for (int j = 0; j < n; j++) {
      for (int i = 0; i < m; i++) {
        ma = Math.max(ma, Math.abs(A[i][j]));
      }
    }
    return ma;
  }

  /** Returns the maximum absolute value of some elements of a column,
      that is, the elements of A[i0:i1][j].
      @param i0 the index of the first element of the column
      @param i1 the index of the last element of the column
      @param j the index of the column
      @return the maximum value
  */
  public double maxAbs ( int i0, int i1, int j ) {
    double m = Math.abs(A[i0][j]);  // note: local m shadows the row count
    for (int i = i0+1; i <= i1; i++) {
      m = Math.max(m, Math.abs(A[i][j]));
    }
    return m;
  }

  /** Returns the minimum absolute value of some elements of a column,
      that is, the elements of A[i0:i1][column].
      @param i0 the index of the first element of the column
      @param i1 the index of the last element of the column
      @param column the index of the column
      @return the minimum value
  */
  public double minAbs ( int i0, int i1, int column ) {
    double m = Math.abs(A[i0][column]);  // note: local m shadows the row count
    for (int i = i0+1; i <= i1; i++) {
      m = Math.min(m, Math.abs(A[i][column]));
    }
    return m;
  }

  /** Check if the matrix is empty
   *  @return true if the matrix has no rows, no columns or no backing array
   */
  public boolean isEmpty(){
    if(m == 0 || n == 0) return true;
    if(A == null) return true;
    return false;
  }

  /** Return a DoubleVector that stores a column of the matrix
   *  @param j the index of the column
   *  @return the column
   */
  public DoubleVector getColumn( int j ) {
    DoubleVector v = new DoubleVector( m );
    double [] a = v.getArray();
    for(int i = 0; i < m; i++)
      a[i] = A[i][j];
    return v;
  }

  /** Return a DoubleVector that stores some elements of a column of the
   *  matrix
   *  @param i0 the index of the first element of the column
   *  @param i1 the index of the last element of the column
   *  @param j the index of the column
   *  @return the DoubleVector
   */
  public DoubleVector getColumn( int i0, int i1, int j ) {
    DoubleVector v = new DoubleVector( i1-i0+1 );
    double [] a = v.getArray();
    int count = 0;
    for( int i = i0; i <= i1; i++ ) {
      a[count] = A[i][j];
      count++;
    }
    return v;
  }

  /** Multiplication between a row (or part of a row) of the first matrix
   *  and a column (or part of a column) of the second matrix.
* @param i the index of the row in the first matrix * @param j0 the index of the first column in the first matrix * @param j1 the index of the last column in the first matrix * @param B the second matrix * @param l the index of the column in the second matrix * @return the result of the multiplication */ public double times( int i, int j0, int j1, PaceMatrix B, int l ) { double s = 0.0; for(int j = j0; j <= j1; j++ ) { s += A[i][j] * B.A[j][l]; } return s; } /** Decimal format for converting a matrix into a string * @return the default decimal format */ protected DecimalFormat [] format() { return format(0, m-1, 0, n-1, 7, false ); } /** Decimal format for converting a matrix into a string * @param digits the number of digits * @return the decimal format */ protected DecimalFormat [] format( int digits ) { return format(0, m-1, 0, n-1, digits, false); } /** Decimal format for converting a matrix into a string * @param digits the number of digits * @param trailing * @return the decimal format */ protected DecimalFormat [] format( int digits, boolean trailing ) { return format(0, m-1, 0, n-1, digits, trailing); } /** Decimal format for converting a matrix into a string * @param i0 * @param i1 * @param j * @param digits the number of digits * @param trailing * @return the decimal format */ protected DecimalFormat format(int i0, int i1, int j, int digits, boolean trailing) { FlexibleDecimalFormat df = new FlexibleDecimalFormat(digits, trailing); df.grouping( true ); for(int i = i0; i <= i1; i ++ ) df.update( A[i][j] ); return df; } /** Decimal format for converting a matrix into a string * @param i0 * @param i1 * @param j0 * @param j1 * @param trailing * @param digits the number of digits * @return the decimal format */ protected DecimalFormat [] format(int i0, int i1, int j0, int j1, int digits, boolean trailing) { DecimalFormat [] f = new DecimalFormat[j1-j0+1]; for( int j = j0; j <= j1; j++ ) { f[j] = format(i0, i1, j, digits, trailing); } return f; } /** * Converts 
matrix to string
   * 
   * @return the matrix as string
   */
  public String toString() {
    return toString( 5, false );
  }

  /**
   * Converts matrix to string
   *
   * @param digits number of digits after decimal point
   * @param trailing true if trailing zeros are padded
   * @return the matrix as string
   */
  public String toString( int digits, boolean trailing ) {

    if( isEmpty() ) return "null matrix";

    StringBuffer text = new StringBuffer();
    DecimalFormat [] nf = format( digits, trailing );
    int numCols = 0;
    int count = 0;
    int width = 80;      // maximum characters per output band
    int lenNumber;

    int [] nCols = new int[n];
    int nk=0;

    // First pass: decide how many columns fit per band, judged by the
    // formatted width of the first row's entries.
    for( int j = 0; j < n; j++ ) {
      lenNumber = nf[j].format( A[0][j]).length();
      if( count + 1 + lenNumber > width -1 ) {
        nCols[nk++] = numCols;
        count = 0;
        numCols = 0;
      }
      count += 1 + lenNumber;
      ++numCols;
    }
    nCols[nk] = numCols;

    // Second pass: print the matrix band by band.
    nk = 0;
    for( int k = 0; k < n; ) {
      for( int i = 0; i < m; i++ ) {
        for( int j = k; j < k + nCols[nk]; j++)
          text.append( " " + nf[j].format( A[i][j]) );
        text.append("\n");
      }
      k += nCols[nk];
      ++nk;
      text.append("\n");
    }

    return text.toString();
  }

  /** Squared sum of a column or row in a matrix
   *  @param j the index of the column or row
   *  @param i0 the index of the first element
   *  @param i1 the index of the last element
   *  @param col if true, sum over a column; otherwise, over a row
   *  @return the squared sum
   */
  public double sum2( int j, int i0, int i1, boolean col ) {
    double s2 = 0;
    if( col ) {  // column
      for( int i = i0; i <= i1; i++ )
        s2 += A[i][j] * A[i][j];
    }
    else {       // row
      for( int i = i0; i <= i1; i++ )
        s2 += A[j][i] * A[j][i];
    }
    return s2;
  }

  /** Squared sum of columns or rows of a matrix
   *  @param col if true, sum over columns; otherwise, over rows
   *  @return the squared sums
   */
  public double[] sum2( boolean col ) {
    int l = col ? n : m;   // number of sums to compute
    int p = col ? m : n;   // length of each column/row
    double [] s2 = new double[l];
    for( int i = 0; i < l; i++ )
      s2[i] = sum2( i, 0, p-1, col );
    return s2;
  }

  /** Constructs single Householder transformation for a column
      @param j    the index of the column
      @param k    the index of the row
      @return     d and q
  */
  public double [] h1( int j, int k ) {
    double dq[] = new double[2];
    double s2 = sum2(j, k, m-1, true);
    // d takes the sign opposite to A[k][j] for numerical stability
    dq[0] = A[k][j] >= 0 ? - Math.sqrt( s2 ) : Math.sqrt( s2 );
    A[k][j] -= dq[0];
    dq[1] = A[k][j] * dq[0];
    return dq;
  }

  /** Performs single Householder transformation on one column of a matrix
      @param j    the index of the column
      @param k    the index of the row
      @param q    q = - u'u/2; must be negative
      @param b    the matrix to be transformed
      @param l    the column of the matrix b
  */
  public void h2( int j, int k, double q, PaceMatrix b, int l ) {
    double s = 0, alpha;
    for( int i = k; i < m; i++ )
      s += A[i][j] * b.A[i][l];
    alpha = s / q;
    for( int i = k; i < m; i++ )
      b.A[i][l] += alpha * A[i][j];
  }

  /** Constructs the Givens rotation that zeroes the second component
      @param a the first component
      @param b the second component
      @return a double array that stores the cosine and sine values
  */
  public double [] g1( double a, double b ) {
    double cs[] = new double[2];
    double r = Maths.hypot(a, b);
    if( r == 0.0 ) {
      cs[0] = 1;
      cs[1] = 0;
    }
    else {
      cs[0] = a / r;
      cs[1] = b / r;
    }
    return cs;
  }

  /** Performs the Givens rotation on rows i0 and i1 of column j
      @param cs an array storing the cosine and sine values
      @param i0 the index of the row of the first element
      @param i1 the index of the row of the second element
      @param j the index of the column
  */
  public void g2( double cs[], int i0, int i1, int j ){
    double w = cs[0] * A[i0][j] + cs[1] * A[i1][j];
    A[i1][j] = - cs[1] * A[i0][j] + cs[0] * A[i1][j];
    A[i0][j] = w;
  }

  /** Forward ordering of columns in terms of response explanation.  On
   *  input, matrices A and b are already QR-transformed. The indices of
   *  transformed columns are stored in the pivoting vector.
*
   *  @param b the PaceMatrix b
   *  @param pvt the pivoting vector
   *  @param k0 the first k0 columns (in pvt) of A are not to be changed
   */
  public void forward( PaceMatrix b, IntVector pvt, int k0 ) {
    for( int j = k0; j < Math.min(pvt.size(), m); j++ ) {
      steplsqr( b, pvt, j, mostExplainingColumn(b, pvt, j), true );
    }
  }

  /** Returns the index of the column that has the largest (squared)
   *  response, when each of columns pvt[ks:] is moved to become the
   *  ks-th column. On input, A and b are both QR-transformed.
   * 
   *  @param b response
   *  @param pvt pivoting index of A
   *  @param ks columns pvt[ks:] of A are to be tested
   *  @return the index of the column
   */
  public int mostExplainingColumn( PaceMatrix b, IntVector pvt, int ks ) {
    double val;
    int [] p = pvt.getArray();  // NOTE(review): p is unused in this method
    double ma = columnResponseExplanation( b, pvt, ks, ks );
    int jma = ks;
    for( int i = ks+1; i < pvt.size(); i++ ) {
      val = columnResponseExplanation( b, pvt, i, ks );
      if( val > ma ) {
        ma = val;
        jma = i;
      }
    }
    return jma;
  }

  /** Backward ordering of columns in terms of response explanation. On
   *  input, matrices A and b are already QR-transformed. The indices of
   *  transformed columns are stored in the pivoting vector.
   * 
   *  A and b must have the same number of rows, being the (pseudo-)rank.
   * 
   *  @param b PaceMatrix b
   *  @param pvt pivoting vector
   *  @param ks number of QR-transformed columns; psuedo-rank of A
   *  @param k0 first k0 columns in pvt[] are not to be ordered.
   */
  public void backward( PaceMatrix b, IntVector pvt, int ks, int k0 ) {
    for( int j = ks; j > k0; j-- ) {
      steplsqr( b, pvt, j, leastExplainingColumn(b, pvt, j, k0), false );
    }
  }

  /** Returns the index of the column that has the smallest (squared)
   *  response, when the column is moved to become the (ks-1)-th
   *  column. On input, A and b are both QR-transformed.
   * 
   *  @param b response
   *  @param pvt pivoting index of A
   *  @param ks psudo-rank of A
   *  @param k0 A[][pvt[0:(k0-1)]] are excluded from the testing.
   *  @return the index of the column
   */
  public int leastExplainingColumn( PaceMatrix b, IntVector pvt, int ks,
                                    int k0 ) {
    double val;
    int [] p = pvt.getArray();  // NOTE(review): p is unused in this method
    double mi = columnResponseExplanation( b, pvt, ks-1, ks );
    int jmi = ks-1;
    for( int i = k0; i < ks - 1; i++ ) {
      val = columnResponseExplanation( b, pvt, i, ks );
      if( val <= mi ) {
        mi = val;
        jmi = i;
      }
    }
    return jmi;
  }

  /** Returns the squared ks-th response value if the j-th column becomes
   *  the ks-th after orthogonal transformation.  A[][pvt[ks:j]] (or
   *  A[][pvt[j:ks]], if ks > j) and b[] are already QR-transformed
   *  on input and will remain unchanged on output.
   * 
   *  More generally, it returns the inner product of the corresponding
   *  row vector of the response PaceMatrix. (To be implemented.)
   * 
   *  @param b PaceMatrix b
   *  @param pvt pivoting vector
   *  @param j the column A[pvt[j]][] is to be moved
   *  @param ks the target column A[pvt[ks]][]
   *  @return the squared response value
   */
  public double columnResponseExplanation( PaceMatrix b, IntVector pvt,
                                           int j, int ks ) {
    /*  Implementation:
     *
     *  If j == ks - 1, returns the squared ks-th response directly.
     *
     *  If j > ks - 1, returns the ks-th response after
     *  Householder-transforming the j-th column and the response.
     *
     *  If j < ks - 1, returns the ks-th response after a sequence of
     *  Givens rotations starting from the j-th row.  */
    int k, l;
    double [] xxx = new double[n];
    int [] p = pvt.getArray();
    double val;

    if( j == ks -1 ) val = b.A[j][0];
    else if( j > ks - 1 ) {
      int jm = Math.min(n-1, j);
      DoubleVector u = getColumn(ks,jm,p[j]);
      DoubleVector v = b.getColumn(ks,jm,0);
      val = v.innerProduct(u) / u.norm2();
    }
    else {  // ks > j
      for( k = j+1; k < ks; k++ )  // make a copy of A[j][]
        xxx[k] = A[j][p[k]];
      val = b.A[j][0];
      double [] cs;
      for( k = j+1; k < ks; k++ ) {
        cs = g1( xxx[k], A[k][p[k]] );
        for( l = k+1; l < ks; l++ )
          xxx[l] = - cs[1] * xxx[l] + cs[0] * A[k][p[l]];
        val = - cs[1] * val + cs[0] * b.A[k][0];
      }
    }
    return val * val;  // or inner product in later implementation???
} /** * QR transformation for a least squares problem<br/> * A x = b<br/> * implicitly both A and b are transformed. pvt.size() is the psuedo-rank of * A. * * @param b PaceMatrix b * @param pvt pivoting vector * @param k0 the first k0 columns of A (indexed by pvt) are pre-chosen. * (But subject to rank examination.) * * For example, the constant term may be reserved, in which * case k0 = 1. **/ public void lsqr( PaceMatrix b, IntVector pvt, int k0 ) { final double TINY = 1e-15; int [] p = pvt.getArray(); int ks = 0; // psuedo-rank for(int j = 0; j < k0; j++ ) // k0 pre-chosen columns if( sum2(p[j],ks,m-1,true) > TINY ){ // large diagonal element steplsqr(b, pvt, ks, j, true); ks++; } else { // collinear column pvt.shiftToEnd( j ); pvt.setSize(pvt.size()-1); k0--; j--; } // initial QR transformation for(int j = k0; j < Math.min( pvt.size(), m ); j++ ) { if( sum2(p[j], ks, m-1, true) > TINY ) { steplsqr(b, pvt, ks, j, true); ks++; } else { // collinear column pvt.shiftToEnd( j ); pvt.setSize(pvt.size()-1); j--; } } b.m = m = ks; // reset number of rows pvt.setSize( ks ); } /** QR transformation for a least squares problem <br/> * A x = b <br/> * implicitly both A and b are transformed. pvt.size() is the psuedo-rank of A. * * @param b PaceMatrix b * @param pvt pivoting vector * @param k0 the first k0 columns of A (indexed by pvt) are pre-chosen. * (But subject to rank examination.) * * For example, the constant term may be reserved, in which * case k0 = 1. **/ public void lsqrSelection( PaceMatrix b, IntVector pvt, int k0 ) { int numObs = m; // number of instances int numXs = pvt.size(); lsqr( b, pvt, k0 ); if( numXs > 200 || numXs > numObs ) { // too many columns. 
forward(b, pvt, k0); } backward(b, pvt, pvt.size(), k0); } /** * Sets all diagonal elements to be positive (or nonnegative) without * changing the least squares solution * @param Y the response * @param pvt the pivoted column index */ public void positiveDiagonal( PaceMatrix Y, IntVector pvt ) { int [] p = pvt.getArray(); for( int i = 0; i < pvt.size(); i++ ) { if( A[i][p[i]] < 0.0 ) { for( int j = i; j < pvt.size(); j++ ) A[i][p[j]] = - A[i][p[j]]; Y.A[i][0] = - Y.A[i][0]; } } } /** Stepwise least squares QR-decomposition of the problem * A x = b @param b PaceMatrix b @param pvt pivoting vector @param ks number of transformed columns @param j pvt[j], the column to adjoin or delete @param adjoin to adjoin if true; otherwise, to delete */ public void steplsqr( PaceMatrix b, IntVector pvt, int ks, int j, boolean adjoin ) { final int kp = pvt.size(); // number of columns under consideration int [] p = pvt.getArray(); if( adjoin ) { // adjoining int pj = p[j]; pvt.swap( ks, j ); double dq[] = h1( pj, ks ); int pk; for( int k = ks+1; k < kp; k++ ){ pk = p[k]; h2( pj, ks, dq[1], this, pk); } h2( pj, ks, dq[1], b, 0 ); // for matrix. ??? 
A[ks][pj] = dq[0]; for( int k = ks+1; k < m; k++ ) A[k][pj] = 0; } else { // removing int pj = p[j]; for( int i = j; i < ks-1; i++ ) p[i] = p[i+1]; p[ks-1] = pj; double [] cs; for( int i = j; i < ks-1; i++ ){ cs = g1( A[i][p[i]], A[i+1][p[i]] ); for( int l = i; l < kp; l++ ) g2( cs, i, i+1, p[l] ); for( int l = 0; l < b.n; l++ ) b.g2( cs, i, i+1, l ); } } } /** Solves upper-triangular equation <br/> * R x = b <br/> * On output, the solution is stored in b * @param b the response * @param pvt the pivoting vector * @param kp the number of the first columns involved */ public void rsolve( PaceMatrix b, IntVector pvt, int kp) { if(kp == 0) b.m = 0; int i, j, k; int [] p = pvt.getArray(); double s; double [][] ba = b.getArray(); for( k = 0; k < b.n; k++ ) { ba[kp-1][k] /= A[kp-1][p[kp-1]]; for( i = kp - 2; i >= 0; i-- ){ s = 0; for( j = i + 1; j < kp; j++ ) s += A[i][p[j]] * ba[j][k]; ba[i][k] -= s; ba[i][k] /= A[i][p[i]]; } } b.m = kp; } /** Returns a new matrix which binds two matrices together with rows. * @param b the second matrix * @return the combined matrix */ public PaceMatrix rbind( PaceMatrix b ){ if( n != b.n ) throw new IllegalArgumentException("unequal numbers of rows."); PaceMatrix c = new PaceMatrix( m + b.m, n ); c.setMatrix( 0, m - 1, 0, n - 1, this ); c.setMatrix( m, m + b.m - 1, 0, n - 1, b ); return c; } /** Returns a new matrix which binds two matrices with columns. * @param b the second matrix * @return the combined matrix */ public PaceMatrix cbind( PaceMatrix b ) { if( m != b.m ) throw new IllegalArgumentException("unequal numbers of rows: " + m + " and " + b.m); PaceMatrix c = new PaceMatrix(m, n + b.n); c.setMatrix( 0, m - 1, 0, n - 1, this ); c.setMatrix( 0, m - 1, n, n + b.n - 1, b ); return c; } /** Solves the nonnegative linear squares problem. That is, <p> * <center> min || A x - b||, subject to x >= 0. </center> <p> * * For algorithm, refer to P161, Chapter 23 of C. L. Lawson and * R. J. Hanson (1974). "Solving Least Squares * Problems". 
Prentice-Hall. * @param b the response * @param pvt vector storing pivoting column indices * @return solution */ public DoubleVector nnls( PaceMatrix b, IntVector pvt ) { int j, t, counter = 0, jm = -1, n = pvt.size(); double ma, max, alpha, wj; int [] p = pvt.getArray(); DoubleVector x = new DoubleVector( n ); double [] xA = x.getArray(); PaceMatrix z = new PaceMatrix(n, 1); PaceMatrix bt; // step 1 int kp = 0; // #variables in the positive set P while ( true ) { // step 2 if( ++counter > 3*n ) // should never happen throw new RuntimeException("Does not converge"); t = -1; max = 0.0; bt = new PaceMatrix( b.transpose() ); for( j = kp; j <= n-1; j++ ) { // W = A' (b - A x) wj = bt.times( 0, kp, m-1, this, p[j] ); if( wj > max ) { // step 4 max = wj; t = j; } } // step 3 if ( t == -1) break; // optimum achieved // step 5 pvt.swap( kp, t ); // move variable from set Z to set P kp++; xA[kp-1] = 0; steplsqr( b, pvt, kp-1, kp-1, true ); // step 6 ma = 0; while ( ma < 1.5 ) { for( j = 0; j <= kp-1; j++ ) z.A[j][0] = b.A[j][0]; rsolve(z, pvt, kp); ma = 2; jm = -1; for( j = 0; j <= kp-1; j++ ) { // step 7, 8 and 9 if( z.A[j][0] <= 0.0 ) { // alpha always between 0 and 1 alpha = xA[j] / ( xA[j] - z.A[j][0] ); if( alpha < ma ) { ma = alpha; jm = j; } } } if( ma > 1.5 ) for( j = 0; j <= kp-1; j++ ) xA[j] = z.A[j][0]; // step 7 else { for( j = kp-1; j >= 0; j-- ) { // step 10 // Modified to avoid round-off error (which seemingly // can cause infinite loop). if( j == jm ) { // step 11 xA[j] = 0.0; steplsqr( b, pvt, kp, j, false ); kp--; // move variable from set P to set Z } else xA[j] += ma * ( z.A[j][0] - xA[j] ); } } } } x.setSize(kp); pvt.setSize(kp); return x; } /** Solves the nonnegative least squares problem with equality * constraint. That is, <p> * <center> min ||A x - b||, subject to x >= 0 and c x = d. 
 * </center> <p>
 *
 * (Continuation of the javadoc opened above: the equality constraints are
 * enforced approximately, by stacking them with a heavily down-weighted
 * copy of the least-squares system and delegating to nnls().)
 *
 * @param b   the response
 * @param c   coefficients of the equality constraints
 * @param d   constants of the equality constraints
 * @param pvt vector storing pivoting column indices
 * @return the solution
 */
public DoubleVector nnlse( PaceMatrix b, PaceMatrix c, PaceMatrix d,
                           IntVector pvt ) {
  // eps scales the least-squares rows far below the constraint rows, so
  // the constraints dominate the solution of the stacked NNLS problem.
  double eps = 1e-10 * Math.max( c.maxAbs(), d.maxAbs() ) /
    Math.max( maxAbs(), b.maxAbs() );

  PaceMatrix e = c.rbind( new PaceMatrix( times(eps) ) );   // [c; eps*A]
  PaceMatrix f = d.rbind( new PaceMatrix( b.times(eps) ) ); // [d; eps*b]

  return e.nnls( f, pvt );
}

/** Solves the nonnegative least squares problem with equality
 *  constraint. That is, <p>
 *  <center> min ||A x - b||, subject to x >= 0 and || x || = 1. </center>
 *  <p>
 *  Implemented by calling nnlse() with a single all-ones constraint row.
 *
 * @param b   the response
 * @param pvt vector storing pivoting column indices
 * @return the solution
 */
public DoubleVector nnlse1( PaceMatrix b, IntVector pvt ) {
  // One equality row of ones: sum(x) == 1 (together with x >= 0).
  PaceMatrix c = new PaceMatrix( 1, n, 1 );
  PaceMatrix d = new PaceMatrix( 1, b.n, 1 );

  return nnlse(b, c, d, pvt);
}

/** Generates a matrix with standard-normally distributed random elements.
 *  NOTE(review): uses an unseeded java.util.Random, so results are not
 *  reproducible between runs.
 *
 @param m    Number of rows.
 @param n    Number of colums.
 @return     An m-by-n matrix with random elements.  */
public static Matrix randomNormal( int m, int n ) {
  Random random = new Random();

  Matrix A = new Matrix(m,n);
  double[][] X = A.getArray();
  for (int i = 0; i < m; i++) {
    for (int j = 0; j < n; j++) {
      X[i][j] = random.nextGaussian();
    }
  }
  return A;
}

/**
 * Returns the revision string.
 *
 * @return the revision
 */
public String getRevision() {
  return RevisionUtils.extract("$Revision: 1.6 $");
}

/**
 * for testing only.
 *
 * Builds a synthetic linear-regression data set (intercept plus two
 * clusters of coefficients), then prints and compares the OLS estimate
 * with several pace estimates (pace2/pace4/pace6 based on a chi-square
 * mixture, and nested/subset/empirical-Bayes estimates based on a
 * normal mixture).
 *
 * @param args the commandline arguments - ignored
 */
public static void main( String args[] )
{
  System.out.println("================================================" +
                     "===========");
  System.out.println("To test the pace estimators of linear model\n" +
                     "coefficients.\n");

  double sd = 2;       // standard deviation of the random error term
  int n = 200;         // total number of observations
  double beta0 = 100;  // intercept
  int k1 = 20;         // number of coefficients of the first cluster
  double beta1 = 0;    // coefficient value of the first cluster
  int k2 = 20;         // number of coefficients of the second cluster
  double beta2 = 5;    // coefficient value of the second cluster
  int k = 1 + k1 + k2;

  // True coefficient vector: [beta0, beta1 x k1, beta2 x k2]
  DoubleVector beta = new DoubleVector( 1 + k1 + k2 );
  beta.set( 0, beta0 );
  beta.set( 1, k1, beta1 );
  beta.set( k1+1, k1+k2, beta2 );

  System.out.println("The data set contains " + n +
                     " observations plus " + (k1 + k2) +
                     " variables.\n\nThe coefficients of the true model" +
                     " are:\n\n" + beta );

  System.out.println("\nThe standard deviation of the error term is " +
                     sd );

  System.out.println("===============================================" +
                     "============");

  // Design matrix: first column all ones (intercept), rest random.
  PaceMatrix X = new PaceMatrix( n, k1+k2+1 );
  X.setMatrix( 0, n-1, 0, 0, 1 );
  X.setMatrix( 0, n-1, 1, k1+k2, random(n, k1+k2) );

  // Response: X * beta + N(0, sd^2) noise.
  PaceMatrix Y = new PaceMatrix( X.times( new PaceMatrix(beta) ).
  plusEquals( randomNormal(n,1).times(sd) ) );
  IntVector pvt = (IntVector) IntVector.seq(0, k1+k2);

  /*System.out.println( "The OLS estimate (by jama.Matrix.solve()) is:\n\n" +
    (new PaceMatrix(X.solve(Y))).getColumn(0) );*/

  // QR-style decomposition with column pivoting, then back-substitution
  // to obtain the OLS estimate.
  X.lsqrSelection( Y, pvt, 1 );
  X.positiveDiagonal( Y, pvt );

  PaceMatrix sol = (PaceMatrix) Y.clone();
  X.rsolve( sol, pvt, pvt.size() );
  DoubleVector betaHat = sol.getColumn(0).unpivoting( pvt, k );
  System.out.println( "\nThe OLS estimate (through lsqr()) is: \n\n" +
                      betaHat );

  System.out.println( "\nQuadratic loss of the OLS estimate (||X b - X bHat||^2) = " +
                      ( new PaceMatrix( X.times( new
                        PaceMatrix(beta.minus(betaHat)) )))
                      .getColumn(0).sum2() );

  System.out.println("=============================================" +
                     "==============");
  System.out.println("             *** Pace estimation *** \n");

  // Residual-based estimate of the error standard deviation.
  DoubleVector r = Y.getColumn( pvt.size(), n-1, 0);
  double sde = Math.sqrt(r.sum2() / r.size());

  System.out.println( "Estimated standard deviation = " + sde );

  // Standardized transformed responses used by the pace estimators.
  DoubleVector aHat = Y.getColumn( 0, pvt.size()-1, 0).times( 1./sde );
  System.out.println("\naHat = \n" + aHat );

  System.out.println("\n========= Based on chi-square mixture ============");

  ChisqMixture d2 = new ChisqMixture();
  int method = MixtureDistribution.NNMMethod;
  DoubleVector AHat = aHat.square();
  d2.fit( AHat, method );
  System.out.println( "\nEstimated mixing distribution is:\n" + d2 );

  // pace2 estimate
  DoubleVector ATilde = d2.pace2( AHat );
  DoubleVector aTilde = ATilde.sqrt().times(aHat.sign());
  PaceMatrix YTilde = new
    PaceMatrix((new PaceMatrix(aTilde)).times( sde ));
  X.rsolve( YTilde, pvt, pvt.size() );
  DoubleVector betaTilde =
    YTilde.getColumn(0).unpivoting( pvt, k );
  System.out.println( "\nThe pace2 estimate of coefficients = \n" +
                      betaTilde );
  System.out.println( "Quadratic loss = " +
                      ( new PaceMatrix( X.times( new
                        PaceMatrix(beta.minus(betaTilde)) )))
                      .getColumn(0).sum2() );

  // pace4 estimate
  ATilde = d2.pace4( AHat );
  aTilde = ATilde.sqrt().times(aHat.sign());
  YTilde = new PaceMatrix((new PaceMatrix(aTilde)).times( sde ));
  X.rsolve( YTilde, pvt, pvt.size() );
  betaTilde = YTilde.getColumn(0).unpivoting( pvt, k );
  System.out.println( "\nThe pace4 estimate of coefficients = \n" +
                      betaTilde );
  System.out.println( "Quadratic loss = " +
                      ( new PaceMatrix( X.times( new
                        PaceMatrix(beta.minus(betaTilde)) )))
                      .getColumn(0).sum2() );

  // pace6 estimate
  ATilde = d2.pace6( AHat );
  aTilde = ATilde.sqrt().times(aHat.sign());
  YTilde = new PaceMatrix((new PaceMatrix(aTilde)).times( sde ));
  X.rsolve( YTilde, pvt, pvt.size() );
  betaTilde = YTilde.getColumn(0).unpivoting( pvt, k );
  System.out.println( "\nThe pace6 estimate of coefficients = \n" +
                      betaTilde );
  System.out.println( "Quadratic loss = " +
                      ( new PaceMatrix( X.times( new
                        PaceMatrix(beta.minus(betaTilde)) )))
                      .getColumn(0).sum2() );

  System.out.println("\n========= Based on normal mixture ============");

  NormalMixture d = new NormalMixture();
  d.fit( aHat, method );
  System.out.println( "\nEstimated mixing distribution is:\n" + d );

  // nested estimate
  aTilde = d.nestedEstimate( aHat );
  YTilde = new PaceMatrix((new PaceMatrix(aTilde)).times( sde ));
  X.rsolve( YTilde, pvt, pvt.size() );
  betaTilde = YTilde.getColumn(0).unpivoting( pvt, k );
  System.out.println( "The nested estimate of coefficients = \n" +
                      betaTilde );
  System.out.println( "Quadratic loss = " +
                      ( new PaceMatrix( X.times( new
                        PaceMatrix(beta.minus(betaTilde)) )))
                      .getColumn(0).sum2() );

  // subset estimate
  aTilde = d.subsetEstimate( aHat );
  YTilde = new PaceMatrix((new PaceMatrix(aTilde)).times( sde ));
  X.rsolve( YTilde, pvt, pvt.size() );
  betaTilde = YTilde.getColumn(0).unpivoting( pvt, k );
  System.out.println( "\nThe subset estimate of coefficients = \n" +
                      betaTilde );
  System.out.println( "Quadratic loss = " +
                      ( new PaceMatrix( X.times( new
                        PaceMatrix(beta.minus(betaTilde)) )))
                      .getColumn(0).sum2() );

  // empirical Bayes estimate
  aTilde = d.empiricalBayesEstimate( aHat );
  YTilde = new PaceMatrix((new PaceMatrix(aTilde)).times( sde ));
  X.rsolve( YTilde, pvt, pvt.size() );
  betaTilde = YTilde.getColumn(0).unpivoting( pvt, k );
  System.out.println( "\nThe empirical Bayes estimate of coefficients = \n" +
                      betaTilde );

  System.out.println( "Quadratic loss = " +
                      ( new PaceMatrix( X.times( new
                        PaceMatrix(beta.minus(betaTilde)) )))
                      .getColumn(0).sum2() );
}
}
36,194
29.98887
89
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/supportVector/CachedKernel.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * CachedKernel.java
 * Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers.functions.supportVector;

import java.util.Enumeration;
import java.util.Vector;

import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.Utils;

/**
 * Base class for RBFKernel and PolyKernel that implements a simple LRU
 * (least-recently-used) cache if the cache size is set to a value &gt; 0.
 * Otherwise it uses a full cache (the complete, triangular kernel matrix,
 * computed lazily on the first cached lookup).
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @author Shane Legg (shane@intelligenesis.net) (sparse vector code)
 * @author Stuart Inglis (stuart@reeltwo.com) (sparse vector code)
 * @author J. Lindgren (jtlindgr{at}cs.helsinki.fi) (RBF kernel)
 * @author Steven Hugg (hugg@fasterlight.com) (refactored, LRU cache)
 * @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz) (full cache)
 * @version $Revision: 8034 $
 */
public abstract class CachedKernel extends Kernel {

  /** for serialization */
  private static final long serialVersionUID = 702810182699015136L;

  /** Counts the number of kernel evaluations. */
  protected int m_kernelEvals;

  /** Counts the number of kernel cache hits. */
  protected int m_cacheHits;

  /** The size of the cache (a prime number); 0 = full cache, -1 = no cache */
  protected int m_cacheSize = 250007;

  /** Kernel cache: cached kernel values (parallel to m_keys) */
  protected double[] m_storage;
  /** Kernel cache: packed (id1,id2) keys, offset by +1 so 0 = empty slot */
  protected long[] m_keys;

  /** The kernel matrix if full cache is used (i.e. size is set to 0);
      lower-triangular, since the kernel is symmetric */
  protected double[][] m_kernelMatrix;

  /** The number of instance in the dataset */
  protected int m_numInsts;

  /** number of cache slots in an entry (bucket width of the LRU cache) */
  protected int m_cacheSlots = 4;

  /**
   * default constructor - does nothing.
   */
  public CachedKernel() {
    super();
  }

  /**
   * Initializes the kernel cache. The actual size of the cache in bytes is
   * (64 * cacheSize).
   *
   * @param data	the data to use
   * @param cacheSize	the cache size
   * @throws Exception	if something goes wrong
   */
  protected CachedKernel(Instances data, int cacheSize) throws Exception {
    super();

    setCacheSize(cacheSize);

    buildKernel(data);
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return 		an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector		result;
    Enumeration		en;

    result = new Vector();

    // include the superclass's options first
    en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement(en.nextElement());

    result.addElement(new Option(
	"\tThe size of the cache (a prime number), 0 for full cache and \n"
	+ "\t-1 to turn it off.\n"
	+ "\t(default: 250007)",
	"C", 1, "-C <num>"));

    return result.elements();
  }

  /**
   * Parses a given list of options. Recognizes -C (cache size); all other
   * options are passed on to the superclass. <p/>
   *
   * @param options 	the list of options as an array of strings
   * @throws Exception 	if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String	tmpStr;

    tmpStr = Utils.getOption('C', options);
    if (tmpStr.length() != 0)
      setCacheSize(Integer.parseInt(tmpStr));
    else
      setCacheSize(250007);

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the Kernel.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    int       i;
    Vector    result;
    String[]  options;

    result = new Vector();

    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);

    result.add("-C");
    result.add("" + getCacheSize());

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * This method is overridden in subclasses to implement specific kernels.
   *
   * @param id1   	the index of instance 1
   * @param id2		the index of instance 2
   * @param inst1	the instance 1 object
   * @return 		the dot product
   * @throws Exception 	if something goes wrong
   */
  protected abstract double evaluate(int id1, int id2, Instance inst1)
    throws Exception;

  /**
   * Implements the abstract function of Kernel using the cache. This method
   * uses the evaluate() method to do the actual dot product.
   *
   * @param id1   	the index of the first instance in the dataset
   * @param id2		the index of the second instance in the dataset
   * @param inst1	the instance corresponding to id1 (used if id1 == -1)
   * @return 		the result of the kernel function
   * @throws Exception 	if something goes wrong
   */
  public double eval(int id1, int id2, Instance inst1)
    throws Exception {

    double result = 0;
    long key = -1;
    int location = -1;

    // we can only cache if we know the indexes and caching is not
    // disbled (m_cacheSize == -1)
    if ( (id1 >= 0) && (m_cacheSize != -1) ) {

      // Use full cache?
      if (m_cacheSize == 0) {
	// Lazily compute the full (lower-triangular) kernel matrix on
	// the first cached lookup.
	if (m_kernelMatrix == null) {
	  m_kernelMatrix = new double[m_data.numInstances()][];
	  for(int i = 0; i < m_data.numInstances(); i++) {
	    m_kernelMatrix[i] = new double[i + 1];
	    for(int j = 0; j <= i; j++) {
	      m_kernelEvals++;
	      m_kernelMatrix[i][j] = evaluate(i, j, m_data.instance(i));
	    }
	  }
	}
	m_cacheHits++;
	// symmetric: index with the larger id first
	result = (id1 > id2) ?
	  m_kernelMatrix[id1][id2] : m_kernelMatrix[id2][id1];
	return result;
      }

      // Use LRU cache: pack the (symmetric) pair into a single long key
      if (id1 > id2) {
	key = (id1 + ((long) id2 * m_numInsts));
      } else {
	key = (id2 + ((long) id1 * m_numInsts));
      }
      location = (int) (key % m_cacheSize) * m_cacheSlots;
      int loc = location;
      // linear scan of the bucket's slots; stored keys are offset by +1
      // so that 0 marks an empty slot
      for (int i = 0; i < m_cacheSlots; i++) {
	long thiskey = m_keys[loc];
	if (thiskey == 0)
	  break; // empty slot, so break out of loop early
	if (thiskey == (key + 1)) {
	  m_cacheHits++;
	  // move entry to front of cache (LRU) by swapping
	  // only if it's not already at the front of cache
	  if (i > 0) {
	    double tmps = m_storage[loc];
	    m_storage[loc] = m_storage[location];
	    m_keys[loc] = m_keys[location];
	    m_storage[location] = tmps;
	    m_keys[location] = thiskey;
	    return tmps;
	  } else
	    return m_storage[loc];
	}
	loc++;
      }
    }

    // cache miss (or caching disabled): compute the value
    result = evaluate(id1, id2, inst1);
    m_kernelEvals++;

    // store result in cache
    if ( (key != -1) && (m_cacheSize != -1) ) {
      // move all cache slots forward one array index
      // to make room for the new entry (the last slot is evicted)
      System.arraycopy(m_keys, location, m_keys, location + 1,
	  m_cacheSlots - 1);
      System.arraycopy(m_storage, location, m_storage, location + 1,
	  m_cacheSlots - 1);
      m_storage[location] = result;
      m_keys[location] = (key + 1);
    }
    return result;
  }

  /**
   * Returns the number of time Eval has been called.
   *
   * @return the number of kernel evaluation.
   */
  public int numEvals() {
    return m_kernelEvals;
  }

  /**
   * Returns the number of cache hits on dot products.
   *
   * @return the number of cache hits.
   */
  public int numCacheHits() {
    return m_cacheHits;
  }

  /**
   * Frees the cache used by the kernel.
   */
  public void clean() {
    m_storage = null;
    m_keys = null;
    m_kernelMatrix = null;
  }

  /**
   * Calculates a dot product between two instances. Iterates both sparse
   * representations in parallel, multiplying only attributes present in
   * both; the class attribute is skipped.
   *
   * @param inst1	the first instance
   * @param inst2	the second instance
   * @return 		the dot product of the two instances.
   * @throws Exception	if an error occurs
   */
  protected final double dotProd(Instance inst1, Instance inst2)
    throws Exception {

    double result = 0;

    // we can do a fast dot product
    int n1 = inst1.numValues();
    int n2 = inst2.numValues();
    int classIndex = m_data.classIndex();
    for (int p1 = 0, p2 = 0; p1 < n1 && p2 < n2;) {
      int ind1 = inst1.index(p1);
      int ind2 = inst2.index(p2);
      if (ind1 == ind2) {
	if (ind1 != classIndex) {
	  result += inst1.valueSparse(p1) * inst2.valueSparse(p2);
	}
	p1++;
	p2++;
      } else if (ind1 > ind2) {
	p2++;
      } else {
	p1++;
      }
    }
    return (result);
  }

  /**
   * Sets the size of the cache to use (a prime number).
   * NOTE(review): an invalid value (&lt; -1) is only reported on stdout and
   * otherwise silently ignored rather than raising an exception — this
   * looks deliberate (WEKA GUI option handling), but confirm before
   * relying on it.
   *
   * @param value	the size of the cache
   */
  public void setCacheSize(int value) {
    if (value >= -1) {
      m_cacheSize = value;
      clean();
    }
    else {
      System.out.println(
	  "Cache size cannot be smaller than -1 (provided: " + value + ")!");
    }
  }

  /**
   * Gets the size of the cache
   *
   * @return 		the cache size
   */
  public int getCacheSize() {
    return m_cacheSize;
  }

  /**
   * Returns the tip text for this property
   *
   * @return 		tip text for this property suitable for
   * 			displaying in the explorer/experimenter gui
   */
  public String cacheSizeTipText() {
    return "The size of the cache (a prime number), 0 for full cache and -1 to turn it off.";
  }

  /**
   * initializes variables etc. Resets the evaluation/hit counters and
   * (re-)allocates the LRU cache arrays when a positive cache size is set.
   *
   * @param data	the data to use
   */
  protected void initVars(Instances data) {
    super.initVars(data);

    m_kernelEvals = 0;
    m_cacheHits = 0;
    m_numInsts = m_data.numInstances();

    if (getCacheSize() > 0) {
      // Use LRU cache
      m_storage = new double[m_cacheSize * m_cacheSlots];
      m_keys = new long[m_cacheSize * m_cacheSlots];
    }
    else {
      m_storage = null;
      m_keys = null;
      m_kernelMatrix = null;
    }
  }

  /**
   * builds the kernel with the given data. Initializes the kernel cache.
   * The actual size of the cache in bytes is (64 * cacheSize).
   *
   * @param data	the data to base the kernel on
   * @throws Exception	if something goes wrong
   */
  public void buildKernel(Instances data) throws Exception {
    // does kernel handle the data?
    if (!getChecksTurnedOff())
      getCapabilities().testWithFail(data);

    initVars(data);
  }
}
10,469
25.777494
93
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/supportVector/CheckKernel.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * CheckKernel.java
 * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.functions.supportVector;

import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.core.Attribute;
import weka.core.CheckScheme;
import weka.core.FastVector;
import weka.core.Instances;
import weka.core.MultiInstanceCapabilitiesHandler;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SerializationHelper;
import weka.core.TestInstances;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;

/**
 * Class for examining the capabilities and finding problems with
 * kernels. If you implement a kernel using the WEKA libraries,
 * you should run the checks on it to ensure robustness and correct
 * operation. Passing all the tests of this object does not mean
 * bugs in the kernel don't exist, but this will help find some
 * common ones. <p/>
 *
 * Typical usage: <p/>
 * <code>java weka.classifiers.functions.supportVector.CheckKernel -W kernel_name
 * -- kernel_options </code><p/>
 *
 * CheckKernel reports on the following:
 * <ul>
 *    <li> Kernel abilities
 *      <ul>
 *         <li> Possible command line options to the kernel </li>
 *         <li> Whether the kernel can predict nominal, numeric, string,
 *              date or relational class attributes. </li>
 *         <li> Whether the kernel can handle numeric predictor attributes </li>
 *         <li> Whether the kernel can handle nominal predictor attributes </li>
 *         <li> Whether the kernel can handle string predictor attributes </li>
 *         <li> Whether the kernel can handle date predictor attributes </li>
 *         <li> Whether the kernel can handle relational predictor attributes </li>
 *         <li> Whether the kernel can handle multi-instance data </li>
 *         <li> Whether the kernel can handle missing predictor values </li>
 *         <li> Whether the kernel can handle missing class values </li>
 *         <li> Whether a nominal kernel only handles 2 class problems </li>
 *         <li> Whether the kernel can handle instance weights </li>
 *      </ul>
 *    </li>
 *    <li> Correct functioning
 *      <ul>
 *         <li> Correct initialisation during buildKernel (i.e. no result
 *              changes when buildKernel called repeatedly) </li>
 *         <li> Whether the kernel alters the data passed to it
 *              (number of instances, instance order, instance weights, etc) </li>
 *      </ul>
 *    </li>
 *    <li> Degenerate cases
 *      <ul>
 *         <li> building kernels with zero training instances </li>
 *         <li> all but one predictor attribute values missing </li>
 *         <li> all predictor attribute values missing </li>
 *         <li> all but one class values missing </li>
 *         <li> all class values missing </li>
 *      </ul>
 *    </li>
 * </ul>
 * Running CheckKernel with the debug option set will output the
 * training and test datasets for any failed tests.<p/>
 *
 * The <code>weka.classifiers.AbstractKernelTest</code> uses this
 * class to test all the kernels. Any changes here, have to be
 * checked in that abstract test class, too. <p/>
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -D
 *  Turn on debugging output.</pre>
 *
 * <pre> -S
 *  Silent mode - prints nothing to stdout.</pre>
 *
 * <pre> -N &lt;num&gt;
 *  The number of instances in the datasets (default 20).</pre>
 *
 * <pre> -nominal &lt;num&gt;
 *  The number of nominal attributes (default 2).</pre>
 *
 * <pre> -nominal-values &lt;num&gt;
 *  The number of values for nominal attributes (default 1).</pre>
 *
 * <pre> -numeric &lt;num&gt;
 *  The number of numeric attributes (default 1).</pre>
 *
 * <pre> -string &lt;num&gt;
 *  The number of string attributes (default 1).</pre>
 *
 * <pre> -date &lt;num&gt;
 *  The number of date attributes (default 1).</pre>
 *
 * <pre> -relational &lt;num&gt;
 *  The number of relational attributes (default 1).</pre>
 *
 * <pre> -num-instances-relational &lt;num&gt;
 *  The number of instances in relational/bag attributes (default 10).</pre>
 *
 * <pre> -words &lt;comma-separated-list&gt;
 *  The words to use in string attributes.</pre>
 *
 * <pre> -word-separators &lt;chars&gt;
 *  The word separators to use in string attributes.</pre>
 *
 * <pre> -W
 *  Full name of the kernel analysed.
 *  eg: weka.classifiers.functions.supportVector.RBFKernel
 *  (default weka.classifiers.functions.supportVector.RBFKernel)</pre>
 *
 * <pre>
 * Options specific to kernel weka.classifiers.functions.supportVector.RBFKernel:
 * </pre>
 *
 * <pre> -D
 *  Enables debugging output (if available) to be printed.
 *  (default: off)</pre>
 *
 * <pre> -no-checks
 *  Turns off all checks - use with caution!
 *  (default: checks on)</pre>
 *
 * <pre> -C &lt;num&gt;
 *  The size of the cache (a prime number), 0 for full cache and
 *  -1 to turn it off.
 *  (default: 250007)</pre>
 *
 * <pre> -G &lt;num&gt;
 *  The Gamma parameter.
 *  (default: 0.01)</pre>
 *
 <!-- options-end -->
 *
 * Options after -- are passed to the designated kernel.<p/>
 *
 * @author Len Trigg (trigg@cs.waikato.ac.nz)
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 * @see TestInstances
 */
public class CheckKernel
  extends CheckScheme {

  /*
   * Note about test methods:
   * - methods return array of booleans
   * - first index: success or not
   * - second index: acceptable or not (e.g., Exception is OK)
   *
   * FracPete (fracpete at waikato dot ac dot nz)
   */

  /*** The kernel to be examined */
  protected Kernel m_Kernel = new weka.classifiers.functions.supportVector.RBFKernel();

  /**
   * Returns an enumeration describing the available options.
   *
   * @return 		an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector result = new Vector();

    // options from the superclass first
    Enumeration en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement(en.nextElement());

    result.addElement(new Option(
        "\tFull name of the kernel analysed.\n"
        +"\teg: weka.classifiers.functions.supportVector.RBFKernel\n"
        + "\t(default weka.classifiers.functions.supportVector.RBFKernel)",
        "W", 1, "-W"));

    // append the kernel's own options, if it exposes any
    if ((m_Kernel != null) && (m_Kernel instanceof OptionHandler)) {
      result.addElement(new Option("", "", 0,
          "\nOptions specific to kernel "
          + m_Kernel.getClass().getName() + ":"));

      Enumeration enu = ((OptionHandler)m_Kernel).listOptions();
      while (enu.hasMoreElements())
        result.addElement(enu.nextElement());
    }

    return result.elements();
  }

  /**
   * Parses a given list of options. Handles -W (kernel class name, with any
   * options after -- forwarded to the kernel); all other options are
   * processed by the superclass.
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -D
   *  Turn on debugging output.</pre>
   *
   * <pre> -S
   *  Silent mode - prints nothing to stdout.</pre>
   *
   * <pre> -N &lt;num&gt;
   *  The number of instances in the datasets (default 20).</pre>
   *
   * <pre> -nominal &lt;num&gt;
   *  The number of nominal attributes (default 2).</pre>
   *
   * <pre> -nominal-values &lt;num&gt;
   *  The number of values for nominal attributes (default 1).</pre>
   *
   * <pre> -numeric &lt;num&gt;
   *  The number of numeric attributes (default 1).</pre>
   *
   * <pre> -string &lt;num&gt;
   *  The number of string attributes (default 1).</pre>
   *
   * <pre> -date &lt;num&gt;
   *  The number of date attributes (default 1).</pre>
   *
   * <pre> -relational &lt;num&gt;
   *  The number of relational attributes (default 1).</pre>
   *
   * <pre> -num-instances-relational &lt;num&gt;
   *  The number of instances in relational/bag attributes (default 10).</pre>
   *
   * <pre> -words &lt;comma-separated-list&gt;
   *  The words to use in string attributes.</pre>
   *
   * <pre> -word-separators &lt;chars&gt;
   *  The word separators to use in string attributes.</pre>
   *
   * <pre> -W
   *  Full name of the kernel analysed.
   *  eg: weka.classifiers.functions.supportVector.RBFKernel
   *  (default weka.classifiers.functions.supportVector.RBFKernel)</pre>
   *
   * <pre>
   * Options specific to kernel weka.classifiers.functions.supportVector.RBFKernel:
   * </pre>
   *
   * <pre> -D
   *  Enables debugging output (if available) to be printed.
   *  (default: off)</pre>
   *
   * <pre> -no-checks
   *  Turns off all checks - use with caution!
   *  (default: checks on)</pre>
   *
   * <pre> -C &lt;num&gt;
   *  The size of the cache (a prime number), 0 for full cache and
   *  -1 to turn it off.
   *  (default: 250007)</pre>
   *
   * <pre> -G &lt;num&gt;
   *  The Gamma parameter.
   *  (default: 0.01)</pre>
   *
   <!-- options-end -->
   *
   * @param options 	the list of options as an array of strings
   * @throws Exception 	if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String      tmpStr;

    super.setOptions(options);

    tmpStr = Utils.getOption('W', options);
    if (tmpStr.length() == 0)
      tmpStr = weka.classifiers.functions.supportVector.RBFKernel.class.getName();
    setKernel(
	(Kernel) forName(
	  "weka.classifiers.functions.supportVector",
	  Kernel.class,
	  tmpStr,
	  Utils.partitionOptions(options)));
  }

  /**
   * Gets the current settings of the CheckKernel.
   *
   * @return 		an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector        result;
    String[]      options;
    int           i;

    result = new Vector();

    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);

    if (getKernel() != null) {
      result.add("-W");
      result.add(getKernel().getClass().getName());
    }

    // kernel-specific options go after "--"
    if ((m_Kernel != null) && (m_Kernel instanceof OptionHandler))
      options = ((OptionHandler) m_Kernel).getOptions();
    else
      options = new String[0];

    if (options.length > 0) {
      result.add("--");
      for (i = 0; i < options.length; i++)
        result.add(options[i]);
    }

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Begin the tests, reporting results to System.out. Runs the interface
   * checks first, then the full battery of kernel tests once per class
   * attribute type.
   */
  public void doTests() {

    if (getKernel() == null) {
      println("\n=== No kernel set ===");
      return;
    }
    println("\n=== Check on kernel: "
        + getKernel().getClass().getName()
        + " ===\n");

    // Start tests
    m_ClasspathProblems = false;
    println("--> Checking for interfaces");
    canTakeOptions();
    boolean weightedInstancesHandler = weightedInstancesHandler()[0];
    boolean multiInstanceHandler = multiInstanceHandler()[0];
    println("--> Kernel tests");
    declaresSerialVersionUID();
    testsPerClassType(Attribute.NOMINAL,    weightedInstancesHandler, multiInstanceHandler);
    testsPerClassType(Attribute.NUMERIC,    weightedInstancesHandler, multiInstanceHandler);
    testsPerClassType(Attribute.DATE,       weightedInstancesHandler, multiInstanceHandler);
    testsPerClassType(Attribute.STRING,     weightedInstancesHandler, multiInstanceHandler);
    testsPerClassType(Attribute.RELATIONAL, weightedInstancesHandler, multiInstanceHandler);
  }

  /**
   * Set the kernel to test.
   *
   * @param value	the kernel to use.
   */
  public void setKernel(Kernel value) {
    m_Kernel = value;
  }

  /**
   * Get the kernel being tested
   *
   * @return 		the kernel being tested
   */
  public Kernel getKernel() {
    return m_Kernel;
  }

  /**
   * Run a battery of tests for a given class attribute type. First probes
   * which predictor attribute types the kernel accepts at all, then runs
   * the remaining checks only for the accepted combinations.
   *
   * @param classType 	true if the class attribute should be numeric
   * @param weighted 	true if the kernel says it handles weights
   * @param multiInstance	true if the kernel is a multi-instance kernel
   */
  protected void testsPerClassType(int classType,
                                   boolean weighted,
                                   boolean multiInstance) {

    // basic acceptance per predictor attribute type
    boolean PNom = canPredict(true,  false, false, false, false, multiInstance, classType)[0];
    boolean PNum = canPredict(false, true,  false, false, false, multiInstance, classType)[0];
    boolean PStr = canPredict(false, false, true,  false, false, multiInstance, classType)[0];
    boolean PDat = canPredict(false, false, false, true,  false, multiInstance, classType)[0];
    boolean PRel;
    if (!multiInstance)
      PRel = canPredict(false, false, false, false, true,  multiInstance, classType)[0];
    else
      PRel = false;

    if (PNom || PNum || PStr || PDat || PRel) {
      if (weighted)
        instanceWeights(PNom, PNum, PStr, PDat, PRel, multiInstance, classType);

      if (classType == Attribute.NOMINAL)
        canHandleNClasses(PNom, PNum, PStr, PDat, PRel, multiInstance, 4);

      if (!multiInstance) {
	canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, 0);
	canHandleClassAsNthAttribute(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, 1);
      }

      canHandleZeroTraining(PNom, PNum, PStr, PDat, PRel, multiInstance, classType);
      boolean handleMissingPredictors = canHandleMissing(PNom, PNum, PStr, PDat, PRel,
	  multiInstance, classType,
          true, false, 20)[0];
      if (handleMissingPredictors)
        canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, true, false, 100);

      boolean handleMissingClass = canHandleMissing(PNom, PNum, PStr, PDat, PRel,
	  multiInstance, classType,
          false, true, 20)[0];
      if (handleMissingClass)
        canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, classType, false, true, 100);

      correctBuildInitialisation(PNom, PNum, PStr, PDat, PRel, multiInstance, classType);
      datasetIntegrity(PNom, PNum, PStr, PDat, PRel, multiInstance, classType,
          handleMissingPredictors, handleMissingClass);
    }
  }

  /**
   * Checks whether the scheme can take command line options.
   *
   * @return 		index 0 is true if the kernel can take options
   */
  protected boolean[] canTakeOptions() {
    boolean[] result = new boolean[2];

    print("options...");
    if (m_Kernel instanceof OptionHandler) {
      println("yes");
      if (m_Debug) {
        println("\n=== Full report ===");
        Enumeration enu = ((OptionHandler)m_Kernel).listOptions();
        while (enu.hasMoreElements()) {
          Option option = (Option) enu.nextElement();
          print(option.synopsis() + "\n"
              + option.description() + "\n");
        }
        println("\n");
      }
      result[0] = true;
    }
    else {
      println("no");
      result[0] = false;
    }

    return result;
  }

  /**
   * Checks whether the scheme says it can handle instance weights.
   *
   * @return		true if the kernel handles instance weights
   */
  protected boolean[] weightedInstancesHandler() {
    boolean[] result = new boolean[2];

    print("weighted instances kernel...");
    if (m_Kernel instanceof WeightedInstancesHandler) {
      println("yes");
      result[0] = true;
    }
    else {
      println("no");
      result[0] = false;
    }

    return result;
  }

  /**
   * Checks whether the scheme handles multi-instance data.
   *
   * @return		true if the kernel handles multi-instance data
   */
  protected boolean[] multiInstanceHandler() {
    boolean[] result = new boolean[2];

    print("multi-instance kernel...");
    if (m_Kernel instanceof MultiInstanceCapabilitiesHandler) {
      println("yes");
      result[0] = true;
    }
    else {
      println("no");
      result[0] = false;
    }

    return result;
  }

  /**
   * tests for a serialVersionUID. Fails in case the scheme doesn't declare
   * a UID.
   *
   * @return		index 0 is true if the scheme declares a UID
   */
  protected boolean[] declaresSerialVersionUID() {
    boolean[] result = new boolean[2];

    print("serialVersionUID...");
    result[0] = !SerializationHelper.needsUID(m_Kernel.getClass());
    if (result[0])
      println("yes");
    else
      println("no");

    return result;
  }

  /**
   * Checks basic prediction of the scheme, for simple non-troublesome
   * datasets.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NOMINAL, NUMERIC, etc.)
   * @return index 0 is true if the test was passed, index 1 is true if test
   *         was acceptable
   */
  protected boolean[] canPredict(
      boolean nominalPredictor,
      boolean numericPredictor,
      boolean stringPredictor,
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType) {

    print("basic predict");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType);
    print("...");

    // failure reasons that are still "acceptable" (capability exceptions)
    FastVector accepts = new FastVector();
    accepts.addElement("unary");
    accepts.addElement("binary");
    accepts.addElement("nominal");
    accepts.addElement("numeric");
    accepts.addElement("string");
    accepts.addElement("date");
    accepts.addElement("relational");
    accepts.addElement("multi-instance");
    accepts.addElement("not in classpath");
    int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;

    return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
        datePredictor, relationalPredictor,
        multiInstance,
        classType,
        missingLevel, predictorMissing, classMissing,
        numTrain, numClasses,
        accepts);
  }

  /**
   * Checks whether nominal schemes can handle more than two classes.
   * If a scheme is only designed for two-class problems it should
   * throw an appropriate exception for multi-class problems.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param numClasses the number of classes to test
   * @return index 0 is true if the test was passed, index 1 is true if test
   *         was acceptable
   */
  protected boolean[] canHandleNClasses(
      boolean nominalPredictor,
      boolean numericPredictor,
      boolean stringPredictor,
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int numClasses) {

    print("more than two class problems");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, Attribute.NOMINAL);
    print("...");

    FastVector accepts = new FastVector();
    accepts.addElement("number");
    accepts.addElement("class");
    int numTrain = getNumInstances(), missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;

    return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
                        datePredictor, relationalPredictor,
                        multiInstance,
                        Attribute.NOMINAL,
                        missingLevel, predictorMissing, classMissing,
                        numTrain, numClasses,
                        accepts);
  }

  /**
   * Checks whether the scheme can handle class attributes as Nth attribute.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @param classIndex the index of the class attribute (0-based, -1 means last attribute)
   * @return index 0 is true if the test was passed, index 1 is true if test
   *         was acceptable
   * @see TestInstances#CLASS_IS_LAST
   */
  protected boolean[] canHandleClassAsNthAttribute(
      boolean nominalPredictor,
      boolean numericPredictor,
      boolean stringPredictor,
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType,
      int classIndex) {

    if (classIndex == TestInstances.CLASS_IS_LAST)
      print("class attribute as last attribute");
    else
      print("class attribute as " + (classIndex + 1) + ". attribute");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType);
    print("...");

    // no exception message is acceptable for this test
    FastVector accepts = new FastVector();
    int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;

    return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
                        datePredictor, relationalPredictor,
                        multiInstance,
                        classType,
                        classIndex,
                        missingLevel, predictorMissing, classMissing,
                        numTrain, numClasses,
                        accepts);
  }

  /**
   * Checks whether the scheme can handle zero training instances.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @return index 0 is true if the test was passed, index 1 is true if test
   *         was acceptable
   */
  protected boolean[] canHandleZeroTraining(
      boolean nominalPredictor,
      boolean numericPredictor,
      boolean stringPredictor,
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType) {

    print("handle zero training instances");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType);
    print("...");

    FastVector accepts = new FastVector();
    accepts.addElement("train");
    accepts.addElement("value");
    int numTrain = 0, numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;

    return runBasicTest(
              nominalPredictor, numericPredictor, stringPredictor,
              datePredictor, relationalPredictor,
              multiInstance,
              classType,
              missingLevel, predictorMissing, classMissing,
              numTrain, numClasses,
              accepts);
  }

  /**
   * Checks whether the scheme correctly initialises models when
   * buildKernel is called. This test calls buildKernel with
   * one training dataset. buildKernel is then called on a training
   * set with different structure, and then again with the original training
   * set. If the equals method of the KernelEvaluation class returns
   * false, this is noted as incorrect build initialisation.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param classType the class type (NUMERIC, NOMINAL, etc.)
   * @return index 0 is true if the test was passed
   */
  protected boolean[] correctBuildInitialisation(
      boolean nominalPredictor,
      boolean numericPredictor,
      boolean stringPredictor,
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      int classType) {

    boolean[] result = new boolean[2];

    print("correct initialisation during buildKernel");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType);
    print("...");

    int numTrain = getNumInstances(), numClasses = 2, missingLevel = 0;
    boolean predictorMissing = false, classMissing = false;

    Instances train1 = null;
    Instances train2 = null;
    Kernel kernel = null;
    KernelEvaluation evaluation1A = null;
    KernelEvaluation evaluation1B = null;
    KernelEvaluation evaluation2 = null;
    int stage = 0;
    try {

      // Make two sets of train/test splits with different
      // numbers of attributes
      train1 = makeTestDataset(42, numTrain,
                               nominalPredictor    ? getNumNominal()    : 0,
                               numericPredictor    ? getNumNumeric()    : 0,
                               stringPredictor     ? getNumString()     : 0,
                               datePredictor       ? getNumDate()       : 0,
                               relationalPredictor ? getNumRelational() : 0,
                               numClasses,
                               classType,
                               multiInstance);
      train2 = makeTestDataset(84, numTrain,
                               nominalPredictor    ? getNumNominal() + 1    : 0,
                               numericPredictor    ? getNumNumeric() + 1    : 0,
                               stringPredictor     ? getNumString() + 1     : 0,
                               datePredictor       ? getNumDate() + 1       : 0,
                               relationalPredictor ?
getNumRelational() + 1 : 0, numClasses, classType, multiInstance); if (missingLevel > 0) { addMissing(train1, missingLevel, predictorMissing, classMissing); addMissing(train2, missingLevel, predictorMissing, classMissing); } kernel = Kernel.makeCopy(getKernel()); evaluation1A = new KernelEvaluation(); evaluation1B = new KernelEvaluation(); evaluation2 = new KernelEvaluation(); } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { stage = 0; evaluation1A.evaluate(kernel, train1); stage = 1; evaluation2.evaluate(kernel, train2); stage = 2; evaluation1B.evaluate(kernel, train1); stage = 3; if (!evaluation1A.equals(evaluation1B)) { if (m_Debug) { println("\n=== Full report ===\n" + evaluation1A.toSummaryString("\nFirst buildKernel()") + "\n\n"); println( evaluation1B.toSummaryString("\nSecond buildKernel()") + "\n\n"); } throw new Exception("Results differ between buildKernel calls"); } println("yes"); result[0] = true; if (false && m_Debug) { println("\n=== Full report ===\n" + evaluation1A.toSummaryString("\nFirst buildKernel()") + "\n\n"); println( evaluation1B.toSummaryString("\nSecond buildKernel()") + "\n\n"); } } catch (Exception ex) { println("no"); result[0] = false; if (m_Debug) { println("\n=== Full Report ==="); print("Problem during building"); switch (stage) { case 0: print(" of dataset 1"); break; case 1: print(" of dataset 2"); break; case 2: print(" of dataset 1 (2nd build)"); break; case 3: print(", comparing results from builds of dataset 1"); break; } println(": " + ex.getMessage() + "\n"); println("here are the datasets:\n"); println("=== Train1 Dataset ===\n" + train1.toString() + "\n"); println("=== Train2 Dataset ===\n" + train2.toString() + "\n"); } } return result; } /** * Checks basic missing value handling of the scheme. If the missing * values cause an exception to be thrown by the scheme, this will be * recorded. 
* * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) * @param predictorMissing true if the missing values may be in * the predictors * @param classMissing true if the missing values may be in the class * @param missingLevel the percentage of missing values * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] canHandleMissing( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, boolean predictorMissing, boolean classMissing, int missingLevel) { if (missingLevel == 100) print("100% "); print("missing"); if (predictorMissing) { print(" predictor"); if (classMissing) print(" and"); } if (classMissing) print(" class"); print(" values"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); FastVector accepts = new FastVector(); accepts.addElement("missing"); accepts.addElement("value"); accepts.addElement("train"); int numTrain = getNumInstances(), numClasses = 2; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, missingLevel, predictorMissing, classMissing, numTrain, numClasses, accepts); } /** * Checks whether the kernel can handle instance weights. * This test compares the kernel performance on two datasets * that are identical except for the training weights. 
If the * results change, then the kernel must be using the weights. It * may be possible to get a false positive from this test if the * weight changes aren't significant enough to induce a change * in kernel performance (but the weights are chosen to minimize * the likelihood of this). * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) * @return index 0 true if the test was passed */ protected boolean[] instanceWeights( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { print("kernel uses instance weights"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); int numTrain = 2*getNumInstances(), numClasses = 2, missingLevel = 0; boolean predictorMissing = false, classMissing = false; boolean[] result = new boolean[2]; Instances train = null; Kernel[] kernels = null; KernelEvaluation evaluationB = null; KernelEvaluation evaluationI = null; boolean evalFail = false; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() + 1 : 0, numericPredictor ? getNumNumeric() + 1 : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? 
getNumRelational() : 0, numClasses, classType, multiInstance); if (missingLevel > 0) addMissing(train, missingLevel, predictorMissing, classMissing); kernels = Kernel.makeCopies(getKernel(), 2); evaluationB = new KernelEvaluation(); evaluationI = new KernelEvaluation(); evaluationB.evaluate(kernels[0], train); } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { // Now modify instance weights and re-built/test for (int i = 0; i < train.numInstances(); i++) { train.instance(i).setWeight(0); } Random random = new Random(1); for (int i = 0; i < train.numInstances() / 2; i++) { int inst = Math.abs(random.nextInt()) % train.numInstances(); int weight = Math.abs(random.nextInt()) % 10 + 1; train.instance(inst).setWeight(weight); } evaluationI.evaluate(kernels[1], train); if (evaluationB.equals(evaluationI)) { // println("no"); evalFail = true; throw new Exception("evalFail"); } println("yes"); result[0] = true; } catch (Exception ex) { println("no"); result[0] = false; if (m_Debug) { println("\n=== Full Report ==="); if (evalFail) { println("Results don't differ between non-weighted and " + "weighted instance models."); println("Here are the results:\n"); println(evaluationB.toSummaryString("\nboth methods\n")); } else { print("Problem during building"); println(": " + ex.getMessage() + "\n"); } println("Here is the dataset:\n"); println("=== Train Dataset ===\n" + train.toString() + "\n"); println("=== Train Weights ===\n"); for (int i = 0; i < train.numInstances(); i++) { println(" " + (i + 1) + " " + train.instance(i).weight()); } } } return result; } /** * Checks whether the scheme alters the training dataset during * building. If the scheme needs to modify the data it should take * a copy of the training data. Currently checks for changes to header * structure, number of instances, order of instances, instance weights. 
* * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) * @param predictorMissing true if we know the kernel can handle * (at least) moderate missing predictor values * @param classMissing true if we know the kernel can handle * (at least) moderate missing class values * @return index 0 is true if the test was passed */ protected boolean[] datasetIntegrity( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, boolean predictorMissing, boolean classMissing) { print("kernel doesn't alter original datasets"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType); print("..."); int numTrain = getNumInstances(), numClasses = 2, missingLevel = 20; boolean[] result = new boolean[2]; Instances train = null; Kernel kernel = null; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? 
getNumRelational() : 0, numClasses, classType, multiInstance); if (missingLevel > 0) addMissing(train, missingLevel, predictorMissing, classMissing); kernel = Kernel.makeCopies(getKernel(), 1)[0]; } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { Instances trainCopy = new Instances(train); kernel.buildKernel(trainCopy); compareDatasets(train, trainCopy); println("yes"); result[0] = true; } catch (Exception ex) { println("no"); result[0] = false; if (m_Debug) { println("\n=== Full Report ==="); print("Problem during building"); println(": " + ex.getMessage() + "\n"); println("Here is the dataset:\n"); println("=== Train Dataset ===\n" + train.toString() + "\n"); } } return result; } /** * Runs a text on the datasets with the given characteristics. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
* @param missingLevel the percentage of missing values * @param predictorMissing true if the missing values may be in * the predictors * @param classMissing true if the missing values may be in the class * @param numTrain the number of instances in the training set * @param numClasses the number of classes * @param accepts the acceptable string in an exception * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] runBasicTest(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, int missingLevel, boolean predictorMissing, boolean classMissing, int numTrain, int numClasses, FastVector accepts) { return runBasicTest( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, classType, TestInstances.CLASS_IS_LAST, missingLevel, predictorMissing, classMissing, numTrain, numClasses, accepts); } /** * Runs a text on the datasets with the given characteristics. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) 
* @param classIndex the attribute index of the class * @param missingLevel the percentage of missing values * @param predictorMissing true if the missing values may be in * the predictors * @param classMissing true if the missing values may be in the class * @param numTrain the number of instances in the training set * @param numClasses the number of classes * @param accepts the acceptable string in an exception * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] runBasicTest(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType, int classIndex, int missingLevel, boolean predictorMissing, boolean classMissing, int numTrain, int numClasses, FastVector accepts) { boolean[] result = new boolean[2]; Instances train = null; Kernel kernel = null; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, numClasses, classType, classIndex, multiInstance); if (missingLevel > 0) addMissing(train, missingLevel, predictorMissing, classMissing); kernel = Kernel.makeCopies(getKernel(), 1)[0]; } catch (Exception ex) { ex.printStackTrace(); throw new Error("Error setting up for tests: " + ex.getMessage()); } try { kernel.buildKernel(train); println("yes"); result[0] = true; } catch (Exception ex) { boolean acceptable = false; String msg; if (ex.getMessage() == null) msg = ""; else msg = ex.getMessage().toLowerCase(); if (msg.indexOf("not in classpath") > -1) m_ClasspathProblems = true; for (int i = 0; i < accepts.size(); i++) { if (msg.indexOf((String)accepts.elementAt(i)) >= 0) { acceptable = true; } } println("no" + (acceptable ? 
" (OK error message)" : "")); result[1] = acceptable; if (m_Debug) { println("\n=== Full Report ==="); print("Problem during building"); println(": " + ex.getMessage() + "\n"); if (!acceptable) { if (accepts.size() > 0) { print("Error message doesn't mention "); for (int i = 0; i < accepts.size(); i++) { if (i != 0) { print(" or "); } print('"' + (String)accepts.elementAt(i) + '"'); } } println("here is the dataset:\n"); println("=== Train Dataset ===\n" + train.toString() + "\n"); } } } return result; } /** * Make a simple set of instances, which can later be modified * for use in specific tests. * * @param seed the random number seed * @param numInstances the number of instances to generate * @param numNominal the number of nominal attributes * @param numNumeric the number of numeric attributes * @param numString the number of string attributes * @param numDate the number of date attributes * @param numRelational the number of relational attributes * @param numClasses the number of classes (if nominal class) * @param classType the class type (NUMERIC, NOMINAL, etc.) * @param multiInstance whether the dataset should a multi-instance dataset * @return the test dataset * @throws Exception if the dataset couldn't be generated * @see #process(Instances) */ protected Instances makeTestDataset(int seed, int numInstances, int numNominal, int numNumeric, int numString, int numDate, int numRelational, int numClasses, int classType, boolean multiInstance) throws Exception { return makeTestDataset( seed, numInstances, numNominal, numNumeric, numString, numDate, numRelational, numClasses, classType, TestInstances.CLASS_IS_LAST, multiInstance); } /** * Make a simple set of instances with variable position of the class * attribute, which can later be modified for use in specific tests. 
* * @param seed the random number seed * @param numInstances the number of instances to generate * @param numNominal the number of nominal attributes * @param numNumeric the number of numeric attributes * @param numString the number of string attributes * @param numDate the number of date attributes * @param numRelational the number of relational attributes * @param numClasses the number of classes (if nominal class) * @param classType the class type (NUMERIC, NOMINAL, etc.) * @param classIndex the index of the class (0-based, -1 as last) * @param multiInstance whether the dataset should a multi-instance dataset * @return the test dataset * @throws Exception if the dataset couldn't be generated * @see TestInstances#CLASS_IS_LAST * @see #process(Instances) */ protected Instances makeTestDataset(int seed, int numInstances, int numNominal, int numNumeric, int numString, int numDate, int numRelational, int numClasses, int classType, int classIndex, boolean multiInstance) throws Exception { TestInstances dataset = new TestInstances(); dataset.setSeed(seed); dataset.setNumInstances(numInstances); dataset.setNumNominal(numNominal); dataset.setNumNumeric(numNumeric); dataset.setNumString(numString); dataset.setNumDate(numDate); dataset.setNumRelational(numRelational); dataset.setNumClasses(numClasses); dataset.setClassType(classType); dataset.setClassIndex(classIndex); dataset.setNumClasses(numClasses); dataset.setMultiInstance(multiInstance); dataset.setWords(getWords()); dataset.setWordSeparators(getWordSeparators()); return process(dataset.generate()); } /** * Print out a short summary string for the dataset characteristics * * @param nominalPredictor true if nominal predictor attributes are present * @param numericPredictor true if numeric predictor attributes are present * @param stringPredictor true if string predictor attributes are present * @param datePredictor true if date predictor attributes are present * @param relationalPredictor true if relational predictor 
attributes are present * @param multiInstance whether multi-instance is needed * @param classType the class type (NUMERIC, NOMINAL, etc.) */ protected void printAttributeSummary(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int classType) { String str = ""; if (numericPredictor) str += " numeric"; if (nominalPredictor) { if (str.length() > 0) str += " &"; str += " nominal"; } if (stringPredictor) { if (str.length() > 0) str += " &"; str += " string"; } if (datePredictor) { if (str.length() > 0) str += " &"; str += " date"; } if (relationalPredictor) { if (str.length() > 0) str += " &"; str += " relational"; } str += " predictors)"; switch (classType) { case Attribute.NUMERIC: str = " (numeric class," + str; break; case Attribute.NOMINAL: str = " (nominal class," + str; break; case Attribute.STRING: str = " (string class," + str; break; case Attribute.DATE: str = " (date class," + str; break; case Attribute.RELATIONAL: str = " (relational class," + str; break; } print(str); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Test method for this class * * @param args the commandline parameters */ public static void main(String [] args) { runCheck(new CheckKernel(), args); } }
51,295
34.474412
131
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/supportVector/Kernel.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Kernel.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.functions.supportVector;

import java.io.Serializable;
import java.util.Enumeration;
import java.util.Vector;

import weka.core.Capabilities;
import weka.core.CapabilitiesHandler;
import weka.core.Copyable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.SerializedObject;
import weka.core.Utils;

/**
 * Abstract kernel.
 * Kernels implementing this class must respect Mercer's condition in order
 * to ensure a correct behaviour of SMOreg.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 9893 $
 */
public abstract class Kernel
  implements Serializable, OptionHandler, CapabilitiesHandler, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -6102771099905817064L;

  /** The dataset the kernel was built with (set by {@link #initVars(Instances)}) */
  protected Instances m_data;

  /** enables debugging output */
  protected boolean m_Debug = false;

  /** Turns off all checks (capability tests) - use with caution */
  protected boolean m_ChecksTurnedOff = false;

  /**
   * Returns a string describing the kernel
   *
   * @return a description suitable for displaying in the
   *         explorer/experimenter gui
   */
  public abstract String globalInfo();

  /**
   * Computes the result of the kernel function for two instances.
   * If id1 == -1, eval uses inst1 instead of an instance in the dataset.
   *
   * @param id1 the index of the first instance in the dataset
   * @param id2 the index of the second instance in the dataset
   * @param inst1 the instance corresponding to id1 (used if id1 == -1)
   * @return the result of the kernel function
   * @throws Exception if something goes wrong
   */
  public abstract double eval(int id1, int id2, Instance inst1)
    throws Exception;

  /**
   * Frees the memory used by the kernel.
   * (Useful with kernels which use cache.)
   * This function is called when the training is done,
   * i.e. after that, eval will be called with id1 == -1.
   */
  public abstract void clean();

  /**
   * Returns the number of kernel evaluations performed.
   *
   * @return the number of kernel evaluations performed.
   */
  public abstract int numEvals();

  /**
   * Returns the number of dot product cache hits.
   *
   * @return the number of dot product cache hits, or -1 if not supported
   *         by this kernel.
   */
  public abstract int numCacheHits();

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    // parameterized Vector instead of a raw type; the raw Enumeration
    // return type is kept to match the OptionHandler interface
    Vector<Option> result = new Vector<Option>();

    result.addElement(new Option(
	"\tEnables debugging output (if available) to be printed.\n"
	+ "\t(default: off)",
	"D", 0, "-D"));

    result.addElement(new Option(
	"\tTurns off all checks - use with caution!\n"
	+ "\t(default: checks on)",
	"no-checks", 0, "-no-checks"));

    return result.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   * @param options 	the list of options as an array of strings
   * @throws Exception 	if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    setDebug(Utils.getFlag('D', options));

    setChecksTurnedOff(Utils.getFlag("no-checks", options));

    // any flag left over at this point is unknown
    Utils.checkForRemainingOptions(options);
  }

  /**
   * Gets the current settings of the Kernel.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector<String> result = new Vector<String>();

    if (getDebug())
      result.add("-D");

    if (getChecksTurnedOff())
      result.add("-no-checks");

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Enables or disables the output of debug information (if the derived
   * kernel supports that)
   *
   * @param value	whether to output debugging information
   */
  public void setDebug(boolean value) {
    m_Debug = value;
  }

  /**
   * Gets whether debugging output is turned on or not.
   *
   * @return		true if debugging output is produced.
   */
  public boolean getDebug() {
    return m_Debug;
  }

  /**
   * Returns the tip text for this property
   *
   * @return 		tip text for this property suitable for
   * 			displaying in the explorer/experimenter gui
   */
  public String debugTipText() {
    return "Turns on the output of debugging information.";
  }

  /**
   * Disables or enables the checks (which could be time-consuming). Use with
   * caution!
   *
   * @param value	if true turns off all checks
   */
  public void setChecksTurnedOff(boolean value) {
    m_ChecksTurnedOff = value;
  }

  /**
   * Returns whether the checks are turned off or not.
   *
   * @return		true if the checks are turned off
   */
  public boolean getChecksTurnedOff() {
    return m_ChecksTurnedOff;
  }

  /**
   * Returns the tip text for this property
   *
   * @return 		tip text for this property suitable for
   * 			displaying in the explorer/experimenter gui
   */
  public String checksTurnedOffTipText() {
    return "Turns time-consuming checks off - use with caution.";
  }

  /**
   * initializes variables etc.
   *
   * @param data	the data to use
   */
  protected void initVars(Instances data) {
    m_data = data;
  }

  /**
   * Returns the Capabilities of this kernel. Derived kernels have to
   * override this method to enable capabilities.
   *
   * @return		the capabilities of this object
   * @see		Capabilities
   */
  public Capabilities getCapabilities() {
    Capabilities result = new Capabilities(this);

    result.enableAll();

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 9893 $");
  }

  /**
   * builds the kernel with the given data
   *
   * @param data	the data to base the kernel on
   * @throws Exception	if something goes wrong
   */
  public void buildKernel(Instances data) throws Exception {
    // does kernel handle the data?
    if (!getChecksTurnedOff())
      getCapabilities().testWithFail(data);

    initVars(data);
  }

  /**
   * Creates a shallow copy of the kernel (if it implements Copyable)
   * otherwise a deep copy using serialization.
   *
   * @param kernel 	the kernel to copy
   * @return 		a shallow or deep copy of the kernel
   * @throws Exception 	if an error occurs
   */
  public static Kernel makeCopy(Kernel kernel) throws Exception {
    if (kernel instanceof Copyable) {
      return (Kernel) ((Copyable) kernel).copy();
    }
    return (Kernel) new SerializedObject(kernel).getObject();
  }

  /**
   * Creates a given number of deep or shallow (if the kernel implements
   * Copyable) copies of the given kernel using serialization.
   *
   * @param model 	the kernel to copy
   * @param num 	the number of kernel copies to create.
   * @return 		an array of kernels.
   * @throws Exception 	if an error occurs
   */
  public static Kernel[] makeCopies(Kernel model, int num) throws Exception {
    if (model == null)
      throw new Exception("No model kernel set");

    Kernel[] kernels = new Kernel[num];
    if (model instanceof Copyable) {
      // shallow copies via Copyable
      for (int i = 0; i < kernels.length; i++) {
	kernels[i] = (Kernel) ((Copyable) model).copy();
      }
    } else {
      // deep copies: serialize once, deserialize num times
      SerializedObject so = new SerializedObject(model);
      for (int i = 0; i < kernels.length; i++)
	kernels[i] = (Kernel) so.getObject();
    }

    return kernels;
  }

  /**
   * Creates a new instance of a kernel given its class name and
   * (optional) arguments to pass to its setOptions method.
   *
   * @param kernelName 	the fully qualified class name of the classifier
   * @param options 	an array of options suitable for passing to setOptions.
   * 			May be null.
   * @return 		the newly created classifier, ready for use.
   * @throws Exception 	if the classifier name is invalid, or the options
   * 			supplied are not acceptable to the classifier
   */
  public static Kernel forName(String kernelName, String[] options)
    throws Exception {
    return (Kernel) Utils.forName(Kernel.class, kernelName, options);
  }
}
9,160
27.101227
89
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/supportVector/KernelEvaluation.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * KernelEvaluation.java
 * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers.functions.supportVector;

import java.io.BufferedReader;
import java.io.FileReader;
import java.util.Enumeration;

import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * Class for evaluating Kernels: builds the kernel on a training file, fills
 * the full (symmetric) kernel matrix and reports evaluation counts, cache
 * hits and elapsed time.
 *
 * @author fracpete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 */
public class KernelEvaluation
  implements RevisionHandler {

  /** the result string, rebuilt on every call to evaluate(Kernel,Instances) */
  protected StringBuffer m_Result;

  /** the kernel evaluation results; only the upper triangle [n][i], i >= n,
   * is filled since K(x,y) = K(y,x) */
  protected double[][] m_Evaluations;

  /** the number of performed evaluations */
  protected int m_NumEvals;

  /** the number of cache hits */
  protected int m_NumCacheHits;

  /** user-supplied options */
  protected String[] m_Options;

  /**
   * default constructor - initializes all result holders to empty values
   */
  public KernelEvaluation() {
    super();

    m_Result      = new StringBuffer();
    m_Evaluations = new double[0][0];
    m_Options     = new String[0];
    m_NumEvals    = 0;
    m_NumCacheHits = 0;
  }

  /**
   * sets the option the user supplied for the kernel
   *
   * @param options	options that were supplied for the kernel
   */
  public void setUserOptions(String[] options) {
    // clone so later in-place consumption of the array by option parsing
    // cannot corrupt the stored copy
    m_Options = (String[]) options.clone();
  }

  /**
   * returns the options the user supplied for the kernel
   *
   * @return		the user supplied options for the kernel
   */
  public String[] getUserOptions() {
    return (String[]) m_Options.clone();
  }

  /**
   * Generates an option string to output on the commandline.
   *
   * @param Kernel	the Kernel to generate the string for
   * @return		the option string
   */
  protected static String makeOptionString(Kernel Kernel) {
    StringBuffer 	text;

    text = new StringBuffer();

    // general options
    text.append("\nGeneral options:\n\n");
    text.append("-t <training file>\n");
    text.append("\tThe name of the training file.\n");
    text.append("-c <class index>\n");
    text.append("\tSets index of class attribute (default: last).\n");

    // Kernel specific options, if any
    if (Kernel instanceof OptionHandler) {
      text.append(
	  "\nOptions specific to "
	  + Kernel.getClass().getName().replaceAll(".*\\.", "")
	  + ":\n\n");

      Enumeration enm = ((OptionHandler) Kernel).listOptions();
      while (enm.hasMoreElements()) {
	Option option = (Option) enm.nextElement();
	text.append(option.synopsis() + "\n");
	text.append(option.description() + "\n");
      }
    }

    return text.toString();
  }

  /**
   * Evaluates the Kernel with the given commandline options and returns
   * the evaluation string.
   *
   * @param Kernel	the Kernel to evaluate
   * @param options	the commandline options; consumed in place
   * @return		the generated output string
   * @throws Exception	if evaluation fails
   */
  public static String evaluate(Kernel Kernel, String[] options) throws Exception {
    String trainFileString = "";
    BufferedReader reader = null;
    KernelEvaluation eval;
    String classIndexString;
    int classIndex = -1;
    Instances train;
    String[] userOptions;

    // help?
    if (Utils.getFlag('h', options))
      throw new Exception("\nHelp requested.\n" + makeOptionString(Kernel));

    try {
      // general options
      trainFileString = Utils.getOption('t', options);
      if (trainFileString.length() == 0)
	throw new Exception("No training file given!");
      reader = new BufferedReader(new FileReader(trainFileString));

      // -c follows the usual Weka commandline convention: 1-based index,
      // or the literals "first"/"last"
      classIndexString = Utils.getOption('c', options);
      if (classIndexString.length() != 0) {
	if (classIndexString.equals("first"))
	  classIndex = 1;
	else if (classIndexString.equals("last"))
	  classIndex = -1;
	else
	  classIndex = Integer.parseInt(classIndexString);
      }

      // Kernel specific options
      userOptions = (String[]) options.clone();
      if (Kernel instanceof OptionHandler) {
	((OptionHandler) Kernel).setOptions(options);
      }

      // left-over options?
      Utils.checkForRemainingOptions(options);
    }
    catch (Exception e) {
      // FIX: close the reader on the error path as well, it was leaked before
      if (reader != null) {
	try {
	  reader.close();
	}
	catch (Exception ex) {
	  // ignored: the original exception is the one worth reporting
	}
      }
      throw new Exception(
	  "\nWeka exception: "
	  + e.getMessage() + "\n"
	  + makeOptionString(Kernel));
    }

    // load file and build kernel
    eval = new KernelEvaluation();
    eval.setUserOptions(userOptions);
    try {
      train = new Instances(reader);
    }
    finally {
      // FIX: the reader was previously never closed (resource leak)
      reader.close();
    }
    if (classIndex == -1)
      train.setClassIndex(train.numAttributes() - 1);
    else
      // FIX: -c is 1-based ("first" maps to 1), setClassIndex is 0-based;
      // without the "- 1" the value "first" selected the second attribute
      train.setClassIndex(classIndex - 1);

    return eval.evaluate(Kernel, train);
  }

  /**
   * Evaluates a kernel with the options given in an array of strings.
   *
   * @param kernelString 	class of kernel as a string
   * @param options 		the array of string containing the options
   * @throws Exception 		if model could not be evaluated successfully
   * @return 			a string describing the results
   */
  public static String evaluate(String kernelString, String[] options) throws Exception {
    Kernel kernel;

    // Create kernel
    try {
      kernel = (Kernel) Class.forName(kernelString).newInstance();
    }
    catch (Exception e) {
      throw new Exception("Can't find class with name " + kernelString + '.');
    }

    return evaluate(kernel, options);
  }

  /**
   * Evaluates the Kernel with the given commandline options and returns
   * the evaluation string.
   *
   * @param kernel	the Kernel to evaluate
   * @param data	the data to run the Kernel with
   * @return		the generated output string
   * @throws Exception	if evaluation fails
   */
  public String evaluate(Kernel kernel, Instances data) throws Exception {
    long startTime;
    long endTime;
    int i;
    int n;

    m_Result = new StringBuffer();

    // build kernel (timed)
    startTime = System.currentTimeMillis();
    kernel.buildKernel(data);
    endTime = System.currentTimeMillis();

    m_Result.append("\n=== Model ===\n\n");
    if (Utils.joinOptions(getUserOptions()).trim().length() != 0)
      m_Result.append("Options: " + Utils.joinOptions(getUserOptions()) + "\n\n");
    m_Result.append(kernel.toString() + "\n");

    // evaluate dataset: upper triangle only, K is symmetric
    m_Evaluations = new double[data.numInstances()][data.numInstances()];
    for (n = 0; n < data.numInstances(); n++) {
      for (i = n; i < data.numInstances(); i++) {
	m_Evaluations[n][i] = kernel.eval(n, i, data.instance(n));
      }
    }

    // second full pass for cached kernels: every evaluation should now be
    // answered from the cache, which is reflected in numCacheHits() below
    if (kernel instanceof CachedKernel) {
      for (n = 0; n < data.numInstances(); n++) {
	for (i = n; i < data.numInstances(); i++) {
	  m_Evaluations[n][i] = kernel.eval(n, i, data.instance(n));
	}
      }
    }

    m_NumEvals     = kernel.numEvals();
    m_NumCacheHits = kernel.numCacheHits();

    // summary
    m_Result.append("\n=== Evaluation ===\n\n");
    if (kernel instanceof CachedKernel) {
      m_Result.append("Cache size   : " + ((CachedKernel) kernel).getCacheSize() + "\n");
    }
    m_Result.append("# Evaluations: " + m_NumEvals + "\n");
    m_Result.append("# Cache hits : " + m_NumCacheHits + "\n");
    m_Result.append("Elapsed time : " + (((double) (endTime - startTime)) / 1000) + "s\n");

    return m_Result.toString();
  }

  /**
   * Tests whether the current evaluation object is equal to another
   * evaluation object. Two evaluations are equal if the evaluation counts,
   * cache hits and all matrix entries match (NaN entries are considered
   * equal to each other).
   *
   * @param obj		the object to compare against
   * @return		true if the two objects are equal
   */
  public boolean equals(Object obj) {
    if ((obj == null) || !(obj.getClass().equals(this.getClass())))
      return false;

    KernelEvaluation cmp = (KernelEvaluation) obj;

    if (m_NumEvals != cmp.m_NumEvals) return false;
    if (m_NumCacheHits != cmp.m_NumCacheHits) return false;

    if (m_Evaluations.length != cmp.m_Evaluations.length) return false;
    for (int n = 0; n < m_Evaluations.length; n++) {
      for (int i = 0; i < m_Evaluations[n].length; i++) {
	// NaN != NaN in Java, but two NaN entries count as agreement here
	if (Double.isNaN(m_Evaluations[n][i]) && Double.isNaN(cmp.m_Evaluations[n][i]))
	  continue;
	if (m_Evaluations[n][i] != cmp.m_Evaluations[n][i])
	  return false;
      }
    }

    return true;
  }

  /**
   * returns a summary string of the evaluation with a no title
   *
   * @return		the summary string
   */
  public String toSummaryString() {
    return toSummaryString("");
  }

  /**
   * returns a summary string of the evaluation with a default title
   *
   * @param title	the title to print before the result
   * @return		the summary string
   */
  public String toSummaryString(String title) {
    StringBuffer	result;

    result = new StringBuffer(title);
    if (title.length() != 0)
      result.append("\n");
    result.append(m_Result);

    return result.toString();
  }

  /**
   * returns the current result
   *
   * @return		the currently stored result
   * @see		#toSummaryString()
   */
  public String toString() {
    return toSummaryString();
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * A test method for this class. Just extracts the first command line
   * argument as a kernel class name and calls evaluate.
   *
   * @param args 	an array of command line arguments, the first of which
   * 			must be the class name of a kernel.
   */
  public static void main(String[] args) {
    try {
      if (args.length == 0) {
	throw new Exception(
	    "The first argument must be the class name of a kernel");
      }
      String kernel = args[0];
      args[0] = "";
      System.out.println(evaluate(kernel, args));
    }
    catch (Exception ex) {
      ex.printStackTrace();
      System.err.println(ex.getMessage());
    }
  }
}
10,575
27.353887
91
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/supportVector/NormalizedPolyKernel.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NormalizedPolyKernel.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.supportVector; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; /** <!-- globalinfo-start --> * The normalized polynomial kernel.<br/> * K(x,y) = &lt;x,y&gt;/sqrt(&lt;x,x&gt;&lt;y,y&gt;) where &lt;x,y&gt; = PolyKernel(x,y) * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Enables debugging output (if available) to be printed. * (default: off)</pre> * * <pre> -no-checks * Turns off all checks - use with caution! * (default: checks on)</pre> * * <pre> -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007)</pre> * * <pre> -E &lt;num&gt; * The Exponent to use. * (default: 1.0)</pre> * * <pre> -L * Use lower-order terms. * (default: no)</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class NormalizedPolyKernel extends PolyKernel { /** for serialization */ static final long serialVersionUID = 1248574185532130851L; /** * default constructor - does nothing */ public NormalizedPolyKernel() { super(); setExponent(2.0); } /** * Creates a new <code>NormalizedPolyKernel</code> instance. 
* * @param dataset the training dataset used. * @param cacheSize the size of the cache (a prime number) * @param exponent the exponent to use * @param lowerOrder whether to use lower-order terms * @throws Exception if something goes wrong */ public NormalizedPolyKernel(Instances dataset, int cacheSize, double exponent, boolean lowerOrder) throws Exception { super(dataset, cacheSize, exponent, lowerOrder); } /** * Returns a string describing the kernel * * @return a description suitable for displaying in the * explorer/experimenter gui */ public String globalInfo() { return "The normalized polynomial kernel.\n" + "K(x,y) = <x,y>/sqrt(<x,x><y,y>) where <x,y> = PolyKernel(x,y)"; } /** * Computes the result of the kernel function for two instances. * If id1 == -1, eval use inst1 instead of an instance in the dataset. * Redefines the eval function of PolyKernel. * * @param id1 the index of the first instance in the dataset * @param id2 the index of the second instance in the dataset * @param inst1 the instance corresponding to id1 (used if id1 == -1) * @return the result of the kernel function * @throws Exception if something goes wrong */ public double eval(int id1, int id2, Instance inst1) throws Exception { double div = Math.sqrt(super.eval(id1, id1, inst1) * ((m_keys != null) ? super.eval(id2, id2, m_data.instance(id2)) : super.eval(-1, -1, m_data.instance(id2)))); if(div != 0){ return super.eval(id1, id2, inst1) / div; } else { return 0; } } /** * Sets the exponent value (must be different from 1.0). 
* * @param value the exponent value */ public void setExponent(double value) { if (value != 1.0) super.setExponent(value); else System.out.println("A linear kernel, i.e., Exponent=1, is not possible!"); } /** * returns a string representation for the Kernel * * @return a string representaiton of the kernel */ public String toString() { String result; if (getUseLowerOrder()) result = "Normalized Poly Kernel with lower order: K(x,y) = (<x,y>+1)^" + getExponent() + "/" + "((<x,x>+1)^" + getExponent() + "*" + "(<y,y>+1)^" + getExponent() + ")^(1/2)"; else result = "Normalized Poly Kernel: K(x,y) = <x,y>^" + getExponent() + "/" + "(<x,x>^" + getExponent() + "*" + "<y,y>^" + getExponent() + ")^(1/2)"; return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
4,914
27.74269
102
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/supportVector/PolyKernel.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * PolyKernel.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.functions.supportVector;

import java.util.Enumeration;
import java.util.Vector;

import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * The polynomial kernel : K(x, y) = &lt;x, y&gt;^p or K(x, y) = (&lt;x, y&gt;+1)^p
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -D
 *  Enables debugging output (if available) to be printed.
 *  (default: off)</pre>
 *
 * <pre> -no-checks
 *  Turns off all checks - use with caution!
 *  (default: checks on)</pre>
 *
 * <pre> -C &lt;num&gt;
 *  The size of the cache (a prime number), 0 for full cache and
 *  -1 to turn it off.
 *  (default: 250007)</pre>
 *
 * <pre> -E &lt;num&gt;
 *  The Exponent to use.
 *  (default: 1.0)</pre>
 *
 * <pre> -L
 *  Use lower-order terms.
 *  (default: no)</pre>
 *
 <!-- options-end -->
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @author Shane Legg (shane@intelligenesis.net) (sparse vector code)
 * @author Stuart Inglis (stuart@reeltwo.com) (sparse vector code)
 * @version $Revision: 8034 $
 */
public class PolyKernel
  extends CachedKernel {

  /** for serialization */
  static final long serialVersionUID = -321831645846363201L;

  /** Use lower-order terms? When true, 1 is added to the dot product
   * before exponentiation, i.e. K(x,y) = (&lt;x,y&gt;+1)^p. */
  protected boolean m_lowerOrder = false;

  /** The exponent for the polynomial kernel. 1.0 makes this a plain
   * linear kernel. */
  protected double m_exponent = 1.0;

  /**
   * default constructor - does nothing.
   */
  public PolyKernel() {
    super();
  }

  /**
   * Frees the cache used by the kernel.
   */
  public void clean() {
    // for the linear case (exponent == 1) the dataset reference itself is
    // dropped as well, since evaluate() no longer needs it after training
    if (getExponent() == 1.0) {
      m_data = null;
    }
    super.clean();
  }

  /**
   * Creates a new <code>PolyKernel</code> instance.
   *
   * @param data	the training dataset used.
   * @param cacheSize	the size of the cache (a prime number)
   * @param exponent	the exponent to use
   * @param lowerOrder	whether to use lower-order terms
   * @throws Exception	if something goes wrong
   */
  public PolyKernel(Instances data, int cacheSize, double exponent,
		    boolean lowerOrder) throws Exception {
    super();

    setCacheSize(cacheSize);
    setExponent(exponent);
    setUseLowerOrder(lowerOrder);

    buildKernel(data);
  }

  /**
   * Returns a string describing the kernel
   *
   * @return		a description suitable for displaying in the
   * 			explorer/experimenter gui
   */
  public String globalInfo() {
    return "The polynomial kernel : K(x, y) = <x, y>^p or K(x, y) = (<x, y>+1)^p";
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return		an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector		result;
    Enumeration		en;

    result = new Vector();

    // inherit the options of CachedKernel (-D, -no-checks, -C)
    en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement(en.nextElement());

    result.addElement(new Option(
	"\tThe Exponent to use.\n"
	+ "\t(default: 1.0)",
	"E", 1, "-E <num>"));

    result.addElement(new Option(
	"\tUse lower-order terms.\n"
	+ "\t(default: no)",
	"L", 0, "-L"));

    return result.elements();
  }

  /**
   * Parses a given list of options. See the class javadoc for the list of
   * valid options (-D, -no-checks, -C, -E, -L).
   *
   * @param options 	the list of options as an array of strings
   * @throws Exception 	if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String	tmpStr;

    tmpStr = Utils.getOption('E', options);
    if (tmpStr.length() != 0)
      setExponent(Double.parseDouble(tmpStr));
    else
      setExponent(1.0);

    setUseLowerOrder(Utils.getFlag('L', options));

    // let CachedKernel consume its own options last
    super.setOptions(options);
  }

  /**
   * Gets the current settings of the Kernel.
   *
   * @return 		an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    int		i;
    Vector	result;
    String[]	options;

    result = new Vector();

    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);

    result.add("-E");
    result.add("" + getExponent());

    if (getUseLowerOrder())
      result.add("-L");

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Computes the raw kernel value for two instances:
   * dot product, optionally shifted by +1 (lower-order terms), optionally
   * raised to the exponent. Caching is handled by CachedKernel.
   *
   * @param id1   	the index of instance 1
   * @param id2		the index of instance 2
   * @param inst1	the instance 1 object
   * @return 		the dot product
   * @throws Exception 	if something goes wrong
   */
  protected double evaluate(int id1, int id2, Instance inst1)
    throws Exception {

    double result;
    if (id1 == id2) {
      result = dotProd(inst1, inst1);
    } else {
      result = dotProd(inst1, m_data.instance(id2));
    }
    // Use lower order terms?
    if (m_lowerOrder) {
      result += 1.0;
    }
    // skip Math.pow for the common linear case
    if (m_exponent != 1.0) {
      result = Math.pow(result, m_exponent);
    }
    return result;
  }

  /**
   * Returns the Capabilities of this kernel: numeric attributes only,
   * any class type, missing class values allowed.
   *
   * @return            the capabilities of this object
   * @see               Capabilities
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    result.enable(Capability.NUMERIC_ATTRIBUTES);

    result.enableAllClasses();
    result.enable(Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * Sets the exponent value.
   *
   * @param value	the exponent value
   */
  public void setExponent(double value) {
    m_exponent = value;
  }

  /**
   * Gets the exponent value.
   *
   * @return		the exponent value
   */
  public double getExponent() {
    return m_exponent;
  }

  /**
   * Returns the tip text for this property
   *
   * @return 		tip text for this property suitable for
   * 			displaying in the explorer/experimenter gui
   */
  public String exponentTipText() {
    return "The exponent value.";
  }

  /**
   * Sets whether to use lower-order terms.
   *
   * @param value	true if lower-order terms will be used
   */
  public void setUseLowerOrder(boolean value) {
    m_lowerOrder = value;
  }

  /**
   * Gets whether lower-order terms are used.
   *
   * @return		true if lower-order terms are used
   */
  public boolean getUseLowerOrder() {
    return m_lowerOrder;
  }

  /**
   * Returns the tip text for this property
   *
   * @return 		tip text for this property suitable for
   * 			displaying in the explorer/experimenter gui
   */
  public String useLowerOrderTipText() {
    return "Whether to use lower-order terms.";
  }

  /**
   * returns a string representation for the Kernel
   *
   * @return 		a string representaiton of the kernel
   */
  public String toString() {
    String	result;

    if (getExponent() == 1.0) {
      if (getUseLowerOrder())
	result = "Linear Kernel with lower order: K(x,y) = <x,y> + 1";
      else
	result = "Linear Kernel: K(x,y) = <x,y>";
    }
    else {
      if (getUseLowerOrder())
	result = "Poly Kernel with lower order: K(x,y) = (<x,y> + 1)^" + getExponent();
      else
	result = "Poly Kernel: K(x,y) = <x,y>^" + getExponent();
    }

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
8,808
23.134247
83
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/supportVector/PrecomputedKernelMatrixKernel.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * PrecomputedKernelMatrixKernel.java
 * Copyright (C) 2008-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.functions.supportVector;

import java.io.File;
import java.io.FileReader;
import java.util.Enumeration;
import java.util.Vector;

import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Copyable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.matrix.Matrix;

/**
 * This kernel is based on a static kernel matrix that is read from a file.
 * Instances must have a single nominal attribute (excluding the class).
 * This attribute must be the first attribute in the file and its values are
 * used to reference rows/columns in the kernel matrix. The second attribute
 * must be the class attribute.
 *
 * Valid options: -D (debug), -no-checks, and
 * -M &lt;file name&gt; - the file that holds the kernel matrix
 * (default: kernelMatrix.matrix).
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 9893 $
 */
public class PrecomputedKernelMatrixKernel extends Kernel
  implements Copyable {

  /** for serialization */
  static final long serialVersionUID = -321831645846363333L;

  /** The file holding the kernel matrix. */
  protected File m_KernelMatrixFile = new File("kernelMatrix.matrix");

  /** The kernel matrix; lazily loaded from m_KernelMatrixFile in initVars(). */
  protected Matrix m_KernelMatrix;

  /** A classifier counter; incremented on every initVars() call. */
  protected int m_Counter;

  /**
   * Return a shallow copy of this kernel
   *
   * @return a shallow copy of this kernel
   */
  public Object copy() {
    PrecomputedKernelMatrixKernel newK = new PrecomputedKernelMatrixKernel();

    // the Matrix instance is shared, not cloned (shallow copy)
    newK.setKernelMatrix(m_KernelMatrix);
    newK.setKernelMatrixFile(m_KernelMatrixFile);
    newK.m_Counter = m_Counter;

    return newK;
  }

  /**
   * Returns a string describing the kernel
   *
   * @return a description suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String globalInfo() {
    return
      "This kernel is based on a static kernel matrix that is read from a file. "
      + "Instances must have a single nominal attribute (excluding the class). "
      + "This attribute must be the first attribute in the file and its values are "
      + "used to reference rows/columns in the kernel matrix. The second attribute "
      + "must be the class attribute.";
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector result;

    result = new Vector();

    Enumeration en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement(en.nextElement());

    result.addElement(new Option(
	"\tThe file name of the file that holds the kernel matrix.\n"
	+ "\t(default: kernelMatrix.matrix)",
	"M", 1, "-M <file name>"));

    return result.elements();
  }

  /**
   * Parses a given list of options (see the class javadoc).
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String tmpStr;

    tmpStr = Utils.getOption('M', options);
    if (tmpStr.length() != 0)
      setKernelMatrixFile(new File(tmpStr));
    else
      setKernelMatrixFile(new File("kernelMatrix.matrix"));

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the Kernel.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    int i;
    Vector<String> result;
    String[] options;

    result = new Vector<String>();

    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);

    result.add("-M");
    result.add("" + getKernelMatrixFile());

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Looks up the kernel value in the precomputed matrix: the value of the
   * first attribute of each instance is used as a (0-based) row/column
   * index into the matrix.
   *
   * @param id1 the index of instance 1
   * @param id2 the index of instance 2
   * @param inst1 the instance 1 object
   * @return the dot product
   * @throws Exception if something goes wrong
   */
  public double eval(int id1, int id2, Instance inst1)
    throws Exception {

    if (m_KernelMatrix == null) {
      throw new IllegalArgumentException("Kernel matrix has not been loaded successfully.");
    }
    int index1 = -1;
    if (id1 > -1) {
      index1 = (int) m_data.instance(id1).value(0);
    } else {
      // id1 == -1 means inst1 is not part of the stored dataset
      index1 = (int) inst1.value(0);
    }
    int index2 = (int) m_data.instance(id2).value(0);
    return m_KernelMatrix.get(index1, index2);
  }

  /**
   * initializes variables etc; loads the kernel matrix from the configured
   * file the first time it is needed.
   *
   * @param data the data to use
   */
  protected void initVars(Instances data) {
    super.initVars(data);

    try {
      if (m_KernelMatrix == null) {
	FileReader reader = new FileReader(m_KernelMatrixFile);
	try {
	  m_KernelMatrix = new Matrix(reader);
	} finally {
	  // FIX: the FileReader was previously never closed (resource leak)
	  reader.close();
	}
	// System.err.println("Read kernel matrix.");
      }
    } catch (Exception e) {
      // best-effort load: a failure is reported here and later surfaces as
      // an IllegalArgumentException in eval() because m_KernelMatrix stays null
      System.err.println("Problem reading matrix from " + m_KernelMatrixFile);
    }
    m_Counter++;
    // System.err.print("Building classifier: " + m_Counter + "\r");
  }

  /**
   * Returns the Capabilities of this kernel.
   *
   * @return the capabilities of this object
   * @see Capabilities
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    result.enable(Capability.NOMINAL_ATTRIBUTES);

    result.enableAllClasses();
    result.enable(Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * Sets the file holding the kernel matrix
   *
   * @param f the file holding the matrix
   */
  public void setKernelMatrixFile(File f) {
    m_KernelMatrixFile = f;
  }

  /**
   * Gets the file containing the kernel matrix.
   *
   * @return the file holding the kernel matrix
   */
  public File getKernelMatrixFile() {
    return m_KernelMatrixFile;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String kernelMatrixFileTipText() {
    return "The file holding the kernel matrix.";
  }

  /**
   * Set the kernel matrix. This method is used by the
   * unit test for this class, as it loads at test matrix
   * as a system resource.
   *
   * @param km the kernel matrix to use
   */
  protected void setKernelMatrix(Matrix km) {
    m_KernelMatrix = km;
  }

  /**
   * returns a string representation for the Kernel
   *
   * @return a string representaiton of the kernel
   */
  public String toString() {
    return "Using kernel matrix from file with name: " + getKernelMatrixFile();
  }

  /**
   * Frees the memory used by the kernel.
   * (Useful with kernels which use cache.)
   * This function is called when the training is done.
   * i.e. after that, eval will be called with id1 == -1.
   */
  public void clean() {
    // do nothing: the whole point of this kernel is the precomputed matrix
  }

  /**
   * Returns the number of kernel evaluation performed.
   *
   * @return the number of kernel evaluation performed.
   */
  public int numEvals() {
    return 0;
  }

  /**
   * Returns the number of dot product cache hits.
   *
   * @return the number of dot product cache hits, or -1 if not supported by this kernel.
   */
  public int numCacheHits() {
    return 0;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 9893 $");
  }
}
9,239
26.5
322
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/supportVector/Puk.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Puk.java
 * Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.functions.supportVector;

import java.util.Enumeration;
import java.util.Vector;

import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * The Pearson VII function-based universal kernel.<br/>
 * <br/>
 * For more information see:<br/>
 * <br/>
 * B. Uestuen, W.J. Melssen, L.M.C. Buydens (2006). Facilitating the application of Support Vector Regression by using a universal Pearson VII function based kernel. Chemometrics and Intelligent Laboratory Systems. 81:29-40.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -D
 *  Enables debugging output (if available) to be printed.
 *  (default: off)</pre>
 *
 * <pre> -no-checks
 *  Turns off all checks - use with caution!
 *  (default: checks on)</pre>
 *
 * <pre> -C &lt;num&gt;
 *  The size of the cache (a prime number), 0 for full cache and
 *  -1 to turn it off.
 *  (default: 250007)</pre>
 *
 * <pre> -O &lt;num&gt;
 *  The Omega parameter.
 *  (default: 1.0)</pre>
 *
 * <pre> -S &lt;num&gt;
 *  The Sigma parameter.
 *  (default: 1.0)</pre>
 *
 <!-- options-end -->
 *
 * @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public class Puk
  extends CachedKernel
  implements TechnicalInformationHandler {

  /** for serialization */
  private static final long serialVersionUID = 1682161522559978851L;

  /** The precalculated dotproducts of &lt;inst_i,inst_i&gt;, indexed by
   * instance position in m_data. Avoids recomputing self-products on
   * every kernel evaluation. */
  protected double m_kernelPrecalc[];

  /** Omega for the Puk kernel (the Pearson VII shape/tailing exponent). */
  protected double m_omega = 1.0;

  /** Sigma for the Puk kernel (the Pearson VII half-width parameter). */
  protected double m_sigma = 1.0;

  /** Cached factor for the Puk kernel, derived from omega and sigma via
   * computeFactor(); must be refreshed whenever either parameter changes. */
  protected double m_factor = 1.0;

  /**
   * default constructor - does nothing.
   */
  public Puk() {
    super();
  }

  /**
   * Constructor. Initializes m_kernelPrecalc[].
   *
   * @param data      the data to use
   * @param cacheSize the size of the cache
   * @param omega     the exponent
   * @param sigma     the bandwidth
   * @throws Exception if something goes wrong
   */
  public Puk(Instances data, int cacheSize, double omega, double sigma)
    throws Exception {

    super();

    setCacheSize(cacheSize);
    setOmega(omega);
    setSigma(sigma);

    buildKernel(data);
  }

  /**
   * Returns a string describing the kernel
   *
   * @return a description suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "The Pearson VII function-based universal kernel.\n\n"
      + "For more information see:\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation 	result;

    result = new TechnicalInformation(Type.ARTICLE);
    result.setValue(Field.AUTHOR, "B. Uestuen and W.J. Melssen and L.M.C. Buydens");
    result.setValue(Field.YEAR, "2006");
    result.setValue(Field.TITLE, "Facilitating the application of Support Vector Regression by using a universal Pearson VII function based kernel");
    result.setValue(Field.JOURNAL, "Chemometrics and Intelligent Laboratory Systems");
    result.setValue(Field.VOLUME, "81");
    result.setValue(Field.PAGES, "29-40");
    result.setValue(Field.PDF, "http://www.cac.science.ru.nl/research/publications/PDFs/ustun2006.pdf");

    return result;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector		result;
    Enumeration		en;

    result = new Vector();

    // keep the superclass options (debug, checks, cache size)
    en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement(en.nextElement());

    result.addElement(new Option(
	"\tThe Omega parameter.\n"
	+ "\t(default: 1.0)",
	"O", 1, "-O <num>"));

    result.addElement(new Option(
	"\tThe Sigma parameter.\n"
	+ "\t(default: 1.0)",
	"S", 1, "-S <num>"));

    return result.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -D
   *  Enables debugging output (if available) to be printed.
   *  (default: off)</pre>
   *
   * <pre> -no-checks
   *  Turns off all checks - use with caution!
   *  (default: checks on)</pre>
   *
   * <pre> -C &lt;num&gt;
   *  The size of the cache (a prime number), 0 for full cache and
   *  -1 to turn it off.
   *  (default: 250007)</pre>
   *
   * <pre> -O &lt;num&gt;
   *  The Omega parameter.
   *  (default: 1.0)</pre>
   *
   * <pre> -S &lt;num&gt;
   *  The Sigma parameter.
   *  (default: 1.0)</pre>
   *
   <!-- options-end -->
   *
   * @param options 	the list of options as an array of strings
   * @throws Exception 	if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String	tmpStr;

    tmpStr = Utils.getOption('O', options);
    if (tmpStr.length() != 0)
      setOmega(Double.parseDouble(tmpStr));
    else
      setOmega(1.0);

    tmpStr = Utils.getOption('S', options);
    if (tmpStr.length() != 0)
      setSigma(Double.parseDouble(tmpStr));
    else
      setSigma(1.0);

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the Kernel.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    int       i;
    Vector    result;
    String[]  options;

    result = new Vector();
    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);

    result.add("-O");
    result.add("" + getOmega());

    result.add("-S");
    result.add("" + getSigma());

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Evaluates the PUK kernel between the instance with (cached) index id1
   * and the instance with index id2:
   * K(x,y) = 1 / (1 + (factor * ||x - y||)^2)^omega
   *
   * @param id1   	the index of instance 1 (-1 if not in the data set)
   * @param id2   	the index of instance 2
   * @param inst1	the instance 1 object
   * @return 		the dot product
   * @throws Exception 	if something goes wrong
   */
  protected double evaluate(int id1, int id2, Instance inst1)
    throws Exception {

    if (id1 == id2) {
      // K(x,x) is 1 by construction
      return 1.0;
    }
    else {
      double precalc1;
      if (id1 == -1)
	precalc1 = dotProd(inst1, inst1);
      else
	precalc1 = m_kernelPrecalc[id1];
      Instance inst2 = m_data.instance(id2);
      // ||x - y||^2 = <x,x> - 2<x,y> + <y,y>; clamp at zero because
      // floating-point rounding can make the expanded form marginally
      // negative for (near-)identical instances, in which case Math.sqrt
      // would return NaN and poison the kernel matrix.
      double squaredDifference = Math.max(0.0,
	  -2.0 * dotProd(inst1, inst2) + precalc1 + m_kernelPrecalc[id2]);
      double intermediate = m_factor * Math.sqrt(squaredDifference);
      double result = 1.0 / Math.pow(1.0 + intermediate * intermediate,
	  getOmega());
      return result;
    }
  }

  /**
   * Sets the omega value. Also refreshes the cached curve-fitting factor.
   *
   * @param value	the omega value
   */
  public void setOmega(double value) {
    m_omega = value;
    m_factor = computeFactor(m_omega, m_sigma);
  }

  /**
   * Gets the omega value.
   *
   * @return		the omega value
   */
  public double getOmega() {
    return m_omega;
  }

  /**
   * Returns the tip text for this property
   *
   * @return 		tip text for this property suitable for
   * 			displaying in the explorer/experimenter gui
   */
  public String omegaTipText() {
    return "The Omega value.";
  }

  /**
   * Sets the sigma value. Also refreshes the cached curve-fitting factor.
   *
   * @param value	the sigma value
   */
  public void setSigma(double value) {
    m_sigma = value;
    m_factor = computeFactor(m_omega, m_sigma);
  }

  /**
   * Gets the sigma value.
   *
   * @return		the sigma value
   */
  public double getSigma() {
    return m_sigma;
  }

  /**
   * Returns the tip text for this property
   *
   * @return 		tip text for this property suitable for
   * 			displaying in the explorer/experimenter gui
   */
  public String sigmaTipText() {
    return "The Sigma value.";
  }

  /**
   * computes the factor for curve-fitting (see equation (13) in paper)
   *
   * @param omega	the omega to use
   * @param sigma	the sigma to use
   * @return		the factor for curve-fitting
   */
  protected double computeFactor(double omega, double sigma) {
    double root = Math.sqrt(Math.pow(2.0, 1.0 / omega) - 1);
    return 2.0 * root / sigma;
  }

  /**
   * initializes variables etc.
   *
   * @param data	the data to use
   */
  protected void initVars(Instances data) {
    super.initVars(data);

    m_factor = computeFactor(m_omega, m_sigma);
    m_kernelPrecalc = new double[data.numInstances()];
  }

  /**
   * Returns the Capabilities of this kernel.
   *
   * @return            the capabilities of this object
   * @see               Capabilities
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NUMERIC_ATTRIBUTES);

    // class
    result.enableAllClasses();
    result.enable(Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * builds the kernel with the given data. Initializes the kernel cache.
   * The actual size of the cache in bytes is (64 * cacheSize).
   *
   * @param data	the data to base the kernel on
   * @throws Exception	if something goes wrong
   */
  public void buildKernel(Instances data) throws Exception {
    // does kernel handle the data?
    if (!getChecksTurnedOff())
      getCapabilities().testWithFail(data);

    initVars(data);

    // precompute all self dot products once
    for (int i = 0; i < data.numInstances(); i++)
      m_kernelPrecalc[i] = dotProd(data.instance(i), data.instance(i));
  }

  /**
   * returns a string representation for the Kernel
   *
   * @return 		a string representaiton of the kernel
   */
  public String toString() {
    return "Puk kernel";
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
11,152
25.242353
224
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/supportVector/RBFKernel.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * RBFKernel.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 * Copyright (C) 2005 J. Lindgren
 *
 */

package weka.classifiers.functions.supportVector;

import java.util.Enumeration;
import java.util.Vector;

import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * The RBF kernel. K(x, y) = e^-(gamma * &lt;x-y, x-y&gt;^2)
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -D
 *  Enables debugging output (if available) to be printed.
 *  (default: off)</pre>
 *
 * <pre> -no-checks
 *  Turns off all checks - use with caution!
 *  (default: checks on)</pre>
 *
 * <pre> -C &lt;num&gt;
 *  The size of the cache (a prime number), 0 for full cache and
 *  -1 to turn it off.
 *  (default: 250007)</pre>
 *
 * <pre> -G &lt;num&gt;
 *  The Gamma parameter.
 *  (default: 0.01)</pre>
 *
 <!-- options-end -->
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @author Shane Legg (shane@intelligenesis.net) (sparse vector code)
 * @author Stuart Inglis (stuart@reeltwo.com) (sparse vector code)
 * @author J. Lindgren (jtlindgr{at}cs.helsinki.fi) (RBF kernel)
 * @version $Revision: 8034 $
 */
public class RBFKernel
  extends CachedKernel {

  /** for serialization */
  static final long serialVersionUID = 5247117544316387852L;

  /** The precalculated dotproducts of &lt;inst_i,inst_i&gt;, one entry per
   * training instance, filled in by buildKernel(). */
  protected double m_kernelPrecalc[];

  /** Gamma for the RBF kernel (the bandwidth parameter). */
  protected double m_gamma = 0.01;

  /**
   * Default constructor; leaves the kernel unconfigured.
   */
  public RBFKernel() {
    super();
  }

  /**
   * Constructor that fully configures the kernel and precomputes the
   * self dot products for the supplied data.
   *
   * @param data      the data to use
   * @param cacheSize the size of the cache
   * @param gamma     the bandwidth
   * @throws Exception if something goes wrong
   */
  public RBFKernel(Instances data, int cacheSize, double gamma)
    throws Exception {

    super();

    setCacheSize(cacheSize);
    setGamma(gamma);

    buildKernel(data);
  }

  /**
   * Returns a string describing the kernel
   *
   * @return a description suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String globalInfo() {
    return "The RBF kernel. K(x, y) = e^-(gamma * <x-y, x-y>^2)";
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector options = new Vector();

    // inherit the superclass options (debug, checks, cache size) ...
    Enumeration superOptions = super.listOptions();
    while (superOptions.hasMoreElements())
      options.addElement(superOptions.nextElement());

    // ... and add the kernel-specific one
    options.addElement(new Option(
	"\tThe Gamma parameter.\n"
	+ "\t(default: 0.01)",
	"G", 1, "-G <num>"));

    return options.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -D
   *  Enables debugging output (if available) to be printed.
   *  (default: off)</pre>
   *
   * <pre> -no-checks
   *  Turns off all checks - use with caution!
   *  (default: checks on)</pre>
   *
   * <pre> -C &lt;num&gt;
   *  The size of the cache (a prime number), 0 for full cache and
   *  -1 to turn it off.
   *  (default: 250007)</pre>
   *
   * <pre> -G &lt;num&gt;
   *  The Gamma parameter.
   *  (default: 0.01)</pre>
   *
   <!-- options-end -->
   *
   * @param options 	the list of options as an array of strings
   * @throws Exception 	if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String gammaStr = Utils.getOption('G', options);
    if (gammaStr.length() != 0)
      setGamma(Double.parseDouble(gammaStr));
    else
      setGamma(0.01);

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the Kernel.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector result = new Vector();

    for (String option : super.getOptions())
      result.add(option);

    result.add("-G");
    result.add("" + getGamma());

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Evaluates the RBF kernel between the instance with (cached) index id1
   * and the instance with index id2. The squared distance is expanded as
   * &lt;x,x&gt; - 2&lt;x,y&gt; + &lt;y,y&gt; so cached self products can
   * be reused.
   *
   * @param id1   	the index of instance 1 (-1 if not in the data set)
   * @param id2   	the index of instance 2
   * @param inst1	the instance 1 object
   * @return 		the dot product
   * @throws Exception 	if something goes wrong
   */
  protected double evaluate(int id1, int id2, Instance inst1)
    throws Exception {

    if (id1 == id2)
      return 1.0;

    // self product of the first instance: look it up when cached,
    // otherwise compute it on the fly (id1 == -1 means "not in m_data")
    double selfProd1 = (id1 == -1)
      ? dotProd(inst1, inst1)
      : m_kernelPrecalc[id1];

    Instance inst2 = m_data.instance(id2);

    return Math.exp(m_gamma
	* (2. * dotProd(inst1, inst2) - selfProd1 - m_kernelPrecalc[id2]));
  }

  /**
   * Sets the gamma value.
   *
   * @param value	the gamma value
   */
  public void setGamma(double value) {
    m_gamma = value;
  }

  /**
   * Gets the gamma value.
   *
   * @return		the gamma value
   */
  public double getGamma() {
    return m_gamma;
  }

  /**
   * Returns the tip text for this property
   *
   * @return 		tip text for this property suitable for
   * 			displaying in the explorer/experimenter gui
   */
  public String gammaTipText() {
    return "The Gamma value.";
  }

  /**
   * initializes variables etc.
   *
   * @param data	the data to use
   */
  protected void initVars(Instances data) {
    super.initVars(data);

    m_kernelPrecalc = new double[data.numInstances()];
  }

  /**
   * Returns the Capabilities of this kernel.
   *
   * @return            the capabilities of this object
   * @see               Capabilities
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NUMERIC_ATTRIBUTES);

    // class
    result.enableAllClasses();
    result.enable(Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * builds the kernel with the given data. Initializes the kernel cache.
   * The actual size of the cache in bytes is (64 * cacheSize).
   *
   * @param data	the data to base the kernel on
   * @throws Exception	if something goes wrong
   */
  public void buildKernel(Instances data) throws Exception {
    // does kernel handle the data?
    if (!getChecksTurnedOff())
      getCapabilities().testWithFail(data);

    initVars(data);

    // precompute every instance's dot product with itself
    for (int idx = 0; idx < data.numInstances(); idx++)
      m_kernelPrecalc[idx] = dotProd(data.instance(idx), data.instance(idx));
  }

  /**
   * returns a string representation for the Kernel
   *
   * @return 		a string representaiton of the kernel
   */
  public String toString() {
    return "RBF kernel: K(x,y) = e^-(" + getGamma() + "* <x-y,x-y>^2)";
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
8,005
23.786378
76
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/supportVector/RegOptimizer.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * RegOptimizer.java
 * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.functions.supportVector;

import java.io.Serializable;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.classifiers.functions.SMOreg;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * Base class implementation for learning algorithm of SMOreg.
 * Holds the shared state of the dual problem (alpha / alpha* weights,
 * offset, error terms) and the common option handling; concrete
 * optimization strategies are implemented in subclasses which override
 * buildClassifier().
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -L &lt;double&gt;
 *  The epsilon parameter in epsilon-insensitive loss function.
 *  (default 1.0e-3)</pre>
 *
 * <pre> -W &lt;double&gt;
 *  The random number seed.
 *  (default 1)</pre>
 *
 <!-- options-end -->
 *
 * @author Remco Bouckaert (remco@cs.waikato.ac.nz,rrb@xm.co.nz)
 * @version $Revision: 8034 $
 */
public class RegOptimizer
  implements OptionHandler, Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -2198266997254461814L;

  // NOTE(review): the loss-type machinery below was disabled at some point
  // (only the epsilon-insensitive loss remains active); kept here verbatim.
  /** loss type **/
  //protected int m_nLossType = EPSILON;
  /** the loss type: L1 */
  //public final static int L1 = 1;
  /** the loss type: L2 */
  //public final static int L2 = 2;
  /** the loss type: HUBER */
  //public final static int HUBER = 3;
  /** the loss type: EPSILON */
  //public final static int EPSILON = 4;
  /** the loss type */
  //public static final Tag[] TAGS_LOSS_TYPE = {
  //	new Tag(L2, "L2"),
  //	new Tag(L1, "L1"),
  //	new Tag(HUBER, "Huber"),
  //	new Tag(EPSILON, "EPSILON"),
  //};

  /** alpha and alpha* arrays containing weights for solving dual problem **/
  public double[] m_alpha;
  public double[] m_alphaStar;

  /** offset **/
  protected double m_b;

  /** epsilon of epsilon-insensitive cost function **/
  protected double m_epsilon = 1e-3;

  /** capacity parameter, copied from SMOreg **/
  protected double m_C = 1.0;

  /** class values/desired output vector **/
  protected double[] m_target;

  /** points to data set **/
  protected Instances m_data;

  /** the kernel (a copy of the parent SMOreg's kernel, built in init()) */
  protected Kernel m_kernel;

  /** index of class variable in data set **/
  protected int m_classIndex = -1;

  /** number of instances in data set **/
  protected int m_nInstances = -1;

  /** random number generator **/
  protected Random m_random;

  /** seed for initializing random number generator **/
  protected int m_nSeed = 1;

  /** set of support vectors, that is, vectors with alpha(*)!=0 **/
  protected SMOset m_supportVectors;

  /** number of kernel evaluations, used for printing statistics only **/
  protected int m_nEvals = 0;

  /** number of kernel cache hits, used for printing statistics only **/
  protected int m_nCacheHits = -1;

  /** weights for linear kernel (only set for a linear machine, i.e. a
   * PolyKernel with exponent 1; otherwise stays null) **/
  protected double[] m_weights;

  /** Variables to hold weight vector in sparse form.
      (To reduce storage requirements.) */
  protected double[] m_sparseWeights;
  protected int[] m_sparseIndices;

  /** flag to indicate whether the model is built yet **/
  protected boolean m_bModelBuilt = false;

  /** parent SMOreg class **/
  protected SMOreg m_SVM = null;

  /**
   * the default constructor
   */
  public RegOptimizer() {
    super();

    m_random = new Random(m_nSeed);
  }

  /**
   * Gets an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector result = new Vector();

    result.addElement(new Option(
	"\tThe epsilon parameter in epsilon-insensitive loss function.\n"
	+ "\t(default 1.0e-3)",
	"L", 1, "-L <double>"));

    //    result.addElement(new Option(
    //	"\tLoss type (L1, L2, Huber, Epsilon insensitive loss)\n",
    //	"L", 1, "-L [L1|L2|HUBER|EPSILON]"));

    result.addElement(new Option(
	"\tThe random number seed.\n"
	+ "\t(default 1)",
	"W", 1, "-W <double>"));

    return result.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -L &lt;double&gt;
   *  The epsilon parameter in epsilon-insensitive loss function.
   *  (default 1.0e-3)</pre>
   *
   * <pre> -W &lt;double&gt;
   *  The random number seed.
   *  (default 1)</pre>
   *
   <!-- options-end -->
   *
   * @param options 	the list of options as an array of strings
   * @throws Exception 	if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String	tmpStr;

    tmpStr = Utils.getOption('L', options);
    if (tmpStr.length() != 0) {
      setEpsilonParameter(Double.parseDouble(tmpStr));
    } else {
      setEpsilonParameter(1.0e-3);
    }

    /*
    tmpStr = Utils.getOption('S', options);
    if (tmpStr.length() != 0)
      setLossType(new SelectedTag(tmpStr, TAGS_LOSS_TYPE));
    else
      setLossType(new SelectedTag(EPSILON, TAGS_LOSS_TYPE));
    */

    tmpStr = Utils.getOption('W', options);
    if (tmpStr.length() != 0) {
      setSeed(Integer.parseInt(tmpStr));
    } else {
      setSeed(1);
    }
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector	result;

    result = new Vector();

    result.add("-L");
    result.add("" + getEpsilonParameter());

    result.add("-W");
    result.add("" + getSeed());

    //result.add("-S";
    //result.add((new SelectedTag(m_nLossType, TAGS_LOSS_TYPE)).getSelectedTag().getReadable();

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * flag to indicate whether the model was built yet
   *
   * @return		true if the model was built
   */
  public boolean modelBuilt() {
    return m_bModelBuilt;
  }

  /**
   * sets the parent SVM
   *
   * @param value	the parent SVM
   */
  public void setSMOReg(SMOreg value) {
    m_SVM = value;
  }

  /**
   * returns the number of kernel evaluations
   *
   * @return		the number of kernel evaluations
   */
  public int getKernelEvaluations() {
    return m_nEvals;
  }

  /**
   * return the number of kernel cache hits
   *
   * @return		the number of hits
   */
  public int getCacheHits() {
    return m_nCacheHits;
  }

  /**
   * initializes the algorithm: copies the C parameter from the parent
   * SMOreg, builds a fresh copy of the kernel on the data, caches the
   * target values, and resets the alpha/alpha* arrays, support-vector set,
   * offset and statistics. Must be called before the subclass optimizer
   * starts.
   *
   * @param data	the data to work with
   * @throws Exception	if m_SVM is null
   */
  protected void init(Instances data) throws Exception {
    if (m_SVM == null) {
      throw new Exception ("SVM not initialized in optimizer. Use RegOptimizer.setSVMReg()");
    }
    m_C = m_SVM.getC();
    m_data = data;
    m_classIndex = data.classIndex();
    m_nInstances = data.numInstances();

    // Initialize kernel (a copy, so the parent's template kernel is untouched)
    m_kernel = Kernel.makeCopy(m_SVM.getKernel());
    m_kernel.buildKernel(data);

    //init m_target
    m_target = new double[m_nInstances];
    for (int i = 0; i < m_nInstances; i++) {
      m_target[i] = data.instance(i).classValue();
    }

    m_random = new Random(m_nSeed);

    //	initialize alpha and alpha* array to all zero
    m_alpha = new double[m_target.length];
    m_alphaStar = new double[m_target.length];

    m_supportVectors = new SMOset(m_nInstances);

    m_b = 0.0;
    m_nEvals = 0;
    m_nCacheHits = -1;
  }

  /**
   * wrap up various variables to save memeory and do some housekeeping after optimization
   * has finished. For a linear machine (PolyKernel with exponent 1) the
   * alpha values are folded into an explicit weight vector and the alpha
   * arrays and kernel are released; otherwise the alphas are kept for
   * SVMOutput().
   *
   * @throws Exception	if something goes wrong
   */
  protected void wrapUp() throws Exception {
    m_target = null;

    m_nEvals = m_kernel.numEvals();
    m_nCacheHits = m_kernel.numCacheHits();

    if ((m_SVM.getKernel() instanceof PolyKernel)
	&& ((PolyKernel) m_SVM.getKernel()).getExponent() == 1.0) {
      // convert alpha's to weights
      double [] weights = new double[m_data.numAttributes()];
      for (int k = m_supportVectors.getNext(-1); k != -1; k = m_supportVectors.getNext(k)) {
	for (int j = 0; j < weights.length; j++) {
	  if (j != m_classIndex) {
	    weights[j] += (m_alpha[k] - m_alphaStar[k]) * m_data.instance(k).value(j);
	  }
	}
      }
      m_weights = weights;

      // release memory
      m_alpha = null;
      m_alphaStar = null;
      m_kernel = null;
    }
    m_bModelBuilt = true;
  }

  /**
   * Compute the value of the objective function
   * (the dual objective for the epsilon-insensitive loss; the commented
   * branches correspond to the disabled alternative loss types).
   * Note: O(n^2) in the number of instances, intended for
   * convergence checking / debugging.
   *
   * @return		the score
   * @throws Exception	if something goes wrong
   */
  protected double getScore() throws Exception {
    double res = 0;
    double t = 0, t2 = 0;
    double sumAlpha = 0.0;
    for (int i = 0; i < m_nInstances; i++) {
      sumAlpha += (m_alpha[i] - m_alphaStar[i]);
      for (int j = 0; j < m_nInstances; j++) {
	t += (m_alpha[i] - m_alphaStar[i]) * (m_alpha[j] - m_alphaStar[j]) * m_kernel.eval(i, j, m_data.instance(i));
      }
      //		switch(m_nLossType) {
      //		case L1:
      //		t2 += m_data.instance(i).classValue() * (m_alpha[i] - m_alpha_[i]);
      //		break;
      //		case L2:
      //		t2 += m_data.instance(i).classValue() * (m_alpha[i] - m_alpha_[i]) - (0.5/m_SVM.getC()) * (m_alpha[i]*m_alpha[i] + m_alpha_[i]*m_alpha_[i]);
      //		break;
      //		case HUBER:
      //		t2 += m_data.instance(i).classValue() * (m_alpha[i] - m_alpha_[i]) - (0.5*m_SVM.getEpsilon()/m_SVM.getC()) * (m_alpha[i]*m_alpha[i] + m_alpha_[i]*m_alpha_[i]);
      //		break;
      //		case EPSILON:
      //t2 += m_data.instance(i).classValue() * (m_alpha[i] - m_alphaStar[i]) - m_epsilon * (m_alpha[i] + m_alphaStar[i]);
      t2 += m_target[i] * (m_alpha[i] - m_alphaStar[i]) - m_epsilon * (m_alpha[i] + m_alphaStar[i]);
      //		break;
      //		}
    }
    res += -0.5 * t + t2;
    return res;
  }

  /**
   * learn SVM parameters from data.
   * Subclasses should implement something more interesting.
   *
   * @param data	the data to work with
   * @throws Exception	always an Exceoption since subclasses must override it
   */
  public void buildClassifier(Instances data) throws Exception {
    throw new Exception("Don't call this directly, use subclass instead");
  }

  /**
   * sets the loss type type to use
   *
   * @param newLossType	the loss type to use
   */
  //public void setLossType(SelectedTag newLossType) {
  //	if (newLossType.getTags() == TAGS_LOSS_TYPE) {
  //	m_nLossType = newLossType.getSelectedTag().getID();
  //	}
  //}

  /**
   * returns the current loss type
   *
   * @return		the loss type
   */
  //public SelectedTag getLossType() {
  //	return new SelectedTag(m_nLossType, TAGS_LOSS_TYPE);
  //}

  /**
   * SVMOutput of an instance in the training set, m_data
   * This uses the cache, unlike SVMOutput(Instance)
   *
   * @param index 	index of the training instance in m_data
   * @return		the SVM output
   * @throws Exception	if something goes wrong
   */
  protected double SVMOutput(int index) throws Exception {
    double result = -m_b;
    // sum over support vectors only; alpha - alpha* is zero elsewhere
    for (int i = m_supportVectors.getNext(-1); i != -1; i = m_supportVectors.getNext(i)) {
      result += (m_alpha[i] - m_alphaStar[i]) * m_kernel.eval(index, i, m_data.instance(index));
    }
    return result;
  }

  /**
   * Computes the SVM output for the given (possibly unseen) instance.
   * For a linear machine the cached weight vector is used directly;
   * otherwise the kernel is evaluated against every support vector
   * (id -1 tells the kernel the instance is not in the training cache).
   *
   * @param inst	the instance to compute the output for
   * @return		the SVM output
   * @throws Exception	if something goes wrong
   */
  public double SVMOutput(Instance inst) throws Exception {
    double result = -m_b;
    // Is the machine linear?
    if (m_weights != null) {
      // Is weight vector stored in sparse format?
      for (int i = 0; i < inst.numValues(); i++) {
	if (inst.index(i) != m_classIndex) {
	  result += m_weights[inst.index(i)] * inst.valueSparse(i);
	}
      }
    } else {
      for (int i = m_supportVectors.getNext(-1); i != -1; i = m_supportVectors.getNext(i)) {
	result += (m_alpha[i] - m_alphaStar[i]) * m_kernel.eval(-1, i, inst);
      }
    }
    return result;
  }

  /**
   * Returns the tip text for this property
   *
   * @return 		tip text for this property suitable for
   * 			displaying in the explorer/experimenter gui
   */
  public String seedTipText() {
    return "Seed for random number generator.";
  }

  /**
   * Gets the current seed value for the random number generator
   *
   * @return		the seed value
   */
  public int getSeed() {
    return m_nSeed;
  }

  /**
   * Sets the seed value for the random number generator
   *
   * @param value	the seed value
   */
  public void setSeed(int value) {
    m_nSeed = value;
  }

  /**
   * Returns the tip text for this property
   *
   * @return 		tip text for this property suitable for
   * 			displaying in the explorer/experimenter gui
   */
  public String epsilonParameterTipText() {
    return "The epsilon parameter of the epsilon insensitive loss function.(default 0.001).";
  }

  /**
   * Get the value of epsilon parameter of the epsilon insensitive loss function.
   *
   * @return 		Value of epsilon parameter.
   */
  public double getEpsilonParameter() {
    return m_epsilon;
  }

  /**
   * Set the value of epsilon parameter of the epsilon insensitive loss function.
   *
   * @param v 		Value to assign to epsilon parameter.
   */
  public void setEpsilonParameter(double v) {
    m_epsilon = v;
  }

  /**
   * Prints out the classifier: either the explicit weight vector (linear
   * machine) or the alpha/alpha* values of the support vectors, followed
   * by the offset and the kernel-evaluation statistics.
   *
   * @return a description of the classifier as a string
   */
  public String toString() {
    StringBuffer text = new StringBuffer();
    text.append("SMOreg\n\n");

    if (m_weights != null) {
      text.append("weights (not support vectors):\n");
      // it's a linear machine
      for (int i = 0; i < m_data.numAttributes(); i++) {
	if (i != m_classIndex) {
	  text.append((m_weights[i] >= 0 ? " + " : " - ")
	      + Utils.doubleToString(Math.abs(m_weights[i]), 12, 4)
	      + " * ");
	  if (m_SVM.getFilterType().getSelectedTag().getID() == SMOreg.FILTER_STANDARDIZE) {
	    text.append("(standardized) ");
	  } else if (m_SVM.getFilterType().getSelectedTag().getID() == SMOreg.FILTER_NORMALIZE) {
	    text.append("(normalized) ");
	  }
	  text.append(m_data.attribute(i).name() + "\n");
	}
      }
    } else {
      // non linear, print out all supportvectors
      text.append("Support vectors:\n");
      for (int i = 0; i < m_nInstances; i++) {
	if (m_alpha[i] > 0) {
	  text.append("+" + m_alpha[i] + " * k[" + i + "]\n");
	}
	if (m_alphaStar[i] > 0) {
	  text.append("-" + m_alphaStar[i] + " * k[" + i + "]\n");
	}
      }
    }

    text.append((m_b<=0?" + ":" - ") + Utils.doubleToString(Math.abs(m_b), 12, 4) + "\n\n");

    text.append("\n\nNumber of kernel evaluations: " + m_nEvals);
    if (m_nCacheHits >= 0 && m_nEvals > 0) {
      // hit ratio = fraction of requested evaluations served from the cache
      double hitRatio = 1 - m_nEvals * 1.0 / (m_nCacheHits + m_nEvals);
      text.append(" (" + Utils.doubleToString(hitRatio * 100, 7, 3).trim() + "% cached)");
    }

    return text.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
15,637
26.776199
165
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/supportVector/RegSMO.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RegSMO.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.supportVector; import java.util.Enumeration; import java.util.Vector; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** <!-- globalinfo-start --> * Implementation of SMO for support vector regression as described in :<br/> * <br/> * A.J. Smola, B. Schoelkopf (1998). A tutorial on support vector regression. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;misc{Smola1998, * author = {A.J. Smola and B. Schoelkopf}, * note = {NeuroCOLT2 Technical Report NC2-TR-1998-030}, * title = {A tutorial on support vector regression}, * year = {1998} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;double&gt; * The epsilon for round-off error. * (default 1.0e-12)</pre> * * <pre> -L &lt;double&gt; * The epsilon parameter in epsilon-insensitive loss function. * (default 1.0e-3)</pre> * * <pre> -W &lt;double&gt; * The random number seed. 
 * (default 1)</pre>
 * <!-- options-end -->
 *
 * @author Remco Bouckaert (remco@cs.waikato.ac.nz,rrb@xm.co.nz)
 * @version $Revision: 8034 $
 */
public class RegSMO extends RegOptimizer implements TechnicalInformationHandler {

  /** for serialization */
  private static final long serialVersionUID = -7504070793279598638L;

  /** tolerance parameter; changes on alpha smaller than this in the inner loop are ignored **/
  protected double m_eps = 1.0e-12;

  /** Precision constant for snapping alphas to the box boundaries {0, C} when updating sets */
  protected final static double m_Del = 1e-10; //1000 * Double.MIN_VALUE;

  /**
   * error cache containing m_error[i] = SVMOutput(i) - m_target[i] - m_b <br/>
   * note, we don't need m_b in the cache, since if we do, we need to maintain
   * it when m_b is updated
   */
  double[] m_error;

  /** alpha value for first candidate **/
  protected double m_alpha1;

  /** alpha* value for first candidate **/
  protected double m_alpha1Star;

  /** alpha value for second candidate **/
  protected double m_alpha2;

  /** alpha* value for second candidate **/
  protected double m_alpha2Star;

  /**
   * default constructor
   */
  public RegSMO() {
    super();
  }

  /**
   * Returns a string describing classifier
   *
   * @return a description suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "Implementation of SMO for support vector regression as described " + "in :\n\n" + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.MISC);
    result.setValue(Field.AUTHOR, "A.J. Smola and B. Schoelkopf");
    result.setValue(Field.TITLE, "A tutorial on support vector regression");
    result.setValue(Field.NOTE, "NeuroCOLT2 Technical Report NC2-TR-1998-030");
    result.setValue(Field.YEAR, "1998");

    return result;
  }

  /**
   * Returns an enumeration describing the available options
   *
   * @return an enumeration of all the available options
   */
  public Enumeration listOptions() {
    Vector result = new Vector();

    result.addElement(new Option(
        "\tThe epsilon for round-off error.\n"
        + "\t(default 1.0e-12)",
        "P", 1, "-P <double>"));

    // append the options of the superclass (RegOptimizer)
    Enumeration enm = super.listOptions();
    while (enm.hasMoreElements()) {
      result.addElement(enm.nextElement());
    }

    return result.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   * <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -P &lt;double&gt;
   *  The epsilon for round-off error.
   *  (default 1.0e-12)</pre>
   *
   * <pre> -L &lt;double&gt;
   *  The epsilon parameter in epsilon-insensitive loss function.
   *  (default 1.0e-3)</pre>
   *
   * <pre> -W &lt;double&gt;
   *  The random number seed.
   *  (default 1)</pre>
   * <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String tmpStr;

    tmpStr = Utils.getOption('P', options);
    if (tmpStr.length() != 0) {
      setEpsilon(Double.parseDouble(tmpStr));
    } else {
      setEpsilon(1.0e-12);
    }

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    int i;
    Vector result;
    String[] options;

    result = new Vector();

    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);

    result.add("-P");
    result.add("" + getEpsilon());

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String epsilonTipText() {
    return "The epsilon for round-off error (shouldn't be changed).";
  }

  /**
   * Get the value of epsilon.
   *
   * @return Value of epsilon.
   */
  public double getEpsilon() {
    return m_eps;
  }

  /**
   * Set the value of epsilon.
   *
   * @param v Value to assign to epsilon.
   */
  public void setEpsilon(double v) {
    m_eps = v;
  }

  /**
   * initialize various variables before starting the actual optimizer
   *
   * @param data data set used for learning
   * @throws Exception if something goes wrong
   */
  protected void init(Instances data) throws Exception {
    super.init(data);

    // init error cache: with all alphas zero, SVMOutput(i) == 0 and m_b == 0,
    // so the cached error is simply -m_target[i]
    m_error = new double[m_nInstances];
    for (int i = 0; i < m_nInstances; i++) {
      m_error[i] = -m_target[i];
    }
  }

  /**
   * wrap up various variables to save memory and do some housekeeping after
   * optimization has finished.
   *
   * @throws Exception if something goes wrong
   */
  protected void wrapUp() throws Exception {
    m_error = null;
    super.wrapUp();
  }

  /**
   * Finds the optimal point on the line constrained by the first (i1) and
   * second (i2) candidate. Parameter names correspond to the pseudocode of
   * Smola &amp; Schoelkopf (see technical information). On success the new
   * alpha values are written back into m_alpha/m_alphaStar and the support
   * vector set is updated.
   *
   * @param i1         index of the first candidate
   * @param alpha1     current alpha value of candidate 1
   * @param alpha1Star current alpha* value of candidate 1
   * @param C1         capacity (box constraint) for candidate 1
   * @param i2         index of the second candidate
   * @param alpha2     current alpha value of candidate 2
   * @param alpha2Star current alpha* value of candidate 2
   * @param C2         capacity (box constraint) for candidate 2
   * @param gamma      alpha1 - alpha1* + alpha2 - alpha2* (conserved along the line)
   * @param eta        second derivative along the line: -2*k12 + k11 + k22
   * @param deltaPhi   difference of the errors of the two candidates
   * @return true if any alpha changed by more than m_eps, false otherwise
   */
  protected boolean findOptimalPointOnLine(int i1, double alpha1, double alpha1Star, double C1, int i2, double alpha2, double alpha2Star, double C2, double gamma, double eta, double deltaPhi) {
    if (eta <= 0) {
      // this may happen due to numeric instability;
      // due to Mercer's condition this should not happen, hence we give up
      return false;
    }

    boolean case1 = false;
    boolean case2 = false;
    boolean case3 = false;
    boolean case4 = false;
    boolean finished = false;

    // while !finished
    // % this loop is passed at most three times
    // % case variables needed to avoid attempting small changes twice
    while (!finished) {
      // case 1 (pseudocode):
      //   if (case1 == 0) &&
      //      (alpha1 > 0 || (alpha1* == 0 && deltaPhi > 0)) &&
      //      (alpha2 > 0 || (alpha2* == 0 && deltaPhi < 0))
      //     compute L, H (wrt. alpha1, alpha2)
      //     if L < H: step alpha2 by -deltaPhi/eta, clip to [L,H], move alpha1 oppositely
      //     else: finished = 1
      //   case1 = 1
      if ((case1 == false) &&
          (alpha1 > 0 || (alpha1Star == 0 && deltaPhi > 0)) &&
          (alpha2 > 0 || (alpha2Star == 0 && deltaPhi < 0))) {
        // compute L, H (wrt. alpha1, alpha2)
        double L = Math.max(0, gamma - C1);
        double H = Math.min(C2, gamma);
        if (L < H) {
          double a2 = alpha2 - deltaPhi / eta;
          a2 = Math.min(a2, H);
          a2 = Math.max(L, a2);
          // To prevent precision problems: snap to the box boundaries
          if (a2 > C2 - m_Del * C2) {
            a2 = C2;
          } else if (a2 <= m_Del * C2) {
            a2 = 0;
          }
          double a1 = alpha1 - (a2 - alpha2);
          if (a1 > C1 - m_Del * C1) {
            a1 = C1;
          } else if (a1 <= m_Del * C1) {
            a1 = 0;
          }
          // update alpha1, alpha2 if change is larger than some eps
          if (Math.abs(alpha1 - a1) > m_eps) {
            deltaPhi += eta * (a2 - alpha2);
            alpha1 = a1;
            alpha2 = a2;
          }
        } else {
          finished = true;
        }
        case1 = true;
      }
      // case 2 (pseudocode):
      //   elseif (case2 == 0) &&
      //          (alpha1 > 0 || (alpha1* == 0 && deltaPhi > 2 epsilon)) &&
      //          (alpha2* > 0 || (alpha2 == 0 && deltaPhi > 2 epsilon))
      //     compute L, H (wrt. alpha1, alpha2*)
      //     if L < H: step alpha2* by (deltaPhi - 2 epsilon)/eta, clip, move alpha1 alike
      //     else: finished = 1
      //   case2 = 1
      else if ((case2 == false) &&
          (alpha1 > 0 || (alpha1Star == 0 && deltaPhi > 2 * m_epsilon)) &&
          (alpha2Star > 0 || (alpha2 == 0 && deltaPhi > 2 * m_epsilon))) {
        // compute L, H (wrt. alpha1, alpha2*)
        double L = Math.max(0, -gamma);
        double H = Math.min(C2, -gamma + C1);
        if (L < H) {
          double a2 = alpha2Star + (deltaPhi - 2 * m_epsilon) / eta;
          a2 = Math.min(a2, H);
          a2 = Math.max(L, a2);
          // To prevent precision problems
          if (a2 > C2 - m_Del * C2) {
            a2 = C2;
          } else if (a2 <= m_Del * C2) {
            a2 = 0;
          }
          double a1 = alpha1 + (a2 - alpha2Star);
          if (a1 > C1 - m_Del * C1) {
            a1 = C1;
          } else if (a1 <= m_Del * C1) {
            a1 = 0;
          }
          // update alpha1, alpha2* if change is larger than some eps
          if (Math.abs(alpha1 - a1) > m_eps) {
            deltaPhi += eta * (-a2 + alpha2Star);
            alpha1 = a1;
            alpha2Star = a2;
          }
        } else {
          finished = true;
        }
        case2 = true;
      }
      // case 3 (pseudocode):
      //   elseif (case3 == 0) &&
      //          (alpha1* > 0 || (alpha1 == 0 && deltaPhi < -2 epsilon)) &&
      //          (alpha2 > 0 || (alpha2* == 0 && deltaPhi < -2 epsilon))
      //     compute L, H (wrt. alpha1*, alpha2)
      //     if L < H: step alpha2 by -(deltaPhi + 2 epsilon)/eta, clip, move alpha1* alike
      //     else: finished = 1
      //   case3 = 1
      else if ((case3 == false) &&
          (alpha1Star > 0 || (alpha1 == 0 && deltaPhi < -2 * m_epsilon)) &&
          (alpha2 > 0 || (alpha2Star == 0 && deltaPhi < -2 * m_epsilon))) {
        // compute L, H (wrt. alpha1*, alpha2)
        double L = Math.max(0, gamma);
        double H = Math.min(C2, C1 + gamma);
        if (L < H) {
          // note Smola's pseudocode has a minus, where there should be a plus
          // in the following line; Keerthi's is correct
          double a2 = alpha2 - (deltaPhi + 2 * m_epsilon) / eta;
          a2 = Math.min(a2, H);
          a2 = Math.max(L, a2);
          // To prevent precision problems
          if (a2 > C2 - m_Del * C2) {
            a2 = C2;
          } else if (a2 <= m_Del * C2) {
            a2 = 0;
          }
          double a1 = alpha1Star + (a2 - alpha2);
          if (a1 > C1 - m_Del * C1) {
            a1 = C1;
          } else if (a1 <= m_Del * C1) {
            a1 = 0;
          }
          // update alpha1*, alpha2 if change is larger than some eps
          if (Math.abs(alpha1Star - a1) > m_eps) {
            deltaPhi += eta * (a2 - alpha2);
            alpha1Star = a1;
            alpha2 = a2;
          }
        } else {
          finished = true;
        }
        case3 = true;
      }
      // case 4 (pseudocode):
      //   elseif (case4 == 0) &&
      //          (alpha1* > 0 || (alpha1 == 0 && deltaPhi < 0)) &&
      //          (alpha2* > 0 || (alpha2 == 0 && deltaPhi > 0))
      //     compute L, H (wrt. alpha1*, alpha2*)
      //     if L < H: step alpha2* by deltaPhi/eta, clip, move alpha1* oppositely
      //     else: finished = 1
      //   case4 = 1
      //   else finished = 1
      else if ((case4 == false) &&
          (alpha1Star > 0 || (alpha1 == 0 && deltaPhi < 0)) &&
          (alpha2Star > 0 || (alpha2 == 0 && deltaPhi > 0))) {
        // compute L, H (wrt. alpha1*, alpha2*)
        double L = Math.max(0, -gamma - C1);
        double H = Math.min(C2, -gamma);
        if (L < H) {
          double a2 = alpha2Star + deltaPhi / eta;
          a2 = Math.min(a2, H);
          a2 = Math.max(L, a2);
          // To prevent precision problems
          if (a2 > C2 - m_Del * C2) {
            a2 = C2;
          } else if (a2 <= m_Del * C2) {
            a2 = 0;
          }
          double a1 = alpha1Star - (a2 - alpha2Star);
          if (a1 > C1 - m_Del * C1) {
            a1 = C1;
          } else if (a1 <= m_Del * C1) {
            a1 = 0;
          }
          // update alpha1*, alpha2* if change is larger than some eps
          if (Math.abs(alpha1Star - a1) > m_eps) {
            deltaPhi += eta * (-a2 + alpha2Star);
            alpha1Star = a1;
            alpha2Star = a2;
          }
        } else {
          finished = true;
        }
        case4 = true;
      } else {
        finished = true;
      }

      // update deltaPhi
      // using 4.36 from Smola's thesis:
      // deltaPhi = deltaPhi - eta * ((alpha1New-alpha1StarNew)-(alpha1-alpha1Star));
      // the update is done inside the loop, saving us to remember old values of alpha1(*)
      //deltaPhi += eta * ((alpha2 - alpha2Star) - dAlpha2Old);
      //dAlpha2Old = (alpha2 - alpha2Star);

      // endwhile
    }

    // commit the step only if at least one multiplier moved by more than m_eps
    if (Math.abs(alpha1 - m_alpha[i1]) > m_eps || Math.abs(alpha1Star - m_alphaStar[i1]) > m_eps ||
        Math.abs(alpha2 - m_alpha[i2]) > m_eps || Math.abs(alpha2Star - m_alphaStar[i2]) > m_eps) {

      // snap final values to the box boundaries to avoid precision drift
      if (alpha1 > C1 - m_Del * C1) {
        alpha1 = C1;
      } else if (alpha1 <= m_Del * C1) {
        alpha1 = 0;
      }
      if (alpha1Star > C1 - m_Del * C1) {
        alpha1Star = C1;
      } else if (alpha1Star <= m_Del * C1) {
        alpha1Star = 0;
      }
      if (alpha2 > C2 - m_Del * C2) {
        alpha2 = C2;
      } else if (alpha2 <= m_Del * C2) {
        alpha2 = 0;
      }
      if (alpha2Star > C2 - m_Del * C2) {
        alpha2Star = C2;
      } else if (alpha2Star <= m_Del * C2) {
        alpha2Star = 0;
      }

      // store new alpha's
      m_alpha[i1] = alpha1;
      m_alphaStar[i1] = alpha1Star;
      m_alpha[i2] = alpha2;
      m_alphaStar[i2] = alpha2Star;

      // update supportvector set
      if (alpha1 != 0 || alpha1Star != 0) {
        if (!m_supportVectors.contains(i1)) {
          m_supportVectors.insert(i1);
        }
      } else {
        m_supportVectors.delete(i1);
      }
      if (alpha2 != 0 || alpha2Star != 0) {
        if (!m_supportVectors.contains(i2)) {
          m_supportVectors.insert(i2);
        }
      } else {
        m_supportVectors.delete(i2);
      }
      return true;
    }

    return false;
  }

  /**
   * takeStep method from pseudocode: jointly optimizes the Lagrange
   * multipliers of the two chosen candidates, then updates the error cache
   * and the threshold m_b.
   * Parameters correspond to pseudocode (see technical information).
   *
   * @param i1         index of the first candidate
   * @param i2         index of the second candidate
   * @param alpha2     alpha value of the second candidate
   * @param alpha2Star alpha* value of the second candidate
   * @param phi2       cached error of the second candidate
   * @return 1 if a step was taken (alphas changed), 0 otherwise
   * @throws Exception if kernel evaluation fails
   */
  protected int takeStep(int i1, int i2, double alpha2, double alpha2Star, double phi2) throws Exception {
    // if (i1 == i2) return 0
    if (i1 == i2) {
      return 0;
    }

    // per-instance capacities, scaled by the instance weights
    double C1 = m_C * m_data.instance(i1).weight();
    double C2 = m_C * m_data.instance(i2).weight();

    // alpha1, alpha1* = Lagrange multipliers for i1
    // y1 = target[i1]
    // phi1 = SVM output on point[i1] - y1 (in error cache)
    double alpha1 = m_alpha[i1];
    double alpha1Star = m_alphaStar[i1];
    double y1 = m_target[i1]; // NOTE(review): y1 is never read below; kept to mirror the pseudocode
    double phi1 = m_error[i1];

    // k11 = kernel(point[i1],point[i1])
    // k12 = kernel(point[i1],point[i2])
    // k22 = kernel(point[i2],point[i2])
    // eta = -2*k12 + k11 + k22
    // gamma = alpha1 - alpha1* + alpha2 - alpha2*
    double k11 = m_kernel.eval(i1, i1, m_data.instance(i1));
    double k12 = m_kernel.eval(i1, i2, m_data.instance(i1));
    double k22 = m_kernel.eval(i2, i2, m_data.instance(i2));
    double eta = -2 * k12 + k11 + k22; // note, Smola's pseudocode has signs swapped, Keerthi's doesn't
    if (eta < 0) {
      // this may happen due to numeric instability;
      // due to Mercer's condition this should not happen, hence we give up
      return 0;
    }
    double gamma = alpha1 - alpha1Star + alpha2 - alpha2Star;

    // % we assume eta > 0; otherwise one has to repeat the complete
    // % reasoning similarly (compute objective function for L and H
    // % and decide which one is largest)
    // remember the old multipliers so the error-cache delta can be computed
    double alpha1old = alpha1;
    double alpha1Starold = alpha1Star;
    double alpha2old = alpha2;
    double alpha2Starold = alpha2Star;
    double deltaPhi = phi2 - phi1;

    if (findOptimalPointOnLine(i1, alpha1, alpha1Star, C1, i2, alpha2, alpha2Star, C2, gamma, eta, deltaPhi)) {
      // findOptimalPointOnLine stored the new multipliers; re-read them
      alpha1 = m_alpha[i1];
      alpha1Star = m_alphaStar[i1];
      alpha2 = m_alpha[i2];
      alpha2Star = m_alphaStar[i2];

      // Update error cache using new Lagrange multipliers
      double dAlpha1 = alpha1 - alpha1old - (alpha1Star - alpha1Starold);
      double dAlpha2 = alpha2 - alpha2old - (alpha2Star - alpha2Starold);
      for (int j = 0; j < m_nInstances; j++) {
        if ((j != i1) && (j != i2)/* && m_error[j] != MAXERR*/) {
          m_error[j] += dAlpha1 * m_kernel.eval(i1, j, m_data.instance(i1)) + dAlpha2 * m_kernel.eval(i2, j, m_data.instance(i2));
        }
      }
      m_error[i1] += dAlpha1 * k11 + dAlpha2 * k12;
      m_error[i2] += dAlpha1 * k12 + dAlpha2 * k22;

      // Update threshold to reflect change in Lagrange multipliers:
      // derive b from any multiplier strictly inside its box (KKT: error = +/- epsilon there);
      // average if both candidates provide an estimate
      double b1 = Double.MAX_VALUE;
      double b2 = Double.MAX_VALUE;
      if ((0 < alpha1 && alpha1 < C1) || (0 < alpha1Star && alpha1Star < C1) || (0 < alpha2 && alpha2 < C2) || (0 < alpha2Star && alpha2Star < C2)) {
        if (0 < alpha1 && alpha1 < C1) {
          b1 = m_error[i1] - m_epsilon;
        } else if (0 < alpha1Star && alpha1Star < C1) {
          b1 = m_error[i1] + m_epsilon;
        }
        if (0 < alpha2 && alpha2 < C2) {
          b2 = m_error[i2] - m_epsilon;
        } else if (0 < alpha2Star && alpha2Star < C2) {
          b2 = m_error[i2] + m_epsilon;
        }
        if (b1 < Double.MAX_VALUE) {
          m_b = b1;
          if (b2 < Double.MAX_VALUE) {
            m_b = (b1 + b2) / 2.0;
          }
        } else if (b2 < Double.MAX_VALUE) {
          m_b = b2;
        }
      } else if (m_b == 0) {
        // both alpha's are on the boundary, and m_b is not initialized
        m_b = (m_error[i1] + m_error[i2]) / 2.0;
      }

      // a step was taken
      return 1;
    } else {
      return 0;
    }
    // endprocedure
  }

  /**
   * examineExample method from pseudocode: checks whether candidate i2
   * violates the KKT conditions and, if so, tries joint optimization with a
   * partner chosen by the second-choice heuristic, then by scanning the
   * non-bound multipliers, then by scanning all instances.
   * Parameters correspond to pseudocode (see technical information).
   *
   * @param i2 index of the candidate to examine
   * @return 1 if a successful optimization step was made, 0 otherwise
   * @throws Exception if takeStep fails
   */
  protected int examineExample(int i2) throws Exception {
    // procedure examineExample(i2)
    // y2 = target[i2]
    double y2 = m_target[i2]; // NOTE(review): y2 is never read below; kept to mirror the pseudocode

    // alpha2, alpha2* = Lagrange multipliers for i2
    double alpha2 = m_alpha[i2];
    double alpha2Star = m_alphaStar[i2];

    // C2, C2* = Constraints for i2
    double C2 = m_C;
    double C2Star = m_C;

    // phi2 = SVM output on point[i2] - y2 (in error cache)
    double phi2 = m_error[i2];
    // phi2b contains the error, taking the offset into account
    double phi2b = phi2 - m_b;

    // pseudocode KKT-violation check:
    //   if ((phi2 > epsilon && alpha2* < C2*) ||
    //       (phi2 < epsilon && alpha2* > 0 ) ||
    //       (-phi2 > epsilon && alpha2 < C2 ) ||
    //       (-phi2 > epsilon && alpha2 > 0 ))
    // NOTE(review): the 2nd and 4th clauses look suspicious (the 2nd fires for
    // almost any phi2b; the 4th duplicates the 3rd's test) — verify against
    // Smola & Schoelkopf's pseudocode before changing, as the net effect is
    // only extra (harmless) takeStep attempts.
    if ((phi2b > m_epsilon && alpha2Star < C2Star) || (phi2b < m_epsilon && alpha2Star > 0) || (-phi2b > m_epsilon && alpha2 < C2) || (-phi2b > m_epsilon && alpha2 > 0)) {
      // if (number of non-zero & non-C alpha > 1)
      //   i1 = result of second choice heuristic
      //   if takeStep(i1,i2) return 1
      // endif
      int i1 = secondChoiceHeuristic(i2);
      if (i1 >= 0 && (takeStep(i1, i2, alpha2, alpha2Star, phi2) > 0)) {
        return 1;
      }

      // loop over all non-zero and non-C alpha, random start
      //   i1 = identity of current alpha
      //   if takeStep(i1,i2) return 1
      // endloop
      // NOTE(review): the "random start" of the pseudocode is not implemented;
      // the scan always begins at index 0
      for (i1 = 0; i1 < m_target.length; i1++) {
        if ((m_alpha[i1] > 0 && m_alpha[i1] < m_C) || (m_alphaStar[i1] > 0 && m_alphaStar[i1] < m_C)) {
          if (takeStep(i1, i2, alpha2, alpha2Star, phi2) > 0) {
            return 1;
          }
        }
      }

      // loop over all possible i1, with random start
      //   i1 = loop variable
      //   if takeStep(i1,i2) return 1
      // endloop
      for (i1 = 0; i1 < m_target.length; i1++) {
        if (takeStep(i1, i2, alpha2, alpha2Star, phi2) > 0) {
          return 1;
        }
      }
      // endif
    }

    // return 0
    return 0;
    // endprocedure
  }

  /**
   * applies heuristic for finding candidate that is expected to lead to
   * good gain when applying takeStep together with second candidate.
   *
   * @param i2 index of second candidate
   * @return index of a randomly drawn non-bound candidate, or -1 if none was
   *         found within 59 draws
   */
  protected int secondChoiceHeuristic(int i2) {
    // randomly select an index i1 (not equal to i2) with non-zero and non-C alpha, if any
    // NOTE(review): because && binds tighter than ||, the (i1 != i2) guard
    // only applies to the m_alpha clause here — an i1 == i2 with non-bound
    // m_alphaStar can be returned; takeStep rejects i1 == i2, so the effect
    // is only a wasted attempt. Confirm against upstream before changing.
    for (int i = 0; i < 59; i++) {
      int i1 = m_random.nextInt(m_nInstances);
      if ((i1 != i2) && (m_alpha[i1] > 0 && m_alpha[i1] < m_C) || (m_alphaStar[i1] > 0 && m_alphaStar[i1] < m_C)) {
        return i1;
      }
    }
    return -1;
  }

  /**
   * finds alpha and alpha* parameters that optimize the SVM target function
   *
   * @throws Exception if examineExample fails
   */
  public void optimize() throws Exception {
    // main routine:
    // initialize threshold to zero
    // numChanged = 0
    // examineAll = 1
    // SigFig = -100
    // LoopCounter = 0
    int numChanged = 0;
    int examineAll = 1;
    int sigFig = -100;
    int loopCounter = 0;

    // while ((numChanged > 0 | examineAll) | (SigFig < 3))
    // NOTE(review): sigFig is never updated, so (sigFig < 3) is always true
    // and the loop only terminates via the loopCounter == 2500 break below;
    // also note the non-short-circuit '|' operator, kept as in upstream
    while ((numChanged > 0 || (examineAll > 0)) | (sigFig < 3)) {
      // LoopCounter++
      // numChanged = 0;
      loopCounter++;
      numChanged = 0;

      // if (examineAll)
      //   loop I over all training examples: numChanged += examineExample(I)
      // else
      //   loop I over examples where alpha is not 0 & not C: numChanged += examineExample(I)
      // endif
      int numSamples = 0;
      if (examineAll > 0) {
        for (int i = 0; i < m_nInstances; i++) {
          numChanged += examineExample(i);
        }
      } else {
        for (int i = 0; i < m_target.length; i++) {
          if ((m_alpha[i] > 0 && m_alpha[i] < m_C * m_data.instance(i).weight()) ||
              (m_alphaStar[i] > 0 && m_alphaStar[i] < m_C * m_data.instance(i).weight())) {
            numSamples++;
            numChanged += examineExample(i);
          }
        }
      }

      // if (mod(LoopCounter, 2) == 0)
      //   MinimumNumChanged = max(1, 0.1*NumSamples)
      // else
      //   MinimumNumChanged = 1
      // endif
      int minimumNumChanged = 1;
      if (loopCounter % 2 == 0) {
        minimumNumChanged = (int) Math.max(1, 0.1 * numSamples);
      }

      // if (examineAll == 1)
      //   examineAll = 0
      // elseif (numChanged < MinimumNumChanged)
      //   examineAll = 1
      // endif
      if (examineAll == 1) {
        examineAll = 0;
      } else if (numChanged < minimumNumChanged) {
        examineAll = 1;
      }

      // endwhile — hard cap on the number of outer iterations
      if (loopCounter == 2500) {
        break;
      }
    }
    // endmain
  }

  /**
   * learn SVM parameters from data using Smola's SMO algorithm.
   * Subclasses should implement something more interesting.
   *
   * @param instances the data to learn from
   * @throws Exception if something goes wrong
   */
  public void buildClassifier(Instances instances) throws Exception {
    // initialize variables
    init(instances);
    // solve optimization problem
    optimize();
    // clean up
    wrapUp();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
25,823
28.278912
148
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/supportVector/RegSMOImproved.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RegSMOImproved.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions.supportVector; import java.util.Enumeration; import java.util.Vector; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** <!-- globalinfo-start --> * Learn SVM for regression using SMO with Shevade, Keerthi, et al. adaption of the stopping criterion.<br/> * <br/> * For more information see:<br/> * <br/> * S.K. Shevade, S.S. Keerthi, C. Bhattacharyya, K.R.K. Murthy: Improvements to the SMO Algorithm for SVM Regression. In: IEEE Transactions on Neural Networks, 1999.<br/> * <br/> * S.K. Shevade, S.S. Keerthi, C. Bhattacharyya, K.R.K. Murthy (1999). Improvements to the SMO Algorithm for SVM Regression. Control Division, Dept. of Mechanical Engineering. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Shevade1999, * author = {S.K. Shevade and S.S. Keerthi and C. Bhattacharyya and K.R.K. 
Murthy}, * booktitle = {IEEE Transactions on Neural Networks}, * title = {Improvements to the SMO Algorithm for SVM Regression}, * year = {1999}, * PS = {http://guppy.mpe.nus.edu.sg/\~mpessk/svm/ieee_smo_reg.ps.gz} * } * * &#64;techreport{Shevade1999, * address = {Control Division, Dept. of Mechanical Engineering}, * author = {S.K. Shevade and S.S. Keerthi and C. Bhattacharyya and K.R.K. Murthy}, * institution = {National University of Singapore}, * number = {CD-99-16}, * title = {Improvements to the SMO Algorithm for SVM Regression}, * year = {1999}, * PS = {http://guppy.mpe.nus.edu.sg/\~mpessk/svm/smoreg_mod.ps.gz} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -T &lt;double&gt; * The tolerance parameter for checking the stopping criterion. * (default 0.001)</pre> * * <pre> -V * Use variant 1 of the algorithm when true, otherwise use variant 2. * (default true)</pre> * * <pre> -P &lt;double&gt; * The epsilon for round-off error. * (default 1.0e-12)</pre> * * <pre> -L &lt;double&gt; * The epsilon parameter in epsilon-insensitive loss function. * (default 1.0e-3)</pre> * * <pre> -W &lt;double&gt; * The random number seed. * (default 1)</pre> * <!-- options-end --> * * @author Remco Bouckaert (remco@cs.waikato.ac.nz,rrb@xm.co.nz) * @version $Revision: 8034 $ */ public class RegSMOImproved extends RegSMO implements TechnicalInformationHandler { /** for serialization */ private static final long serialVersionUID = 471692841446029784L; public final static int I0 = 3; public final static int I0a = 1; public final static int I0b = 2; public final static int I1 = 4; public final static int I2 = 8; public final static int I3 = 16; /** The different sets used by the algorithm. 
*/ protected SMOset m_I0; /** Index set {i: 0 < m_alpha[i] < C || 0 < m_alphaStar[i] < C}} */ protected int [] m_iSet; /** b.up and b.low boundaries used to determine stopping criterion */ protected double m_bUp, m_bLow; /** index of the instance that gave us b.up and b.low */ protected int m_iUp, m_iLow; /** tolerance parameter used for checking stopping criterion b.up < b.low + 2 tol */ double m_fTolerance = 0.001; /** set true to use variant 1 of the paper, otherwise use variant 2 */ boolean m_bUseVariant1 = true; /** * Returns a string describing the object * * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Learn SVM for regression using SMO with Shevade, Keerthi, et al. " + "adaption of the stopping criterion.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "S.K. Shevade and S.S. Keerthi and C. Bhattacharyya and K.R.K. Murthy"); result.setValue(Field.TITLE, "Improvements to the SMO Algorithm for SVM Regression"); result.setValue(Field.BOOKTITLE, "IEEE Transactions on Neural Networks"); result.setValue(Field.YEAR, "1999"); result.setValue(Field.PS, "http://guppy.mpe.nus.edu.sg/~mpessk/svm/ieee_smo_reg.ps.gz"); additional = result.add(Type.TECHREPORT); additional.setValue(Field.AUTHOR, "S.K. Shevade and S.S. Keerthi and C. Bhattacharyya and K.R.K. 
Murthy"); additional.setValue(Field.TITLE, "Improvements to the SMO Algorithm for SVM Regression"); additional.setValue(Field.INSTITUTION, "National University of Singapore"); additional.setValue(Field.ADDRESS, "Control Division, Dept. of Mechanical Engineering"); additional.setValue(Field.NUMBER, "CD-99-16"); additional.setValue(Field.YEAR, "1999"); additional.setValue(Field.PS, "http://guppy.mpe.nus.edu.sg/~mpessk/svm/smoreg_mod.ps.gz"); return result; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tThe tolerance parameter for checking the stopping criterion.\n" + "\t(default 0.001)", "T", 1, "-T <double>")); result.addElement(new Option( "\tUse variant 1 of the algorithm when true, otherwise use variant 2.\n" + "\t(default true)", "V", 0, "-V")); Enumeration enm = super.listOptions(); while (enm.hasMoreElements()) { result.addElement(enm.nextElement()); } return result.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -T &lt;double&gt; * The tolerance parameter for checking the stopping criterion. * (default 0.001)</pre> * * <pre> -V * Use variant 1 of the algorithm when true, otherwise use variant 2. * (default true)</pre> * * <pre> -P &lt;double&gt; * The epsilon for round-off error. * (default 1.0e-12)</pre> * * <pre> -L &lt;double&gt; * The epsilon parameter in epsilon-insensitive loss function. * (default 1.0e-3)</pre> * * <pre> -W &lt;double&gt; * The random number seed. 
* (default 1)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('T', options); if (tmpStr.length() != 0) { setTolerance(Double.parseDouble(tmpStr)); } else { setTolerance(0.001); } setUseVariant1(Utils.getFlag('V', options)); super.setOptions(options); } /** * Gets the current settings of the object. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { int i; Vector result; String[] options; result = new Vector(); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); result.add("-T"); result.add("" + getTolerance()); if (m_bUseVariant1) result.add("-V"); return (String[]) result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return a description suitable for * displaying in the explorer/experimenter gui */ public String toleranceTipText() { return "tolerance parameter used for checking stopping criterion b.up < b.low + 2 tol"; } /** * returns the current tolerance * * @return the tolerance */ public double getTolerance() { return m_fTolerance; } /** * sets the tolerance * * @param d the new tolerance */ public void setTolerance(double d) { m_fTolerance = d; } /** * Returns the tip text for this property * * @return a description suitable for * displaying in the explorer/experimenter gui */ public String useVariant1TipText() { return "set true to use variant 1 of the paper, otherwise use variant 2."; } /** * Whether variant 1 is used * * @return true if variant 1 is used */ public boolean isUseVariant1() { return m_bUseVariant1; } /** * Sets whether to use variant 1 * * @param b if true then variant 1 is used */ public void setUseVariant1(boolean b) { m_bUseVariant1 = b; } /** * takeStep method from Shevade et al.s paper. 
* parameters correspond to pseudocode from paper. * * @param i1 * @param i2 * @param alpha2 * @param alpha2Star * @param phi2 * @return * @throws Exception */ protected int takeStep(int i1, int i2, double alpha2, double alpha2Star, double phi2) throws Exception { //procedure takeStep(i1, i2) // // if (i1 == i2) // return 0 if (i1 == i2) { return 0; } double C1 = m_C * m_data.instance(i1).weight(); double C2 = m_C * m_data.instance(i2).weight(); // alpha1, alpha1' = Lagrange multipliers for i1 double alpha1 = m_alpha[i1]; double alpha1Star = m_alphaStar[i1]; // double y1 = m_target[i1]; // TODO: verify we do not need to recompute m_error[i1] here // TODO: since m_error is only updated for indices in m_I0 double phi1 = m_error[i1]; // if ((m_iSet[i1] & I0)==0) { // phi1 = -SVMOutput(i1) - m_b + m_target[i1]; // m_error[i1] = phi1; // } // k11 = kernel(point[i1], point[i1]) // k12 = kernel(point[i1], point[i2]) // k22 = kernel(point[i2], point[i2]) // eta = -2*k12+k11+k22 // gamma = alpha1-alpha1'+alpha2-alpha2' // double k11 = m_kernel.eval(i1, i1, m_data.instance(i1)); double k12 = m_kernel.eval(i1, i2, m_data.instance(i1)); double k22 = m_kernel.eval(i2, i2, m_data.instance(i2)); double eta = -2 * k12 + k11 + k22; double gamma = alpha1 - alpha1Star + alpha2 - alpha2Star; // if (eta < 0) { // this may happen due to numeric instability // due to Mercer's condition, this should not happen, hence we give up // return 0; // } // % We assume that eta > 0. Otherwise one has to repeat the complete // % reasoning similarly (i.e. 
compute objective functions at L and H // % and decide which one is largest // // case1 = case2 = case3 = case4 = finished = 0 // alpha1old = alpha1, // alpha1old' = alpha1' // alpha2old = alpha2, // alpha2old' = alpha2' // deltaphi = F1 - F2 // // while !finished // % This loop is passed at most three times // % Case variables needed to avoid attempting small changes twice // if (case1 == 0) && // (alpha1 > 0 || (alpha1' == 0 && deltaphi > 0)) && // (alpha2 > 0 || (alpha2' == 0 && deltaphi < 0)) // compute L, H (w.r.t. alpha1, alpha2) // if (L < H) // a2 = alpha2 - (deltaphi / eta ) a2 = min(a2, H) a2 = max(L, a2) a1 = alpha1 - (a2 - alpha2) // update alpha1, alpha2 if change is larger than some eps // else // finished = 1 // endif // case1 = 1 // elseif (case2 == 0) && // (alpha1 > 0 || (alpha1' == 0 && deltaphi > 2*epsilon)) && // (alpha2' > 0 || (alpha2 == 0 && deltaphi > 2*epsilon)) // // compute L, H (w.r.t. alpha1, alpha2') // if (L < H) // a2 = alpha2' + ((deltaphi - 2*epsilon)/eta)) a2 = min(a2, H) a2 = max(L, a2) a1 = alpha1 + (a2-alpha2') // update alpha1, alpha2' if change is larger than some eps // else // finished = 1 // endif // case2 = 1 // elseif (case3 == 0) && // (alpha1' > 0 || (alpha1 == 0 && deltaphi < -2*epsilon)) && // (alpha2 > 0 || (alpha2' == 0 && deltaphi < -2*epsilon)) // compute L, H (w.r.t. alpha1', alpha2) // if (L < H) // a2 = alpha2 - ((deltaphi + 2*epsilon)/eta) a2 = min(a2, H) a2 = max(L, a2) a1 = alpha1' + (a2 - alpha2) // update alpha1', alpha2 if change is larger than some eps // else // finished = 1 // endif // case3 = 1 // elseif (case4 == 0) && // (alpha1' > 0) || (alpha1 == 0 && deltaphi < 0)) && // (alpha2' > 0) || (alpha2 == 0 && deltaphi > 0)) // compute L, H (w.r.t. 
alpha1', alpha2') // if (L < H) // a2 = alpha2' + deltaphi/eta a2 = min(a2, H) a2 = max(L, a2) a1 = alpha1' - (a2 - alpha2') // update alpha1, alpha2' if change is larger than some eps // else // finished = 1 // endif // case4 = 1 // else // finished = 1 // endif // update deltaphi // endwhile double alpha1old = alpha1; double alpha1Starold = alpha1Star; double alpha2old = alpha2; double alpha2Starold = alpha2Star; double deltaPhi = phi1 - phi2; if (findOptimalPointOnLine(i1, alpha1, alpha1Star, C1, i2, alpha2, alpha2Star, C2, gamma, eta, deltaPhi)) { alpha1 = m_alpha[i1]; alpha1Star = m_alphaStar[i1]; alpha2 = m_alpha[i2]; alpha2Star = m_alphaStar[i2]; // if changes in alpha('), alpha2(') are larger than some eps // Update f-cache[i] for i in I.0 using new Lagrange multipliers // Store the changes in alpha, alpha' array // Update I.0, I.1, I.2, I.3 // Compute (i.low, b.low) and (i.up, b.up) by applying the conditions mentioned above, using only i1, i2 and indices in I.0 // return 1 // else // return 0 //endif endprocedure // Update error cache using new Lagrange multipliers double dAlpha1 = alpha1 - alpha1old - (alpha1Star - alpha1Starold); double dAlpha2 = alpha2 - alpha2old - (alpha2Star - alpha2Starold); for (int j = m_I0.getNext(-1); j != -1; j = m_I0.getNext(j)) { if ((j != i1) && (j != i2)) { m_error[j] -= dAlpha1 * m_kernel.eval(i1, j, m_data.instance(i1)) + dAlpha2 * m_kernel.eval(i2, j, m_data.instance(i2)); } } m_error[i1] -= dAlpha1 * k11 + dAlpha2 * k12; m_error[i2] -= dAlpha1 * k12 + dAlpha2 * k22; updateIndexSetFor(i1, C1); updateIndexSetFor(i2, C2); // Compute (i.low, b.low) and (i.up, b.up) by applying the conditions mentioned above, using only i1, i2 and indices in I.0 m_bUp = Double.MAX_VALUE; m_bLow = -Double.MAX_VALUE; for (int j = m_I0.getNext(-1); j != -1; j = m_I0.getNext(j)) { updateBoundaries(j, m_error[j]); } if (!m_I0.contains(i1)) { updateBoundaries(i1, m_error[i1]); } if (!m_I0.contains(i2)) { updateBoundaries(i2, m_error[i2]); } 
return 1;
    } else {
      return 0;
    }
  }

  /**
   * Updates the index-set membership (I0a, I0b, I1, I2, I3) of vector i
   * after its Lagrange multipliers have changed, and keeps the I0 list
   * (vectors whose error-cache entry is maintained) in sync.
   *
   * @param i index of vector
   * @param C capacity for vector i
   * @throws Exception if something goes wrong
   */
  protected void updateIndexSetFor(int i, double C) throws Exception {
    if (m_alpha[i] == 0 && m_alphaStar[i] == 0) {
      // neither multiplier active: vector belongs to I1
      m_iSet[i] = I1;
      m_I0.delete(i);
    } else if (m_alpha[i] > 0) {
      if (m_alpha[i] < C) {
        if ((m_iSet[i] & I0) == 0) {
          // first entry into I0; caller is responsible for the error cache
          m_I0.insert(i);
        }
        m_iSet[i] = I0a;
      } else { // m_alpha[i] == C
        m_iSet[i] = I3;
        m_I0.delete(i);
      }
    } else { // m_alphaStar[i] > 0
      if (m_alphaStar[i] < C) {
        if ((m_iSet[i] & I0) == 0) {
          m_I0.insert(i);
        }
        m_iSet[i] = I0b;
      } else { // m_alphaStar[i] == C
        m_iSet[i] = I2;
        m_I0.delete(i);
      }
    }
  }

  /**
   * Updates boundaries bLow/bUp and the corresponding indexes iLow/iUp
   * using the freshly computed error F2 of vector i2. Which epsilon-shifted
   * value of F2 competes for each boundary depends on i2's index set.
   *
   * @param i2 index of vector
   * @param F2 error of vector i2
   */
  protected void updateBoundaries(int i2, double F2) {
    int iSet = m_iSet[i2];
    double FLow = m_bLow;
    if ((iSet & (I2 | I0b)) > 0) {
      FLow = F2 + m_epsilon;
    } else if ((iSet & (I1 | I0a)) > 0) {
      FLow = F2 - m_epsilon;
    }
    if (m_bLow < FLow) {
      m_bLow = FLow;
      m_iLow = i2;
    }
    double FUp = m_bUp;
    if ((iSet & (I3 | I0a)) > 0) {
      FUp = F2 - m_epsilon;
    } else if ((iSet & (I1 | I0b)) > 0) {
      FUp = F2 + m_epsilon;
    }
    if (m_bUp > FUp) {
      m_bUp = FUp;
      m_iUp = i2;
    }
  }

  /**
   * Examines candidate i2 for a violation of the KKT conditions and, if one
   * is found, performs a joint optimization step with a partner i1 chosen
   * from the current boundary indexes. Corresponds to procedure
   * examineExample(i2) in Shevade et al.'s pseudocode.
   *
   * Steps: (1) refresh i2's error F2 if it is not cached in I0, updating
   * (bLow, iLow) / (bUp, iUp) as a side effect; (2) check optimality of i2
   * against bLow/bUp with tolerance 2*tol, picking the more violating of
   * iLow/iUp as partner; (3) if non-optimal, call takeStep(i1, i2).
   *
   * @param i2 index of candidate
   * @return 1 if a step was taken (multipliers changed), 0 otherwise
   * @throws Exception if something goes wrong
   */
  protected int examineExample(int i2) throws Exception {
    // NOTE(review): alpha2 / alpha2Star mirror the pseudocode but are unused
    // below (takeStep re-reads m_alpha[i2] / m_alphaStar[i2] directly).
    double alpha2 = m_alpha[i2];
    double alpha2Star = m_alphaStar[i2];

    int iSet = m_iSet[i2];
    double F2 = m_error[i2];

    // i2 not in I0: recompute its error and refresh the cache, then update
    // the boundaries for the non-I0 sets per the pseudocode.
    if (!m_I0.contains(i2)) {
      F2 = -SVMOutput(i2) - m_b + m_target[i2];
      m_error[i2] = F2;
      if (iSet == I1) {
        if (F2 + m_epsilon < m_bUp) {
          m_bUp = F2 + m_epsilon;
          m_iUp = i2;
        } else if (F2 - m_epsilon > m_bLow) {
          m_bLow = F2 - m_epsilon;
          m_iLow = i2;
        }
      } else if ((iSet == I2) && (F2 + m_epsilon > m_bLow)) {
        m_bLow = F2 + m_epsilon;
        m_iLow = i2;
      } else if ((iSet == I3) && (F2 - m_epsilon < m_bUp)) {
        m_bUp = F2 - m_epsilon;
        m_iUp = i2;
      }
    }

    // Check optimality against bLow/bUp; for I0a/I0b/I1 pick whichever of
    // iLow/iUp gives the larger violation as the partner i1.
    int i1 = i2;
    boolean bOptimality = true;

    // case 1: i2 is in I.0a
    if (iSet == I0a) {
      if (m_bLow - (F2 - m_epsilon) > 2 * m_fTolerance) {
        bOptimality = false;
        i1 = m_iLow;
        if ((F2 - m_epsilon) - m_bUp > m_bLow - (F2 - m_epsilon)) {
          i1 = m_iUp;
        }
      } else if ((F2 - m_epsilon) - m_bUp > 2 * m_fTolerance) {
        bOptimality = false;
        i1 = m_iUp;
        if (m_bLow - (F2 - m_epsilon) > (F2 - m_epsilon) - m_bUp) {
          i1 = m_iLow;
        }
      }
    }
    // case 2: i2 is in I.0b
    else if (iSet == I0b) {
      if (m_bLow - (F2 + m_epsilon) > 2 * m_fTolerance) {
        bOptimality = false;
        i1 = m_iLow;
        if ((F2 + m_epsilon) - m_bUp > m_bLow - (F2 + m_epsilon)) {
          i1 = m_iUp;
        }
      } else if ((F2 + m_epsilon) - m_bUp > 2 * m_fTolerance) {
        bOptimality = false;
        i1 = m_iUp;
        if (m_bLow - (F2 + m_epsilon) > (F2 + m_epsilon) - m_bUp) {
          i1 = m_iLow;
        }
      }
    }
    // case 3: i2 is in I.1 (note the asymmetric +eps / -eps shifts)
    else if (iSet == I1) {
      if (m_bLow - (F2 + m_epsilon) > 2 * m_fTolerance) {
        bOptimality = false;
        i1 = m_iLow;
        if ((F2 + m_epsilon) - m_bUp > m_bLow - (F2 + m_epsilon)) {
          i1 = m_iUp;
        }
      } else if ((F2 - m_epsilon) - m_bUp > 2 * m_fTolerance) {
        bOptimality = false;
        i1 = m_iUp;
        if (m_bLow - (F2 - m_epsilon) > (F2 - m_epsilon) - m_bUp) {
          i1 = m_iLow;
        }
      }
    }
    // case 4: i2 is in I.2
    else if (iSet == I2) {
      if ((F2 + m_epsilon) - m_bUp > 2 * m_fTolerance) {
        bOptimality = false;
        i1 = m_iUp;
      }
    }
    // case 5: i2 is in I.3
    else if (iSet == I3) {
      if (m_bLow - (F2 - m_epsilon) > 2 * m_fTolerance) {
        bOptimality = false;
        i1 = m_iLow;
      }
    }

    if (bOptimality) {
      return 0;
    }
    return takeStep(i1, i2, m_alpha[i2], m_alphaStar[i2], F2);
  }

  /**
   * Initializes the optimizer state before the main loop: all multipliers
   * start at zero, every vector is placed in I1, and the boundaries are
   * seeded from an arbitrary example (index 0 here).
   *
   * @param data data set used for learning
   * @throws Exception if something goes wrong
   */
  protected void init(Instances data) throws Exception {
    super.init(data);

    // Initialize sets: I0 empty, everything in I1.
    m_I0 = new SMOset(m_data.numInstances());
    m_iSet = new int [m_data.numInstances()];
    for (int i = 0; i < m_nInstances; i++) {
      m_iSet[i] = I1;
    }

    // Seed boundaries from example 0 (a random pick would also be valid):
    // bUp = target[0] + epsilon, bLow = target[0] - epsilon.
    // m_iUp = m_random.nextInt(m_nInstances);
    m_iUp = 0;
    m_bUp = m_target[m_iUp] + m_epsilon;
    m_iLow = m_iUp;
    m_bLow = m_target[m_iLow] - m_epsilon;

    // Error cache: with all alphas zero the SVM output is 0, so the initial
    // error is simply the target value.
    m_error = new double[m_nInstances];
    for (int i = 0; i < m_nInstances; i++) {
      m_error[i] = m_target[i];
    }
  }

  /**
   * Main loop, modification 1 of Shevade et al.: alternates full sweeps over
   * all training examples with sweeps over I0 only, exiting the I0 sweep
   * early once bLow - bUp < 2*tol (optimality on I0 attained).
   *
   * @throws Exception if something goes wrong
   */
  protected void optimize1() throws Exception {
    int nNumChanged = 0;
    boolean bExamineAll = true;

    while (nNumChanged > 0 || bExamineAll) {
      nNumChanged = 0;
      if (bExamineAll) {
        for (int i = 0; i < m_nInstances; i++) {
          nNumChanged += examineExample(i);
        }
      } else {
        for (int i = m_I0.getNext(-1); i != -1; i = m_I0.getNext(i)) {
          nNumChanged += examineExample(i);
          if (m_bLow - m_bUp < 2 * m_fTolerance) {
            // optimality on I0 attained; force a full sweep next
            nNumChanged = 0;
            break;
          }
        }
      }
      // alternate: full sweep -> I0 sweeps until no change -> full sweep ...
      if (bExamineAll) {
        bExamineAll = false;
      } else if (nNumChanged == 0) {
        bExamineAll = true;
      }
    }
  }

  /**
   * Main loop, modification 2 of Shevade et al.: instead of sweeping I0
   * sequentially, repeatedly optimizes the worst-violating pair
   * (iUp, iLow) until the gap closes or takeStep makes no progress.
   *
   * @throws Exception if something goes wrong
   */
  protected void optimize2() throws Exception {
    int nNumChanged = 0;
    boolean bExamineAll = true;

    while (nNumChanged > 0 || bExamineAll) {
      nNumChanged = 0;
      if (bExamineAll) {
        for (int i = 0; i < m_nInstances; i++) {
          nNumChanged += examineExample(i);
        }
      } else {
        // inner loop on the current worst-violating pair (iUp, iLow)
        boolean bInnerLoopSuccess = true;
        do {
          if (takeStep(m_iUp, m_iLow, m_alpha[m_iLow], m_alphaStar[m_iLow], m_error[m_iLow]) > 0) {
            bInnerLoopSuccess = true;
            nNumChanged += 1;
          } else {
            bInnerLoopSuccess = false;
          }
        } while ((m_bUp <= m_bLow - 2 * m_fTolerance) && bInnerLoopSuccess);
        // always follow the inner loop with a full sweep
        nNumChanged = 0;
      }
      if (bExamineAll) {
        bExamineAll = false;
      } else if (nNumChanged == 0) {
        bExamineAll = true;
      }
    }
  }

  /**
   * Wraps up after optimization: fixes the threshold b as the midpoint of
   * the final boundaries and releases per-example arrays to save memory.
   *
   * @throws Exception if something goes wrong
   */
  protected void wrapUp() throws Exception {
    m_b = -(m_bLow + m_bUp) / 2.0;
    m_target = null;
    m_error = null;
    super.wrapUp();
  }

  /**
   * Learns the SVM regression parameters from data using Keerthi/Shevade's
   * improved SMO algorithm (variant 1 or 2, per configuration).
   *
   * @param instances the data to work with
   * @throws Exception if something goes wrong
   */
  public void buildClassifier(Instances instances) throws Exception {
    // initialize variables
    init(instances);
    // solve optimization problem
    if (m_bUseVariant1) {
      optimize1();
    } else {
      optimize2();
    }
    // clean up
    wrapUp();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
30,082
29.949588
175
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/supportVector/SMOset.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * SMOset.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.functions.supportVector;

import java.io.Serializable;

import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

/**
 * A set of integers drawn from [0, size), backed by a boolean membership
 * array for O(1) lookup plus an intrusive doubly-linked list so the current
 * members can be iterated in O(n) via {@link #getNext(int)}.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public class SMOset implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -8364829283188675777L;

  /** The current number of elements in the set */
  private int m_number;

  /** The first element in the set */
  private int m_first;

  /** Indicators */
  private boolean[] m_indicators;

  /** The next element for each element */
  private int[] m_next;

  /** The previous element for each element */
  private int[] m_previous;

  /**
   * Creates a new, initially empty set able to hold integers in [0, size).
   */
  public SMOset(int size) {
    m_indicators = new boolean[size];
    m_next = new int[size];
    m_previous = new int[size];
    m_number = 0;
    m_first = -1;
  }

  /**
   * Checks whether an element is in the set.
   */
  public boolean contains(int index) {
    return m_indicators[index];
  }

  /**
   * Deletes an element from the set; a no-op if it is not a member.
   */
  public void delete(int index) {
    if (!m_indicators[index]) {
      return;
    }
    // unlink from the doubly-linked list
    if (m_first == index) {
      m_first = m_next[index];
    } else {
      m_next[m_previous[index]] = m_next[index];
    }
    if (m_next[index] != -1) {
      m_previous[m_next[index]] = m_previous[index];
    }
    m_indicators[index] = false;
    m_number--;
  }

  /**
   * Inserts an element into the set; a no-op if it is already a member.
   */
  public void insert(int index) {
    if (m_indicators[index]) {
      return;
    }
    // prepend to the doubly-linked list
    if (m_number == 0) {
      m_first = index;
      m_next[index] = -1;
      m_previous[index] = -1;
    } else {
      m_previous[m_first] = index;
      m_next[index] = m_first;
      m_previous[index] = -1;
      m_first = index;
    }
    m_indicators[index] = true;
    m_number++;
  }

  /**
   * Gets the next element in the set. -1 gets the first one.
   */
  public int getNext(int index) {
    return (index == -1) ? m_first : m_next[index];
  }

  /**
   * Prints all the current elements in the set (linked-list order, then
   * indicator order, then the element count) to standard error.
   */
  public void printElements() {
    for (int elem = getNext(-1); elem != -1; elem = getNext(elem)) {
      System.err.print(elem + " ");
    }
    System.err.println();
    for (int elem = 0; elem < m_indicators.length; elem++) {
      if (m_indicators[elem]) {
        System.err.print(elem + " ");
      }
    }
    System.err.println();
    System.err.println(m_number);
  }

  /**
   * Returns the number of elements in the set.
   */
  public int numElements() {
    return m_number;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
3,618
21.202454
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/supportVector/StringKernel.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * StringKernel.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.functions.supportVector; import java.util.Enumeration; import java.util.Vector; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** <!-- globalinfo-start --> * Implementation of the subsequence kernel (SSK) as described in [1] and of the subsequence kernel with lambda pruning (SSK-LP) as described in [2].<br/> * <br/> * For more information, see<br/> * <br/> * Huma Lodhi, Craig Saunders, John Shawe-Taylor, Nello Cristianini, Christopher J. C. H. Watkins (2002). Text Classification using String Kernels. Journal of Machine Learning Research. 2:419-444.<br/> * <br/> * F. Kleedorfer, A. Seewald (2005). Implementation of a String Kernel for WEKA. Wien, Austria. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;article{Lodhi2002, * author = {Huma Lodhi and Craig Saunders and John Shawe-Taylor and Nello Cristianini and Christopher J. C. H. Watkins}, * journal = {Journal of Machine Learning Research}, * pages = {419-444}, * title = {Text Classification using String Kernels}, * volume = {2}, * year = {2002}, * HTTP = {http://www.jmlr.org/papers/v2/lodhi02a.html} * } * * &#64;techreport{Kleedorfer2005, * address = {Wien, Austria}, * author = {F. Kleedorfer and A. Seewald}, * institution = {Oesterreichisches Forschungsinstitut fuer Artificial Intelligence}, * number = {TR-2005-13}, * title = {Implementation of a String Kernel for WEKA}, * year = {2005} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Enables debugging output (if available) to be printed. * (default: off)</pre> * * <pre> -no-checks * Turns off all checks - use with caution! * (default: checks on)</pre> * * <pre> -P &lt;0|1&gt; * The pruning method to use: * 0 = No pruning * 1 = Lambda pruning * (default: 0)</pre> * * <pre> -C &lt;num&gt; * The size of the cache (a prime number). * (default: 250007)</pre> * * <pre> -IC &lt;num&gt; * The size of the internal cache (a prime number). * (default: 200003)</pre> * * <pre> -L &lt;num&gt; * The lambda constant. Penalizes non-continuous subsequence * matches. Must be in (0,1). * (default: 0.5)</pre> * * <pre> -ssl &lt;num&gt; * The length of the subsequence. * (default: 3)</pre> * * <pre> -ssl-max &lt;num&gt; * The maximum length of the subsequence. * (default: 9)</pre> * * <pre> -N * Use normalization. * (default: no)</pre> * <!-- options-end --> * * <h1>Theory</h1> * <h2>Overview</h2> * The algorithm computes a measure of similarity between two texts based on * the number and form of their common subsequences, which need not be * contiguous. 
This method can be parametrized by specifying the subsequence * length k, the penalty factor lambda, which penalizes non-contiguous matches, * and optional 'lambda pruning', which takes maxLambdaExponent, * <code>m</code>, as parameter. Lambda pruning causes very 'stretched' * substring matches not to be counted, thus speeding up the computation. The * functionality of SSK and SSK-LP is explained in the following using simple * examples. * * <h2>Explanation &amp; Examples</h2> * for all of the following examples, we assume these parameter values: *<pre> *k=2 *lambda=0.5 *m=8 (for SSK-LP examples) *</pre> * * <h3>SSK</h3> * * <h4>Example 1</h4> * * <pre> *SSK(2,"ab","axb")=0.5^5 = 0,03125 *</pre> * There is one subsequence of the length of 2 that both strings have in * common, "ab". The result of SSK is computed by raising lambda to the power * of L, where L is the length of the subsequence match in the one string plus * the length of the subsequence match in the other, in our case: * <pre> *&nbsp; ab axb *L= 2 + 3 = 5 * </pre> * hence, the kernel yields 0.5^5 = 0,03125 * * <h4>Example 2</h4> * <pre> *SSK(2,"ab","abb")=0.5^5 + 0.5^4 = 0,09375 *</pre> * Here, we also have one subsequence of the length of 2 that both strings have * in common, "ab". The result of SSK is actually computed by summing over all * values computed for each occurrence of a common subsequence match. In this * example, there are two possible cases: * <pre> *ab abb *-- -- L=4 *-- - - L=5 * </pre> * we have two matches, one of the length of 2+2=4, one of the length of 2+3=5, * so we get the result 0.5^5 + 0.5^4 = 0,09375. * * <h3>SSK-LP</h3> * Without lambda pruning, the string kernel finds *all* common subsequences of * the given length, whereas with lambda pruning, common subsequence matches * that are too much stretched in both strings are not taken into account. 
It * is argued that the value yielded for such a common subsequence is too low * (<code>lambda ^(length[match_in_s] + length[match_in_t]</code>) . Tests have * shown that a tremendous speedup can be achieved using this technique while * suffering from very little quality loss. <br> * Lambda pruning is parametrized by the maximum lambda exponent. It is a good * idea to choose that value to be about 3 or 4 times the subsequence length as * a rule of thumb. YMMV. * * <h4>Example 3</h4> * Without lambda pruning, one common subsequence, * "AB" would be found in the following two strings. (With k=2) * <pre> *SSK(2,"ab","axb")=0.5^14 = 0,00006103515625 *</pre> * lambda pruning allows for the control of the match length. So, if m * (the maximum lambda exponent) is e.g. 8, these two strings would * yield a kernel value of 0: * <pre> *with lambda pruning: SSK-LP(2,8,"AxxxxxxxxxB","AyB")= 0 *without lambda pruning: SSK(2,"AxxxxxxxxxB","AyB")= 0.5^14 = 0,00006103515625 *</pre> * This is because the exponent for lambda (=the length of the subsequence * match) would be 14, which is &gt; 8. In Contrast, the next result is * &gt; 0 *<pre> *m=8 *SSK-LP(2,8,"AxxB","AyyB")=0.5^8 = 0,00390625 *</pre> * because the lambda exponent would be 8, which is just accepted by lambda * pruning. * * <h3>Normalization</h3> * When the string kernel is used for its main purpose, as the kernel of a * support vector machine, it is not normalized. The normalized kernel can be * switched on by -F (feature space normalization) but is much slower. Like * most unnormalized kernels, K(x,x) is not a fixed value, see the next * example. * * <h4>Example 4</h4> *<pre> *SSK(2,"ab","ab")=0.5^4 = 0.0625 *SSK(2,"AxxxxxxxxxB","AxxxxxxxxxB") = 12.761724710464478 *</pre> * SSK is evaluated twice, each time for two identical strings. A good measure * of similarity would produce the same value in both cases, which should * indicate the same level of similarity. 
The value of the normalized SSK would * be 1.0 in both cases. So for the purpose of computing string similarity the * normalized kernel should be used. For SVM the unnormalized kernel is usually * sufficient. * * <h2>Complexity of SSK and SSK-LP</h2> * The time complexity of this method (without lambda pruning and with an * infinitely large cache) is<br> * <pre>O(k*|s|*|t|)</pre> * Lambda Pruning has a complexity (without caching) of<br> * <pre>O(m*binom(m,k)^2*(|s|+n)*|t|)</pre> <br> * <pre> *k... subsequence length (ssl) *s,t... strings *|s|... length of string s *binom(x,y)... binomial coefficient (x!/[(x-y)!y!]) *m... maxLambdaExponent (ssl-max) *</pre> * * Keep in mind that execution time can increase fast for long strings * and big values for k, especially if you don't use lambda pruning. * With lambda pruning, computation is usually so fast that switching * on the cache leads to slower computation because of setup costs. Therefore * caching is switched off for lambda pruning. * <br> * <br> * For details and qualitative experiments about SSK, see [1] <br> * For details about lambda pruning and performance comparison of SSK * and SSK-LP (SSK with lambda pruning), see [2] * Note that the complexity estimation in [2] assumes no caching of * intermediate results, which has been implemented in the meantime and * greatly improves the speed of the SSK without lambda pruning. *<br> * *<h1>Notes for usage within Weka</h1> * Only instances of the following form can be processed using string kernels: * <pre> *+----------+-------------+---------------+ *|attribute#| 0 | 1 | *+----------+-------------+---------------+ *| content | [text data] | [class label] | *+----------------------------------------+ * ... or ... 
*+----------+---------------+-------------+
*|attribute#| 0             | 1           |
*+----------+---------------+-------------+
*| content  | [class label] | [text data] |
*+----------------------------------------+
*</pre>
*
* @author Florian Kleedorfer (kleedorfer@austria.fm)
* @author Alexander K. Seewald (alex@seewald.at)
* @version $Revision: 8034 $
*/
public class StringKernel extends Kernel implements TechnicalInformationHandler {

  /** for serialization */
  private static final long serialVersionUID = -4902954211202690123L;

  /** The size of the cache (a prime number) */
  private int m_cacheSize = 250007;

  /** The size of the internal cache for intermediate results (a prime number) */
  private int m_internalCacheSize = 200003;

  /** The attribute number of the string attribute */
  private int m_strAttr;

  /** Kernel cache (i.e., cache for kernel evaluations) */
  private double[] m_storage;
  /** keys for the kernel cache; entry i holds (key+1) for m_storage[i] */
  private long[] m_keys;

  /** Counts the number of kernel evaluations. */
  private int m_kernelEvals;

  /** The number of instance in the dataset */
  private int m_numInsts;

  /** Pruning method: No Pruning */
  public final static int PRUNING_NONE = 0;
  /** Pruning method: Lambda. See [2] for details. */
  public final static int PRUNING_LAMBDA = 1;
  /** Pruning methods */
  public static final Tag [] TAGS_PRUNING = {
    new Tag(PRUNING_NONE, "No pruning"),
    new Tag(PRUNING_LAMBDA, "Lambda pruning"),
  };

  /** the pruning method */
  protected int m_PruningMethod = PRUNING_NONE;

  /** the decay factor that penalizes non-continuous substring matches.
   * See [1] for details. */
  protected double m_lambda = 0.5;

  /** The substring length */
  private int m_subsequenceLength = 3;

  /** The maximum substring length for lambda pruning */
  private int m_maxSubsequenceLength = 9;

  /** powers of lambda are prepared prior to kernel evaluations;
   * all powers between 0 and this value are precalculated */
  protected static final int MAX_POWER_OF_LAMBDA = 10000;

  /** the precalculated powers of lambda */
  protected double[] m_powersOflambda = null;

  /** flag for switching normalization on or off. This defaults to false and
   * can be turned on by the switch for feature space normalization in SMO */
  private boolean m_normalize = false;

  /** private cache for intermediate results */
  private int maxCache; // is set in unnormalizedKernel(s1,s2)
  private double[] cachekh;
  private int[] cachekhK;
  private double[] cachekh2;
  private int[] cachekh2K;

  /** cached indexes for private cache */
  private int m_multX;
  private int m_multY;
  private int m_multZ;
  private int m_multZZ;

  /** whether the internal recursion cache is used */
  private boolean m_useRecursionCache = true;

  /**
   * default constructor
   */
  public StringKernel() {
    super();
  }

  /**
   * Creates a new StringKernel object and builds it on the given data.
   * Initializes the kernel cache and the 'lambda cache', i.e. the
   * precalculated powers of lambda from lambda^2 to lambda^MAX_POWER_OF_LAMBDA.
   *
   * @param data the dataset to use
   * @param cacheSize the size of the cache
   * @param subsequenceLength the subsequence length
   * @param lambda the lambda value
   * @param debug whether to output debug information
   * @throws Exception if something goes wrong
   */
  public StringKernel(Instances data, int cacheSize, int subsequenceLength, double lambda, boolean debug) throws Exception {
    setDebug(debug);
    setCacheSize(cacheSize);
    setInternalCacheSize(200003);
    setSubsequenceLength(subsequenceLength);
    // -1 disables lambda pruning for this constructor path
    setMaxSubsequenceLength(-1);
    setLambda(lambda);
    buildKernel(data);
  }

  /**
   * Returns a string describing the kernel
   *
   * @return a description suitable for displaying in the
   * explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "Implementation of the subsequence kernel (SSK) as described in [1] "
      + "and of the subsequence kernel with lambda pruning (SSK-LP) as "
      + "described in [2].\n\n"
      + "For more information, see\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;
    TechnicalInformation additional;

    result = new TechnicalInformation(Type.ARTICLE);
    result.setValue(Field.AUTHOR, "Huma Lodhi and Craig Saunders and John Shawe-Taylor and Nello Cristianini and Christopher J. C. H. Watkins");
    result.setValue(Field.YEAR, "2002");
    result.setValue(Field.TITLE, "Text Classification using String Kernels");
    result.setValue(Field.JOURNAL, "Journal of Machine Learning Research");
    result.setValue(Field.VOLUME, "2");
    result.setValue(Field.PAGES, "419-444");
    result.setValue(Field.HTTP, "http://www.jmlr.org/papers/v2/lodhi02a.html");

    additional = result.add(Type.TECHREPORT);
    additional.setValue(Field.AUTHOR, "F. Kleedorfer and A. Seewald");
    additional.setValue(Field.YEAR, "2005");
    additional.setValue(Field.TITLE, "Implementation of a String Kernel for WEKA");
    additional.setValue(Field.INSTITUTION, "Oesterreichisches Forschungsinstitut fuer Artificial Intelligence");
    additional.setValue(Field.ADDRESS, "Wien, Austria");
    additional.setValue(Field.NUMBER, "TR-2005-13");

    return result;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
*/
  public Enumeration listOptions() {
    Vector		result;
    Enumeration		en;
    String		desc;
    String		param;
    int			i;
    SelectedTag		tag;

    result = new Vector();

    // inherit the superclass options (-D, -no-checks, ...)
    en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement(en.nextElement());

    // build the "-P <0|1>" description from the TAGS_PRUNING table so the
    // help text stays in sync with the available pruning methods
    desc  = "";
    param = "";
    for (i = 0; i < TAGS_PRUNING.length; i++) {
      if (i > 0)
	param += "|";
      tag = new SelectedTag(TAGS_PRUNING[i].getID(), TAGS_PRUNING);
      param += "" + tag.getSelectedTag().getID();
      desc  +=   "\t" + tag.getSelectedTag().getID()
      	       + " = " + tag.getSelectedTag().getReadable()
      	       + "\n";
    }

    result.addElement(new Option(
	"\tThe pruning method to use:\n"
	+ desc
	+ "\t(default: " + PRUNING_NONE + ")",
	"P", 1, "-P <" + param + ">"));

    result.addElement(new Option(
	"\tThe size of the cache (a prime number).\n"
	+ "\t(default: 250007)",
	"C", 1, "-C <num>"));

    result.addElement(new Option(
	"\tThe size of the internal cache (a prime number).\n"
	+ "\t(default: 200003)",
	"IC", 1, "-IC <num>"));

    result.addElement(new Option(
	"\tThe lambda constant. Penalizes non-continuous subsequence\n"
	+ "\tmatches. Must be in (0,1).\n"
	+ "\t(default: 0.5)",
	"L", 1, "-L <num>"));

    result.addElement(new Option(
	"\tThe length of the subsequence.\n"
	+ "\t(default: 3)",
	"ssl", 1, "-ssl <num>"));

    result.addElement(new Option(
	"\tThe maximum length of the subsequence.\n"
	+ "\t(default: 9)",
	"ssl-max", 1, "-ssl-max <num>"));

    result.addElement(new Option(
	"\tUse normalization.\n"
	+ "\t(default: no)",
	"N", 0, "-N"));

    return result.elements();
  }

  /**
   * Parses a given list of options. Supported options (see listOptions for
   * details): -P (pruning method), -C (cache size), -IC (internal cache
   * size), -L (lambda), -ssl (subsequence length), -ssl-max (maximum
   * subsequence length), -N (normalization), plus the superclass options.
   * Unspecified options fall back to their documented defaults.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String	tmpStr;

    tmpStr = Utils.getOption('P', options);
    if (tmpStr.length() != 0)
      setPruningMethod(
	  new SelectedTag(Integer.parseInt(tmpStr), TAGS_PRUNING));
    else
      setPruningMethod(
	  new SelectedTag(PRUNING_NONE, TAGS_PRUNING));

    tmpStr = Utils.getOption('C', options);
    if (tmpStr.length() != 0)
      setCacheSize(Integer.parseInt(tmpStr));
    else
      setCacheSize(250007);

    tmpStr = Utils.getOption("IC", options);
    if (tmpStr.length() != 0)
      setInternalCacheSize(Integer.parseInt(tmpStr));
    else
      setInternalCacheSize(200003);

    tmpStr = Utils.getOption('L', options);
    if (tmpStr.length() != 0)
      setLambda(Double.parseDouble(tmpStr));
    else
      setLambda(0.5);

    tmpStr = Utils.getOption("ssl", options);
    if (tmpStr.length() != 0)
      setSubsequenceLength(Integer.parseInt(tmpStr));
    else
      setSubsequenceLength(3);

    tmpStr = Utils.getOption("ssl-max", options);
    if (tmpStr.length() != 0)
      setMaxSubsequenceLength(Integer.parseInt(tmpStr));
    else
      setMaxSubsequenceLength(9);

    setUseNormalization(Utils.getFlag('N', options));

    // NOTE(review): this check fires unconditionally, i.e. even when the
    // pruning method is PRUNING_NONE — presumably it should only apply when
    // lambda pruning is selected. TODO confirm intended behavior.
    if (getMaxSubsequenceLength()<2*getSubsequenceLength()) {
      throw new IllegalArgumentException("Lambda Pruning forbids even contiguous substring matches! "
          + "Use a bigger value for ssl-max (at least 2*ssl).");
    }

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the Kernel.
*
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    int       i;
    Vector    result;
    String[]  options;

    result = new Vector();

    // superclass options first (-D, -no-checks, ...)
    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);

    result.add("-P");
    result.add("" + m_PruningMethod);

    result.add("-C");
    result.add("" + getCacheSize());

    result.add("-IC");
    result.add("" + getInternalCacheSize());

    result.add("-L");
    result.add("" + getLambda());

    result.add("-ssl");
    result.add("" + getSubsequenceLength());

    result.add("-ssl-max");
    result.add("" + getMaxSubsequenceLength());

    // BUGFIX: was result.add("-L"), which collided with the lambda option
    // (-L expects a value) and broke the setOptions/getOptions round-trip.
    // The normalization flag is -N, matching listOptions and setOptions.
    if (getUseNormalization())
      result.add("-N");

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String pruningMethodTipText() {
    return "The pruning method.";
  }

  /**
   * Sets the method used for pruning. Ignored unless the tags belong to
   * TAGS_PRUNING.
   *
   * @param value the pruning method to use.
   */
  public void setPruningMethod(SelectedTag value) {
    if (value.getTags() == TAGS_PRUNING)
      m_PruningMethod = value.getSelectedTag().getID();
  }

  /**
   * Gets the method used for pruning.
   *
   * @return the pruning method to use.
*/
  public SelectedTag getPruningMethod() {
    return new SelectedTag(m_PruningMethod, TAGS_PRUNING);
  }

  /**
   * Sets the size of the kernel cache (should be a prime number).
   * Rejects negative values with a console message; on success the
   * existing cache is discarded.
   *
   * @param value the size of the cache
   */
  public void setCacheSize(int value) {
    if (value < 0) {
      System.out.println(
	  "Cache size cannot be smaller than 0 (provided: " + value + ")!");
      return;
    }
    m_cacheSize = value;
    clean();
  }

  /**
   * Gets the size of the kernel cache.
   *
   * @return the cache size
   */
  public int getCacheSize() {
    return m_cacheSize;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String cacheSizeTipText() {
    return "The size of the cache (a prime number).";
  }

  /**
   * Sets the size of the internal cache for intermediate results. Memory
   * consumption is about 16x this amount in bytes. Only use when lambda
   * pruning is switched off. Rejects negative values with a console
   * message; on success the existing cache is discarded.
   *
   * @param value the size of the internal cache
   */
  public void setInternalCacheSize(int value) {
    if (value < 0) {
      System.out.println(
	  "Cache size cannot be smaller than 0 (provided: " + value + ")!");
      return;
    }
    m_internalCacheSize = value;
    clean();
  }

  /**
   * Gets the size of the internal cache.
   *
   * @return the cache size
   */
  public int getInternalCacheSize() {
    return m_internalCacheSize;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String internalCacheSizeTipText() {
    return "The size of the internal cache (a prime number).";
  }

  /**
   * Sets the length of the subsequence.
   *
   * @param value the length
   */
  public void setSubsequenceLength(int value) {
    m_subsequenceLength = value;
  }

  /**
   * Returns the length of the subsequence.
   *
   * @return the length
   */
  public int getSubsequenceLength() {
    return m_subsequenceLength;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String subsequenceLengthTipText() {
    return "The subsequence length.";
  }

  /**
   * Sets the maximum length of the subsequence (used by lambda pruning).
   *
   * @param value the maximum length
   */
  public void setMaxSubsequenceLength(int value) {
    m_maxSubsequenceLength = value;
  }

  /**
   * Returns the maximum length of the subsequence.
   *
   * @return the maximum length
   */
  public int getMaxSubsequenceLength() {
    return m_maxSubsequenceLength;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String maxSubsequenceLengthTipText() {
    return "The maximum subsequence length (theta in the paper)";
  }

  /**
   * Sets the lambda constant used in the string kernel.
   *
   * @param value the lambda value to use
   */
  public void setLambda(double value) {
    m_lambda = value;
  }

  /**
   * Gets the lambda constant used in the string kernel.
   *
   * @return the current lambda constant
   */
  public double getLambda() {
    return m_lambda;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String lambdaTipText(){
    return "Penalizes non-continuous subsequence matches, from (0,1)";
  }

  /**
   * Sets whether to use normalization. Whenever the value actually
   * changes, the kernel cache is cleared.
   *
   * @param value whether to use normalization
   */
  public void setUseNormalization(boolean value) {
    boolean changed = (value != m_normalize);
    if (changed) {
      clean();
    }
    m_normalize = value;
  }

  /**
   * Returns whether normalization is used.
 * @return true if normalization is used
 */
public boolean getUseNormalization() {
  return m_normalize;
}

/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for
 *         displaying in the explorer/experimenter gui
 */
public String useNormalizationTipText(){
  return "Whether to use normalization.";
}

/**
 * Computes the result of the kernel function for two instances.
 * If id1 == -1, eval uses inst1 instead of an instance in the dataset.
 *
 * @param id1 the index of the first instance in the dataset
 * @param id2 the index of the second instance in the dataset
 * @param inst1 the instance corresponding to id1 (used if id1 == -1)
 * @return the result of the kernel function
 * @throws Exception if something goes wrong
 */
public double eval(int id1, int id2, Instance inst1)
  throws Exception {

  if (m_Debug && id1>-1 && id2>-1) {
    System.err.println("\nEvaluation of string kernel for");
    System.err.println(m_data.instance(id1).stringValue(m_strAttr));
    System.err.println("and");
    System.err.println(m_data.instance(id2).stringValue(m_strAttr));
  }

  // the normalized kernel returns 1 for comparison of
  // two identical strings
  if (id1 == id2 && m_normalize)
    return 1.0;

  double result = 0;
  long key = -1;
  int location = -1;

  // we can only cache if we know the indexes
  if ((id1 >= 0) && (m_keys != null)) {
    // the kernel is symmetric, so fold (id1,id2) and (id2,id1)
    // onto the same cache key
    if (id1 > id2) {
      key = (long)id1 * m_numInsts + id2;
    } else {
      key = (long)id2 * m_numInsts + id1;
    }
    if (key < 0) {
      throw new Exception("Cache overflow detected!");
    }
    location = (int)(key % m_keys.length);
    // keys are stored as (key + 1) so that 0 marks an empty slot
    if (m_keys[location] == (key + 1)) {
      if (m_Debug)
        System.err.println("result (cached): " + m_storage[location]);
      return m_storage[location];
    }
  }

  m_kernelEvals++;
  long start = System.currentTimeMillis();

  Instance inst2 = m_data.instance(id2);
  char[] s1 = inst1.stringValue(m_strAttr).toCharArray();
  char[] s2 = inst2.stringValue(m_strAttr).toCharArray();

  // prevent the kernel from returning NaN
  // (normalization would divide by 0 for an empty string)
  if (s1.length == 0 || s2.length == 0)
    return 0;

  if (m_normalize) {
    result = normalizedKernel(s1,s2);
  } else {
    result = unnormalizedKernel(s1, s2);
  }

  if (m_Debug) {
    long duration = System.currentTimeMillis() - start;
    System.err.println("result: " + result);
    System.err.println("evaluation time:" + duration +"\n");
  }

  // store result in cache (direct-mapped: a colliding entry is overwritten)
  if (key != -1){
    m_storage[location] = result;
    m_keys[location] = (key + 1);
  }
  return result;
}

/**
 * Frees the memory used by the kernel.
 * (Useful with kernels which use cache.)
 * This function is called when the training is done.
 * i.e. after that, eval will be called with id1 == -1.
 */
public void clean() {
  m_storage = null;
  m_keys = null;
}

/**
 * Returns the number of kernel evaluation performed.
 *
 * @return the number of kernel evaluation performed.
 */
public int numEvals() {
  return m_kernelEvals;
}

/**
 * Returns the number of dot product cache hits.
 *
 * @return the number of dot product cache hits, or -1 if not supported by
 *         this kernel.
 */
public int numCacheHits() {
  // TODO: implement!
  return -1;
}

/**
 * evaluates the normalized kernel between s and t. See [1] for details about
 * the normalized SSK: K(s,t) / sqrt(K(s,s) * K(t,t)).
 *
 * @param s first input string
 * @param t second input string
 * @return a double indicating their distance, or similarity
 */
public double normalizedKernel(char[] s, char[] t){
  double k1 = unnormalizedKernel(s, s);
  double k2 = unnormalizedKernel(t, t);
  double normTerm = Math.sqrt( k1*k2 );
  return unnormalizedKernel(s, t) / normTerm;
}

/**
 * evaluates the unnormalized kernel between s and t. See [1] for details
 * about the unnormalized SSK.
 *
 * @param s first input string
 * @param t second input string
 * @return a double indicating their distance, or similarity
 */
public double unnormalizedKernel(char[] s, char[] t){
  if (t.length > s.length) {
    //swap because the algorithm is faster if s is
    //the longer string
    char[] buf = s;
    s = t;
    t = buf;
  }
  if (m_PruningMethod == PRUNING_NONE) {
    // multipliers flatten the (n, endIndexS, endIndexT) triple into a
    // single cache address
    m_multX=(s.length+1)*(t.length+1);
    m_multY=(t.length+1);
    m_multZ=1;
    maxCache = m_internalCacheSize;
    // 0 means "unlimited"; also never allocate more than the full table needs
    if (maxCache==0) {
      maxCache=(m_subsequenceLength+1)*m_multX;
    } else if ((m_subsequenceLength+1)*m_multX<maxCache) {
      maxCache=(m_subsequenceLength+1)*m_multX;
    }
    m_useRecursionCache=true;
    cachekhK = new int[maxCache];
    cachekh2K = new int[maxCache];
    cachekh = new double[maxCache];
    cachekh2 = new double[maxCache];
  } else if (m_PruningMethod == PRUNING_LAMBDA) {
    // lambda pruning runs without the recursion cache
    maxCache=0;
    m_useRecursionCache=false;
  }
  double res;
  if (m_PruningMethod == PRUNING_LAMBDA) {
    res = kernelLP(
      m_subsequenceLength,s,s.length-1,t,t.length-1,
      m_maxSubsequenceLength);
  } else {
    res = kernel(
      m_subsequenceLength,s,s.length-1, t, t.length-1);
  }
  // release the (per-call) recursion caches
  cachekh = null;
  cachekhK = null;
  cachekh2 = null;
  cachekh2K = null;
  return res;
}

/**
 * Recursion-ending function that is called at the end of each
 * recursion branch. An empty subsequence (n == 0) matches exactly once,
 * any longer one that cannot be completed contributes nothing.
 *
 * @param n the remaining subsequence length
 * @return 1 if n == 0, otherwise 0
 */
protected double getReturnValue(int n){
  if (n == 0)
    return 1;
  else
    return 0;
}

/**
 * the kernel function (Kn). This function performs the outer loop
 * character-wise over the first input string s. For each character
 * encountered, a recursion branch is started that identifies all
 * subsequences in t starting with that character. <br> See [1] for details
 * but note that this code is optimized and may be hard to recognize.
 *
 * @param n the current length of the matching subsequence
 * @param s first string, as a char array
 * @param t second string, as a char array
 * @param endIndexS the portion of s currently regarded is s[1:endIndexS]
 * @param endIndexT the portion of t currently regarded is t[1:endIndexT]
 * @return a double indicating the distance or similarity between s and t,
 *         according to and depending on the initial value for n.
 */
protected double kernel(int n, char[] s,int endIndexS, char[] t, int endIndexT) {
  //normal recursion ending case
  if (Math.min(endIndexS+1,endIndexT+1) < n)
    return getReturnValue(n);

  //accumulate all recursion results in one:
  double result = 0;

  //the tail-recursive function defined in [1] is turned into a
  //loop here, preventing stack overflows.
  //skim s from back to front
  for (int iS=endIndexS; iS > n-2; iS--) {
    double buf = 0;
    //let the current character in s be x
    char x = s[iS];
    // iterate over all occurrences of x in t
    for (int j=0; j <= endIndexT; j++) {
      if (t[j] == x){
        //this is a match for the current character, hence
        //1. use previous chars in both strings (iS-1, j-1)
        //2. decrement the remainingMatchLength (n-1)
        //and start a recursion branch for these parameters
        buf += kernelHelper(n-1,s,iS-1, t, j-1);
      }
    }
    //ok, all occurrences of x in t have been found
    //multiply the result with lambda^2
    // (one lambda for x, and the other for all matches of x in t)
    result += buf * m_powersOflambda[2];
  }
  return result;
}

/**
 * The kernel helper function, called K' in [1] and [2].
 *
 * @param n the current length of the matching subsequence
 * @param s first string, as a char array
 * @param t second string, as a char array
 * @param endIndexS the portion of s currently regarded is s[1:endIndexS]
 * @param endIndexT the portion of t currently regarded is t[1:endIndexT]
 * @return a partial result for K
 */
protected double kernelHelper (int n, char[] s,int endIndexS, char[] t, int endIndexT) {
  //recursion ends if the current subsequence has maximal length,
  //which is the case here
  if (n <= 0 ) {
    return getReturnValue(n);
  }
  //recursion ends, too, if the current subsequence is shorter than
  //maximal length, but there is no chance that it will reach maximal length.
  //in this case, normally 0 is returned, but the EXPERIMENTAL
  //minSubsequenceLength feature allows shorter subsequence matches
  //also to contribute
  if (Math.min(endIndexS+1,endIndexT+1) < n) {
    return getReturnValue(n);
  }
  int adr = 0;
  if (m_useRecursionCache) {
    // flatten (n, endIndexS, endIndexT) into one address; the +1 trick lets
    // 0 mark an empty slot in the direct-mapped cache
    adr=m_multX*n+m_multY*endIndexS+m_multZ*endIndexT;
    if ( cachekhK[adr % maxCache] == adr+1)
      return cachekh[adr % maxCache];
  }
  //the tail-recursive function defined in [1] is turned into a
  //loop here, preventing stack overflows.
  //loop over s, nearly from the start (skip the first n-1 characters)
  //and only up until endIndexS, and recursively apply K''. Thus, every
  //character between n-1 and endIndexS in s is counted once as
  //being part of the subsequence match and once just as a gap.
  //In both cases lambda is multiplied with the result.
  double result = 0;
  /*
  for (int iS = n-1; iS <= endIndexS;iS++) {
    result *= m_lambda;
    result += kernelHelper2(n,s,iS, t, endIndexT);
  }
  if (m_useRecursionCache) {
    cachekhK[adr % maxCache]=adr+1;
    cachekh[adr % maxCache]=result;
  }
  return result;
  */
  /* ^^^ again, above code segment does not store some intermediate results...
  */
  // recursive formulation instead of the loop above, so that the
  // intermediate K'(n, endIndexS-1, endIndexT) values are cached too
  result = m_lambda*kernelHelper(n,s,endIndexS-1,t,endIndexT)
    + kernelHelper2(n,s,endIndexS,t,endIndexT);
  if (m_useRecursionCache) {
    cachekhK[adr % maxCache]=adr+1;
    cachekh[adr % maxCache]=result;
  }
  return result;
}

/**
 * helper function for the evaluation of the kernel K'' see section
 * 'Efficient Computation of SSK' in [1]
 *
 * @param n the current length of the matching subsequence
 * @param s first string, as a char array
 * @param t second string, as a char array
 * @param endIndexS the portion of s currently regarded is s[1:endIndexS]
 * @param endIndexT the portion of t currently regarded is t[1:endIndexT]
 * @return a partial result for K'
 */
protected double kernelHelper2(int n, char[] s, int endIndexS, char[] t, int endIndexT) {
  //recursion ends if one of the indices in both strings is <0
  if (endIndexS <0 || endIndexT <0) {
    return getReturnValue(n);
  }
  int adr = 0;
  if (m_useRecursionCache) {
    adr=m_multX*n+m_multY*endIndexS+m_multZ*endIndexT;
    if ( cachekh2K[adr % maxCache] == adr+1)
      return cachekh2[adr % maxCache];
  }
  //spot the last character in s, we'll need it
  char x = s[endIndexS];
  //recurse if the last characters of s and t, x (and y) are identical.
  //which is an easy case: just add up two recursions,
  // 1. one that counts x and y as a part of the subsequence match
  //    -> n, endIndexS and endIndexT are decremented for next recursion level
  //    -> lambda^2 is multiplied with the result to account for the length
  //       of 2 that has been added to the length of the subsequence match
  //       by accepting x and y.
  // 2. one that counts y as a gap in the match
  //    -> only endIndexT is decremented for next recursion level
  //    -> lambda is multiplied with the result to account for the length
  //       of 1 that has been added to the length of the subsequence match
  //       by omitting y.
  if (x == t[endIndexT]) {
    double ret = m_lambda * (kernelHelper2(n,s,endIndexS, t, endIndexT-1)
      + m_lambda * kernelHelper(n-1,s,endIndexS-1, t, endIndexT-1));
    if (m_useRecursionCache) {
      cachekh2K[adr % maxCache]=adr+1;
      cachekh2[adr % maxCache]=ret;
    }
    return ret;
  } else {
    // no match at the last position of t: treat it as a gap (one lambda)
    double ret = m_lambda*kernelHelper2(n,s,endIndexS,t,endIndexT-1);
    if (m_useRecursionCache) {
      cachekh2K[adr % maxCache]=adr+1;
      cachekh2[adr % maxCache]=ret;
    }
    return ret;
  }
  //look for x in t from back to front.
  //this is actually an optimization from [1] that spares unneccessary
  //recursions iff
  //x is actually found in t, but not at the last position.
  /*
  int i;
  int threshold = n>0?n-1:0;
  for (i=endIndexT-1; i >= threshold;i--) {
    if (x == t[i]) {
      double ret=getPowerOfLambda(endIndexT-i) * kernelHelper2(n,s,endIndexS, t, i);
      if (m_useRecursionCache) {
        cachekh2K[adr % maxCache]=adr+1;
        cachekh2[adr % maxCache]=ret;
      }
      return ret;
    }
  }
  */
  //end the recursion if x is not found in t.
  /*
  double ret = getReturnValue(n);
  if (m_useRecursionCache) {
    cachekh2K[adr % maxCache]=adr+1;
    cachekh2[adr % maxCache]=ret;
  }
  return ret;*/
}

/**
 * the kernel function K explained in [1] using lambda pruning, explained in
 * [2]. An additional parameter is introduced, which denotes the maximum
 * length of a subsequence match. This allows for the control of how relaxed
 * the subsequence matches are. <br>
 *
 * @param n the current length of the matching subsequence
 * @param s first string, as a char array
 * @param t second string, as a char array
 * @param endIndexS the portion of s currently regarded is s[1:endIndexS]
 * @param endIndexT the portion of t currently regarded is t[1:endIndexT]
 * @param remainingMatchLength actually the initial value for
 *        maxLambdaExponent
 * @return a double indicating the distance or similarity between s and t,
 *         according to and depending on the initial value for n.
 */
protected double kernelLP(int n, char[] s, int endIndexS,char[] t, int endIndexT,int remainingMatchLength) {
  //see code docs in kernel()
  if (Math.min(endIndexS+1,endIndexT +1) < n) {
    return getReturnValue(n);
  }
  //lambda pruning check
  //stops recursion if the match is so long that the resulting
  //power of lambda is smaller than minLambda
  //if lambda pruning is not used, the remainingMatchLength is < 0
  //and this check never stops the recursion
  if (remainingMatchLength == 0)
    return getReturnValue(n);
  double result = 0;
  //see code docs in kernel()
  for (int iS =endIndexS; iS > n-2; iS--) {
    double buf = 0;
    char x = s[iS];
    for (int j=0; j <= endIndexT; j++) {
      if (t[j] == x){
        //both t[j] and x are considered part of the subsequence match, hence
        //subtract 2 from the remainingMatchLength
        buf += kernelHelperLP(n-1,s,iS-1,t,j-1,remainingMatchLength-2);
      }
    }
    // lambda^2: one lambda for x and one for its match in t
    result += buf * m_powersOflambda[2];
  }
  return result;
}

/**
 * helper function for the evaluation of the kernel (K'n) using lambda pruning
 *
 * @param n the current length of the matching subsequence
 * @param s first string, as a char array
 * @param t second string, as a char array
 * @param endIndexS the portion of s currently regarded is s[1:endIndexS]
 * @param endIndexT the portion of t currently regarded is t[1:endIndexT]
 * @param remainingMatchLength the number of characters that may still be
 *        used for matching (i.e.
gaps + matches in both strings) * @return a partial result for K */ protected double kernelHelperLP (int n, char[] s, int endIndexS,char[] t, int endIndexT,int remainingMatchLength) { //see code docs in kernelHelper() if (n == 0) { return getReturnValue(n); } //see code docs in kernelHelper() if (Math.min(endIndexS+1,endIndexT +1) < n) {; return getReturnValue(n); } //lambda pruning check //stops recursion if the match is so long that the resulting //power of lambda is smaller than minLambda //if lambda pruning is not used, the remainingMatchLength is < 0 //and this check never stops the recursion if (remainingMatchLength < 2*n) { return getReturnValue(n); } int adr=0; if (m_useRecursionCache) { adr = m_multX*n+m_multY*endIndexS+m_multZ*endIndexT + m_multZZ * remainingMatchLength; if (cachekh2K[adr % maxCache]==adr+1) { return cachekh2[adr % maxCache]; } } int rml = 0; //counts the remaining match length double result = 0; //see code docs in kernelHelper() //difference to implementation in kernelHelper: //*)choose different starting point, which is found counting //the maximal remaining match length from endIndexS. //*)keep track of the remaining match length, rml, which is // incremented each loop for (int iS = (endIndexS-remainingMatchLength); iS <= endIndexS;iS++) { result *= m_lambda; result += kernelHelper2LP(n,s,iS, t, endIndexT,rml++); } if (m_useRecursionCache && endIndexS >= 0 && endIndexT >= 0 && n >= 0) { cachekhK[adr % maxCache]=adr+1; cachekh[adr % maxCache]=result; } return result; } /** * helper function for the evaluation of the kernel (K''n) using lambda * pruning * * @param n the current length of the matching subsequence * @param s first string, as a char array * @param t second string, as a char array * @param endIndexS the portion of s currently regarded is s[1:endIndexS] * @param endIndexT the portion of t currently regarded is t[1:endIndexT] * @param remainingMatchLength the number of characters that may still be * used * for matching (i.e. 
 gaps + matches in both strings)
 * @return a partial result for K'
 */
protected double kernelHelper2LP(int n, char[] s, int endIndexS,char[] t, int endIndexT,int remainingMatchLength) {
  //lambda pruning check
  //stops recursion if the match is so long that the resulting
  //power of lambda is smaller than minLambda
  //if lambda pruning is not used, the remainingMatchLength is < 0
  //and this check never stops the recursion
  //if (remainingMatchLength <= 0) return 0;
  if (remainingMatchLength < 2*n)
    return getReturnValue(n);
  //see code docs in kernelHelper2()
  if (endIndexS <0 || endIndexT <0)
    return getReturnValue(n);
  int adr=0;
  if (m_useRecursionCache){
    // the address additionally encodes the remaining match length
    adr = m_multX*n+m_multY*endIndexS+m_multZ*endIndexT
      + m_multZZ * remainingMatchLength;
    if (cachekh2K[adr % maxCache]==adr+1) {
      return cachekh2[adr % maxCache];
    }
  }
  char x = s[endIndexS];
  if (x == t[endIndexT]) {
    // last characters match: count both as part of the subsequence
    // (lambda^2, n-1, budget-2) plus the branch where t's last character
    // is a gap (lambda, budget-1)
    double ret = m_lambda *
      (kernelHelper2LP(n,s,endIndexS,t,endIndexT-1,remainingMatchLength-1)
       + m_lambda * kernelHelperLP(n-1,s,endIndexS-1,t,endIndexT-1,remainingMatchLength-2));
    if (m_useRecursionCache && endIndexS >= 0 && endIndexT >= 0 && n >= 0) {
      cachekh2K[adr % maxCache]=adr+1;
      cachekh2[adr % maxCache]=ret;
    }
    return ret;
  }
  //see code docs in kernelHelper()
  //differences to implementation in kernelHelper():
  //*) choose a different ending point for the loop
  //   based on the remaining match length
  int i;
  int minIndex = endIndexT - remainingMatchLength;
  if (minIndex < 0)
    minIndex = 0;
  for (i=endIndexT; i >= minIndex;i--) {
    if (x == t[i]) {
      // skip straight to the next occurrence of x in t; the skipped
      // characters are gaps and contribute lambda^skipLength
      int skipLength = endIndexT -i;
      double ret = getPowerOfLambda(skipLength)
        * kernelHelper2LP(n,s,endIndexS,t,i,remainingMatchLength-skipLength);
      if (m_useRecursionCache && endIndexS >= 0 && endIndexT >= 0 && n >= 0) {
        cachekh2K[adr % maxCache]=adr+1;
        cachekh2[adr % maxCache]=ret;
      }
      return ret;
    }
  }
  // x does not occur in the reachable part of t: end the recursion
  double ret = getReturnValue(n);
  if (m_useRecursionCache && endIndexS >= 0 && endIndexT >= 0 && n >= 0) {
    cachekh2K[adr % maxCache]=adr+1;
    cachekh2[adr % maxCache]=ret;
  }
  return ret;
}

/**
* precalculates small powers of lambda to speed up the kernel evaluation * * @return the powers */ private double[] calculatePowersOfLambda(){ double[] powers = new double[MAX_POWER_OF_LAMBDA+1]; powers[0] = 1.0; double val = 1.0; for (int i = 1; i<=MAX_POWER_OF_LAMBDA;i++) { val *= m_lambda; powers[i] = val; } return powers; } /** * retrieves a power of lambda from the lambda cache or calculates it * directly * * @param exponent the exponent to calculate * @return the exponent-th power of lambda */ private double getPowerOfLambda(int exponent){ if (exponent > MAX_POWER_OF_LAMBDA) return Math.pow(m_lambda,exponent); if (exponent < 0) throw new IllegalArgumentException( "only positive powers of lambda may be computed"); return m_powersOflambda[exponent]; } /** * initializes variables etc. * * @param data the data to use */ protected void initVars(Instances data) { super.initVars(data); m_kernelEvals = 0; // take the first string attribute m_strAttr = -1; for (int i = 0; i < data.numAttributes(); i++) { if (i == data.classIndex()) continue; if (data.attribute(i).type() == Attribute.STRING) { m_strAttr = i; break; } } m_numInsts = m_data.numInstances(); m_storage = new double[m_cacheSize]; m_keys = new long[m_cacheSize]; m_powersOflambda = calculatePowersOfLambda(); } /** * Returns the Capabilities of this kernel. * * @return the capabilities of this object * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); result.enable(Capability.STRING_ATTRIBUTES); result.enableAllClasses(); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * builds the kernel with the given data. * * @param data the data to base the kernel on * @throws Exception if something goes wrong, e.g., the data does not * consist of one string attribute and the class */ public void buildKernel(Instances data) throws Exception { super.buildKernel(data); } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
48,083
30.822634
201
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/lazy/AttributeFilterBridge.java
package weka.classifiers.lazy;

import java.util.Arrays;

import weka.attributeSelection.*;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;

/* 23/7/11: ajb
 * This class bridges the two weka components required to select a subset of
 * attributes:
 *
 * 1. weka.attributeSelection.ASEvaluation -- abstract base class for the subtypes
 *    AttributeEvaluator (evaluates attributes individually), concrete subtypes:
 *      ChiSquaredAttributeEval, GainRatioAttributeEval, InfoGainAttributeEval,
 *      OneRAttributeEval, ReliefFAttributeEval, SVMAttributeEval,
 *      SymmetricalUncertAttributeEval, UnsupervisedAttributeEvaluator
 *    SubsetEvaluator (evaluates a subset):
 *      CfsSubsetEval, ConsistencySubsetEval, HoldOutSubsetEvaluator,
 *      UnsupervisedSubsetEvaluator, WrapperSubsetEval
 *
 * 2. weka.attributeSelection.ASSearch -- uses the ASEvaluation to choose a
 *    subset of attributes: BestFirst, ExhaustiveSearch, GeneticSearch,
 *    GreedyStepwise, RaceSearch, RandomSearch, Ranker, RankSearch
 *
 * 23/7/11: Currently throws an Exception if you combine a single-attribute
 * ranker (AttributeEvaluator subclass) with a subset search technique (all of
 * ASSearch apart from Ranker). Should deal with this exception; there is no
 * logic in using anything but Ranker with a single-attribute evaluator.
 */
public class AttributeFilterBridge {

  // Scores attributes/subsets. Note: the parameters for the stopping criteria
  // of the search vary between implementations, so they should be set outside
  // of this class.
  ASEvaluation eval;

  // The indexes of the original attributes to keep.
  int[] attsToKeep;
  // Sorted array of all attribute indexes returned by the search
  int[] allAtts;
  private ASSearch search;
  private Instances data;
  // Proportion of attributes to retain (default 20%)
  protected double prop=0.2;
  // Absolute number of attributes to retain
  protected int n=0;
  // If true, n is recomputed from prop at filter time; otherwise n is used as-is
  private boolean useProp=false;

  // You have to specify either the data set or the ASEvaluation and ASSearch
  // at creation, so the no-arg constructor is private (used only by makeCopy).
  private AttributeFilterBridge(){}

  public AttributeFilterBridge(Instances d){
    // Search defaults to 10% (well, prop=0.2 -> 20%) of the data set using
    // information gain with a Ranker search.
    data =d;
    eval=new InfoGainAttributeEval();
    Ranker r=new Ranker();
    n=(int)(prop*data.numAttributes());
    if(n==0)
      n++;
    // Note this does not seem to work, so we fix it by just selecting a
    // subset after generation.
    r.setNumToSelect(n);
    search=r;
  }

  // NOTE(review): only search and eval are copied; n/prop/useProp/data revert
  // to their defaults in the copy -- confirm this is intended.
  public AttributeFilterBridge makeCopy(){
    AttributeFilterBridge newAF=new AttributeFilterBridge();
    newAF.search=search;
    newAF.eval=eval;
    return newAF;
  }

  public AttributeFilterBridge(ASEvaluation e,ASSearch s){
    eval=e;
    search=s;
  }

  // Fix the absolute number of attributes to keep (disables proportion mode)
  public void setNosToKeep(int nos){
    useProp=false;
    n=nos;
    if(data!=null)
      prop=((double)n)/(data.numAttributes()-1);
  }

  // Fix the proportion of attributes to keep (enables proportion mode)
  public void setProportionToKeep(double p){
    useProp=true;
    prop=p;
    if(data!=null)
      n=(int)(prop*(data.numAttributes()-1));
  }

  // Filter the dataset supplied at construction; returns null if none was set
  public Instances filter(){
    if(data!=null)
      return filter(data);
    return null;
  }

  // Runs the attribute search on d and returns a copy of d with all
  // non-selected attributes deleted. Assumes the class attribute is the last
  // attribute (the numAttributes()-1 bounds never delete it) -- TODO confirm.
  public Instances filter(Instances d){
    data=d;
    Instances newD=d;
    int[] atts;
    try{
      //Build evaluator
      eval.buildEvaluator(d);
      //Select attributes
      allAtts=search.search(eval,d);
      if(useProp)
        n=(int)(prop*(d.numAttributes()-1));
      if(n==0)
        n++;
      // keep only the top n of the ranked attributes
      atts=new int[n];
      if(n<allAtts.length)
        System.arraycopy(allAtts, 0, atts, 0, n);
      else
        atts=allAtts;
      //Sort
      Arrays.sort(atts);
      //Create clone data set, then remove attributes
      newD=new Instances(d);
      // two-pointer walk: dataPos tracks the position in the shrinking copy,
      // atts[nosKept]-nosDeleted is where the next kept attribute now sits
      int nosDeleted=0;
      int nosKept=0;
      int dataPos=0;
      //Advance to the next to keep
      while(dataPos<newD.numAttributes()-1 && nosKept<atts.length){
        while(dataPos!=atts[nosKept]-nosDeleted && dataPos<newD.numAttributes()-1){
          newD.deleteAttributeAt(dataPos);
          nosDeleted++;
        }
        nosKept++;
        dataPos++;
      }
      // delete everything after the last kept attribute (except the class)
      while(dataPos<newD.numAttributes()-1)
        newD.deleteAttributeAt(dataPos);
      attsToKeep=atts;
    }catch(Exception e){
      // NOTE(review): exits the JVM on any failure -- heavy-handed for a
      // library class, but preserved as-is.
      System.out.println("Exception thrown in AttributeFilterBridge ="+e);
      e.printStackTrace();
      System.exit(0);
    }
    return newD;
  }

  // Applies the attribute selection recorded by filter(...) to a single
  // instance, using the same two-pointer deletion walk. Requires that
  // filter(...) has been called first (attsToKeep must be set).
  public Instance filterInstance(Instance ins){
    int nosDeleted=0;
    int nosKept=0;
    int dataPos=0;
    Instance newIns=new DenseInstance(ins);
    //Advance to the next to keep
    while(dataPos<newIns.numAttributes()-1 && nosKept<attsToKeep.length){
      while(dataPos!=attsToKeep[nosKept]-nosDeleted && dataPos<newIns.numAttributes()-1){
        newIns.deleteAttributeAt(dataPos);
        nosDeleted++;
      }
      nosKept++;
      dataPos++;
    }
    while(dataPos<newIns.numAttributes()-1)
      newIns.deleteAttributeAt(dataPos);
    return newIns;
  }

  public String toString(){
    String str="\n Attributes retained =";
    for(int i=0;i<attsToKeep.length;i++)
      str+=" "+attsToKeep[i];
    return str;
  }

  /**
   * So this below is to generate different sets from the same ranking. Usage:
   *   AttributeFilterBridge af = new AttributeFilterBridge();
   *   // set eval and search if required
   *   af.rankAttributes(data);
   *   double prop = 0.5;  // proportion of attributes to keep
   *   Instances fTrain = af.filterBest(prop);  // will not re-run the ranking
   **/
  public void rankAttributes(Instances d){
    data=d;
    try{
      //Build evaluator
      eval.buildEvaluator(d);
      //Select attributes
      allAtts=search.search(eval,d);
      //Sort
      Arrays.sort(allAtts);
    }catch(Exception e){
      e.printStackTrace();
      System.out.println(" Exception in trank atts");
      System.exit(0);
    }
  }

  /*
   * A commented-out rankAttributes(double p) variant lived here: it re-used
   * the stored ranking (allAtts) to build a filtered copy of the data for a
   * new proportion without re-running the search, using the same two-pointer
   * deletion walk as filter(...). It was left unfinished by the original
   * author; see version control for the full text.
   */

  /* This will test the information gain scores and that the correct
   * attributes are retained */
  public static void testCorrectness(){
  }

  public static void main(String[] args){
    /*
     * Manual test scaffolding, entirely commented out by the original author.
     * It loaded Beef_TRAIN and irisSmall from a local path and checked:
     * 1. the scoring is correct (delegated to the evaluator, so not really
     *    testable here),
     * 2. setProportionToKeep / setNosToKeep retain the expected counts
     *    (e.g. 10% of Beef's 470 attributes -> 47),
     * 3. the rank order list is sorted correctly,
     * 4. the correct attributes are retained.
     * See version control for the full text.
     */
    //2. Check the correct attributes are being removed
  }
}
8,868
29.477663
149
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/lazy/IB1.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * IB1.java
 * Copyright (C) 1999 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.lazy;

import weka.classifiers.Classifier;
import weka.classifiers.UpdateableClassifier;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;

import java.util.Enumeration;

import weka.classifiers.AbstractClassifier;

/**
 <!-- globalinfo-start -->
 * Nearest-neighbour classifier. Uses normalized Euclidean distance to find
 * the training instance closest to the given test instance, and predicts the
 * same class as this training instance. If multiple instances have the same
 * (smallest) distance to the test instance, the first one found is used.<br/>
 * <br/>
 * For more information, see <br/>
 * <br/>
 * D. Aha, D. Kibler (1991). Instance-based learning algorithms. Machine
 * Learning. 6:37-66.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;article{Aha1991,
 *    author = {D. Aha and D. Kibler},
 *    journal = {Machine Learning},
 *    pages = {37-66},
 *    title = {Instance-based learning algorithms},
 *    volume = {6},
 *    year = {1991}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 <!-- options-end -->
 *
 * @author Stuart Inglis (singlis@cs.waikato.ac.nz)
 * @author Len Trigg (trigg@cs.waikato.ac.nz)
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 5525 $
 */
public class IB1
  extends AbstractClassifier
  implements UpdateableClassifier, TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = -6152184127304895851L;

  /** The training instances used for classification. */
  private Instances m_Train;

  /** The minimum values for numeric attributes. */
  private double [] m_MinArray;

  /** The maximum values for numeric attributes. */
  private double [] m_MaxArray;

  /**
   * Returns a string describing classifier
   * @return a description suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "Nearest-neighbour classifier. Uses normalized Euclidean distance to "
      + "find the training instance closest to the given test instance, and predicts "
      + "the same class as this training instance. If multiple instances have "
      + "the same (smallest) distance to the test instance, the first one found is "
      + "used.\n\n"
      + "For more information, see \n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.ARTICLE);
    result.setValue(Field.AUTHOR, "D. Aha and D. Kibler");
    result.setValue(Field.YEAR, "1991");
    result.setValue(Field.TITLE, "Instance-based learning algorithms");
    result.setValue(Field.JOURNAL, "Machine Learning");
    result.setValue(Field.VOLUME, "6");
    result.setValue(Field.PAGES, "37-66");

    return result;
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    // instances
    result.setMinimumNumberInstances(0);

    return result;
  }

  /**
   * Generates the classifier. Takes a defensive copy of the data (with
   * missing-class instances removed) and initializes the per-attribute
   * min/max arrays used for distance normalization.
   *
   * @param instances set of instances serving as training data
   * @throws Exception if the classifier has not been generated successfully
   */
  public void buildClassifier(Instances instances) throws Exception {

    // can classifier handle the data?
    getCapabilities().testWithFail(instances);

    // remove instances with missing class
    instances = new Instances(instances);
    instances.deleteWithMissingClass();

    m_Train = new Instances(instances, 0, instances.numInstances());

    // min/max start as NaN ("unseen") and are filled in by updateMinMax
    m_MinArray = new double [m_Train.numAttributes()];
    m_MaxArray = new double [m_Train.numAttributes()];
    for (int i = 0; i < m_Train.numAttributes(); i++) {
      m_MinArray[i] = m_MaxArray[i] = Double.NaN;
    }
    Enumeration enu = m_Train.enumerateInstances();
    while (enu.hasMoreElements()) {
      updateMinMax((Instance) enu.nextElement());
    }
  }

  /**
   * Updates the classifier.
* * @param instance the instance to be put into the classifier * @throws Exception if the instance could not be included successfully */ public void updateClassifier(Instance instance) throws Exception { if (m_Train.equalHeaders(instance.dataset()) == false) { throw new Exception("Incompatible instance types"); } if (instance.classIsMissing()) { return; } m_Train.add(instance); updateMinMax(instance); } /** * Classifies the given test instance. * * @param instance the instance to be classified * @return the predicted class for the instance * @throws Exception if the instance can't be classified */ public double classifyInstance(Instance instance) throws Exception { if (m_Train.numInstances() == 0) { throw new Exception("No training instances!"); } double distance, minDistance = Double.MAX_VALUE, classValue = 0; updateMinMax(instance); Enumeration enu = m_Train.enumerateInstances(); while (enu.hasMoreElements()) { Instance trainInstance = (Instance) enu.nextElement(); if (!trainInstance.classIsMissing()) { distance = distance(instance, trainInstance); if (distance < minDistance) { minDistance = distance; classValue = trainInstance.classValue(); } } } return classValue; } /** * Returns a description of this classifier. * * @return a description of this classifier as a string. 
*/ public String toString() { return ("IB1 classifier"); } /** * Calculates the distance between two instances * * @param first the first instance * @param second the second instance * @return the distance between the two given instances */ private double distance(Instance first, Instance second) { double diff, distance = 0; for(int i = 0; i < m_Train.numAttributes(); i++) { if (i == m_Train.classIndex()) { continue; } if (m_Train.attribute(i).isNominal()) { // If attribute is nominal if (first.isMissing(i) || second.isMissing(i) || ((int)first.value(i) != (int)second.value(i))) { distance += 1; } } else { // If attribute is numeric if (first.isMissing(i) || second.isMissing(i)){ if (first.isMissing(i) && second.isMissing(i)) { diff = 1; } else { if (second.isMissing(i)) { diff = norm(first.value(i), i); } else { diff = norm(second.value(i), i); } if (diff < 0.5) { diff = 1.0 - diff; } } } else { diff = norm(first.value(i), i) - norm(second.value(i), i); } distance += diff * diff; } } return distance; } /** * Normalizes a given value of a numeric attribute. * * @param x the value to be normalized * @param i the attribute's index * @return the normalized value */ private double norm(double x,int i) { if (Double.isNaN(m_MinArray[i]) || Utils.eq(m_MaxArray[i], m_MinArray[i])) { return 0; } else { return (x - m_MinArray[i]) / (m_MaxArray[i] - m_MinArray[i]); } } /** * Updates the minimum and maximum values for all the attributes * based on a new instance. 
* * @param instance the new instance */ private void updateMinMax(Instance instance) { for (int j = 0;j < m_Train.numAttributes(); j++) { if ((m_Train.attribute(j).isNumeric()) && (!instance.isMissing(j))) { if (Double.isNaN(m_MinArray[j])) { m_MinArray[j] = instance.value(j); m_MaxArray[j] = instance.value(j); } else { if (instance.value(j) < m_MinArray[j]) { m_MinArray[j] = instance.value(j); } else { if (instance.value(j) > m_MaxArray[j]) { m_MaxArray[j] = instance.value(j); } } } } } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5525 $"); } /** * Main method for testing this class. * * @param argv should contain command line arguments for evaluation * (see Evaluation). */ public static void main(String [] argv) { runClassifier(new IB1(), argv); } }
10,231
27.903955
300
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/lazy/IBk.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * IBk.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.lazy;

import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.AbstractClassifier;
import weka.classifiers.UpdateableClassifier;
import weka.classifiers.rules.ZeroR;
import weka.core.AdditionalMeasureProducer;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Tag;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;
import weka.core.neighboursearch.LinearNNSearch;
import weka.core.neighboursearch.NearestNeighbourSearch;

/**
 * K-nearest-neighbours classifier (Aha &amp; Kibler, 1991).
 *
 * <p>Can select an appropriate value of K by hold-one-out cross-validation
 * (option -X) and can weight neighbours by inverse distance (-I) or by
 * 1-distance (-F). An optional training window (-W) caps the number of
 * stored instances, dropping old ones FIFO. The nearest-neighbour search
 * algorithm is pluggable (-A, default LinearNNSearch).</p>
 *
 * Valid options: -I, -F, -K &lt;number of neighbors&gt;, -E,
 * -W &lt;window size&gt;, -X, -A &lt;search algorithm&gt;.
 *
 * @author Stuart Inglis (singlis@cs.waikato.ac.nz)
 * @author Len Trigg (trigg@cs.waikato.ac.nz)
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public class IBk
  extends AbstractClassifier
  implements OptionHandler, UpdateableClassifier, WeightedInstancesHandler,
             TechnicalInformationHandler, AdditionalMeasureProducer {

  /** for serialization. */
  static final long serialVersionUID = -3080186098777067172L;

  /** The training instances used for classification. */
  protected Instances m_Train;

  /** The number of class values (or 1 if predicting numeric). */
  protected int m_NumClasses;

  /** The class attribute type. */
  protected int m_ClassType;

  /** The number of neighbours to use for classification (currently). */
  protected int m_kNN;

  /**
   * The value of kNN provided by the user. This may differ from
   * m_kNN if cross-validation is being used.
   */
  protected int m_kNNUpper;

  /**
   * Whether the value of k selected by cross validation has
   * been invalidated by a change in the training instances.
   */
  protected boolean m_kNNValid;

  /**
   * The maximum number of training instances allowed. When
   * this limit is reached, old training instances are removed,
   * so the training data is "windowed". Set to 0 for unlimited
   * numbers of instances.
   */
  protected int m_WindowSize;

  /** Whether the neighbours should be distance-weighted (one of the WEIGHT_* IDs). */
  protected int m_DistanceWeighting;

  /** Whether to select k by cross validation. */
  protected boolean m_CrossValidate;

  /**
   * Whether to minimise mean squared error rather than mean absolute
   * error when cross-validating on numeric prediction tasks.
   */
  protected boolean m_MeanSquared;

  /** Default ZeroR model to use when there are no training instances */
  protected ZeroR m_defaultModel;

  /** no weighting. */
  public static final int WEIGHT_NONE = 1;
  /** weight by 1/distance. */
  public static final int WEIGHT_INVERSE = 2;
  /** weight by 1-distance. */
  public static final int WEIGHT_SIMILARITY = 4;
  /** possible instance weighting methods. */
  public static final Tag [] TAGS_WEIGHTING = {
    new Tag(WEIGHT_NONE, "No distance weighting"),
    new Tag(WEIGHT_INVERSE, "Weight by 1/distance"),
    new Tag(WEIGHT_SIMILARITY, "Weight by 1-distance")
  };

  /** for nearest-neighbor search. */
  protected NearestNeighbourSearch m_NNSearch = new LinearNNSearch();

  /** The number of attributes the contribute to a prediction. */
  protected double m_NumAttributesUsed;

  /**
   * IBk classifier. Simple instance-based learner that uses the class
   * of the nearest k training instances for the class of the test
   * instances.
   *
   * @param k the number of nearest neighbors to use for prediction
   */
  public IBk(int k) {
    init();
    setKNN(k);
  }

  /**
   * IB1 classifer. Instance-based learner. Predicts the class of the
   * single nearest training instance for each test instance.
   */
  public IBk() {
    init();
  }

  /**
   * Returns a string describing classifier.
   * @return a description suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return  "K-nearest neighbours classifier. Can "
      + "select appropriate value of K based on cross-validation. Can also do "
      + "distance weighting.\n\n"
      + "For more information, see\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.ARTICLE);
    result.setValue(Field.AUTHOR, "D. Aha and D. Kibler");
    result.setValue(Field.YEAR, "1991");
    result.setValue(Field.TITLE, "Instance-based learning algorithms");
    result.setValue(Field.JOURNAL, "Machine Learning");
    result.setValue(Field.VOLUME, "6");
    result.setValue(Field.PAGES, "37-66");

    return result;
  }

  /**
   * Returns the tip text for this property.
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String KNNTipText() {
    return "The number of neighbours to use.";
  }

  /**
   * Set the number of neighbours the learner is to use.
   *
   * @param k the number of neighbours.
   */
  public void setKNN(int k) {
    m_kNN = k;
    m_kNNUpper = k;
    m_kNNValid = false;  // any previously cross-validated k is now stale
  }

  /**
   * Gets the number of neighbours the learner will use.
   *
   * @return the number of neighbours.
   */
  public int getKNN() {
    return m_kNN;
  }

  /**
   * Returns the tip text for this property.
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String windowSizeTipText() {
    return "Gets the maximum number of instances allowed in the training "
      + "pool. The addition of new instances above this value will result "
      + "in old instances being removed. A value of 0 signifies no limit "
      + "to the number of training instances.";
  }

  /**
   * Gets the maximum number of instances allowed in the training
   * pool. The addition of new instances above this value will result
   * in old instances being removed. A value of 0 signifies no limit
   * to the number of training instances.
   *
   * @return Value of WindowSize.
   */
  public int getWindowSize() {
    return m_WindowSize;
  }

  /**
   * Sets the maximum number of instances allowed in the training
   * pool. The addition of new instances above this value will result
   * in old instances being removed. A value of 0 signifies no limit
   * to the number of training instances.
   *
   * @param newWindowSize Value to assign to WindowSize.
   */
  public void setWindowSize(int newWindowSize) {
    m_WindowSize = newWindowSize;
  }

  /**
   * Returns the tip text for this property.
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String distanceWeightingTipText() {
    return "Gets the distance weighting method used.";
  }

  /**
   * Gets the distance weighting method used. Will be one of
   * WEIGHT_NONE, WEIGHT_INVERSE, or WEIGHT_SIMILARITY
   *
   * @return the distance weighting method used.
   */
  public SelectedTag getDistanceWeighting() {
    return new SelectedTag(m_DistanceWeighting, TAGS_WEIGHTING);
  }

  /**
   * Sets the distance weighting method used. Values other than
   * WEIGHT_NONE, WEIGHT_INVERSE, or WEIGHT_SIMILARITY will be ignored.
   *
   * @param newMethod the distance weighting method to use
   */
  public void setDistanceWeighting(SelectedTag newMethod) {
    if (newMethod.getTags() == TAGS_WEIGHTING) {
      m_DistanceWeighting = newMethod.getSelectedTag().getID();
    }
  }

  /**
   * Returns the tip text for this property.
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String meanSquaredTipText() {
    return "Whether the mean squared error is used rather than mean "
      + "absolute error when doing cross-validation for regression problems.";
  }

  /**
   * Gets whether the mean squared error is used rather than mean
   * absolute error when doing cross-validation.
   *
   * @return true if so.
   */
  public boolean getMeanSquared() {
    return m_MeanSquared;
  }

  /**
   * Sets whether the mean squared error is used rather than mean
   * absolute error when doing cross-validation.
   *
   * @param newMeanSquared true if so.
   */
  public void setMeanSquared(boolean newMeanSquared) {
    m_MeanSquared = newMeanSquared;
  }

  /**
   * Returns the tip text for this property.
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String crossValidateTipText() {
    return "Whether hold-one-out cross-validation will be used "
      + "to select the best k value.";
  }

  /**
   * Gets whether hold-one-out cross-validation will be used
   * to select the best k value.
   *
   * @return true if cross-validation will be used.
   */
  public boolean getCrossValidate() {
    return m_CrossValidate;
  }

  /**
   * Sets whether hold-one-out cross-validation will be used
   * to select the best k value.
   *
   * @param newCrossValidate true if cross-validation should be used.
   */
  public void setCrossValidate(boolean newCrossValidate) {
    m_CrossValidate = newCrossValidate;
  }

  /**
   * Returns the tip text for this property.
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String nearestNeighbourSearchAlgorithmTipText() {
    return "The nearest neighbour search algorithm to use "
      + "(Default: weka.core.neighboursearch.LinearNNSearch).";
  }

  /**
   * Returns the current nearestNeighbourSearch algorithm in use.
   * @return the NearestNeighbourSearch algorithm currently in use.
   */
  public NearestNeighbourSearch getNearestNeighbourSearchAlgorithm() {
    return m_NNSearch;
  }

  /**
   * Sets the nearestNeighbourSearch algorithm to be used for finding nearest
   * neighbour(s).
   * @param nearestNeighbourSearchAlgorithm - The NearestNeighbourSearch class.
   */
  public void setNearestNeighbourSearchAlgorithm(NearestNeighbourSearch nearestNeighbourSearchAlgorithm) {
    m_NNSearch = nearestNeighbourSearchAlgorithm;
  }

  /**
   * Get the number of training instances the classifier is currently using.
   *
   * @return the number of training instances the classifier is currently using
   */
  public int getNumTraining() {
    return m_Train.numInstances();
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.NUMERIC_CLASS);
    result.enable(Capability.DATE_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    // instances
    result.setMinimumNumberInstances(0);

    return result;
  }

  /**
   * Generates the classifier: stores the (possibly windowed) training data,
   * initializes the nearest-neighbour search structure and the fallback
   * ZeroR model used when no training instances are available.
   *
   * @param instances set of instances serving as training data
   * @throws Exception if the classifier has not been generated successfully
   */
  public void buildClassifier(Instances instances) throws Exception {

    // can classifier handle the data?
    getCapabilities().testWithFail(instances);

    // remove instances with missing class
    instances = new Instances(instances);
    instances.deleteWithMissingClass();

    m_NumClasses = instances.numClasses();
    m_ClassType = instances.classAttribute().type();
    m_Train = new Instances(instances, 0, instances.numInstances());

    // Throw away initial instances until within the specified window size
    if ((m_WindowSize > 0) && (instances.numInstances() > m_WindowSize)) {
      m_Train = new Instances(m_Train,
                              m_Train.numInstances() - m_WindowSize,
                              m_WindowSize);
    }

    // count the attributes that can contribute to a distance
    m_NumAttributesUsed = 0.0;
    for (int i = 0; i < m_Train.numAttributes(); i++) {
      if ((i != m_Train.classIndex())
          && (m_Train.attribute(i).isNominal() || m_Train.attribute(i).isNumeric())) {
        m_NumAttributesUsed += 1.0;
      }
    }

    m_NNSearch.setInstances(m_Train);

    // Invalidate any currently cross-validation selected k
    m_kNNValid = false;

    m_defaultModel = new ZeroR();
    m_defaultModel.buildClassifier(instances);
  }

  /**
   * Adds the supplied instance to the training set, keeping the search
   * structure and the training window up to date.
   *
   * @param instance the instance to add
   * @throws Exception if instance could not be incorporated
   * successfully
   */
  public void updateClassifier(Instance instance) throws Exception {

    if (m_Train.equalHeaders(instance.dataset()) == false) {
      throw new Exception("Incompatible instance types\n"
                          + m_Train.equalHeadersMsg(instance.dataset()));
    }
    if (instance.classIsMissing()) {
      return;
    }

    m_Train.add(instance);
    m_NNSearch.update(instance);
    m_kNNValid = false;
    if ((m_WindowSize > 0) && (m_Train.numInstances() > m_WindowSize)) {
      boolean deletedInstance = false;
      while (m_Train.numInstances() > m_WindowSize) {
        m_Train.delete(0);
        deletedInstance = true;
      }
      // rebuild datastructure; KDTree currently can't delete
      if (deletedInstance == true)
        m_NNSearch.setInstances(m_Train);
    }
  }

  /**
   * Calculates the class membership probabilities for the given test instance.
   *
   * @param instance the instance to be classified
   * @return predicted class probability distribution
   * @throws Exception if an error occurred during the prediction
   */
  public double [] distributionForInstance(Instance instance) throws Exception {

    if (m_Train.numInstances() == 0) {
      // no training data: fall back to the default ZeroR model
      return m_defaultModel.distributionForInstance(instance);
    }
    if ((m_WindowSize > 0) && (m_Train.numInstances() > m_WindowSize)) {
      m_kNNValid = false;
      boolean deletedInstance = false;
      while (m_Train.numInstances() > m_WindowSize) {
        m_Train.delete(0);
        // BUGFIX: the flag was previously never set here (unlike in
        // updateClassifier), so the search structure was never rebuilt
        // and kept searching over the deleted instances.
        deletedInstance = true;
      }
      // rebuild datastructure; KDTree currently can't delete
      if (deletedInstance == true)
        m_NNSearch.setInstances(m_Train);
    }

    // Select k by cross validation
    if (!m_kNNValid && (m_CrossValidate) && (m_kNNUpper >= 1)) {
      crossValidate();
    }

    m_NNSearch.addInstanceInfo(instance);

    Instances neighbours = m_NNSearch.kNearestNeighbours(instance, m_kNN);
    double [] distances = m_NNSearch.getDistances();
    double [] distribution = makeDistribution(neighbours, distances);

    return distribution;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {

    Vector newVector = new Vector(8);

    newVector.addElement(new Option(
              "\tWeight neighbours by the inverse of their distance\n"
              + "\t(use when k > 1)",
              "I", 0, "-I"));
    newVector.addElement(new Option(
              "\tWeight neighbours by 1 - their distance\n"
              + "\t(use when k > 1)",
              "F", 0, "-F"));
    newVector.addElement(new Option(
              "\tNumber of nearest neighbours (k) used in classification.\n"
              + "\t(Default = 1)",
              "K", 1, "-K <number of neighbors>"));
    newVector.addElement(new Option(
              "\tMinimise mean squared error rather than mean absolute\n"
              + "\terror when using -X option with numeric prediction.",
              "E", 0, "-E"));
    newVector.addElement(new Option(
              "\tMaximum number of training instances maintained.\n"
              + "\tTraining instances are dropped FIFO. (Default = no window)",
              "W", 1, "-W <window size>"));
    newVector.addElement(new Option(
              "\tSelect the number of nearest neighbours between 1\n"
              + "\tand the k value specified using hold-one-out evaluation\n"
              + "\ton the training data (use when k > 1)",
              "X", 0, "-X"));
    newVector.addElement(new Option(
              "\tThe nearest neighbour search algorithm to use "
              + "(default: weka.core.neighboursearch.LinearNNSearch).\n",
              "A", 0, "-A"));

    return newVector.elements();
  }

  /**
   * Parses a given list of options (see listOptions for the valid ones).
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    String knnString = Utils.getOption('K', options);
    if (knnString.length() != 0) {
      setKNN(Integer.parseInt(knnString));
    } else {
      setKNN(1);
    }

    String windowString = Utils.getOption('W', options);
    if (windowString.length() != 0) {
      setWindowSize(Integer.parseInt(windowString));
    } else {
      setWindowSize(0);
    }

    if (Utils.getFlag('I', options)) {
      setDistanceWeighting(new SelectedTag(WEIGHT_INVERSE, TAGS_WEIGHTING));
    } else if (Utils.getFlag('F', options)) {
      setDistanceWeighting(new SelectedTag(WEIGHT_SIMILARITY, TAGS_WEIGHTING));
    } else {
      setDistanceWeighting(new SelectedTag(WEIGHT_NONE, TAGS_WEIGHTING));
    }

    setCrossValidate(Utils.getFlag('X', options));
    setMeanSquared(Utils.getFlag('E', options));

    String nnSearchClass = Utils.getOption('A', options);
    if (nnSearchClass.length() != 0) {
      String nnSearchClassSpec[] = Utils.splitOptions(nnSearchClass);
      if (nnSearchClassSpec.length == 0) {
        throw new Exception("Invalid NearestNeighbourSearch algorithm "
                            + "specification string.");
      }
      String className = nnSearchClassSpec[0];
      nnSearchClassSpec[0] = "";

      setNearestNeighbourSearchAlgorithm(
        (NearestNeighbourSearch) Utils.forName(NearestNeighbourSearch.class,
                                               className,
                                               nnSearchClassSpec));
    } else {
      this.setNearestNeighbourSearchAlgorithm(new LinearNNSearch());
    }

    Utils.checkForRemainingOptions(options);
  }

  /**
   * Gets the current settings of IBk.
   *
   * @return an array of strings suitable for passing to setOptions()
   */
  public String [] getOptions() {

    String [] options = new String [11];
    int current = 0;
    options[current++] = "-K";
    options[current++] = "" + getKNN();
    options[current++] = "-W";
    options[current++] = "" + m_WindowSize;
    if (getCrossValidate()) {
      options[current++] = "-X";
    }
    if (getMeanSquared()) {
      options[current++] = "-E";
    }
    if (m_DistanceWeighting == WEIGHT_INVERSE) {
      options[current++] = "-I";
    } else if (m_DistanceWeighting == WEIGHT_SIMILARITY) {
      options[current++] = "-F";
    }
    options[current++] = "-A";
    options[current++] = m_NNSearch.getClass().getName()
      + " " + Utils.joinOptions(m_NNSearch.getOptions());

    // pad the fixed-size array with empty strings
    while (current < options.length) {
      options[current++] = "";
    }

    return options;
  }

  /**
   * Returns an enumeration of the additional measure names
   * produced by the neighbour search algorithm, plus the chosen K in case
   * cross-validation is enabled.
   *
   * @return an enumeration of the measure names
   */
  public Enumeration enumerateMeasures() {
    if (m_CrossValidate) {
      Enumeration enm = m_NNSearch.enumerateMeasures();
      Vector measures = new Vector();
      while (enm.hasMoreElements())
        measures.add(enm.nextElement());
      measures.add("measureKNN");
      return measures.elements();
    } else {
      return m_NNSearch.enumerateMeasures();
    }
  }

  /**
   * Returns the value of the named measure from the
   * neighbour search algorithm, plus the chosen K in case
   * cross-validation is enabled.
   *
   * @param additionalMeasureName the name of the measure to query for its value
   * @return the value of the named measure
   * @throws IllegalArgumentException if the named measure is not supported
   */
  public double getMeasure(String additionalMeasureName) {
    if (additionalMeasureName.equals("measureKNN"))
      return m_kNN;
    else
      return m_NNSearch.getMeasure(additionalMeasureName);
  }

  /**
   * Returns a description of this classifier.
   *
   * @return a description of this classifier as a string.
   */
  public String toString() {

    if (m_Train == null) {
      return "IBk: No model built yet.";
    }
    if (m_Train.numInstances() == 0) {
      return "Warning: no training instances - ZeroR model used.";
    }

    if (!m_kNNValid && m_CrossValidate) {
      crossValidate();
    }

    String result = "IB1 instance-based classifier\n"
      + "using " + m_kNN;

    switch (m_DistanceWeighting) {
    case WEIGHT_INVERSE:
      result += " inverse-distance-weighted";
      break;
    case WEIGHT_SIMILARITY:
      result += " similarity-weighted";
      break;
    }
    result += " nearest neighbour(s) for classification\n";

    if (m_WindowSize != 0) {
      result += "using a maximum of "
        + m_WindowSize + " (windowed) training instances\n";
    }
    return result;
  }

  /**
   * Initialise scheme variables to their defaults (k = 1, no window,
   * no distance weighting, no cross-validation).
   */
  protected void init() {

    setKNN(1);
    m_WindowSize = 0;
    m_DistanceWeighting = WEIGHT_NONE;
    m_CrossValidate = false;
    m_MeanSquared = false;
  }

  /**
   * Turn the list of nearest neighbors into a probability distribution.
   *
   * @param neighbours the list of nearest neighboring instances
   * @param distances the distances of the neighbors
   * @return the probability distribution
   * @throws Exception if computation goes wrong or has no class attribute
   */
  protected double [] makeDistribution(Instances neighbours, double[] distances)
    throws Exception {

    double total = 0, weight;
    double [] distribution = new double [m_NumClasses];

    // Set up a correction to the estimator (Laplace-style prior)
    if (m_ClassType == Attribute.NOMINAL) {
      for (int i = 0; i < m_NumClasses; i++) {
        distribution[i] = 1.0 / Math.max(1, m_Train.numInstances());
      }
      total = (double) m_NumClasses / Math.max(1, m_Train.numInstances());
    }

    for (int i = 0; i < neighbours.numInstances(); i++) {
      // Collect class counts
      Instance current = neighbours.instance(i);

      // normalize the distance by the number of contributing attributes
      distances[i] = distances[i] * distances[i];
      distances[i] = Math.sqrt(distances[i] / m_NumAttributesUsed);

      switch (m_DistanceWeighting) {
      case WEIGHT_INVERSE:
        weight = 1.0 / (distances[i] + 0.001); // to avoid div by zero
        break;
      case WEIGHT_SIMILARITY:
        weight = 1.0 - distances[i];
        break;
      default:                                 // WEIGHT_NONE:
        weight = 1.0;
        break;
      }
      weight *= current.weight();
      try {
        switch (m_ClassType) {
        case Attribute.NOMINAL:
          distribution[(int) current.classValue()] += weight;
          break;
        case Attribute.NUMERIC:
          distribution[0] += current.classValue() * weight;
          break;
        }
      } catch (Exception ex) {
        throw new Error("Data has no class attribute!");
      }
      total += weight;
    }

    // Normalise distribution
    if (total > 0) {
      Utils.normalize(distribution, total);
    }
    return distribution;
  }

  /**
   * Select the best value for k by hold-one-out cross-validation.
   * If the class attribute is nominal, classification error is
   * minimised. If the class attribute is numeric, mean absolute
   * error is minimised
   */
  protected void crossValidate() {

    try {
      if (m_NNSearch instanceof weka.core.neighboursearch.CoverTree)
        throw new Exception("CoverTree doesn't support hold-one-out "
                            + "cross-validation. Use some other NN "
                            + "method.");

      double [] performanceStats = new double [m_kNNUpper];
      double [] performanceStatsSq = new double [m_kNNUpper];

      for (int i = 0; i < m_kNNUpper; i++) {
        performanceStats[i] = 0;
        performanceStatsSq[i] = 0;
      }

      m_kNN = m_kNNUpper;
      Instance instance;
      Instances neighbours;
      double[] origDistances, convertedDistances;
      for (int i = 0; i < m_Train.numInstances(); i++) {
        if (m_Debug && (i % 50 == 0)) {
          System.err.print("Cross validating "
                           + i + "/" + m_Train.numInstances() + "\r");
        }
        instance = m_Train.instance(i);
        neighbours = m_NNSearch.kNearestNeighbours(instance, m_kNN);
        origDistances = m_NNSearch.getDistances();

        // evaluate every k from m_kNNUpper down to 1, pruning the
        // neighbour list as k shrinks
        for (int j = m_kNNUpper - 1; j >= 0; j--) {
          // Update the performance stats
          convertedDistances = new double[origDistances.length];
          System.arraycopy(origDistances, 0,
                           convertedDistances, 0, origDistances.length);
          double [] distribution = makeDistribution(neighbours,
                                                    convertedDistances);
          double thisPrediction = Utils.maxIndex(distribution);
          if (m_Train.classAttribute().isNumeric()) {
            thisPrediction = distribution[0];
            double err = thisPrediction - instance.classValue();
            performanceStatsSq[j] += err * err;   // Squared error
            performanceStats[j] += Math.abs(err); // Absolute error
          } else {
            if (thisPrediction != instance.classValue()) {
              performanceStats[j]++;              // Classification error
            }
          }
          if (j >= 1) {
            neighbours = pruneToK(neighbours, convertedDistances, j);
          }
        }
      }

      // Display the results of the cross-validation
      for (int i = 0; i < m_kNNUpper; i++) {
        if (m_Debug) {
          System.err.print("Hold-one-out performance of " + (i + 1)
                           + " neighbors ");
        }
        if (m_Train.classAttribute().isNumeric()) {
          if (m_Debug) {
            if (m_MeanSquared) {
              System.err.println("(RMSE) = "
                                 + Math.sqrt(performanceStatsSq[i]
                                             / m_Train.numInstances()));
            } else {
              System.err.println("(MAE) = "
                                 + performanceStats[i]
                                 / m_Train.numInstances());
            }
          }
        } else {
          if (m_Debug) {
            System.err.println("(%ERR) = "
                               + 100.0 * performanceStats[i]
                               / m_Train.numInstances());
          }
        }
      }

      // Check through the performance stats and select the best
      // k value (or the lowest k if more than one best)
      double [] searchStats = performanceStats;
      if (m_Train.classAttribute().isNumeric() && m_MeanSquared) {
        searchStats = performanceStatsSq;
      }
      double bestPerformance = Double.NaN;
      int bestK = 1;
      for (int i = 0; i < m_kNNUpper; i++) {
        if (Double.isNaN(bestPerformance)
            || (bestPerformance > searchStats[i])) {
          bestPerformance = searchStats[i];
          bestK = i + 1;
        }
      }
      m_kNN = bestK;
      if (m_Debug) {
        System.err.println("Selected k = " + bestK);
      }

      m_kNNValid = true;
    } catch (Exception ex) {
      throw new Error("Couldn't optimize by cross-validation: "
                      + ex.getMessage());
    }
  }

  /**
   * Prunes the list to contain the k nearest neighbors. If there are
   * multiple neighbors at the k'th distance, all will be kept.
   *
   * @param neighbours the neighbour instances.
   * @param distances the distances of the neighbours from target instance.
   * @param k the number of neighbors to keep.
   * @return the pruned neighbours.
   */
  public Instances pruneToK(Instances neighbours, double[] distances, int k) {

    if (neighbours == null || distances == null || neighbours.numInstances() == 0) {
      return null;
    }
    if (k < 1) {
      k = 1;
    }

    int currentK = 0;
    double currentDist;
    for (int i = 0; i < neighbours.numInstances(); i++) {
      currentK++;
      currentDist = distances[i];
      // only cut once we are past k AND the distance strictly increases,
      // so ties at the k'th distance are all retained
      if (currentK > k && currentDist != distances[i - 1]) {
        currentK--;
        neighbours = new Instances(neighbours, 0, currentK);
        break;
      }
    }

    return neighbours;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param argv should contain command line options (see setOptions)
   */
  public static void main(String [] argv) {
    runClassifier(new IBk(), argv);
  }
}
32,385
29.35239
133
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/lazy/KStar.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * KStar.java * Copyright (C) 1995-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.lazy; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.UpdateableClassifier; import weka.classifiers.lazy.kstar.KStarCache; import weka.classifiers.lazy.kstar.KStarConstants; import weka.classifiers.lazy.kstar.KStarNominalAttribute; import weka.classifiers.lazy.kstar.KStarNumericAttribute; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** <!-- globalinfo-start --> * K* is an instance-based classifier, that is the class of a test instance is based upon the class of those training instances similar to it, as determined by some similarity function. It differs from other instance-based learners in that it uses an entropy-based distance function.<br/> * <br/> * For more information on K*, see<br/> * <br/> * John G. 
Cleary, Leonard E. Trigg: K*: An Instance-based Learner Using an Entropic Distance Measure. In: 12th International Conference on Machine Learning, 108-114, 1995. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Cleary1995, * author = {John G. Cleary and Leonard E. Trigg}, * booktitle = {12th International Conference on Machine Learning}, * pages = {108-114}, * title = {K*: An Instance-based Learner Using an Entropic Distance Measure}, * year = {1995} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -B &lt;num&gt; * Manual blend setting (default 20%) * </pre> * * <pre> -E * Enable entropic auto-blend setting (symbolic class only) * </pre> * * <pre> -M &lt;char&gt; * Specify the missing value treatment mode (default a) * Valid options are: a(verage), d(elete), m(axdiff), n(ormal) * </pre> * <!-- options-end --> * * @author Len Trigg (len@reeltwo.com) * @author Abdelaziz Mahoui (am14@cs.waikato.ac.nz) - Java port * @version $Revision: 8034 $ */ public class KStar extends AbstractClassifier implements KStarConstants, UpdateableClassifier, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 332458330800479083L; /** The training instances used for classification. 
*/ protected Instances m_Train; /** The number of instances in the dataset */ protected int m_NumInstances; /** The number of class values */ protected int m_NumClasses; /** The number of attributes */ protected int m_NumAttributes; /** The class attribute type */ protected int m_ClassType; /** Table of random class value colomns */ protected int [][] m_RandClassCols; /** Flag turning on and off the computation of random class colomns */ protected int m_ComputeRandomCols = ON; /** Flag turning on and off the initialisation of config variables */ protected int m_InitFlag = ON; /** * A custom data structure for caching distinct attribute values * and their scale factor or stop parameter. */ protected KStarCache [] m_Cache; /** missing value treatment */ protected int m_MissingMode = M_AVERAGE; /** 0 = use specified blend, 1 = entropic blend setting */ protected int m_BlendMethod = B_SPHERE; /** default sphere of influence blend setting */ protected int m_GlobalBlend = 20; /** Define possible missing value handling methods */ public static final Tag [] TAGS_MISSING = { new Tag(M_DELETE, "Ignore the instances with missing values"), new Tag(M_MAXDIFF, "Treat missing values as maximally different"), new Tag(M_NORMAL, "Normalize over the attributes"), new Tag(M_AVERAGE, "Average column entropy curves") }; /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "K* is an instance-based classifier, that is the class of a test " + "instance is based upon the class of those training instances " + "similar to it, as determined by some similarity function. 
It differs " + "from other instance-based learners in that it uses an entropy-based " + "distance function.\n\n" + "For more information on K*, see\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "John G. Cleary and Leonard E. Trigg"); result.setValue(Field.TITLE, "K*: An Instance-based Learner Using an Entropic Distance Measure"); result.setValue(Field.BOOKTITLE, "12th International Conference on Machine Learning"); result.setValue(Field.YEAR, "1995"); result.setValue(Field.PAGES, "108-114"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @throws Exception if the classifier has not been generated successfully */ public void buildClassifier(Instances instances) throws Exception { String debug = "(KStar.buildClassifier) "; // can classifier handle the data? 
getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); m_Train = new Instances(instances, 0, instances.numInstances()); // initializes class attributes ** java-speaking! :-) ** init_m_Attributes(); } /** * Adds the supplied instance to the training set * * @param instance the instance to add * @throws Exception if instance could not be incorporated successfully */ public void updateClassifier(Instance instance) throws Exception { String debug = "(KStar.updateClassifier) "; if (m_Train.equalHeaders(instance.dataset()) == false) throw new Exception("Incompatible instance types\n" + m_Train.equalHeadersMsg(instance.dataset())); if ( instance.classIsMissing() ) return; m_Train.add(instance); // update relevant attributes ... update_m_Attributes(); } /** * Calculates the class membership probabilities for the given test instance. * * @param instance the instance to be classified * @return predicted class probability distribution * @throws Exception if an error occurred during the prediction */ public double [] distributionForInstance(Instance instance) throws Exception { String debug = "(KStar.distributionForInstance) "; double transProb = 0.0, temp = 0.0; double [] classProbability = new double[m_NumClasses]; double [] predictedValue = new double[1]; // initialization ... for (int i=0; i<classProbability.length; i++) { classProbability[i] = 0.0; } predictedValue[0] = 0.0; if (m_InitFlag == ON) { // need to compute them only once and will be used for all instances. // We are doing this because the evaluation module controls the calls. if (m_BlendMethod == B_ENTROPY) { generateRandomClassColomns(); } m_Cache = new KStarCache[m_NumAttributes]; for (int i=0; i<m_NumAttributes;i++) { m_Cache[i] = new KStarCache(); } m_InitFlag = OFF; // System.out.println("Computing..."); } // init done. 
Instance trainInstance; Enumeration enu = m_Train.enumerateInstances(); while ( enu.hasMoreElements() ) { trainInstance = (Instance)enu.nextElement(); transProb = instanceTransformationProbability(instance, trainInstance); switch ( m_ClassType ) { case Attribute.NOMINAL: classProbability[(int)trainInstance.classValue()] += transProb; break; case Attribute.NUMERIC: predictedValue[0] += transProb * trainInstance.classValue(); temp += transProb; break; } } if (m_ClassType == Attribute.NOMINAL) { double sum = Utils.sum(classProbability); if (sum <= 0.0) for (int i=0; i<classProbability.length; i++) classProbability[i] = (double) 1/ (double) m_NumClasses; else Utils.normalize(classProbability, sum); return classProbability; } else { predictedValue[0] = (temp != 0) ? predictedValue[0] / temp : 0.0; return predictedValue; } } /** * Calculate the probability of the first instance transforming into the * second instance: * the probability is the product of the transformation probabilities of * the attributes normilized over the number of instances used. * * @param first the test instance * @param second the train instance * @return transformation probability value */ private double instanceTransformationProbability(Instance first, Instance second) { String debug = "(KStar.instanceTransformationProbability) "; double transProb = 1.0; int numMissAttr = 0; for (int i = 0; i < m_NumAttributes; i++) { if (i == m_Train.classIndex()) { continue; // ignore class attribute } if (first.isMissing(i)) { // test instance attribute value is missing numMissAttr++; continue; } transProb *= attrTransProb(first, second, i); // normilize for missing values if (numMissAttr != m_NumAttributes) { transProb = Math.pow(transProb, (double)m_NumAttributes / (m_NumAttributes - numMissAttr)); } else { // weird case! 
transProb = 0.0; } } // normilize for the train dataset return transProb / m_NumInstances; } /** * Calculates the transformation probability of the indexed test attribute * to the indexed train attribute. * * @param first the test instance. * @param second the train instance. * @param col the index of the attribute in the instance. * @return the value of the transformation probability. */ private double attrTransProb(Instance first, Instance second, int col) { String debug = "(KStar.attrTransProb)"; double transProb = 0.0; KStarNominalAttribute ksNominalAttr; KStarNumericAttribute ksNumericAttr; switch ( m_Train.attribute(col).type() ) { case Attribute.NOMINAL: ksNominalAttr = new KStarNominalAttribute(first, second, col, m_Train, m_RandClassCols, m_Cache[col]); ksNominalAttr.setOptions(m_MissingMode, m_BlendMethod, m_GlobalBlend); transProb = ksNominalAttr.transProb(); ksNominalAttr = null; break; case Attribute.NUMERIC: ksNumericAttr = new KStarNumericAttribute(first, second, col, m_Train, m_RandClassCols, m_Cache[col]); ksNumericAttr.setOptions(m_MissingMode, m_BlendMethod, m_GlobalBlend); transProb = ksNumericAttr.transProb(); ksNumericAttr = null; break; } return transProb; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String missingModeTipText() { return "Determines how missing attribute values are treated."; } /** * Gets the method to use for handling missing values. Will be one of * M_NORMAL, M_AVERAGE, M_MAXDIFF or M_DELETE. * * @return the method used for handling missing values. */ public SelectedTag getMissingMode() { return new SelectedTag(m_MissingMode, TAGS_MISSING); } /** * Sets the method to use for handling missing values. Values other than * M_NORMAL, M_AVERAGE, M_MAXDIFF and M_DELETE will be ignored. * * @param newMode the method to use for handling missing values. 
*/ public void setMissingMode(SelectedTag newMode) { if (newMode.getTags() == TAGS_MISSING) { m_MissingMode = newMode.getSelectedTag().getID(); } } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector optVector = new Vector( 3 ); optVector.addElement(new Option( "\tManual blend setting (default 20%)\n", "B", 1, "-B <num>")); optVector.addElement(new Option( "\tEnable entropic auto-blend setting (symbolic class only)\n", "E", 0, "-E")); optVector.addElement(new Option( "\tSpecify the missing value treatment mode (default a)\n" +"\tValid options are: a(verage), d(elete), m(axdiff), n(ormal)\n", "M", 1,"-M <char>")); return optVector.elements(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String globalBlendTipText() { return "The parameter for global blending. Values are restricted to [0,100]."; } /** * Set the global blend parameter * @param b the value for global blending */ public void setGlobalBlend(int b) { m_GlobalBlend = b; if ( m_GlobalBlend > 100 ) { m_GlobalBlend = 100; } if ( m_GlobalBlend < 0 ) { m_GlobalBlend = 0; } } /** * Get the value of the global blend parameter * @return the value of the global blend parameter */ public int getGlobalBlend() { return m_GlobalBlend; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String entropicAutoBlendTipText() { return "Whether entropy-based blending is to be used."; } /** * Set whether entropic blending is to be used. 
* @param e true if entropic blending is to be used */ public void setEntropicAutoBlend(boolean e) { if (e) { m_BlendMethod = B_ENTROPY; } else { m_BlendMethod = B_SPHERE; } } /** * Get whether entropic blending being used * @return true if entropic blending is used */ public boolean getEntropicAutoBlend() { if (m_BlendMethod == B_ENTROPY) { return true; } return false; } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -B &lt;num&gt; * Manual blend setting (default 20%) * </pre> * * <pre> -E * Enable entropic auto-blend setting (symbolic class only) * </pre> * * <pre> -M &lt;char&gt; * Specify the missing value treatment mode (default a) * Valid options are: a(verage), d(elete), m(axdiff), n(ormal) * </pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String debug = "(KStar.setOptions)"; String blendStr = Utils.getOption('B', options); if (blendStr.length() != 0) { setGlobalBlend(Integer.parseInt(blendStr)); } setEntropicAutoBlend(Utils.getFlag('E', options)); String missingModeStr = Utils.getOption('M', options); if (missingModeStr.length() != 0) { switch ( missingModeStr.charAt(0) ) { case 'a': setMissingMode(new SelectedTag(M_AVERAGE, TAGS_MISSING)); break; case 'd': setMissingMode(new SelectedTag(M_DELETE, TAGS_MISSING)); break; case 'm': setMissingMode(new SelectedTag(M_MAXDIFF, TAGS_MISSING)); break; case 'n': setMissingMode(new SelectedTag(M_NORMAL, TAGS_MISSING)); break; default: setMissingMode(new SelectedTag(M_AVERAGE, TAGS_MISSING)); } } Utils.checkForRemainingOptions(options); } /** * Gets the current settings of K*. 
* * @return an array of strings suitable for passing to setOptions() */ public String [] getOptions() { // -B <num> -E -M <char> String [] options = new String [ 5 ]; int itr = 0; options[itr++] = "-B"; options[itr++] = "" + m_GlobalBlend; if (getEntropicAutoBlend()) { options[itr++] = "-E"; } options[itr++] = "-M"; if (m_MissingMode == M_AVERAGE) { options[itr++] = "" + "a"; } else if (m_MissingMode == M_DELETE) { options[itr++] = "" + "d"; } else if (m_MissingMode == M_MAXDIFF) { options[itr++] = "" + "m"; } else if (m_MissingMode == M_NORMAL) { options[itr++] = "" + "n"; } while (itr < options.length) { options[itr++] = ""; } return options; } /** * Returns a description of this classifier. * * @return a description of this classifier as a string. */ public String toString() { StringBuffer st = new StringBuffer(); st.append("KStar Beta Verion (0.1b).\n" +"Copyright (c) 1995-97 by Len Trigg (trigg@cs.waikato.ac.nz).\n" +"Java port to Weka by Abdelaziz Mahoui " +"(am14@cs.waikato.ac.nz).\n\nKStar options : "); String [] ops = getOptions(); for (int i=0;i<ops.length;i++) { st.append(ops[i]+' '); } return st.toString(); } /** * Main method for testing this class. * * @param argv should contain command line options (see setOptions) */ public static void main(String [] argv) { runClassifier(new KStar(), argv); } /** * Initializes the m_Attributes of the class. */ private void init_m_Attributes() { try { m_NumInstances = m_Train.numInstances(); m_NumClasses = m_Train.numClasses(); m_NumAttributes = m_Train.numAttributes(); m_ClassType = m_Train.classAttribute().type(); m_InitFlag = ON; } catch(Exception e) { e.printStackTrace(); } } /** * Updates the m_attributes of the class. */ private void update_m_Attributes() { m_NumInstances = m_Train.numInstances(); m_InitFlag = ON; } /** * Note: for Nominal Class Only! * Generates a set of random versions of the class colomn. 
*/ private void generateRandomClassColomns() { String debug = "(KStar.generateRandomClassColomns)"; Random generator = new Random(42); // Random generator = new Random(); m_RandClassCols = new int [NUM_RAND_COLS+1][]; int [] classvals = classValues(); for (int i=0; i < NUM_RAND_COLS; i++) { // generate a randomized version of the class colomn m_RandClassCols[i] = randomize(classvals, generator); } // original colomn is preserved in colomn NUM_RAND_COLS m_RandClassCols[NUM_RAND_COLS] = classvals; } /** * Note: for Nominal Class Only! * Returns an array of the class values * * @return an array of class values */ private int [] classValues() { String debug = "(KStar.classValues)"; int [] classval = new int[m_NumInstances]; for (int i=0; i < m_NumInstances; i++) { try { classval[i] = (int)m_Train.instance(i).classValue(); } catch (Exception ex) { ex.printStackTrace(); } } return classval; } /** * Returns a copy of the array with its elements randomly redistributed. * * @param array the array to randomize. * @param generator the random number generator to use * @return a copy of the array with its elements randomly redistributed. */ private int [] randomize(int [] array, Random generator) { String debug = "(KStar.randomize)"; int index; int temp; int [] newArray = new int[array.length]; System.arraycopy(array, 0, newArray, 0, array.length); for (int j = newArray.length - 1; j > 0; j--) { index = (int) ( generator.nextDouble() * (double)j ); temp = newArray[j]; newArray[j] = newArray[index]; newArray[index] = temp; } return newArray; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // class end
21,865
29.454039
289
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/lazy/LBR.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * aint with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * LBR.java * The naive Bayesian classifier provides a simple and effective approach to * classifier learning, but its attribute independence assumption is often * violated in the real world. Lazy Bayesian Rules selectively relaxes the * independence assumption, achieving lower error rates over a range of * learning tasks. LBR defers processing to classification time, making it * a highly efficient and accurate classification algorithm when small * numbers of objects are to be classified. * * For more information, see <!-- technical-plaintext-start --> * Zijian Zheng, G. Webb (2000). Lazy Learning of Bayesian Rules. Machine Learning. 4(1):53-84. 
<!-- technical-plaintext-end --> * * http://www.cm.deakin.edu.au/webb * * Copyright (C) 2001 Deakin University * School of Computing and Mathematics * Deakin University * Geelong, Vic, 3217, Australia * * Email: zhw@deakin.edu.au * */ package weka.classifiers.lazy; import weka.classifiers.Classifier; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Statistics; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.io.Serializable; import java.util.ArrayList; import weka.classifiers.AbstractClassifier; import weka.core.DenseInstance; /** <!-- globalinfo-start --> * Lazy Bayesian Rules Classifier. The naive Bayesian classifier provides a simple and effective approach to classifier learning, but its attribute independence assumption is often violated in the real world. Lazy Bayesian Rules selectively relaxes the independence assumption, achieving lower error rates over a range of learning tasks. LBR defers processing to classification time, making it a highly efficient and accurate classification algorithm when small numbers of objects are to be classified.<br/> * <br/> * For more information, see:<br/> * <br/> * Zijian Zheng, G. Webb (2000). Lazy Learning of Bayesian Rules. Machine Learning. 4(1):53-84. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;article{Zheng2000, * author = {Zijian Zheng and G. 
Webb}, * journal = {Machine Learning}, * number = {1}, * pages = {53-84}, * title = {Lazy Learning of Bayesian Rules}, * volume = {4}, * year = {2000} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Zhihai Wang (zhw@deakin.edu.au) : July 2001 implemented the algorithm * @author Jason Wells (wells@deakin.edu.au) : November 2001 added instance referencing via indexes * @version $Revision: 5525 $ */ public class LBR extends AbstractClassifier implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 5648559277738985156L; /** * Class for handling instances and the associated attributes. <p> * Enables a set of indexes to a given dataset to be created and used * with an algorithm. This reduces the memory overheads and time required * when manipulating and referencing Instances and their Attributes. 
*/ public class Indexes implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -2771490019751421307L; /** the array instance indexes **/ public boolean [] m_InstIndexes; /** the array attribute indexes **/ public boolean [] m_AttIndexes; /** the number of instances indexed **/ private int m_NumInstances; /** the number of attributes indexed **/ private int m_NumAtts; /** the array of instance indexes that are set to a either true or false **/ public int [] m_SequentialInstIndexes; /** an array of attribute indexes that are set to either true or false **/ public int [] m_SequentialAttIndexes; /** flag to check if sequential array must be rebuilt due to changes to the instance index*/ private boolean m_SequentialInstanceIndex_valid = false; /** flag to check if sequential array must be rebuilt due to changes to the attribute index */ private boolean m_SequentialAttIndex_valid = false; /** the number of instances "in use" or set to a the original value (true or false) **/ public int m_NumInstsSet; /** the number of attributes "in use" or set to a the original value (true or false) **/ public int m_NumAttsSet; /** the number of sequential instances "in use" or set to a the original value (true or false) **/ public int m_NumSeqInstsSet; /** the number of sequential attributes "in use" or set to a the original value (true or false) **/ public int m_NumSeqAttsSet; /** the Class Index for the data set **/ public int m_ClassIndex; /** * constructor * @param numInstances the number of instances in dataset * @param numAtts the number of attributes in dataset * @param value either true or false * @param classIndex Set to -1 if you want class attribute switched on or the value of the instances * class index will be switched of and the class attibute will not be considered. 
*/ public Indexes(int numInstances, int numAtts, boolean value, int classIndex) { /* to create an empty DATASET with all attributes indexed use FALSE * to create a index of all instances and attributes use TRUE */ // initialise counts m_NumInstsSet = m_NumInstances = numInstances; m_NumAttsSet = m_NumAtts = numAtts; m_InstIndexes = new boolean [(int)numInstances]; /* set all indexes to value */ int i = 0; while(i < numInstances) { m_InstIndexes[i] = value; i++; } m_AttIndexes = new boolean [(int)numAtts]; /* set all indexes to true */ i = 0; while(i < numAtts) { m_AttIndexes[i] = true; i++; } // if the value is false the dataset has no instances therefore no instances are set if(value == false) m_NumInstsSet = 0; // no sequential array has been created m_SequentialInstanceIndex_valid = false; m_SequentialAttIndex_valid = false; // switch class attr to false as the class is not used in the dataset. Set to -1 if you want the class attr included if(classIndex != -1) setAttIndex(classIndex, false); m_ClassIndex = classIndex; } /** * constructor * @param FromIndexes the object you want to copy */ public Indexes(Indexes FromIndexes) { // set counts to the FromIndexes counts m_NumInstances = FromIndexes.getNumInstances(); m_NumInstsSet = FromIndexes.m_NumInstsSet; m_NumAtts = FromIndexes.m_NumAtts; m_NumAttsSet = FromIndexes.m_NumAttsSet; m_InstIndexes = new boolean [m_NumInstances]; System.arraycopy(FromIndexes.m_InstIndexes, 0, m_InstIndexes, 0, m_NumInstances); m_AttIndexes = new boolean [(int)m_NumAtts]; System.arraycopy(FromIndexes.m_AttIndexes, 0, m_AttIndexes, 0, m_NumAtts); m_ClassIndex = FromIndexes.m_ClassIndex; m_SequentialInstanceIndex_valid = false; m_SequentialAttIndex_valid = false; } /** * * Changes the boolean value at the specified index in the InstIndexes array * * @param index the index of the instance * @param value the value to set at the specified index * */ public void setInstanceIndex(int index, boolean value) { if(index < 0 || index >= 
m_NumInstances) throw new IllegalArgumentException("Invalid Instance Index value"); // checks that the index isn't alreading set to value if(m_InstIndexes[(int)index] != value) { // set the value m_InstIndexes[(int)index] = value; // a change has been made, so sequential array is invalid m_SequentialInstanceIndex_valid = false; // change the number of values "in use" to appropriate value if(value == false) m_NumInstsSet--; else m_NumInstsSet++; } } /** * * Changes the boolean value at the specified index in the InstIndexes array * * @param Attributes array of attributes * @param value the value to set at the specified index * */ public void setAtts(int [] Attributes, boolean value) { for(int i = 0; i < m_NumAtts; i++) { m_AttIndexes[i] = !value; } for (int i = 0; i < Attributes.length; i++) { m_AttIndexes[Attributes[i]] = value; } m_NumAttsSet = Attributes.length; m_SequentialAttIndex_valid = false; } /** * * Changes the boolean value at the specified index in the InstIndexes array * * @param Instances array of instances * @param value the value to set at the specified index * */ public void setInsts(int [] Instances, boolean value) { resetInstanceIndex(!value); for (int i = 0; i < Instances.length; i++) { m_InstIndexes[Instances[i]] = value; } m_NumInstsSet = Instances.length; m_SequentialInstanceIndex_valid = false; } /** * * Changes the boolean value at the specified index in the AttIndexes array * * @param index the index of the instance * @param value the value to set at the specified index * */ public void setAttIndex(int index, boolean value) { if(index < 0 || index >= m_NumAtts) throw new IllegalArgumentException("Invalid Attribute Index value"); // checks that the index isn't alreading set to value if(m_AttIndexes[(int)index] != value) { // set the value m_AttIndexes[(int)index] = value; // a change has been made, so sparse array is invalid m_SequentialAttIndex_valid = false; // change the number of values "in use" to appropriate value if(value == false) 
m_NumAttsSet--; else m_NumAttsSet++; } } /** * * Returns the boolean value at the specified index in the Instance Index array * * @param index the index of the instance * @return the boolean value at the specified index */ public boolean getInstanceIndex(int index) { if(index < 0 || index >= m_NumInstances) throw new IllegalArgumentException("Invalid index value"); return m_InstIndexes[(int)index]; } /** * * Returns the boolean value at the specified index in the Sequential Instance Indexes array * * @param index the index of the instance * @return the requested value */ public int getSequentialInstanceIndex(int index) { if(index < 0 || index >= m_NumInstances) throw new IllegalArgumentException("Invalid index value"); return m_SequentialInstIndexes[(int)index]; } /** * * Resets the boolean value in the Instance Indexes array to a specified value * * @param value the value to set all indexes * */ public void resetInstanceIndex(boolean value) { m_NumInstsSet = m_NumInstances; for(int i = 0; i < m_NumInstances; i++) { m_InstIndexes[i] = value; } if(value == false) m_NumInstsSet = 0; m_SequentialInstanceIndex_valid = false; } /** * * Resets the boolean values in Attribute and Instance array to reflect an empty dataset withthe same attributes set as in the incoming Indexes Object * * @param FromIndexes the Indexes to be copied * */ public void resetDatasetBasedOn(Indexes FromIndexes) { resetInstanceIndex(false); resetAttIndexTo(FromIndexes); } /** * * Resets the boolean value in AttIndexes array * * @param value the value to set the attributes to * */ public void resetAttIndex(boolean value) { m_NumAttsSet = m_NumAtts; for(int i = 0; i < m_NumAtts; i++) { m_AttIndexes[i] = value; } if(m_ClassIndex != -1) setAttIndex(m_ClassIndex, false); if(value == false) m_NumAttsSet = 0; m_SequentialAttIndex_valid = false; } /** * * Resets the boolean value in AttIndexes array based on another set of Indexes * * @param FromIndexes the Indexes to be copied * */ public void 
resetAttIndexTo(Indexes FromIndexes) { System.arraycopy(FromIndexes.m_AttIndexes, 0, m_AttIndexes, 0, m_NumAtts); m_NumAttsSet = FromIndexes.getNumAttributesSet(); m_ClassIndex = FromIndexes.m_ClassIndex; m_SequentialAttIndex_valid = false; } /** * * Returns the boolean value at the specified index in the Attribute Indexes array * * @param index the index of the Instance * @return the boolean value */ public boolean getAttIndex(int index) { if(index < 0 || index >= m_NumAtts) throw new IllegalArgumentException("Invalid index value"); return m_AttIndexes[(int)index]; } /** * * Returns the boolean value at the specified index in the Sequential Attribute Indexes array * * @param index the index of the Attribute * @return the requested value */ public int getSequentialAttIndex(int index) { if(index < 0 || index >= m_NumAtts) throw new IllegalArgumentException("Invalid index value"); return m_SequentialAttIndexes[(int)index]; } /** * * Returns the number of instances "in use" * * @return the number of instances "in use" */ public int getNumInstancesSet() { return m_NumInstsSet; } /** * * Returns the number of instances in the dataset * * @return the number of instances in the dataset */ public int getNumInstances() { return m_NumInstances; } /** * * Returns the number of instances in the Sequential array * * @return the number of instances in the sequential array */ public int getSequentialNumInstances() { // will always be the number set as the sequential array is for referencing only return m_NumSeqInstsSet; } /** * * Returns the number of attributes in the dataset * * @return the number of attributes */ public int getNumAttributes() { return m_NumAtts; } /** * * Returns the number of attributes "in use" * * @return the number of attributes "in use" */ public int getNumAttributesSet() { return m_NumAttsSet; } /** * * Returns the number of attributes in the Sequential array * * @return the number of attributes in the sequentual array */ public int 
getSequentialNumAttributes() { // will always be the number set as the sequential array is for referencing only return m_NumSeqAttsSet; } /** * * Returns whether or not the Sequential Instance Index requires rebuilding due to a change * * @return true if the sequential instance index needs rebuilding */ public boolean isSequentialInstanceIndexValid() { return m_SequentialInstanceIndex_valid; } /** * * Returns whether or not the Sequential Attribute Index requires rebuilding due to a change * * @return true if the sequential attribute index needs rebuilding */ public boolean isSequentialAttIndexValid() { return m_SequentialAttIndex_valid; } /** * * Sets both the Instance and Attribute indexes to a specified value * * @param value the value for the Instance and Attribute indices */ public void setSequentialDataset(boolean value) { setSequentialInstanceIndex(value); setSequentialAttIndex(value); } /** * * A Sequential Instance index is all those Instances that are set to the specified value placed in a sequential array. * Each value in the sequential array contains the Instance index within the Indexes. * * @param value the sequential instance index */ public void setSequentialInstanceIndex(boolean value) { if(m_SequentialInstanceIndex_valid == true) return; /* needs to be recalculated */ int size; size = m_NumInstsSet; m_SequentialInstIndexes = new int [(int)size]; int j = 0; for(int i = 0; i < m_NumInstances; i++) { if(m_InstIndexes[i] == value) { m_SequentialInstIndexes[j] = i; j++; } } m_SequentialInstanceIndex_valid = true; m_NumSeqInstsSet = j; } /** * * A Sequential Attribute index is all those Attributes that are set to the specified value placed in a sequential array. 
* Each value in the sequential array contains the Attribute index within the Indexes * * @param value the sequential attribute index */ public void setSequentialAttIndex(boolean value) { if(m_SequentialAttIndex_valid == true) return; /* needs to be recalculated */ int size; size = m_NumAttsSet; m_SequentialAttIndexes = new int [(int)size]; int j = 0; for(int i = 0; i < m_NumAtts; i++) { if(m_AttIndexes[i] == value) { m_SequentialAttIndexes[j] = i; j++; } } m_SequentialAttIndex_valid = true; m_NumSeqAttsSet = j; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5525 $"); } } /* end of Indexes inner-class */ /** All the counts for nominal attributes. */ protected int [][][] m_Counts; /** All the counts for nominal attributes. */ protected int [][][] m_tCounts; /** The prior probabilities of the classes. */ protected int [] m_Priors; /** The prior probabilities of the classes. */ protected int [] m_tPriors; /** number of attributes for the dataset ***/ protected int m_numAtts; /** number of classes for dataset ***/ protected int m_numClasses; /** number of instances in dataset ***/ protected int m_numInsts; /** The set of instances used for current training. */ protected Instances m_Instances = null; /** leave-one-out errors on the training dataset. */ protected int m_Errors; /** leave-one-out error flags on the training dataaet. */ protected boolean [] m_ErrorFlags; /** best attribute's index list. maybe as output result */ protected ArrayList leftHand = new ArrayList(); /** significantly lower */ protected static final double SIGNLOWER = 0.05; /** following is defined by wangzh, * the number of instances to be classified incorrectly * on the subset. */ protected boolean [] m_subOldErrorFlags; /** the number of instances to be classified incorrectly * besides the subset. 
*/ protected int m_RemainderErrors = 0; /** the number of instance to be processed */ protected int m_Number = 0; /** the Number of Instances to be used in building a classifiers */ protected int m_NumberOfInstances = 0; /** for printing in n-fold cross validation */ protected boolean m_NCV = false; /** index of instances and attributes for the given dataset */ protected Indexes m_subInstances; /** index of instances and attributes for the given dataset */ protected Indexes tempSubInstances; /** probability values array */ protected double [] posteriorsArray; protected int bestCnt; protected int tempCnt; protected int forCnt; protected int whileCnt; /** * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Lazy Bayesian Rules Classifier. The naive Bayesian classifier " + "provides a simple and effective approach to classifier learning, " + "but its attribute independence assumption is often violated in the " + "real world. Lazy Bayesian Rules selectively relaxes the independence " + "assumption, achieving lower error rates over a range of learning " + "tasks. LBR defers processing to classification time, making it a " + "highly efficient and accurate classification algorithm when small " + "numbers of objects are to be classified.\n\n" + "For more information, see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Zijian Zheng and G. 
Webb"); result.setValue(Field.YEAR, "2000"); result.setValue(Field.TITLE, "Lazy Learning of Bayesian Rules"); result.setValue(Field.JOURNAL, "Machine Learning"); result.setValue(Field.VOLUME, "4"); result.setValue(Field.NUMBER, "1"); result.setValue(Field.PAGES, "53-84"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * For lazy learning, building classifier is only to prepare their inputs * until classification time. * * @param instances set of instances serving as training data * @throws Exception if the preparation has not been generated. */ public void buildClassifier(Instances instances) throws Exception { int attIndex, i, j; bestCnt = 0; tempCnt = 0; forCnt = 0; whileCnt = 0; // can classifier handle the data? 
getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); m_numAtts = instances.numAttributes(); m_numClasses = instances.numClasses(); m_numInsts = instances.numInstances(); // Reserve space m_Counts = new int[m_numClasses][m_numAtts][0]; m_Priors = new int[m_numClasses]; m_tCounts = new int[m_numClasses][m_numAtts][0]; m_tPriors = new int[m_numClasses]; m_subOldErrorFlags = new boolean[m_numInsts+1]; m_Instances = instances; m_subInstances = new Indexes(m_numInsts, m_numAtts, true, m_Instances.classIndex()); tempSubInstances = new Indexes(m_numInsts, m_numAtts, true, m_Instances.classIndex()); posteriorsArray = new double[m_numClasses]; // prepare arrays for (attIndex = 0; attIndex < m_numAtts; attIndex++) { Attribute attribute = (Attribute) instances.attribute(attIndex); for (j = 0; j < m_numClasses; j++) { m_Counts[j][attIndex] = new int[attribute.numValues()]; m_tCounts[j][attIndex] = new int[attribute.numValues()]; } } // Compute counts and priors for(i = 0; i < m_numInsts; i++) { Instance instance = (Instance) instances.instance(i); int classValue = (int)instance.classValue(); // pointer for more efficient access to counts matrix in loop int [][] countsPointer = m_tCounts[classValue]; for(attIndex = 0; attIndex < m_numAtts; attIndex++) { countsPointer[attIndex][(int)instance.value(attIndex)]++; } m_tPriors[classValue]++; } // Step 2: Leave-one-out on the training data set. // get m_Errors and its flags array using leave-one-out. m_ErrorFlags = new boolean[m_numInsts]; m_Errors = leaveOneOut(m_subInstances, m_tCounts, m_tPriors, m_ErrorFlags); if (m_Number == 0) { m_NumberOfInstances = m_Instances.numInstances(); } else { System.out.println(" "); System.out.println("N-Fold Cross Validation: "); m_NCV = true; } } /** * Calculates the class membership probabilities * for the given test instance. * This is the most important method for Lazy Bayesian Rule algorithm. 
* * @param testInstance the instance to be classified * @return predicted class probability distribution * @throws Exception if distribution can't be computed */ public double[] distributionForInstance(Instance testInstance) throws Exception { int inst; int subAttrIndex = 0; int subInstIndex = 0; int tempInstIndex = 0; int attributeBest; int subLocalErrors = 0; int tempErrorsBest = 0; boolean [] tempErrorFlagBest = null; int [] tempD_subsetBestInsts = null; int [] tempD_subsetBestAtts = null; Indexes subInstances = new Indexes(m_numInsts, m_numAtts, true, m_Instances.classIndex()); boolean [] subLocalErrorFlags = new boolean [(int)subInstances.getNumInstances()+1]; // Step 2': Get localErrors, localErrorFlags, and training data set. int localErrors = m_Errors; boolean [] localErrorFlags = (boolean []) m_ErrorFlags.clone(); // The number of errors on New, Not on Old in the subset. int errorsNewNotOld = 0; // The number of errors on Old, Not on New in the subset. int errorsOldNotNew = 0; // Step 3: leftHand.clear(); // Step 4: Beginning Repeat. // Selecting all the attributes that can be moved to the lefthand. while (localErrors >= 5) { attributeBest = -1; whileCnt++; // Step 5: tempErrorsBest = subInstances.getNumInstancesSet() + 1; subInstances.setSequentialDataset(true); // Step 6: selecting an attribute. for (int attr = 0; attr < subInstances.m_NumSeqAttsSet; attr++){ forCnt++; subAttrIndex = subInstances.m_SequentialAttIndexes[attr]; // Step 7: get the corresponding subset. 
m_RemainderErrors = 0; // reset array to true for(int i = 0; i < m_numInsts; i++) { m_subOldErrorFlags[i] = true; } // reset indexes to reflect an empty dataset but with the same attrs as another dataset tempSubInstances.resetDatasetBasedOn(subInstances); // Get subset of the instances and its m_LastSecondErrors for(inst = 0; inst < subInstances.m_NumSeqInstsSet; inst++) { subInstIndex = subInstances.m_SequentialInstIndexes[inst]; if (m_Instances.instance(subInstIndex).value(subAttrIndex) == testInstance.value(subAttrIndex)) { // add instance to subset list tempSubInstances.setInstanceIndex(subInstIndex, true); if (localErrorFlags[subInstIndex] == false ) { m_subOldErrorFlags[subInstIndex] = false; } } else { if (localErrorFlags[subInstIndex] == false ) { m_RemainderErrors++; } } } // end of for // Step 7': if (tempSubInstances.m_NumInstsSet < subInstances.m_NumInstsSet) { // remove attribute from index tempSubInstances.setAttIndex(subAttrIndex, false); // Step 9: create a classifier on the subset. // Compute counts and priors // create sequential index of instances and attributes that are to be considered localNaiveBayes(tempSubInstances); subLocalErrors = leaveOneOut(tempSubInstances, m_Counts, m_Priors, subLocalErrorFlags); errorsNewNotOld = 0; errorsOldNotNew = 0; tempSubInstances.setSequentialDataset(true); for(int t_inst = 0; t_inst < tempSubInstances.m_NumSeqInstsSet; t_inst++) { tempInstIndex = tempSubInstances.m_SequentialInstIndexes[t_inst]; if (subLocalErrorFlags[tempInstIndex] == false) { // The number of errors on New, Not on Old in the subset. if (m_subOldErrorFlags[tempInstIndex] == true) { errorsNewNotOld ++; } } else { // The number of errors on Old, Not on New in the subset. if(m_subOldErrorFlags[tempInstIndex] == false) { errorsOldNotNew ++; } } } //end of for // Step 10 and Step 11: int tempErrors = subLocalErrors + m_RemainderErrors; // Step 12: // Step 13: stopping criteria. 
if((tempErrors < tempErrorsBest) && (binomP(errorsNewNotOld, errorsNewNotOld + errorsOldNotNew, 0.5 ) < SIGNLOWER)) { // Step 14: tempCnt++; // -------------------------------------------------- //tempD_subsetBest = new Indexes(tempSubInstances); // ------------------------------------------------------------------------------- tempSubInstances.setSequentialDataset(true); tempD_subsetBestInsts = (int []) tempSubInstances.m_SequentialInstIndexes.clone(); tempD_subsetBestAtts = (int []) tempSubInstances.m_SequentialAttIndexes.clone(); // ------------------------------------------------------------------------------- // Step 15: tempErrorsBest = tempErrors; tempErrorFlagBest = (boolean []) subLocalErrorFlags.clone(); // Step 16: attributeBest = subAttrIndex; } // end of if } // end of if } // end of main for // Step 20: if(attributeBest != -1) { bestCnt++; // Step 21: leftHand.add(testInstance.attribute(attributeBest)); // ------------------------------------------------ // Step 22: //tempD_subsetBest.setAttIndex(attributeBest, false); //subInstances = tempD_subsetBest; // ------------------------------------------------ subInstances.setInsts(tempD_subsetBestInsts, true); subInstances.setAtts(tempD_subsetBestAtts, true); subInstances.setAttIndex(attributeBest, false); // ------------------------------------------------- // Step 25: localErrors = tempErrorsBest; localErrorFlags = tempErrorFlagBest; } else { break; } } // end of while // Step 27: localNaiveBayes(subInstances); return localDistributionForInstance(testInstance, subInstances); } /** * Returns a description of the classifier. * * @return a description of the classifier as a string. 
*/ public String toString() { if (m_Instances == null) { return "Lazy Bayesian Rule: No model built yet."; } try { StringBuffer text = new StringBuffer ("=== LBR Run information ===\n\n"); text.append("Scheme: weka.classifiers.LBR\n"); text.append("Relation: " + m_Instances.attribute(m_Instances.classIndex()).name() + "\n"); text.append("Instances: "+m_Instances.numInstances()+"\n"); text.append("Attributes: "+m_Instances.numAttributes()+"\n"); // Remains are printed by Evaulation.java return text.toString(); } catch (Exception e) { e.printStackTrace(); return "Can't Print Lazy Bayes Rule Classifier!"; } } /** * Leave-one-out strategy. For a given sample data set with n instances, * using (n - 1) instances by leaving one out and tested on the single * remaining case. * This is repeated n times in turn. * The final "Error" is the sum of the instances to be classified * incorrectly. * * @param instanceIndex set of instances serving as training data. * @param counts serving as all the counts of training data. * @param priors serving as the number of instances in each class. * @param errorFlags for the errors * * @return error flag array about each instance. 
* @throws Exception if something goes wrong **/ public int leaveOneOut(Indexes instanceIndex, int [][][] counts, int [] priors, boolean [] errorFlags) throws Exception { // ###### START LEAVE ONE OUT ############# int tempClassValue; double posteriors; double sumForPriors; double sumForCounts; double max = 0; int maxIndex = 0; int AIndex, attIndex, clss; int inst; int errors = 0; int instIndex; instanceIndex.setSequentialDataset(true); int tempInstanceClassValue; int [] tempAttributeValues = new int[(int)instanceIndex.m_NumSeqAttsSet+1]; Instance tempInstance; for(inst = 0; inst < instanceIndex.m_NumSeqInstsSet; inst++) { instIndex = instanceIndex.m_SequentialInstIndexes[inst]; //get the leave-one-out instance tempInstance = (Instance) m_Instances.instance(instIndex); if (!tempInstance.classIsMissing()) { tempInstanceClassValue = (int)tempInstance.classValue(); // pointer to first index of counts matrix for efficiency int [][] countsPointer = counts[tempInstanceClassValue]; // Compute the counts and priors for (n-1) instances. 
for(attIndex = 0; attIndex < instanceIndex.m_NumSeqAttsSet; attIndex++) { AIndex = instanceIndex.m_SequentialAttIndexes[attIndex]; tempAttributeValues[attIndex] = (int)tempInstance.value(AIndex); countsPointer[AIndex][tempAttributeValues[attIndex]]--; } priors[tempInstanceClassValue]--; max = 0; maxIndex= 0; // ###### LOCAL CLASSIFY INSTANCE ########### sumForPriors = Utils.sum(priors); for (clss = 0; clss < m_numClasses; clss++) { posteriors = 0.0; posteriors = (priors[clss] + 1) / (sumForPriors + m_numClasses); countsPointer = counts[clss]; for(attIndex = 0; attIndex < instanceIndex.m_NumSeqAttsSet; attIndex++) { AIndex = instanceIndex.m_SequentialAttIndexes[attIndex]; if (!tempInstance.isMissing(AIndex)) { sumForCounts = Utils.sum(countsPointer[AIndex]); posteriors *= ((countsPointer[AIndex][tempAttributeValues[attIndex]] + 1) / (sumForCounts + (double)tempInstance.attribute(AIndex).numValues())); } } if (posteriors > max) { maxIndex = clss; max = posteriors; } } // end of for if (max > 0) { tempClassValue = maxIndex; } else { tempClassValue = (int)Utils.missingValue(); } // ###### END LOCAL CLASSIFY INSTANCE ########### // Adjudge error. Here using classIndex is incorrect, // it is index of the class attribute. if(tempClassValue == tempInstanceClassValue){ errorFlags[instIndex] = true; } else { errorFlags[instIndex] = false; errors++; } countsPointer = counts[tempInstanceClassValue]; for(attIndex = 0; attIndex < instanceIndex.m_NumSeqAttsSet; attIndex++) { AIndex = instanceIndex.m_SequentialAttIndexes[attIndex]; counts[tempInstanceClassValue][AIndex][tempAttributeValues[attIndex]]++; } priors[tempInstanceClassValue]++; } } // end of for // ###### END LEAVE ONE OUT ############# return errors; } /** * Class for building and using a simple Naive Bayes classifier. * For more information, see<p> * * Richard Duda and Peter Hart (1973).<i>Pattern * Classification and Scene Analysis</i>. Wiley, New York. * * This method only get m_Counts and m_Priors. 
* * @param instanceIndex set of instances serving as training data * @throws Exception if m_Counts and m_Priors have not been * generated successfully */ public void localNaiveBayes(Indexes instanceIndex) throws Exception { int attIndex = 0; int i, AIndex; int attVal = 0; int classVal = 0; Instance instance; instanceIndex.setSequentialDataset(true); // reset local counts for(classVal = 0; classVal < m_numClasses; classVal++) { // counts pointer mcTimesaver int [][] countsPointer1 = m_Counts[classVal]; for(attIndex = 0; attIndex < m_numAtts; attIndex++) { Attribute attribute = m_Instances.attribute(attIndex); // love those pointers for saving time int [] countsPointer2 = countsPointer1[attIndex]; for(attVal = 0; attVal < attribute.numValues(); attVal++) { countsPointer2[attVal] = 0; } } m_Priors[classVal] = 0; } for(i = 0; i < instanceIndex.m_NumSeqInstsSet; i++) { instance = (Instance) m_Instances.instance(instanceIndex.m_SequentialInstIndexes[i]); for(attIndex = 0; attIndex < instanceIndex.m_NumSeqAttsSet; attIndex++) { AIndex = instanceIndex.m_SequentialAttIndexes[attIndex]; m_Counts[(int)instance.classValue()][AIndex][(int)instance.value(AIndex)]++; } m_Priors[(int)instance.classValue()]++; } } /** * Calculates the class membership probabilities. * for the given test instance. * * @param instance the instance to be classified * @param instanceIndex * * @return predicted class probability distribution * @throws Exception if distribution can't be computed */ public double[] localDistributionForInstance(Instance instance, Indexes instanceIndex) throws Exception { double sumForPriors = 0; double sumForCounts = 0; int attIndex, AIndex; int numClassesOfInstance = instance.numClasses(); sumForPriors = 0; sumForCounts = 0; instanceIndex.setSequentialDataset(true); // Calculate all of conditional probabilities. 
sumForPriors = Utils.sum(m_Priors) + numClassesOfInstance; for (int j = 0; j < numClassesOfInstance; j++) { // pointer to counts to make access more efficient in loop int [][] countsPointer = m_Counts[j]; posteriorsArray[j] = (m_Priors[j] + 1) / (sumForPriors); for(attIndex = 0; attIndex < instanceIndex.m_NumSeqAttsSet; attIndex++) { AIndex = instanceIndex.m_SequentialAttIndexes[attIndex]; sumForCounts = Utils.sum(countsPointer[AIndex]); if (!instance.isMissing(AIndex)) { posteriorsArray[j] *= ((countsPointer[AIndex][(int)instance.value(AIndex)] + 1) / (sumForCounts + (double)instance.attribute(AIndex).numValues())); } } } // Normalize probabilities Utils.normalize(posteriorsArray); return posteriorsArray; } /** * Significance test * binomp: * * @param r * @param n * @param p * @return returns the probability of obtaining r or fewer out of n * if the probability of an event is p. * @throws Exception if computation fails */ public double binomP(double r, double n, double p) throws Exception { if (n == r) return 1.0; return Statistics.incompleteBeta(n-r, r+1.0, 1.0-p); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5525 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new LBR(), argv); } }
40,958
32.004835
507
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/lazy/LWL.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LWL.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.lazy; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.Classifier; import weka.classifiers.SingleClassifierEnhancer; import weka.classifiers.UpdateableClassifier; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.neighboursearch.LinearNNSearch; import weka.core.neighboursearch.NearestNeighbourSearch; /** <!-- globalinfo-start --> * Locally weighted learning. Uses an instance-based algorithm to assign instance weights which are then used by a specified WeightedInstancesHandler.<br/> * Can do classification (e.g. using naive Bayes) or regression (e.g. using linear regression).<br/> * <br/> * For more info, see<br/> * <br/> * Eibe Frank, Mark Hall, Bernhard Pfahringer: Locally Weighted Naive Bayes. In: 19th Conference in Uncertainty in Artificial Intelligence, 249-256, 2003.<br/> * <br/> * C. Atkeson, A. Moore, S. 
Schaal (1996). Locally weighted learning. AI Review.. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Frank2003, * author = {Eibe Frank and Mark Hall and Bernhard Pfahringer}, * booktitle = {19th Conference in Uncertainty in Artificial Intelligence}, * pages = {249-256}, * publisher = {Morgan Kaufmann}, * title = {Locally Weighted Naive Bayes}, * year = {2003} * } * * &#64;article{Atkeson1996, * author = {C. Atkeson and A. Moore and S. Schaal}, * journal = {AI Review}, * title = {Locally weighted learning}, * year = {1996} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -A * The nearest neighbour search algorithm to use (default: weka.core.neighboursearch.LinearNNSearch). * </pre> * * <pre> -K &lt;number of neighbours&gt; * Set the number of neighbours used to set the kernel bandwidth. * (default all)</pre> * * <pre> -U &lt;number of weighting method&gt; * Set the weighting kernel shape to use. 0=Linear, 1=Epanechnikov, * 2=Tricube, 3=Inverse, 4=Gaussian. * (default 0 = Linear)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.DecisionStump)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Ashraf M. Kibriya (amk14[at-the-rate]cs[dot]waikato[dot]ac[dot]nz) * @version $Revision: 8034 $ */ public class LWL extends SingleClassifierEnhancer implements UpdateableClassifier, WeightedInstancesHandler, TechnicalInformationHandler { /** for serialization. 
*/ static final long serialVersionUID = 1979797405383665815L; /** The training instances used for classification. */ protected Instances m_Train; /** The number of neighbours used to select the kernel bandwidth. */ protected int m_kNN = -1; /** The weighting kernel method currently selected. */ protected int m_WeightKernel = LINEAR; /** True if m_kNN should be set to all instances. */ protected boolean m_UseAllK = true; /** The nearest neighbour search algorithm to use. * (Default: weka.core.neighboursearch.LinearNNSearch) */ protected NearestNeighbourSearch m_NNSearch = new LinearNNSearch(); /** The available kernel weighting methods. */ public static final int LINEAR = 0; public static final int EPANECHNIKOV = 1; public static final int TRICUBE = 2; public static final int INVERSE = 3; public static final int GAUSS = 4; public static final int CONSTANT = 5; /** a ZeroR model in case no model can be built from the data. */ protected Classifier m_ZeroR; /** * Returns a string describing classifier. * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Locally weighted learning. Uses an instance-based algorithm to " + "assign instance weights which are then used by a specified " + "WeightedInstancesHandler.\n" + "Can do classification (e.g. using naive Bayes) or regression " + "(e.g. using linear regression).\n\n" + "For more info, see\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Eibe Frank and Mark Hall and Bernhard Pfahringer"); result.setValue(Field.YEAR, "2003"); result.setValue(Field.TITLE, "Locally Weighted Naive Bayes"); result.setValue(Field.BOOKTITLE, "19th Conference in Uncertainty in Artificial Intelligence"); result.setValue(Field.PAGES, "249-256"); result.setValue(Field.PUBLISHER, "Morgan Kaufmann"); additional = result.add(Type.ARTICLE); additional.setValue(Field.AUTHOR, "C. Atkeson and A. Moore and S. Schaal"); additional.setValue(Field.YEAR, "1996"); additional.setValue(Field.TITLE, "Locally weighted learning"); additional.setValue(Field.JOURNAL, "AI Review"); return result; } /** * Constructor. */ public LWL() { m_Classifier = new weka.classifiers.trees.DecisionStump(); } /** * String describing default classifier. * * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.trees.DecisionStump"; } /** * Returns an enumeration of the additional measure names * produced by the neighbour search algorithm. * @return an enumeration of the measure names */ public Enumeration enumerateMeasures() { return m_NNSearch.enumerateMeasures(); } /** * Returns the value of the named measure from the * neighbour search algorithm. * @param additionalMeasureName the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException if the named measure is not supported */ public double getMeasure(String additionalMeasureName) { return m_NNSearch.getMeasure(additionalMeasureName); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector newVector = new Vector(3); newVector.addElement(new Option("\tThe nearest neighbour search " + "algorithm to use " + "(default: weka.core.neighboursearch.LinearNNSearch).\n", "A", 0, "-A")); newVector.addElement(new Option("\tSet the number of neighbours used to set" +" the kernel bandwidth.\n" +"\t(default all)", "K", 1, "-K <number of neighbours>")); newVector.addElement(new Option("\tSet the weighting kernel shape to use." +" 0=Linear, 1=Epanechnikov,\n" +"\t2=Tricube, 3=Inverse, 4=Gaussian.\n" +"\t(default 0 = Linear)", "U", 1,"-U <number of weighting method>")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -A * The nearest neighbour search algorithm to use (default: weka.core.neighboursearch.LinearNNSearch). * </pre> * * <pre> -K &lt;number of neighbours&gt; * Set the number of neighbours used to set the kernel bandwidth. * (default all)</pre> * * <pre> -U &lt;number of weighting method&gt; * Set the weighting kernel shape to use. 0=Linear, 1=Epanechnikov, * 2=Tricube, 3=Inverse, 4=Gaussian. * (default 0 = Linear)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.trees.DecisionStump)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String knnString = Utils.getOption('K', options); if (knnString.length() != 0) { setKNN(Integer.parseInt(knnString)); } else { setKNN(-1); } String weightString = Utils.getOption('U', options); if (weightString.length() != 0) { setWeightingKernel(Integer.parseInt(weightString)); } else { setWeightingKernel(LINEAR); } String nnSearchClass = Utils.getOption('A', options); if(nnSearchClass.length() != 0) { String nnSearchClassSpec[] = Utils.splitOptions(nnSearchClass); if(nnSearchClassSpec.length == 0) { throw new Exception("Invalid NearestNeighbourSearch algorithm " + "specification string."); } String className = nnSearchClassSpec[0]; nnSearchClassSpec[0] = ""; setNearestNeighbourSearchAlgorithm( (NearestNeighbourSearch) Utils.forName( NearestNeighbourSearch.class, className, nnSearchClassSpec) ); } else this.setNearestNeighbourSearchAlgorithm(new LinearNNSearch()); super.setOptions(options); } /** * Gets the current settings of the classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 6]; int current = 0; options[current++] = "-U"; options[current++] = "" + getWeightingKernel(); if ( (getKNN() == 0) && m_UseAllK) { options[current++] = "-K"; options[current++] = "-1"; } else { options[current++] = "-K"; options[current++] = "" + getKNN(); } options[current++] = "-A"; options[current++] = m_NNSearch.getClass().getName()+" "+Utils.joinOptions(m_NNSearch.getOptions()); System.arraycopy(superOptions, 0, options, current, superOptions.length); return options; } /** * Returns the tip text for this property. * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String KNNTipText() { return "How many neighbours are used to determine the width of the " + "weighting function (<= 0 means all neighbours)."; } /** * Sets the number of neighbours used for kernel bandwidth setting. * The bandwidth is taken as the distance to the kth neighbour. * * @param knn the number of neighbours included inside the kernel * bandwidth, or 0 to specify using all neighbors. */ public void setKNN(int knn) { m_kNN = knn; if (knn <= 0) { m_kNN = 0; m_UseAllK = true; } else { m_UseAllK = false; } } /** * Gets the number of neighbours used for kernel bandwidth setting. * The bandwidth is taken as the distance to the kth neighbour. * * @return the number of neighbours included inside the kernel * bandwidth, or 0 for all neighbours */ public int getKNN() { return m_kNN; } /** * Returns the tip text for this property. * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String weightingKernelTipText() { return "Determines weighting function. [0 = Linear, 1 = Epnechnikov,"+ "2 = Tricube, 3 = Inverse, 4 = Gaussian and 5 = Constant. 
"+ "(default 0 = Linear)]."; } /** * Sets the kernel weighting method to use. Must be one of LINEAR, * EPANECHNIKOV, TRICUBE, INVERSE, GAUSS or CONSTANT, other values * are ignored. * * @param kernel the new kernel method to use. Must be one of LINEAR, * EPANECHNIKOV, TRICUBE, INVERSE, GAUSS or CONSTANT. */ public void setWeightingKernel(int kernel) { if ((kernel != LINEAR) && (kernel != EPANECHNIKOV) && (kernel != TRICUBE) && (kernel != INVERSE) && (kernel != GAUSS) && (kernel != CONSTANT)) { return; } m_WeightKernel = kernel; } /** * Gets the kernel weighting method to use. * * @return the new kernel method to use. Will be one of LINEAR, * EPANECHNIKOV, TRICUBE, INVERSE, GAUSS or CONSTANT. */ public int getWeightingKernel() { return m_WeightKernel; } /** * Returns the tip text for this property. * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String nearestNeighbourSearchAlgorithmTipText() { return "The nearest neighbour search algorithm to use (Default: LinearNN)."; } /** * Returns the current nearestNeighbourSearch algorithm in use. * @return the NearestNeighbourSearch algorithm currently in use. */ public NearestNeighbourSearch getNearestNeighbourSearchAlgorithm() { return m_NNSearch; } /** * Sets the nearestNeighbourSearch algorithm to be used for finding nearest * neighbour(s). * @param nearestNeighbourSearchAlgorithm - The NearestNeighbourSearch class. */ public void setNearestNeighbourSearchAlgorithm(NearestNeighbourSearch nearestNeighbourSearchAlgorithm) { m_NNSearch = nearestNeighbourSearchAlgorithm; } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result; if (m_Classifier != null) { result = m_Classifier.getCapabilities(); } else { result = super.getCapabilities(); } result.setMinimumNumberInstances(0); // set dependencies for (Capability cap: Capability.values()) result.enableDependency(cap); return result; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @throws Exception if the classifier has not been generated successfully */ public void buildClassifier(Instances instances) throws Exception { if (!(m_Classifier instanceof WeightedInstancesHandler)) { throw new IllegalArgumentException("Classifier must be a " + "WeightedInstancesHandler!"); } // can classifier handle the data? getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); // only class? -> build ZeroR model if (instances.numAttributes() == 1) { System.err.println( "Cannot build model (only class attribute present in data!), " + "using ZeroR model instead!"); m_ZeroR = new weka.classifiers.rules.ZeroR(); m_ZeroR.buildClassifier(instances); return; } else { m_ZeroR = null; } m_Train = new Instances(instances, 0, instances.numInstances()); m_NNSearch.setInstances(m_Train); } /** * Adds the supplied instance to the training set. 
* * @param instance the instance to add * @throws Exception if instance could not be incorporated * successfully */ public void updateClassifier(Instance instance) throws Exception { if (m_Train == null) { throw new Exception("No training instance structure set!"); } else if (m_Train.equalHeaders(instance.dataset()) == false) { throw new Exception("Incompatible instance types\n" + m_Train.equalHeadersMsg(instance.dataset())); } if (!instance.classIsMissing()) { m_NNSearch.update(instance); m_Train.add(instance); } } /** * Calculates the class membership probabilities for the given test instance. * * @param instance the instance to be classified * @return preedicted class probability distribution * @throws Exception if distribution can't be computed successfully */ public double[] distributionForInstance(Instance instance) throws Exception { // default model? if (m_ZeroR != null) { return m_ZeroR.distributionForInstance(instance); } if (m_Train.numInstances() == 0) { throw new Exception("No training instances!"); } m_NNSearch.addInstanceInfo(instance); int k = m_Train.numInstances(); if( (!m_UseAllK && (m_kNN < k)) /*&& !(m_WeightKernel==INVERSE || m_WeightKernel==GAUSS)*/ ) { k = m_kNN; } Instances neighbours = m_NNSearch.kNearestNeighbours(instance, k); double distances[] = m_NNSearch.getDistances(); if (m_Debug) { System.out.println("Test Instance: "+instance); System.out.println("For "+k+" kept " + neighbours.numInstances() + " out of " + m_Train.numInstances() + " instances."); } //IF LinearNN has skipped so much that <k neighbours are remaining. 
if(k>distances.length) k = distances.length; if (m_Debug) { System.out.println("Instance Distances"); for (int i = 0; i < distances.length; i++) { System.out.println("" + distances[i]); } } // Determine the bandwidth double bandwidth = distances[k-1]; // Check for bandwidth zero if (bandwidth <= 0) { //if the kth distance is zero than give all instances the same weight for(int i=0; i < distances.length; i++) distances[i] = 1; } else { // Rescale the distances by the bandwidth for (int i = 0; i < distances.length; i++) distances[i] = distances[i] / bandwidth; } // Pass the distances through a weighting kernel for (int i = 0; i < distances.length; i++) { switch (m_WeightKernel) { case LINEAR: distances[i] = 1.0001 - distances[i]; break; case EPANECHNIKOV: distances[i] = 3/4D*(1.0001 - distances[i]*distances[i]); break; case TRICUBE: distances[i] = Math.pow( (1.0001 - Math.pow(distances[i], 3)), 3 ); break; case CONSTANT: //System.err.println("using constant kernel"); distances[i] = 1; break; case INVERSE: distances[i] = 1.0 / (1.0 + distances[i]); break; case GAUSS: distances[i] = Math.exp(-distances[i] * distances[i]); break; } } if (m_Debug) { System.out.println("Instance Weights"); for (int i = 0; i < distances.length; i++) { System.out.println("" + distances[i]); } } // Set the weights on the training data double sumOfWeights = 0, newSumOfWeights = 0; for (int i = 0; i < distances.length; i++) { double weight = distances[i]; Instance inst = (Instance) neighbours.instance(i); sumOfWeights += inst.weight(); newSumOfWeights += inst.weight() * weight; inst.setWeight(inst.weight() * weight); //weightedTrain.add(newInst); } // Rescale weights for (int i = 0; i < neighbours.numInstances(); i++) { Instance inst = neighbours.instance(i); inst.setWeight(inst.weight() * sumOfWeights / newSumOfWeights); } // Create a weighted classifier m_Classifier.buildClassifier(neighbours); if (m_Debug) { System.out.println("Classifying test instance: " + instance); 
System.out.println("Built base classifier:\n" + m_Classifier.toString()); } // Return the classifier's predictions return m_Classifier.distributionForInstance(instance); } /** * Returns a description of this classifier. * * @return a description of this classifier as a string. */ public String toString() { // only ZeroR model? if (m_ZeroR != null) { StringBuffer buf = new StringBuffer(); buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n"); buf.append(m_ZeroR.toString()); return buf.toString(); } if (m_Train == null) { return "Locally weighted learning: No model built yet."; } String result = "Locally weighted learning\n" + "===========================\n"; result += "Using classifier: " + m_Classifier.getClass().getName() + "\n"; switch (m_WeightKernel) { case LINEAR: result += "Using linear weighting kernels\n"; break; case EPANECHNIKOV: result += "Using epanechnikov weighting kernels\n"; break; case TRICUBE: result += "Using tricube weighting kernels\n"; break; case INVERSE: result += "Using inverse-distance weighting kernels\n"; break; case GAUSS: result += "Using gaussian weighting kernels\n"; break; case CONSTANT: result += "Using constant weighting kernels\n"; break; } result += "Using " + (m_UseAllK ? "all" : "" + m_kNN) + " neighbours"; return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new LWL(), argv); } }
23,588
30.326693
159
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/lazy/RSC.java
package weka.classifiers.lazy; import java.util.ArrayList; import java.util.Random; import java.util.TreeSet; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.Evaluation; import weka.core.*; /** * * @author Aaron. Implementation */ public class RSC extends AbstractClassifier implements Randomizable{ private int alpha; private NormalizableDistance distanceFunc; private TreeSet<Instance> uncoveredCases; private Instances allCases; private ArrayList<Instance> T; private ArrayList<Sphere> sphereSet; private int randSeed=100; private Random random = new Random(randSeed); private boolean crossValidateAlpha=false; public RSC() { crossValidate(true); distanceFunc = new EuclideanDistance(); } public RSC(int a) { this.alpha = a; distanceFunc = new EuclideanDistance(); } public final void crossValidate(boolean b){ crossValidateAlpha=b; } @Override public void setSeed(int seed) { random.setSeed(seed); randSeed=seed; } @Override public int getSeed() { return randSeed; } //default distance function is Euclidean @Override public void buildClassifier(Instances inst){ if(crossValidateAlpha){ //This is a REALLY inefficient way to do this cross validation, it is just a first go // Spheres are recalculated for every single fold! double bestAccuracy=0; int maxAlpha=inst.numInstances()/10; RSC r; int folds=10; for(int a=1;a<maxAlpha;a++){ //Eval r=new RSC(1); try{ Evaluation e=new Evaluation(inst); e.crossValidateModel(r, inst, folds, random); double acc=e.correct()/inst.numInstances(); if(acc>bestAccuracy){ bestAccuracy=acc; this.alpha=a; } }catch(Exception e){ e.printStackTrace(); System.exit(0); } } } sphereSet = new ArrayList(); uncoveredCases = new TreeSet<Instance>(new InstanceComparator()); distanceFunc.setInstances(inst); allCases = inst; // uncoveredCases.addAll(i); for(int j=0;j<inst.numInstances();j++) uncoveredCases.add(inst.instance(j)); //add members of allCases to covered as their covered until allCases is empty. 
while(uncoveredCases.size()>0){ //randomly pick an instance int rand = (int)(random.nextDouble()*uncoveredCases.size()); Instance[] tempArray = new Instance[uncoveredCases.size()]; uncoveredCases.toArray(tempArray); Instance temp = tempArray[rand]; uncoveredCases.remove(temp); //find closest instance that is not the same class value. Instance edge = null; double distance = Double.MAX_VALUE; for(int j=0; j<allCases.numInstances();j++){ Instance temp2 =allCases.instance(j); double tempDist = distanceFunc.distance(temp,temp2); //if its in the sphere and isn't the same class. if((tempDist <= distance) && (temp.classValue() != temp2.classValue())){ distance = tempDist; edge = temp2; } } Sphere TempSphere = new Sphere(temp,distance); //find the instances that are covered by the sphere. //i feel i could do some optimization here because there ordered? //but there ordered with respect to each other and does that mean they'll //be close togerger. Who knows? //if(uncoveredCases.size()>0){ T= new ArrayList(); T.add(edge); //find all cases that are inside the sphere. for(int j=0; j<allCases.numInstances();j++){ Instance tempInst = allCases.instance(j); double tempDist = distanceFunc.distance(temp,tempInst); //if its in the sphere and isn't itself. if((tempDist <= distance) && (tempDist != 0)){ T.add(tempInst); } } //check the number of instances covered. if(T.size()>=alpha){ for(int j=0;j<T.size();j++){ //remove from uncovered Instance temp1 =T.get(j); uncoveredCases.remove(temp1); } sphereSet.add(TempSphere); } //} } } //returns the instances classValue if its inside its sphere. Else it retursn the closest sphere edge. 
@Override public double classifyInstance(Instance i) throws Exception{ int closestSphere =0; int closestCentre=-1; double previousDistance = Double.MAX_VALUE; if(sphereSet.size() > 0){ for(int j=0;j<sphereSet.size();j++){ Sphere temp = sphereSet.get(j); double distance = distanceFunc.distance(temp.getCentre(),i); //if its inside the sphere if(distance <= temp.getRadius()){ if(closestCentre!=-1){ if(distance < distanceFunc.distance(sphereSet.get(closestCentre).getCentre(),i)) closestCentre=j; } else closestCentre =j; //return sphereSet.get(j).getCentre().classValue(); } else if(distance-temp.getRadius() <= previousDistance){ previousDistance = distance-temp.getRadius(); closestSphere = j; } //if its not, then check which sphere edge is closest. } if(closestCentre!=-1) return sphereSet.get(closestCentre).getCentre().classValue(); else return sphereSet.get(closestSphere).getCentre().classValue(); } else throw new Exception("No Spheres in the set"); } public void setDistanceFunc(NormalizableDistance in){ distanceFunc =in; } public ArrayList<Sphere> getSphereSet(){ return sphereSet; } @Override public String getRevision() { throw new UnsupportedOperationException("Not supported yet."); } public static class Sphere { private Instance centre; private double radius; public Sphere(Instance c, double r){ this.centre =c; this.radius =r; } public Instance getCentre(){ return centre; } public double getRadius(){ return radius; } } public static void main(String[] args){ System.out.println(" Test harness not implemented"); } }
7,244
31.34375
105
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/lazy/kstar/KStarCache.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * KStarCache.java * Copyright (C) 1995-2012 University of Waikato * Java port to Weka by Abdelaziz Mahoui (am14@cs.waikato.ac.nz). * */ package weka.classifiers.lazy.kstar; import java.io.Serializable; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * A class representing the caching system used to keep track of each attribute * value and its corresponding scale factor or stop parameter. * * @author Len Trigg (len@reeltwo.com) * @author Abdelaziz Mahoui (am14@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class KStarCache implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -7693632394267140678L; /** * cache table */ CacheTable m_Cache = new CacheTable(); /** * Stores the specified values in the cahce table for easy retrieval. * * @param key attribute value used key to lookup the cache table. * @param value cache parameter: attribute scale/stop parameter. * @param pmiss cache parameter: transformation probability to * attribute with missing value. */ public void store(double key, double value, double pmiss) { if ( !m_Cache.containsKey(key) ) { m_Cache.insert(key, value, pmiss); } } /** * Checks if the specified key maps with an entry in the cache table * * @param key the key to map with an entry in the hashtable. 
*/ public boolean containsKey(double key) { if ( m_Cache.containsKey(key) ) { return true; } return false; } /** * Returns the values in the cache mapped by the specified key * * @param key the key used to retrieve the table entry. */ public TableEntry getCacheValues( double key ) { if ( m_Cache.containsKey(key) ) { return m_Cache.getEntry(key); } return null; } /** * A custom hashtable class to support the caching system. * */ public class CacheTable implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -8086106452588253423L; /** The hash table data. */ private TableEntry [] m_Table; /** The total number of entries in the hash table. */ private int m_Count; /** Rehashes the table when count exceeds this threshold. */ private int m_Threshold; /** The load factor for the hashtable. */ private float m_LoadFactor; /** The default size of the hashtable */ private final int DEFAULT_TABLE_SIZE = 101; /** The default load factor for the hashtable */ private final float DEFAULT_LOAD_FACTOR = 0.75f; // private final float DEFAULT_LOAD_FACTOR = 0.5f; /** Accuracy value for equality */ private final double EPSILON = 1.0E-5; /** * Constructs a new hashtable with a default capacity and load factor. */ public CacheTable(int size, float loadFactor) { m_Table = new TableEntry[size]; m_LoadFactor = loadFactor; m_Threshold = (int)(size * loadFactor); m_Count = 0; } /** * Constructs a new hashtable with a default capacity and load factor. */ public CacheTable() { this(101, 0.75f); } /** * Tests if the specified double is a key in this hashtable. */ public boolean containsKey(double key) { TableEntry [] table = m_Table; int hash = hashCode(key); int index = (hash & 0x7FFFFFFF) % table.length; for (TableEntry e = table[index] ; e != null ; e = e.next) { if ((e.hash == hash) && (Math.abs(e.key - key) < EPSILON)) { return true; } } return false; } /** * Inserts a new entry in the hashtable using the specified key. 
* If the key already exist in the hashtable, do nothing. */ public void insert(double key, double value, double pmiss) { // Makes sure the key is not already in the hashtable. TableEntry e, ne; TableEntry [] table = m_Table; int hash = hashCode(key); int index = (hash & 0x7FFFFFFF) % table.length; // start looking along the chain for (e = table[index] ; e != null ; e = e.next) { if ((e.hash == hash) && (Math.abs(e.key - key) < EPSILON)) { return; } } // At this point, key is not in table. // Creates a new entry. ne = new TableEntry( hash, key, value, pmiss, table[index] ); // Put entry at the head of the chain. table[index] = ne; m_Count++; // Rehash the table if the threshold is exceeded if (m_Count >= m_Threshold) { rehash(); } } /** * Returns the table entry to which the specified key is mapped in * this hashtable. * @return a table entry. */ public TableEntry getEntry(double key) { TableEntry [] table = m_Table; int hash = hashCode(key); int index = (hash & 0x7FFFFFFF) % table.length; for (TableEntry e = table[index] ; e != null ; e = e.next) { if ((e.hash == hash) && (Math.abs(e.key - key) < EPSILON)) { return e; } } return null; } /** * Returns the number of keys in this hashtable. * @return the number of keys in this hashtable. */ public int size() { return m_Count; } /** * Tests if this hashtable maps no keys to values. * @return true if this hastable maps no keys to values. */ public boolean isEmpty() { return m_Count == 0; } /** * Clears this hashtable so that it contains no keys. */ public void clear() { TableEntry table[] = m_Table; for (int index = table.length; --index >= 0; ) { table[index] = null; } m_Count = 0; } /** * Rehashes the contents of the hashtable into a hashtable with a * larger capacity. This method is called automatically when the * number of keys in the hashtable exceeds this hashtable's capacity * and load factor. 
*/ private void rehash() { int oldCapacity = m_Table.length; TableEntry [] oldTable = m_Table; int newCapacity = oldCapacity * 2 + 1; TableEntry [] newTable = new TableEntry[newCapacity]; m_Threshold = (int)(newCapacity * m_LoadFactor); m_Table = newTable; TableEntry e, old; for (int i = oldCapacity ; i-- > 0 ;) { for (old = oldTable[i] ; old != null ; ) { e = old; old = old.next; int index = (e.hash & 0x7FFFFFFF) % newCapacity; e.next = newTable[index]; newTable[index] = e; } } } /** * Returns the hash code of the specified double. * @return the hash code of the specified double. */ private int hashCode(double key) { long bits = Double.doubleToLongBits(key); return (int)(bits ^ (bits >> 32)); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // CacheTable /** * Hashtable collision list. */ public class TableEntry implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = 4057602386766259138L; /** attribute value hash code */ public int hash; /** attribute value */ public double key; /** scale factor or stop parameter */ public double value; /** transformation probability to missing value */ public double pmiss; /** next table entry (separate chaining) */ public TableEntry next = null; /** Constructor */ public TableEntry(int hash, double key, double value, double pmiss, TableEntry next) { this.hash = hash; this.key = key; this.value = value; this.pmiss = pmiss; this.next = next; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // TableEntry /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // Cache
8,926
26.723602
79
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/lazy/kstar/KStarConstants.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * KStarConstants.java * Copyright (C) 1995-2012 Univeristy of Waikato * Java port to Weka by Abdelaziz Mahoui (am14@cs.waikato.ac.nz). * */ package weka.classifiers.lazy.kstar; /** * @author Len Trigg (len@reeltwo.com) * @author Abdelaziz Mahoui (am14@cs.waikato.ac.nz) * @version $Revision 1.0 $ */ public interface KStarConstants { /** Some usefull constants */ int ON = 1; int OFF = 0; int NUM_RAND_COLS = 5; double FLOOR = 0.0; double FLOOR1 = 0.1; double INITIAL_STEP = 0.05; double LOG2 = 0.693147181; double EPSILON = 1.0e-5; /** How close the root finder for numeric and nominal have to get */ int ROOT_FINDER_MAX_ITER = 40; double ROOT_FINDER_ACCURACY = 0.01; /** Blend setting modes */ int B_SPHERE = 1; /* Use sphere of influence */ int B_ENTROPY = 2; /* Use entropic blend setting */ /** Missing value handling mode */ /* Ignore the instance with the missing value */ int M_DELETE = 1; /* Treat missing values as maximally different */ int M_MAXDIFF = 2; /* Normilize over the attributes */ int M_NORMAL = 3; /* Average column entropy curves */ int M_AVERAGE = 4; }
1,904
29.238095
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/lazy/kstar/KStarNominalAttribute.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * KStarNominalAttribute.java * Copyright (C) 1995-2012 Univeristy of Waikato * Java port to Weka by Abdelaziz Mahoui (am14@cs.waikato.ac.nz). * */ package weka.classifiers.lazy.kstar; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * A custom class which provides the environment for computing the * transformation probability of a specified test instance nominal * attribute to a specified train instance nominal attribute. * * @author Len Trigg (len@reeltwo.com) * @author Abdelaziz Mahoui (am14@cs.waikato.ac.nz) * @version $Revision 1.0 $ */ public class KStarNominalAttribute implements KStarConstants, RevisionHandler { /** The training instances used for classification. 
*/
  protected Instances m_TrainSet;

  /** The test instance */
  protected Instance m_Test;

  /** The train instance */
  protected Instance m_Train;

  /** The index of the nominal attribute in the test and train instances */
  protected int m_AttrIndex;

  /** The stop parameter */
  protected double m_Stop = 1.0;

  /** Probability of the test attribute transforming into a train attribute
      with a missing value */
  protected double m_MissingProb = 1.0;

  /** Average probability of the test attribute transforming into a train
      attribute */
  protected double m_AverageProb = 1.0;

  /** Smallest probability of the test attribute transforming into a train
      attribute */
  protected double m_SmallestProb = 1.0;

  /** Number of train instances with no missing attribute values */
  protected int m_TotalCount;

  /** Distribution of the attribute value in the train dataset */
  protected int [] m_Distribution;

  /** Set of columns: each column representing a randomised version of the
      train dataset class column */
  protected int [][] m_RandClassCols;

  /** A cache for storing attribute values and their corresponding stop
      parameters */
  protected KStarCache m_Cache;

  // KStar Global settings

  /** The number of instances in the dataset */
  protected int m_NumInstances;

  /** The number of class values */
  protected int m_NumClasses;

  /** The number of attributes */
  protected int m_NumAttributes;

  /** The class attribute type */
  protected int m_ClassType;

  /** Missing value treatment (one of M_DELETE, M_NORMAL, M_MAXDIFF,
      M_AVERAGE) */
  protected int m_MissingMode = M_AVERAGE;

  /** B_SPHERE = use specified blend, B_ENTROPY = entropic blend setting */
  protected int m_BlendMethod = B_SPHERE ;

  /** default sphere of influence blend setting */
  protected int m_BlendFactor = 20;

  /**
   * Constructor.
   *
   * @param test the test instance
   * @param train the train instance
   * @param attrIndex the index of the nominal attribute to compare
   * @param trainSet the training instances
   * @param randClassCol randomised versions of the training class column
   *        (column NUM_RAND_COLS holds the original instance-to-class mapping)
   * @param cache cache of previously computed stop parameters, keyed on the
   *        test attribute value
   */
  public KStarNominalAttribute(Instance test, Instance train, int attrIndex,
                               Instances trainSet, int [][] randClassCol,
                               KStarCache cache)
  {
    m_Test = test;
    m_Train = train;
    m_AttrIndex = attrIndex;
    m_TrainSet = trainSet;
    m_RandClassCols = randClassCol;
    m_Cache = cache;
    init();
  }

  /**
   * Initializes the m_Attributes of the class.
   */
  private void init() {
    try {
      m_NumInstances = m_TrainSet.numInstances();
      m_NumClasses = m_TrainSet.numClasses();
      m_NumAttributes = m_TrainSet.numAttributes();
      m_ClassType = m_TrainSet.classAttribute().type();
    } catch(Exception e) {
      e.printStackTrace();
    }
  }

  /**
   * Calculates the probability of the indexed nominal attribute of the test
   * instance transforming into the indexed nominal attribute of the training
   * instance.
   *
   * @return the value of the transformation probability.
   */
  public double transProb() {
    String debug = "(KStarNominalAttribute.transProb) ";
    double transProb = 0.0;
    // check if the attribute value has been encountered before
    // in which case it should be in the nominal cache
    if (m_Cache.containsKey(m_Test.value(m_AttrIndex))) {
      KStarCache.TableEntry te = m_Cache.getCacheValues(m_Test.value(m_AttrIndex));
      m_Stop = te.value;
      m_MissingProb = te.pmiss;
    }
    else {
      generateAttrDistribution();
      // we have to compute the parameters
      if (m_BlendMethod == B_ENTROPY) {
        m_Stop = stopProbUsingEntropy();
      }
      else { // default is B_SPHERE
        m_Stop = stopProbUsingBlend();
      }
      // store the values in cache
      m_Cache.store( m_Test.value(m_AttrIndex), m_Stop, m_MissingProb );
    }
    // m_Stop is now available; turn it into a transformation probability
    if (m_Train.isMissing(m_AttrIndex)) {
      transProb = m_MissingProb;
    }
    else {
      try {
        // base probability of transforming into any value, plus the stop
        // probability when the two attribute values coincide
        transProb = (1.0 - m_Stop) / m_Test.attribute(m_AttrIndex).numValues();
        if ( (int)m_Test.value(m_AttrIndex) ==
             (int)m_Train.value(m_AttrIndex) )
          {
            transProb += m_Stop;
          }
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
    return transProb;
  }

  /**
   * Calculates the "stop parameter" for this attribute using
   * the entropy method: the value is computed using a root finder
   * algorithm. The method takes advantage of the calculation to
   * compute the smallest and average transformation probabilities
   * once the stop factor is obtained. It also sets the transformation
   * probability to an attribute with a missing value.
   *
   * @return the value of the stop parameter.
   */
  private double stopProbUsingEntropy() {
    String debug = "(KStarNominalAttribute.stopProbUsingEntropy)";
    if ( m_ClassType != Attribute.NOMINAL ) {
      System.err.println("Error: "+debug+" attribute class must be nominal!");
      System.exit(1);
    }
    int itcount = 0;
    double stopProb;
    double lower, upper, pstop;
    double bestminprob = 0.0, bestpsum = 0.0;
    double bestdiff = 0.0, bestpstop = 0.0;
    double currentdiff, lastdiff, stepsize, delta;
    KStarWrapper botvals = new KStarWrapper();
    KStarWrapper upvals = new KStarWrapper();
    KStarWrapper vals = new KStarWrapper();

    // Initial values for root finder
    lower = 0.0 + ROOT_FINDER_ACCURACY/2.0;
    upper = 1.0 - ROOT_FINDER_ACCURACY/2.0;

    // Find (approx) entropy ranges
    calculateEntropy(upper, upvals);
    calculateEntropy(lower, botvals);

    if (upvals.avgProb == 0) {
      // When there are no training instances with the test value:
      // doesn't matter what exact value we use for pstop, just acts as
      // a constant scale factor in this case.
      calculateEntropy(lower, vals);
    }
    else {
      // Optimise the scale factor: start from whichever end of the interval
      // has the larger (random - actual) entropy gap and step towards the
      // other end, halving and reversing the step when the gap shrinks.
      if ( (upvals.randEntropy - upvals.actEntropy <
            botvals.randEntropy - botvals.actEntropy) &&
           (botvals.randEntropy - botvals.actEntropy > FLOOR) )
        {
          bestpstop = pstop = lower;
          stepsize = INITIAL_STEP;
          bestminprob = botvals.minProb;
          bestpsum = botvals.avgProb;
        }
      else {
        bestpstop = pstop = upper;
        stepsize = -INITIAL_STEP;
        bestminprob = upvals.minProb;
        bestpsum = upvals.avgProb;
      }
      bestdiff = currentdiff = FLOOR;
      itcount = 0;
      /* Enter the root finder */
      while (true)
        {
          itcount++;
          lastdiff = currentdiff;
          pstop += stepsize;
          if (pstop <= lower) {
            // clamp at the lower bound; force a step reversal below
            pstop = lower;
            currentdiff = 0.0;
            delta = -1.0;
          }
          else if (pstop >= upper) {
            // clamp at the upper bound; force a step reversal below
            pstop = upper;
            currentdiff = 0.0;
            delta = -1.0;
          }
          else {
            calculateEntropy(pstop, vals);
            currentdiff = vals.randEntropy - vals.actEntropy;
            if (currentdiff < FLOOR) {
              currentdiff = FLOOR;
              if ((Math.abs(stepsize) < INITIAL_STEP) &&
                  (bestdiff == FLOOR)) {
                // no peak was ever found; fall back to the lower bound
                bestpstop = lower;
                bestminprob = botvals.minProb;
                bestpsum = botvals.avgProb;
                break;
              }
            }
            delta = currentdiff - lastdiff;
          }
          if (currentdiff > bestdiff) {
            // record the best entropy gap seen so far
            bestdiff = currentdiff;
            bestpstop = pstop;
            bestminprob = vals.minProb;
            bestpsum = vals.avgProb;
          }
          if (delta < 0) {
            // gap shrank: either converged, or reverse and halve the step
            if (Math.abs(stepsize) < ROOT_FINDER_ACCURACY) {
              break;
            }
            else {
              stepsize /= -2.0;
            }
          }
          if (itcount > ROOT_FINDER_MAX_ITER) {
            break;
          }
        }
    }
    m_SmallestProb = bestminprob;
    m_AverageProb = bestpsum;
    // Set the probability of transforming to a missing value
    switch ( m_MissingMode )
      {
      case M_DELETE:
        m_MissingProb = 0.0;
        break;
      case M_NORMAL:
        m_MissingProb = 1.0;
        break;
      case M_MAXDIFF:
        m_MissingProb = m_SmallestProb;
        break;
      case M_AVERAGE:
        m_MissingProb = m_AverageProb;
        break;
      }

    if ( Math.abs(bestpsum - (double)m_TotalCount) < EPSILON) {
      // No difference in the values
      stopProb = 1.0;
    }
    else {
      stopProb = bestpstop;
    }
    return stopProb;
  }

  /**
   * Calculates the entropy of the actual class prediction
   * and the entropy for random class prediction. It also
   * calculates the smallest and average transformation probabilities.
   *
   * @param stop the stop parameter
   * @param params the object wrapper for the parameters:
   * actual entropy, random entropy, average probability and smallest
   * probability.
   * @return the values are returned in the object "params".
   */
  private void calculateEntropy( double stop, KStarWrapper params) {
    String debug = "(KStarNominalAttribute.calculateEntropy)";
    int i,j,k;
    Instance train;
    double actent = 0.0, randent=0.0;
    double pstar, tprob, psum=0.0, minprob=1.0;
    double actClassProb, randClassProb;
    double [][] pseudoClassProb = new double[NUM_RAND_COLS+1][m_NumClasses];

    // init ...
    for(j = 0; j <= NUM_RAND_COLS; j++) {
      for(i = 0; i < m_NumClasses; i++) {
        pseudoClassProb[j][i] = 0.0;
      }
    }
    for (i=0; i < m_NumInstances; i++) {
      train = m_TrainSet.instance(i);
      if (!train.isMissing(m_AttrIndex)) {
        pstar = PStar(m_Test, train, m_AttrIndex, stop);
        tprob = pstar / m_TotalCount;
        if (pstar < minprob) {
          minprob = pstar;
        }
        psum += tprob;
        // filter instances with same class value
        for (k=0 ; k <= NUM_RAND_COLS ; k++) {
          // instance i is assigned a random class value in column k;
          // column k = NUM_RAND_COLS contains the original mapping:
          // instance -> class value
          pseudoClassProb[k][ m_RandClassCols[k][i] ] += tprob;
        }
      }
    }
    // compute the actual entropy using the class probs
    // with the original class value mapping (column NUM_RAND_COLS)
    for (j=m_NumClasses-1; j>=0; j--) {
      actClassProb = pseudoClassProb[NUM_RAND_COLS][j] / psum;
      if (actClassProb > 0) {
        actent -= actClassProb * Math.log(actClassProb) / LOG2;
      }
    }
    // compute a random entropy using the pseudo class probs
    // excluding the column NUM_RAND_COLS
    for (k=0; k < NUM_RAND_COLS;k++) {
      for (i = m_NumClasses-1; i >= 0; i--) {
        randClassProb = pseudoClassProb[k][i] / psum;
        if (randClassProb > 0) {
          randent -= randClassProb * Math.log(randClassProb) / LOG2;
        }
      }
    }
    randent /= NUM_RAND_COLS;
    // return the results through the wrapper object
    params.actEntropy = actent;
    params.randEntropy = randent;
    params.avgProb = psum;
    params.minProb = minprob;
  }

  /**
   * Calculates the "stop parameter" for this attribute using
   * the blend method: the value is computed using a root finder
   * algorithm (bisection). The method takes advantage of this calculation
   * to compute the smallest and average transformation probabilities
   * once the stop factor is obtained. It also sets the transformation
   * probability to an attribute with a missing value.
   *
   * @return the value of the stop parameter.
   */
  private double stopProbUsingBlend() {
    String debug = "(KStarNominalAttribute.stopProbUsingBlend) ";
    int itcount = 0;
    double stopProb, aimfor;
    double lower, upper, tstop;
    KStarWrapper botvals = new KStarWrapper();
    KStarWrapper upvals = new KStarWrapper();
    KStarWrapper vals = new KStarWrapper();

    int testvalue = (int)m_Test.value(m_AttrIndex);
    // target sphere size: blend between n0 (count of the test value) and N
    aimfor = (m_TotalCount - m_Distribution[testvalue]) *
      (double)m_BlendFactor / 100.0 + m_Distribution[testvalue];

    // Initial values for root finder
    tstop = 1.0 - (double)m_BlendFactor / 100.0;
    lower = 0.0 + ROOT_FINDER_ACCURACY/2.0;
    upper = 1.0 - ROOT_FINDER_ACCURACY/2.0;

    // Find out function border values
    calculateSphereSize(testvalue, lower, botvals);
    botvals.sphere -= aimfor;
    calculateSphereSize(testvalue, upper, upvals);
    upvals.sphere -= aimfor;

    if (upvals.avgProb == 0) {
      // When there are no training instances with the test value:
      // doesn't matter what exact value we use for tstop, just acts as
      // a constant scale factor in this case.
      calculateSphereSize(testvalue, tstop, vals);
    }
    else if (upvals.sphere > 0) {
      // Can't include aimfor instances, going for min possible
      tstop = upper;
      vals.avgProb = upvals.avgProb;
    }
    else {
      // Enter the root finder (bisection on [lower, upper])
      for (;;) {
        itcount++;
        calculateSphereSize(testvalue, tstop, vals);
        vals.sphere -= aimfor;
        if ( Math.abs(vals.sphere) <= ROOT_FINDER_ACCURACY ||
             itcount >= ROOT_FINDER_MAX_ITER )
          {
            break;
          }
        if (vals.sphere > 0.0) {
          lower = tstop;
          tstop = (upper + lower) / 2.0;
        }
        else {
          upper = tstop;
          tstop = (upper + lower) / 2.0;
        }
      }
    }

    m_SmallestProb = vals.minProb;
    m_AverageProb = vals.avgProb;
    // Set the probability of transforming to a missing value
    switch ( m_MissingMode )
      {
      case M_DELETE:
        m_MissingProb = 0.0;
        break;
      case M_NORMAL:
        m_MissingProb = 1.0;
        break;
      case M_MAXDIFF:
        m_MissingProb = m_SmallestProb;
        break;
      case M_AVERAGE:
        m_MissingProb = m_AverageProb;
        break;
      }

    if ( Math.abs(vals.avgProb - m_TotalCount) < EPSILON) {
      // No difference in the values
      stopProb = 1.0;
    }
    else {
      stopProb = tstop;
    }
    return stopProb;
  }

  /**
   * Calculates the size of the "sphere of influence" defined as:
   * sphere = sum(P^2)/sum(P)^2
   * P(i|j) = (1-tstop)*P(i) + ((i==j)?tstop:0).
   * This method takes advantage of the calculation to compute the values of
   * the "smallest" and "average" transformation probabilities when using
   * the specified stop parameter.
   *
   * @param testvalue the value of the test instance
   * @param stop the stop parameter
   * @param params a wrapper of the parameters to be computed:
   * "sphere" the sphere size
   * "avgProb" the average transformation probability
   * "minProb" the smallest transformation probability
   * @return the values are returned in "params" object.
   */
  private void calculateSphereSize(int testvalue, double stop,
                                   KStarWrapper params) {
    String debug = "(KStarNominalAttribute.calculateSphereSize) ";
    int i, thiscount;
    double tprob, tval = 0.0, t1 = 0.0;
    double sphere, minprob = 1.0, transprob = 0.0;
    for(i = 0; i < m_Distribution.length; i++) {
      thiscount = m_Distribution[i];
      if ( thiscount != 0 ) {
        if ( testvalue == i ) {
          // matching value gets the stop mass on top of the base probability
          tprob = (stop + (1 - stop) / m_Distribution.length) / m_TotalCount;
          tval += tprob * thiscount;
          t1 += tprob * tprob * thiscount;
        }
        else {
          tprob = ((1 - stop) / m_Distribution.length) / m_TotalCount;
          tval += tprob * thiscount;
          t1 += tprob * tprob * thiscount;
        }
        if ( minprob > tprob * m_TotalCount ) {
          minprob = tprob * m_TotalCount;
        }
      }
    }
    transprob = tval;
    sphere = (t1 == 0) ? 0 : ((tval * tval) / t1);
    // return the values through the wrapper object
    params.sphere = sphere;
    params.avgProb = transprob;
    params.minProb = minprob;
  }

  /**
   * Calculates the nominal probability function defined as:
   * P(i|j) = (1-stop) * P(i) + ((i==j) ? stop : 0)
   * In this case, it calculates the transformation probability of the
   * indexed test attribute to the indexed train attribute.
   *
   * @param test the test instance
   * @param train the train instance
   * @param col the attribute index
   * @param stop the stop parameter
   * @return the value of the transformation probability.
   */
  private double PStar(Instance test, Instance train, int col, double stop) {
    String debug = "(KStarNominalAttribute.PStar) ";
    double pstar;
    int numvalues = 0;
    try {
      numvalues = test.attribute(col).numValues();
    } catch (Exception ex) {
      ex.printStackTrace();
    }
    if ( (int)test.value(col) == (int)train.value(col) ) {
      pstar = stop + (1 - stop) / numvalues;
    }
    else {
      pstar = (1 - stop) / numvalues;
    }
    return pstar;
  }

  /**
   * Calculates the distribution, in the dataset, of the indexed nominal
   * attribute values. It also counts the actual number of training instances
   * that contributed (those with non-missing values) to calculate the
   * distribution.
   */
  private void generateAttrDistribution() {
    String debug = "(KStarNominalAttribute.generateAttrDistribution)";
    m_Distribution = new int[ m_TrainSet.attribute(m_AttrIndex).numValues() ];
    int i;
    Instance train;
    for (i=0; i < m_NumInstances; i++) {
      train = m_TrainSet.instance(i);
      if ( !train.isMissing(m_AttrIndex) ) {
        m_TotalCount++;
        m_Distribution[(int)train.value(m_AttrIndex)]++;
      }
    }
  }

  /**
   * Sets the options.
   *
   * @param missingmode the missing value treatment to use
   * @param blendmethod the blending method to use
   * @param blendfactor the level of blending to use
   */
  public void setOptions(int missingmode, int blendmethod, int blendfactor) {
    m_MissingMode = missingmode;
    m_BlendMethod = blendmethod;
    m_BlendFactor = blendfactor;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
} // class
18,254
28.58671
78
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/lazy/kstar/KStarNumericAttribute.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * KStarNumericAttribute.java
 * Copyright (C) 1995-2012 University of Waikato
 * Java port to Weka by Abdelaziz Mahoui (am14@cs.waikato.ac.nz).
 */

package weka.classifiers.lazy.kstar;

import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

/**
 * A custom class which provides the environment for computing the
 * transformation probability of a specified test instance numeric
 * attribute to a specified train instance numeric attribute.
 *
 * @author Len Trigg (len@reeltwo.com)
 * @author Abdelaziz Mahoui (am14@cs.waikato.ac.nz)
 * @version $Revision 1.0 $
 */
public class KStarNumericAttribute
  implements KStarConstants, RevisionHandler {

  /** The training instances used for classification. */
  protected Instances m_TrainSet;

  /** The test instance */
  protected Instance m_Test;

  /** The train instance */
  protected Instance m_Train;

  /** The index of the attribute in the test and train instances */
  protected int m_AttrIndex;

  /** The scale parameter */
  protected double m_Scale = 1.0;

  /** Probability of the test attribute transforming into a train attribute
      with a missing value */
  protected double m_MissingProb = 1.0;

  /** Average probability of the test attribute transforming into a train
      attribute */
  protected double m_AverageProb = 1.0;

  /** Smallest probability of the test attribute transforming into a train
      attribute */
  protected double m_SmallestProb = 1.0;

  /** The set of distances from the test attribute to the set of train
      attributes (-1.0 marks a train instance with a missing value) */
  protected double [] m_Distances;

  /** Set of columns: each column representing a randomised version of the
      train dataset class column */
  protected int [][] m_RandClassCols;

  /** The number of train instances with no missing attribute values */
  protected int m_ActualCount = 0;

  /** A cache for storing attribute values and their corresponding scale
      parameters */
  protected KStarCache m_Cache;

  /** The number of instances in the dataset */
  protected int m_NumInstances;

  /** The number of class values */
  protected int m_NumClasses;

  /** The number of attributes */
  protected int m_NumAttributes;

  /** The class attribute type */
  protected int m_ClassType;

  /** Missing value treatment (one of M_DELETE, M_NORMAL, M_MAXDIFF,
      M_AVERAGE) */
  protected int m_MissingMode = M_AVERAGE;

  /** B_SPHERE = use specified blend, B_ENTROPY = entropic blend setting */
  protected int m_BlendMethod = B_SPHERE ;

  /** default sphere of influence blend setting */
  protected int m_BlendFactor = 20;

  /**
   * Constructor.
   *
   * @param test the test instance
   * @param train the train instance
   * @param attrIndex the index of the numeric attribute to compare
   * @param trainSet the training instances
   * @param randClassCols randomised versions of the training class column
   *        (column NUM_RAND_COLS holds the original instance-to-class mapping)
   * @param cache cache of previously computed scale parameters, keyed on the
   *        test attribute value
   */
  public KStarNumericAttribute(Instance test, Instance train, int attrIndex,
                               Instances trainSet, int [][] randClassCols,
                               KStarCache cache)
  {
    m_Test = test;
    m_Train = train;
    m_AttrIndex = attrIndex;
    m_TrainSet = trainSet;
    m_RandClassCols = randClassCols;
    m_Cache = cache;
    init();
  }

  /**
   * Initializes the m_Attributes of the class.
   */
  private void init() {
    try {
      m_NumInstances = m_TrainSet.numInstances();
      m_NumClasses = m_TrainSet.numClasses();
      m_NumAttributes = m_TrainSet.numAttributes();
      m_ClassType = m_TrainSet.classAttribute().type();
    } catch(Exception e) {
      e.printStackTrace();
    }
  }

  /**
   * Calculates the transformation probability of the attribute indexed
   * "m_AttrIndex" in test instance "m_Test" to the same attribute in
   * the train instance "m_Train".
   *
   * @return the probability value
   */
  public double transProb() {
    String debug = "(KStarNumericAttribute.transProb) ";
    double transProb, distance, scale;
    // check if the attribute value has been encountered before
    // in which case it should be in the numeric cache
    if ( m_Cache.containsKey(m_Test.value(m_AttrIndex))) {
      KStarCache.TableEntry te =
        m_Cache.getCacheValues( m_Test.value(m_AttrIndex) );
      m_Scale = te.value;
      m_MissingProb = te.pmiss;
    }
    else {
      if (m_BlendMethod == B_ENTROPY) {
        m_Scale = scaleFactorUsingEntropy();
      }
      else { // default is B_SPHERE
        m_Scale = scaleFactorUsingBlend();
      }
      m_Cache.store( m_Test.value(m_AttrIndex), m_Scale, m_MissingProb );
    }
    // m_Scale is now available; turn it into a transformation probability
    if (m_Train.isMissing(m_AttrIndex)) {
      transProb = m_MissingProb;
    }
    else {
      distance = Math.abs( m_Test.value(m_AttrIndex) -
                           m_Train.value(m_AttrIndex) );
      transProb = PStar( distance, m_Scale );
    }
    return transProb;
  }

  /**
   * Calculates the scale factor for the attribute indexed
   * "m_AttrIndex" in test instance "m_Test" using a global
   * blending factor (default value is 20%). Uses bisection to find a scale
   * whose "sphere of influence" matches the blend target.
   *
   * @return the scale factor value
   */
  private double scaleFactorUsingBlend() {
    String debug = "(KStarNumericAttribute.scaleFactorUsingBlend)";
    int i, j, lowestcount = 0, count = 0;
    double lowest = -1.0, nextlowest = -1.0;
    double root, broot, up, bot;
    double aimfor, min_val = 9e300, scale = 1.0;
    double avgprob = 0.0, minprob = 0.0, min_pos = 0.0;

    KStarWrapper botvals = new KStarWrapper();
    KStarWrapper upvals = new KStarWrapper();
    KStarWrapper vals = new KStarWrapper();

    m_Distances = new double [m_NumInstances];

    for (j=0; j<m_NumInstances; j++) {
      if ( m_TrainSet.instance(j).isMissing(m_AttrIndex) ) {
        // mark the train instance with a missing value by setting
        // the distance to -1.0
        m_Distances[j] = -1.0;
      }
      else {
        m_Distances[j] = Math.abs(m_TrainSet.instance(j).value(m_AttrIndex) -
                                  m_Test.value(m_AttrIndex));
        if ( (m_Distances[j]+1e-5) < nextlowest || nextlowest == -1.0 ) {
          if ( (m_Distances[j]+1e-5) < lowest || lowest == -1.0 ) {
            nextlowest = lowest;
            lowest = m_Distances[j];
            lowestcount = 1;
          }
          else if ( Math.abs(m_Distances[j]-lowest) < 1e-5 ) {
            // record the number of training instances (number n0) at
            // the smallest distance from test instance
            lowestcount++;
          }
          else {
            nextlowest = m_Distances[j];
          }
        }
        // records the actual number of instances with no missing value
        m_ActualCount++;
      }
    }

    if (nextlowest == -1 || lowest == -1) { // Data values are all the same
      scale = 1.0;
      m_SmallestProb = m_AverageProb = 1.0;
      return scale;
    }
    else {
      // starting point for root
      root = 1.0 / (nextlowest - lowest);
      i = 0;
      // given the expression: n0 <= E(scale) <= N
      // E(scale) = (N - n0) * b + n0 with blending factor: 0 <= b <= 1
      // aimfor = (N - n0) * b + n0
      aimfor = (m_ActualCount - lowestcount) *
        (double)m_BlendFactor / 100.0 + lowestcount;
      if (m_BlendFactor == 0) {
        aimfor += 1.0;
      }
      // root is bracketed in interval [bot,up]
      bot = 0.0 + ROOT_FINDER_ACCURACY / 2.0;
      up = root * 16; // This is bodgy
      // E(bot)
      calculateSphereSize(bot, botvals);
      botvals.sphere -= aimfor;
      // E(up)
      calculateSphereSize(up, upvals);
      upvals.sphere -= aimfor;

      if (botvals.sphere < 0) { // Couldn't include that many
        // instances - going for max possible
        min_pos = bot;
        avgprob = botvals.avgProb;
        minprob = botvals.minProb;
      }
      else if (upvals.sphere > 0) { // Couldn't include that few,
        // going for min possible
        min_pos = up;
        avgprob = upvals.avgProb;
        minprob = upvals.minProb;
      }
      else {
        // Root finding Algorithm starts here !
        for (;;) {
          calculateSphereSize(root, vals);
          vals.sphere -= aimfor;
          if ( Math.abs(vals.sphere) < min_val ) {
            // track the closest-to-target scale seen so far
            min_val = Math.abs(vals.sphere);
            min_pos = root;
            avgprob = vals.avgProb;
            minprob = vals.minProb;
          }
          if ( Math.abs(vals.sphere) <= ROOT_FINDER_ACCURACY ) {
            break; // converged to a solution, done!
          }
          if (vals.sphere > 0.0) {
            broot = (root + up) / 2.0;
            bot = root;
            root = broot;
          }
          else {
            broot = (root + bot) / 2.0;
            up = root;
            root = broot;
          }
          i++;
          if (i > ROOT_FINDER_MAX_ITER) {
            // System.err.println("Warning: "+debug+"
            // ROOT_FINDER_MAX_ITER exceeded");
            root = min_pos;
            break;
          }
        }
      }

      m_SmallestProb = minprob;
      m_AverageProb = avgprob;
      // Set the probability of transforming to a missing value
      switch ( m_MissingMode )
        {
        case M_DELETE:
          m_MissingProb = 0.0;
          break;
        case M_NORMAL:
          m_MissingProb = 1.0;
          break;
        case M_MAXDIFF:
          m_MissingProb = m_SmallestProb;
          break;
        case M_AVERAGE:
          m_MissingProb = m_AverageProb;
          break;
        }
      // set the scale factor value
      scale = min_pos;
      return scale;
    }
  }

  /**
   * Calculates the size of the "sphere of influence" defined as:
   * sphere = sum(P)^2/sum(P^2) where
   * P(i) = root*exp(-2*i*root).
   * Since there are n different training instances we multiply P(i) by 1/n.
   *
   * @param scale the scale parameter
   * @param params wrapper receiving the sphere size, average probability and
   *        smallest probability
   */
  private void calculateSphereSize(double scale, KStarWrapper params) {
    String debug = "(KStarNumericAttribute.calculateSphereSize)";
    int i;
    double sphereSize, minprob = 1.0;
    double pstar;                 // P*(b|a)
    double pstarSum = 0.0;        // sum(P*)
    double pstarSquareSum = 0.0;  // sum(P*^2)
    double inc;
    for (i = 0; i < m_NumInstances; i++) {
      if (m_Distances[i] < 0) {
        // instance with missing value
        continue;
      }
      else {
        pstar = PStar( m_Distances[i], scale );
        if (minprob > pstar) {
          minprob = pstar;
        }
        inc = pstar / m_ActualCount;
        pstarSum += inc;
        pstarSquareSum += inc * inc;
      }
    }
    sphereSize = (pstarSquareSum == 0 ? 0
                  : pstarSum * pstarSum / pstarSquareSum);
    // return the values through the wrapper object
    params.sphere = sphereSize;
    params.avgProb = pstarSum;
    params.minProb = minprob;
  }

  /**
   * Calculates the scale factor using entropy.
   *
   * @return the scale factor value
   */
  private double scaleFactorUsingEntropy() {
    String debug = "(KStarNumericAttribute.scaleFactorUsingEntropy)";
    if ( m_ClassType != Attribute.NOMINAL ) {
      System.err.println("Error: "+debug+" attribute class must be nominal!");
      System.exit(1);
    }
    int i,j, lowestcount = 0, count, itcount;
    double lowest = -1.0, nextlowest = -1.0;
    double root, up, bot, stepsize, delta;
    double actentropy = 0.0, randentropy = 0.0, actscale, randscale;
    double minrand = 0.0, minact = 0.0, maxrand = 0.0, maxact = 0.0;
    double bestdiff, bestroot, currentdiff, lastdiff;
    double bestpsum, bestminprob, scale = 1.0;

    KStarWrapper botvals = new KStarWrapper();
    KStarWrapper upvals = new KStarWrapper();
    KStarWrapper vals = new KStarWrapper();

    m_Distances = new double [m_NumInstances];

    for (j=0; j<m_NumInstances; j++) {
      if ( m_TrainSet.instance(j).isMissing(m_AttrIndex) ) {
        // mark the train instance with a missing value by setting
        // the distance to -1.0
        m_Distances[j] = -1.0;
      }
      else {
        m_Distances[j] = Math.abs(m_TrainSet.instance(j).value(m_AttrIndex) -
                                  m_Test.value(m_AttrIndex));
        if ( (m_Distances[j]+1e-5) < nextlowest || nextlowest == -1.0 ) {
          if ( (m_Distances[j]+1e-5) < lowest || lowest == -1.0 ) {
            nextlowest = lowest;
            lowest = m_Distances[j];
            lowestcount = 1;
          }
          else if ( Math.abs(m_Distances[j]-lowest) < 1e-5 ) {
            // record the number of training instances (number n0) at
            // the smallest distance from test instance
            lowestcount++;
          }
          else {
            nextlowest = m_Distances[j];
          }
        }
        // records the actual number of instances with no missing value
        m_ActualCount++;
      }
    } // for

    if (nextlowest == -1 || lowest == -1) { // Data values are all the same
      scale = 1.0;
      m_SmallestProb = m_AverageProb = 1.0;
      return scale;
    }
    else {
      // starting point for root
      root = 1.0 / (nextlowest - lowest);
      // root is bracketed in interval [bot,up]
      bot = 0.0 + ROOT_FINDER_ACCURACY / 2;
      up = root * 8; // This is bodgy
      // Find (approx) entropy ranges
      calculateEntropy(up, upvals);
      calculateEntropy(bot, botvals);
      actscale = botvals.actEntropy - upvals.actEntropy;
      randscale = botvals.randEntropy - upvals.randEntropy;
      // Optimise the scale factor: step from the bottom of the interval,
      // reversing and quartering the step when the entropy gap shrinks
      bestroot = root = bot;
      bestdiff = currentdiff = FLOOR1;
      bestpsum = botvals.avgProb;
      bestminprob = botvals.minProb;
      stepsize = (up - bot) / 20.0;
      itcount = 0;
      // Root finding algorithm starts here!
      while (true) {
        itcount++;
        lastdiff = currentdiff;
        root += Math.log(root + 1.0) * stepsize;
        if (root <= bot) {
          // clamp at the lower bound; force a step reversal below
          root = bot;
          currentdiff = 0.0;
          delta = -1.0;
        }
        else if (root >= up) {
          // clamp at the upper bound; force a step reversal below
          root = up;
          currentdiff = 0.0;
          delta = -1.0;
        }
        else {
          calculateEntropy(root, vals);
          // Normalise entropies
          vals.randEntropy = (vals.randEntropy - upvals.randEntropy) /
            randscale;
          vals.actEntropy = (vals.actEntropy - upvals.actEntropy) /
            randscale;
          currentdiff = vals.randEntropy - vals.actEntropy;
          if (currentdiff < FLOOR1) {
            currentdiff = FLOOR1;
            if (stepsize < 0) {
              // If we've hit the end and turned around we can't
              // have found any peaks
              bestdiff = currentdiff;
              bestroot = bot;
              bestpsum = botvals.avgProb;
              bestminprob = botvals.minProb;
              break;
            }
          }
          delta = currentdiff - lastdiff;
        }
        if (currentdiff > bestdiff) {
          // record the best entropy gap seen so far
          bestdiff = currentdiff;
          bestroot = root;
          bestminprob = vals.minProb;
          bestpsum = vals.avgProb;
        }
        if (delta < 0) {
          // gap shrank: either converged, or reverse and quarter the step
          if (Math.abs(stepsize) < ROOT_FINDER_ACCURACY) {
            break;
          }
          else {
            stepsize /= -4.0;
          }
        }
        if (itcount > ROOT_FINDER_MAX_ITER) {
          // System.err.println("Warning: "+debug+" ROOT_FINDER_MAX_ITER
          // exceeded");
          break;
        }
      } // while

      m_SmallestProb = bestminprob;
      m_AverageProb = bestpsum;
      // Set the probability of transforming to a missing value
      switch ( m_MissingMode )
        {
        case M_DELETE:
          m_MissingProb = 0.0;
          break;
        case M_NORMAL:
          m_MissingProb = 1.0;
          break;
        case M_MAXDIFF:
          m_MissingProb = m_SmallestProb;
          break;
        case M_AVERAGE:
          m_MissingProb = m_AverageProb;
          break;
        }
      // set scale factor
      scale = bestroot;
    } // else
    return scale;
  }

  /**
   * Calculates several parameters aside from the entropy: for a specified
   * scale factor, calculates the actual entropy, a random entropy using a
   * randomized set of class value columns, and records the average and
   * smallest probabilities (for use in missing value case).
   *
   * @param scale the scale parameter
   * @param params wrapper receiving the actual entropy, random entropy,
   *        average probability and smallest probability
   */
  private void calculateEntropy(double scale, KStarWrapper params) {
    String debug = "(KStarNumericAttribute.calculateEntropy)";
    int i,j,k;
    double actent = 0.0, randent = 0.0;
    double pstar, tprob, avgprob = 0.0, minprob = 1.0;
    double actClassProb, randClassProb;
    double [][] pseudoClassProbs = new double[NUM_RAND_COLS+1][m_NumClasses];

    // init
    for(j = 0; j <= NUM_RAND_COLS; j++) {
      for(i = 0; i < m_NumClasses; i++) {
        pseudoClassProbs[j][i] = 0.0;
      }
    }
    for (i=0; i < m_NumInstances; i++) {
      if (m_Distances[i] < 0) {
        // train instance has missing value
        continue;
      }
      else {
        pstar = PStar(m_Distances[i], scale);
        tprob = pstar / m_ActualCount;
        avgprob += tprob;
        if (pstar < minprob) {
          minprob = pstar;
        }
        // filter instances with same class value
        for (k=0; k <= NUM_RAND_COLS; k++) {
          // instance i is assigned a random class value in column k;
          // column k = NUM_RAND_COLS contains the original mapping:
          // instance -> class value
          pseudoClassProbs[k][ m_RandClassCols[k][i] ] += tprob;
        }
      }
    }
    // compute the actual entropy using the class probabilities
    // with the original class value mapping (column NUM_RAND_COLS)
    for (j = m_NumClasses-1; j >= 0; j--) {
      actClassProb = pseudoClassProbs[NUM_RAND_COLS][j] / avgprob;
      if (actClassProb > 0) {
        actent -= actClassProb * Math.log(actClassProb) / LOG2;
      }
    }
    // compute a random entropy using the pseudo class probs
    // excluding the column NUM_RAND_COLS
    for (k=0; k < NUM_RAND_COLS; k++) {
      for (i = m_NumClasses-1; i >= 0; i--) {
        randClassProb = pseudoClassProbs[k][i] / avgprob;
        if (randClassProb > 0) {
          randent -= randClassProb * Math.log(randClassProb) / LOG2;
        }
      }
    }
    randent /= NUM_RAND_COLS;
    // return the values through the wrapper object
    params.actEntropy = actent;
    params.randEntropy = randent;
    params.avgProb = avgprob;
    params.minProb = minprob;
  }

  /**
   * Calculates the value of P for a given value x using the expression:
   * P(x) = scale * exp( -2.0 * x * scale )
   *
   * @param x input value
   * @param scale the scale factor
   * @return output of the function P(x)
   */
  private double PStar(double x, double scale) {
    return scale * Math.exp( -2.0 * x * scale );
  }

  /**
   * Set options.
   *
   * @param missingmode the missing value treatment to use
   * @param blendmethod the blending method to use
   * @param blendfactor the level of blending to use
   */
  public void setOptions(int missingmode, int blendmethod, int blendfactor) {
    m_MissingMode = missingmode;
    m_BlendMethod = blendmethod;
    m_BlendFactor = blendfactor;
  }

  /**
   * Set the missing value mode.
   *
   * @param mode the type of missing value treatment to use
   */
  public void setMissingMode(int mode) {
    m_MissingMode = mode;
  }

  /**
   * Set the blending method.
   *
   * @param method the blending method to use
   */
  public void setBlendMethod(int method) {
    m_BlendMethod = method;
  }

  /**
   * Set the blending factor.
   *
   * @param factor the level of blending to use
   */
  public void setBlendFactor(int factor) {
    m_BlendFactor = factor;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
} // class
18,805
28.156589
78
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/lazy/kstar/KStarWrapper.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * KStarWrapper.java
 * Copyright (C) 1995-2012 University of Waikato
 * Java port to Weka by Abdelaziz Mahoui (am14@cs.waikato.ac.nz).
 */

package weka.classifiers.lazy.kstar;

import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

/**
 * A mutable value-holder used by the K* attribute classes to return several
 * computed parameters (sphere size, entropies and transformation
 * probabilities) from a single call. The fields are reused across calls.
 *
 * @author Len Trigg (len@reeltwo.com)
 * @author Abdelaziz Mahoui (am14@cs.waikato.ac.nz)
 * @version $Revision 1.0 $
 */
public class KStarWrapper implements RevisionHandler {

  /** used/reused to hold the sphere size */
  public double sphere = 0.0;

  /** used/reused to hold the actual entropy */
  public double actEntropy = 0.0;

  /** used/reused to hold the random entropy */
  public double randEntropy = 0.0;

  /** used/reused to hold the average transformation probability */
  public double avgProb = 0.0;

  /** used/reused to hold the smallest transformation probability */
  public double minProb = 0.0;

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
1,739
27.52459
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/AdaBoostM1.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    AdaBoostM1.java
 *    Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.meta;

import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.RandomizableIteratedSingleClassifierEnhancer;
import weka.classifiers.Sourcable;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.Randomizable;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;

/**
 * <!-- globalinfo-start -->
 * Class for boosting a nominal class classifier using the Adaboost M1 method.
 * Only nominal class problems can be tackled. Often dramatically improves
 * performance, but sometimes overfits.<br/>
 * <br/>
 * For more information, see<br/>
 * <br/>
 * Yoav Freund, Robert E. Schapire: Experiments with a new boosting algorithm.
 * In: Thirteenth International Conference on Machine Learning, San Francisco,
 * 148-156, 1996.
 * <p/>
 * <!-- globalinfo-end -->
 *
 * <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;inproceedings{Freund1996,
 *    address = {San Francisco},
 *    author = {Yoav Freund and Robert E. Schapire},
 *    booktitle = {Thirteenth International Conference on Machine Learning},
 *    pages = {148-156},
 *    publisher = {Morgan Kaufmann},
 *    title = {Experiments with a new boosting algorithm},
 *    year = {1996}
 * }
 * </pre>
 * <p/>
 * <!-- technical-bibtex-end -->
 *
 * <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -P &lt;num&gt;
 *  Percentage of weight mass to base training on.
 *  (default 100, reduce to around 90 speed up)</pre>
 *
 * <pre> -Q
 *  Use resampling for boosting.</pre>
 *
 * <pre> -S &lt;num&gt;
 *  Random number seed.
 *  (default 1)</pre>
 *
 * <pre> -I &lt;num&gt;
 *  Number of iterations.
 *  (default 10)</pre>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 * <pre> -W
 *  Full name of base classifier.
 *  (default: weka.classifiers.trees.DecisionStump)</pre>
 * <!-- options-end -->
 *
 * Options after -- are passed to the designated classifier.<p>
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @author Len Trigg (trigg@cs.waikato.ac.nz)
 * @version $Revision: 9186 $
 */
public class AdaBoostM1
        extends RandomizableIteratedSingleClassifierEnhancer
        implements WeightedInstancesHandler, Sourcable, TechnicalInformationHandler {

    /** for serialization */
    static final long serialVersionUID = -1178107808933117974L;

    /** Max num iterations tried to find classifier with non-zero error. */
    private static int MAX_NUM_RESAMPLING_ITERATIONS = 10;

    /** Array for storing the weights (betas) for the weighted vote. */
    protected double[] m_Betas;

    /** The number of successfully generated base classifiers. */
    protected int m_NumIterationsPerformed;

    /** Weight Threshold. The percentage of weight mass used in training. */
    protected int m_WeightThreshold = 100;

    /** Use boosting with resampling instead of reweighting? */
    protected boolean m_UseResampling;

    /** The number of classes in the training data. */
    protected int m_NumClasses;

    /** A ZeroR model in case no model can be built from the data. */
    protected Classifier m_ZeroR;

    /**
     * Constructor. Sets the default base classifier (DecisionStump).
     */
    public AdaBoostM1() {
        m_Classifier = new weka.classifiers.trees.DecisionStump();
    }

    /**
     * Returns a string describing classifier.
     *
     * @return a description suitable for
     * displaying in the explorer/experimenter gui
     */
    public String globalInfo() {
        return "Class for boosting a nominal class classifier using the Adaboost "
                + "M1 method. Only nominal class problems can be tackled. Often "
                + "dramatically improves performance, but sometimes overfits.\n\n"
                + "For more information, see\n\n"
                + getTechnicalInformation().toString();
    }

    /**
     * Returns an instance of a TechnicalInformation object, containing
     * detailed information about the technical background of this class,
     * e.g., paper reference or book this class is based on.
     *
     * @return the technical information about this class
     */
    public TechnicalInformation getTechnicalInformation() {
        TechnicalInformation result;

        result = new TechnicalInformation(Type.INPROCEEDINGS);
        result.setValue(Field.AUTHOR, "Yoav Freund and Robert E. Schapire");
        result.setValue(Field.TITLE, "Experiments with a new boosting algorithm");
        result.setValue(Field.BOOKTITLE, "Thirteenth International Conference on Machine Learning");
        result.setValue(Field.YEAR, "1996");
        result.setValue(Field.PAGES, "148-156");
        result.setValue(Field.PUBLISHER, "Morgan Kaufmann");
        result.setValue(Field.ADDRESS, "San Francisco");

        return result;
    }

    /**
     * String describing default classifier.
     *
     * @return the default classifier classname
     */
    protected String defaultClassifierString() {
        return "weka.classifiers.trees.DecisionStump";
    }

    /**
     * Select only instances with weights that contribute to
     * the specified quantile of the weight distribution.
     *
     * @param data the input instances
     * @param quantile the specified quantile eg 0.9 to select
     * 90% of the weight mass
     * @return the selected instances
     */
    protected Instances selectWeightQuantile(Instances data, double quantile) {
        int numInstances = data.numInstances();
        Instances trainData = new Instances(data, numInstances);
        double[] weights = new double[numInstances];

        double sumOfWeights = 0;
        for (int i = 0; i < numInstances; i++) {
            weights[i] = data.instance(i).weight();
            sumOfWeights += weights[i];
        }
        double weightMassToSelect = sumOfWeights * quantile;
        int[] sortedIndices = Utils.sort(weights);

        // Select instances from heaviest downwards until the requested weight
        // mass is covered; ties at the cut-off weight are all included.
        sumOfWeights = 0;
        for (int i = numInstances - 1; i >= 0; i--) {
            Instance instance = (Instance) data.instance(sortedIndices[i]).copy();
            trainData.add(instance);
            sumOfWeights += weights[sortedIndices[i]];
            if ((sumOfWeights > weightMassToSelect)
                    && (i > 0)
                    && (weights[sortedIndices[i]] != weights[sortedIndices[i - 1]])) {
                break;
            }
        }
        if (m_Debug) {
            System.err.println("Selected " + trainData.numInstances()
                    + " out of " + numInstances);
        }
        return trainData;
    }

    /**
     * Returns an enumeration describing the available options.
     *
     * @return an enumeration of all the available options.
     */
    public Enumeration listOptions() {
        Vector<Option> newVector = new Vector<Option>();

        newVector.addElement(new Option(
                "\tPercentage of weight mass to base training on.\n"
                        + "\t(default 100, reduce to around 90 speed up)",
                "P", 1, "-P <num>"));

        newVector.addElement(new Option(
                "\tUse resampling for boosting.",
                "Q", 0, "-Q"));

        Enumeration enu = super.listOptions();
        while (enu.hasMoreElements()) {
            newVector.addElement((Option) enu.nextElement());
        }

        return newVector.elements();
    }

    /**
     * Parses a given list of options. Recognizes -P (weight threshold
     * percentage) and -Q (use resampling); remaining options are handled
     * by the superclass. Options after -- are passed to the designated
     * classifier.
     *
     * @param options the list of options as an array of strings
     * @throws Exception if an option is not supported
     */
    public void setOptions(String[] options) throws Exception {

        String thresholdString = Utils.getOption('P', options);
        if (thresholdString.length() != 0) {
            setWeightThreshold(Integer.parseInt(thresholdString));
        } else {
            setWeightThreshold(100);
        }

        setUseResampling(Utils.getFlag('Q', options));

        super.setOptions(options);
    }

    /**
     * Gets the current settings of the Classifier.
     *
     * @return an array of strings suitable for passing to setOptions
     */
    public String[] getOptions() {
        Vector<String> result = new Vector<String>();

        if (getUseResampling())
            result.add("-Q");

        result.add("-P");
        result.add("" + getWeightThreshold());

        String[] options = super.getOptions();
        for (int i = 0; i < options.length; i++)
            result.add(options[i]);

        return (String[]) result.toArray(new String[result.size()]);
    }

    /**
     * Returns the tip text for this property.
     *
     * @return tip text for this property suitable for
     * displaying in the explorer/experimenter gui
     */
    public String weightThresholdTipText() {
        return "Weight threshold for weight pruning.";
    }

    /**
     * Set weight threshold.
     *
     * @param threshold the percentage of weight mass used for training
     */
    public void setWeightThreshold(int threshold) {
        m_WeightThreshold = threshold;
    }

    /**
     * Get the degree of weight thresholding.
     *
     * @return the percentage of weight mass used for training
     */
    public int getWeightThreshold() {
        return m_WeightThreshold;
    }

    /**
     * Returns the tip text for this property.
     *
     * @return tip text for this property suitable for
     * displaying in the explorer/experimenter gui
     */
    public String useResamplingTipText() {
        return "Whether resampling is used instead of reweighting.";
    }

    /**
     * Set resampling mode.
     *
     * @param r true if resampling should be done
     */
    public void setUseResampling(boolean r) {
        m_UseResampling = r;
    }

    /**
     * Get whether resampling is turned on.
     *
     * @return true if resampling output is on
     */
    public boolean getUseResampling() {
        return m_UseResampling;
    }

    /**
     * Returns default capabilities of the classifier.
     *
     * @return the capabilities of this classifier
     */
    public Capabilities getCapabilities() {
        Capabilities result = super.getCapabilities();

        // class: only nominal/binary classes can be boosted by AdaBoost.M1
        result.disableAllClasses();
        result.disableAllClassDependencies();
        if (super.getCapabilities().handles(Capability.NOMINAL_CLASS))
            result.enable(Capability.NOMINAL_CLASS);
        if (super.getCapabilities().handles(Capability.BINARY_CLASS))
            result.enable(Capability.BINARY_CLASS);

        return result;
    }

    /**
     * Boosting method.
     *
     * @param data the training data to be used for generating the
     * boosted classifier.
     * @throws Exception if the classifier could not be built successfully
     */
    public void buildClassifier(Instances data) throws Exception {

        super.buildClassifier(data);

        // can classifier handle the data?
        getCapabilities().testWithFail(data);

        // remove instances with missing class
        data = new Instances(data);
        data.deleteWithMissingClass();

        // only class? -> build ZeroR model
        if (data.numAttributes() == 1) {
            System.err.println(
                    "Cannot build model (only class attribute present in data!), "
                            + "using ZeroR model instead!");
            m_ZeroR = new weka.classifiers.rules.ZeroR();
            m_ZeroR.buildClassifier(data);
            return;
        } else {
            m_ZeroR = null;
        }

        m_NumClasses = data.numClasses();
        if ((!m_UseResampling)
                && (m_Classifier instanceof WeightedInstancesHandler)) {
            buildClassifierWithWeights(data);
        } else {
            buildClassifierUsingResampling(data);
        }
    }

    /**
     * Boosting method. Boosts using resampling.
     *
     * @param data the training data to be used for generating the
     * boosted classifier.
     * @throws Exception if the classifier could not be built successfully
     */
    protected void buildClassifierUsingResampling(Instances data)
            throws Exception {

        Instances trainData, sample, training;
        double epsilon, reweight, sumProbs;
        Evaluation evaluation;
        int numInstances = data.numInstances();
        Random randomInstance = new Random(m_Seed);
        int resamplingIterations = 0;

        // Initialize data
        m_Betas = new double[m_Classifiers.length];
        m_NumIterationsPerformed = 0;

        // Create a copy of the data so that when the weights are diddled
        // with it doesn't mess up the weights for anyone else
        training = new Instances(data, 0, numInstances);
        sumProbs = training.sumOfWeights();
        for (int i = 0; i < training.numInstances(); i++) {
            training.instance(i).setWeight(training.instance(i).weight() / sumProbs);
        }

        // Do bootstrap iterations
        for (m_NumIterationsPerformed = 0;
             m_NumIterationsPerformed < m_Classifiers.length;
             m_NumIterationsPerformed++) {
            if (m_Debug) {
                System.err.println("Training classifier "
                        + (m_NumIterationsPerformed + 1));
            }

            // Select instances to train the classifier on
            if (m_WeightThreshold < 100) {
                trainData = selectWeightQuantile(training,
                        (double) m_WeightThreshold / 100);
            } else {
                trainData = new Instances(training);
            }

            // Resample until the base classifier makes at least one error on
            // the (reweighted) training data, or the retry limit is reached.
            resamplingIterations = 0;
            double[] weights = new double[trainData.numInstances()];
            for (int i = 0; i < weights.length; i++) {
                weights[i] = trainData.instance(i).weight();
            }
            do {
                sample = trainData.resampleWithWeights(randomInstance, weights);

                // Build and evaluate classifier
                m_Classifiers[m_NumIterationsPerformed].buildClassifier(sample);
                evaluation = new Evaluation(data);
                evaluation.evaluateModel(m_Classifiers[m_NumIterationsPerformed],
                        training);
                epsilon = evaluation.errorRate();
                resamplingIterations++;
            } while (Utils.eq(epsilon, 0)
                    && (resamplingIterations < MAX_NUM_RESAMPLING_ITERATIONS));

            // Stop if error too big or 0
            if (Utils.grOrEq(epsilon, 0.5) || Utils.eq(epsilon, 0)) {
                if (m_NumIterationsPerformed == 0) {
                    m_NumIterationsPerformed = 1; // If we're the first we have to use it
                }
                break;
            }

            // Determine the weight to assign to this model
            m_Betas[m_NumIterationsPerformed] = Math.log((1 - epsilon) / epsilon);
            reweight = (1 - epsilon) / epsilon;
            if (m_Debug) {
                System.err.println("\terror rate = " + epsilon
                        + "  beta = " + m_Betas[m_NumIterationsPerformed]);
            }

            // Update instance weights
            setWeights(training, reweight);
        }
    }

    /**
     * Sets the weights for the next iteration: misclassified instances are
     * up-weighted by the given factor, then all weights are renormalized so
     * the total weight mass is unchanged.
     *
     * @param training the training instances
     * @param reweight the reweighting factor
     * @throws Exception if something goes wrong
     */
    protected void setWeights(Instances training, double reweight)
            throws Exception {

        double oldSumOfWeights, newSumOfWeights;

        oldSumOfWeights = training.sumOfWeights();
        Enumeration enu = training.enumerateInstances();
        while (enu.hasMoreElements()) {
            Instance instance = (Instance) enu.nextElement();
            if (!Utils.eq(m_Classifiers[m_NumIterationsPerformed].classifyInstance(instance),
                    instance.classValue()))
                instance.setWeight(instance.weight() * reweight);
        }

        // Renormalize weights
        newSumOfWeights = training.sumOfWeights();
        enu = training.enumerateInstances();
        while (enu.hasMoreElements()) {
            Instance instance = (Instance) enu.nextElement();
            instance.setWeight(instance.weight() * oldSumOfWeights
                    / newSumOfWeights);
        }
    }

    /**
     * Boosting method. Boosts any classifier that can handle weighted
     * instances.
     *
     * @param data the training data to be used for generating the
     * boosted classifier.
     * @throws Exception if the classifier could not be built successfully
     */
    protected void buildClassifierWithWeights(Instances data)
            throws Exception {

        Instances trainData, training;
        double epsilon, reweight;
        Evaluation evaluation;
        int numInstances = data.numInstances();
        Random randomInstance = new Random(m_Seed);

        // Initialize data
        m_Betas = new double[m_Classifiers.length];
        m_NumIterationsPerformed = 0;

        // Create a copy of the data so that when the weights are diddled
        // with it doesn't mess up the weights for anyone else
        training = new Instances(data, 0, numInstances);

        // Do bootstrap iterations
        for (m_NumIterationsPerformed = 0;
             m_NumIterationsPerformed < m_Classifiers.length;
             m_NumIterationsPerformed++) {
            if (m_Debug) {
                System.err.println("Training classifier "
                        + (m_NumIterationsPerformed + 1));
            }
            // Select instances to train the classifier on
            if (m_WeightThreshold < 100) {
                trainData = selectWeightQuantile(training,
                        (double) m_WeightThreshold / 100);
            } else {
                trainData = new Instances(training, 0, numInstances);
            }

            // Build the classifier
            if (m_Classifiers[m_NumIterationsPerformed] instanceof Randomizable)
                ((Randomizable) m_Classifiers[m_NumIterationsPerformed]).setSeed(randomInstance.nextInt());
            m_Classifiers[m_NumIterationsPerformed].buildClassifier(trainData);

            // Evaluate the classifier
            evaluation = new Evaluation(data);
            evaluation.evaluateModel(m_Classifiers[m_NumIterationsPerformed],
                    training);
            epsilon = evaluation.errorRate();

            // Stop if error too small or error too big and ignore this model
            if (Utils.grOrEq(epsilon, 0.5) || Utils.eq(epsilon, 0)) {
                if (m_NumIterationsPerformed == 0) {
                    m_NumIterationsPerformed = 1; // If we're the first we have to use it
                }
                break;
            }
            // Determine the weight to assign to this model
            m_Betas[m_NumIterationsPerformed] = Math.log((1 - epsilon) / epsilon);
            reweight = (1 - epsilon) / epsilon;
            if (m_Debug) {
                System.err.println("\terror rate = " + epsilon
                        + "  beta = " + m_Betas[m_NumIterationsPerformed]);
            }

            // Update instance weights
            setWeights(training, reweight);
        }
    }

    /**
     * Calculates the class membership probabilities for the given test instance.
     *
     * @param instance the instance to be classified
     * @return predicted class probability distribution
     * @throws Exception if instance could not be classified
     * successfully
     */
    public double[] distributionForInstance(Instance instance)
            throws Exception {

        // default model?
        if (m_ZeroR != null) {
            return m_ZeroR.distributionForInstance(instance);
        }

        if (m_NumIterationsPerformed == 0) {
            throw new Exception("No model built");
        }
        double[] sums = new double[instance.numClasses()];

        if (m_NumIterationsPerformed == 1) {
            return m_Classifiers[0].distributionForInstance(instance);
        } else {
            // Weighted vote: accumulate beta for each classifier's prediction,
            // then convert the log-scale sums to probabilities.
            for (int i = 0; i < m_NumIterationsPerformed; i++) {
                sums[(int) m_Classifiers[i].classifyInstance(instance)] += m_Betas[i];
            }
            return Utils.logs2probs(sums);
        }
    }

    /**
     * Returns the boosted model as Java source code.
     *
     * @param className the classname of the generated class
     * @return the tree as Java source code
     * @throws Exception if something goes wrong
     */
    public String toSource(String className) throws Exception {

        if (m_NumIterationsPerformed == 0) {
            throw new Exception("No model built yet");
        }
        if (!(m_Classifiers[0] instanceof Sourcable)) {
            throw new Exception("Base learner " + m_Classifier.getClass().getName()
                    + " is not Sourcable");
        }

        StringBuffer text = new StringBuffer("class ");
        text.append(className).append(" {\n\n");
        text.append(" public static double classify(Object[] i) {\n");

        if (m_NumIterationsPerformed == 1) {
            text.append(" return " + className + "_0.classify(i);\n");
        } else {
            text.append(" double [] sums = new double [" + m_NumClasses + "];\n");
            for (int i = 0; i < m_NumIterationsPerformed; i++) {
                text.append(" sums[(int) " + className + '_' + i
                        + ".classify(i)] += " + m_Betas[i] + ";\n");
            }
            text.append(" double maxV = sums[0];\n"
                    + " int maxI = 0;\n"
                    + " for (int j = 1; j < " + m_NumClasses + "; j++) {\n"
                    + " if (sums[j] > maxV) { maxV = sums[j]; maxI = j; }\n"
                    + " }\n return (double) maxI;\n");
        }
        text.append(" }\n}\n");

        for (int i = 0; i < m_Classifiers.length; i++) {
            text.append(((Sourcable) m_Classifiers[i])
                    .toSource(className + '_' + i));
        }
        return text.toString();
    }

    /**
     * Returns description of the boosted classifier.
     *
     * @return description of the boosted classifier as a string
     */
    public String toString() {

        // only ZeroR model?
        if (m_ZeroR != null) {
            StringBuffer buf = new StringBuffer();
            buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n");
            buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n");
            buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n");
            buf.append(m_ZeroR.toString());
            return buf.toString();
        }

        StringBuffer text = new StringBuffer();

        if (m_NumIterationsPerformed == 0) {
            text.append("AdaBoostM1: No model built yet.\n");
        } else if (m_NumIterationsPerformed == 1) {
            text.append("AdaBoostM1: No boosting possible, one classifier used!\n");
            text.append(m_Classifiers[0].toString() + "\n");
        } else {
            text.append("AdaBoostM1: Base classifiers and their weights: \n\n");
            for (int i = 0; i < m_NumIterationsPerformed; i++) {
                text.append(m_Classifiers[i].toString() + "\n\n");
                text.append("Weight: " + Utils.roundDouble(m_Betas[i], 2) + "\n\n");
            }
            text.append("Number of performed Iterations: "
                    + m_NumIterationsPerformed + "\n");
        }

        return text.toString();
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 9186 $");
    }

    /**
     * Main method for testing this class.
     *
     * @param argv the options
     */
    public static void main(String[] argv) {
        runClassifier(new AdaBoostM1(), argv);
    }
}
26,525
32.198999
192
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/AdditiveRegression.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    AdditiveRegression.java
 *    Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.meta;

import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.Classifier;
import weka.classifiers.IteratedSingleClassifierEnhancer;
import weka.classifiers.rules.ZeroR;
import weka.core.AdditionalMeasureProducer;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;

/**
 * <!-- globalinfo-start -->
 * Meta classifier that enhances the performance of a regression base
 * classifier. Each iteration fits a model to the residuals left by the
 * classifier on the previous iteration. Prediction is accomplished by adding
 * the predictions of each classifier. Reducing the shrinkage (learning rate)
 * parameter helps prevent overfitting and has a smoothing effect but
 * increases the learning time.<br/>
 * <br/>
 * For more information see:<br/>
 * <br/>
 * J.H. Friedman (1999). Stochastic Gradient Boosting.
 * <p/>
 * <!-- globalinfo-end -->
 *
 * <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;techreport{Friedman1999,
 *    author = {J.H. Friedman},
 *    institution = {Stanford University},
 *    title = {Stochastic Gradient Boosting},
 *    year = {1999},
 *    PS = {http://www-stat.stanford.edu/\~jhf/ftp/stobst.ps}
 * }
 * </pre>
 * <p/>
 * <!-- technical-bibtex-end -->
 *
 * <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -S
 *  Specify shrinkage rate. (default = 1.0, ie. no shrinkage)
 * </pre>
 *
 * <pre> -I &lt;num&gt;
 *  Number of iterations.
 *  (default 10)</pre>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 * <pre> -W
 *  Full name of base classifier.
 *  (default: weka.classifiers.trees.DecisionStump)</pre>
 * <!-- options-end -->
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public class AdditiveRegression
        extends IteratedSingleClassifierEnhancer
        implements OptionHandler, AdditionalMeasureProducer,
        WeightedInstancesHandler, TechnicalInformationHandler {

    /** for serialization */
    static final long serialVersionUID = -2368937577670527151L;

    /** Shrinkage (Learning rate). Default = no shrinkage. */
    protected double m_shrinkage = 1.0;

    /** The number of successfully generated base classifiers. */
    protected int m_NumIterationsPerformed;

    /** The model for the mean. */
    protected ZeroR m_zeroR;

    /** Whether we have suitable data or not (if not, ZeroR model is used). */
    protected boolean m_SuitableData = true;

    /**
     * Returns a string describing this attribute evaluator.
     *
     * @return a description of the evaluator suitable for
     * displaying in the explorer/experimenter gui
     */
    public String globalInfo() {
        return " Meta classifier that enhances the performance of a regression "
                + "base classifier. Each iteration fits a model to the residuals left "
                + "by the classifier on the previous iteration. Prediction is "
                + "accomplished by adding the predictions of each classifier. "
                + "Reducing the shrinkage (learning rate) parameter helps prevent "
                + "overfitting and has a smoothing effect but increases the learning "
                + "time.\n\n"
                + "For more information see:\n\n"
                + getTechnicalInformation().toString();
    }

    /**
     * Returns an instance of a TechnicalInformation object, containing
     * detailed information about the technical background of this class,
     * e.g., paper reference or book this class is based on.
     *
     * @return the technical information about this class
     */
    public TechnicalInformation getTechnicalInformation() {
        TechnicalInformation result;

        result = new TechnicalInformation(Type.TECHREPORT);
        result.setValue(Field.AUTHOR, "J.H. Friedman");
        result.setValue(Field.YEAR, "1999");
        result.setValue(Field.TITLE, "Stochastic Gradient Boosting");
        result.setValue(Field.INSTITUTION, "Stanford University");
        result.setValue(Field.PS, "http://www-stat.stanford.edu/~jhf/ftp/stobst.ps");

        return result;
    }

    /**
     * Default constructor specifying DecisionStump as the classifier.
     */
    public AdditiveRegression() {
        this(new weka.classifiers.trees.DecisionStump());
    }

    /**
     * Constructor which takes base classifier as argument.
     *
     * @param classifier the base classifier to use
     */
    public AdditiveRegression(Classifier classifier) {
        m_Classifier = classifier;
    }

    /**
     * String describing default classifier.
     *
     * @return the default classifier classname
     */
    protected String defaultClassifierString() {
        return "weka.classifiers.trees.DecisionStump";
    }

    /**
     * Returns an enumeration describing the available options.
     *
     * @return an enumeration of all the available options.
     */
    public Enumeration listOptions() {
        Vector<Option> newVector = new Vector<Option>(4);

        newVector.addElement(new Option(
                "\tSpecify shrinkage rate. "
                        + "(default = 1.0, ie. no shrinkage)\n",
                "S", 1, "-S"));

        Enumeration enu = super.listOptions();
        while (enu.hasMoreElements()) {
            newVector.addElement((Option) enu.nextElement());
        }
        return newVector.elements();
    }

    /**
     * Parses a given list of options. Recognizes -S (shrinkage rate);
     * remaining options are handled by the superclass.
     *
     * @param options the list of options as an array of strings
     * @throws Exception if an option is not supported
     */
    public void setOptions(String[] options) throws Exception {
        String optionString = Utils.getOption('S', options);
        if (optionString.length() != 0) {
            Double temp = Double.valueOf(optionString);
            setShrinkage(temp.doubleValue());
        }

        super.setOptions(options);
    }

    /**
     * Gets the current settings of the Classifier.
     *
     * @return an array of strings suitable for passing to setOptions
     */
    public String[] getOptions() {
        String[] superOptions = super.getOptions();
        String[] options = new String[superOptions.length + 2];
        int current = 0;

        options[current++] = "-S";
        options[current++] = "" + getShrinkage();

        System.arraycopy(superOptions, 0, options, current, superOptions.length);
        // Note: the array is exactly filled at this point, so no "" padding
        // (as in older WEKA code) is required.
        return options;
    }

    /**
     * Returns the tip text for this property.
     *
     * @return tip text for this property suitable for
     * displaying in the explorer/experimenter gui
     */
    public String shrinkageTipText() {
        return "Shrinkage rate. Smaller values help prevent overfitting and "
                + "have a smoothing effect (but increase learning time). "
                + "Default = 1.0, ie. no shrinkage.";
    }

    /**
     * Set the shrinkage parameter.
     *
     * @param l the shrinkage rate.
     */
    public void setShrinkage(double l) {
        m_shrinkage = l;
    }

    /**
     * Get the shrinkage rate.
     *
     * @return the value of the learning rate
     */
    public double getShrinkage() {
        return m_shrinkage;
    }

    /**
     * Returns default capabilities of the classifier.
     *
     * @return the capabilities of this classifier
     */
    public Capabilities getCapabilities() {
        Capabilities result = super.getCapabilities();

        // class: additive regression only handles numeric/date classes
        result.disableAllClasses();
        result.disableAllClassDependencies();
        result.enable(Capability.NUMERIC_CLASS);
        result.enable(Capability.DATE_CLASS);

        return result;
    }

    /**
     * Build the classifier on the supplied data.
     *
     * @param data the training data
     * @throws Exception if the classifier could not be built successfully
     */
    public void buildClassifier(Instances data) throws Exception {
        super.buildClassifier(data);

        // can classifier handle the data?
        getCapabilities().testWithFail(data);

        // remove instances with missing class
        Instances newData = new Instances(data);
        newData.deleteWithMissingClass();

        double sum = 0;
        double temp_sum = 0;

        // Add the model for the mean first
        m_zeroR = new ZeroR();
        m_zeroR.buildClassifier(newData);

        // only class? -> use only ZeroR model
        if (newData.numAttributes() == 1) {
            System.err.println(
                    "Cannot build model (only class attribute present in data!), "
                            + "using ZeroR model instead!");
            m_SuitableData = false;
            return;
        } else {
            m_SuitableData = true;
        }

        newData = residualReplace(newData, m_zeroR, false);
        for (int i = 0; i < newData.numInstances(); i++) {
            sum += newData.instance(i).weight()
                    * newData.instance(i).classValue() * newData.instance(i).classValue();
        }
        if (m_Debug) {
            System.err.println("Sum of squared residuals "
                    + "(predicting the mean) : " + sum);
        }

        // Keep fitting models to the residuals of the previous round until the
        // squared error stops improving or the iteration cap is reached.
        m_NumIterationsPerformed = 0;
        do {
            temp_sum = sum;

            // Build the classifier
            m_Classifiers[m_NumIterationsPerformed].buildClassifier(newData);

            newData = residualReplace(newData, m_Classifiers[m_NumIterationsPerformed], true);
            sum = 0;
            for (int i = 0; i < newData.numInstances(); i++) {
                sum += newData.instance(i).weight()
                        * newData.instance(i).classValue() * newData.instance(i).classValue();
            }
            if (m_Debug) {
                System.err.println("Sum of squared residuals : " + sum);
            }
            m_NumIterationsPerformed++;
        } while (((temp_sum - sum) > Utils.SMALL)
                && (m_NumIterationsPerformed < m_Classifiers.length));
    }

    /**
     * Classify an instance.
     *
     * @param inst the instance to predict
     * @return a prediction for the instance
     * @throws Exception if an error occurs
     */
    public double classifyInstance(Instance inst) throws Exception {

        double prediction = m_zeroR.classifyInstance(inst);

        // default model?
        if (!m_SuitableData) {
            return prediction;
        }

        // Sum the shrunken predictions of the additive models
        for (int i = 0; i < m_NumIterationsPerformed; i++) {
            double toAdd = m_Classifiers[i].classifyInstance(inst);
            toAdd *= getShrinkage();
            prediction += toAdd;
        }

        return prediction;
    }

    /**
     * Replace the class values of the instances from the current iteration
     * with residuals after predicting with the supplied classifier.
     *
     * @param data the instances to predict
     * @param c the classifier to use
     * @param useShrinkage whether shrinkage is to be applied to the model's output
     * @return a new set of instances with class values replaced by residuals
     * @throws Exception if something goes wrong
     */
    private Instances residualReplace(Instances data, Classifier c,
                                      boolean useShrinkage) throws Exception {
        double pred, residual;
        Instances newInst = new Instances(data);

        for (int i = 0; i < newInst.numInstances(); i++) {
            pred = c.classifyInstance(newInst.instance(i));
            if (useShrinkage) {
                pred *= getShrinkage();
            }
            residual = newInst.instance(i).classValue() - pred;
            newInst.instance(i).setClassValue(residual);
        }
        return newInst;
    }

    /**
     * Returns an enumeration of the additional measure names.
     *
     * @return an enumeration of the measure names
     */
    public Enumeration enumerateMeasures() {
        Vector<String> newVector = new Vector<String>(1);
        newVector.addElement("measureNumIterations");
        return newVector.elements();
    }

    /**
     * Returns the value of the named measure.
     *
     * @param additionalMeasureName the name of the measure to query for its value
     * @return the value of the named measure
     * @throws IllegalArgumentException if the named measure is not supported
     */
    public double getMeasure(String additionalMeasureName) {
        if (additionalMeasureName.compareToIgnoreCase("measureNumIterations") == 0) {
            return measureNumIterations();
        } else {
            throw new IllegalArgumentException(additionalMeasureName
                    + " not supported (AdditiveRegression)");
        }
    }

    /**
     * Return the number of iterations (base classifiers) completed.
     *
     * @return the number of iterations (same as number of base classifier
     * models)
     */
    public double measureNumIterations() {
        return m_NumIterationsPerformed;
    }

    /**
     * Returns textual description of the classifier.
     *
     * @return a description of the classifier as a string
     */
    public String toString() {
        StringBuffer text = new StringBuffer();

        // only ZeroR model?
        if (!m_SuitableData) {
            StringBuffer buf = new StringBuffer();
            buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n");
            buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n");
            buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n");
            buf.append(m_zeroR.toString());
            return buf.toString();
        }

        // NOTE(review): this tests the inherited m_NumIterations (the -I
        // option, default 10) rather than m_NumIterationsPerformed, so it
        // only fires when -I 0 was requested — confirm whether
        // m_NumIterationsPerformed was intended.
        if (m_NumIterations == 0) {
            return "Classifier hasn't been built yet!";
        }

        text.append("Additive Regression\n\n");
        text.append("ZeroR model\n\n" + m_zeroR + "\n\n");

        text.append("Base classifier "
                + getClassifier().getClass().getName()
                + "\n\n");
        text.append("" + m_NumIterationsPerformed + " models generated.\n");

        for (int i = 0; i < m_NumIterationsPerformed; i++) {
            text.append("\nModel number " + i + "\n\n"
                    + m_Classifiers[i] + "\n");
        }

        return text.toString();
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 8034 $");
    }

    /**
     * Main method for testing this class.
     *
     * @param argv should contain the following arguments:
     * -t training file [-T test file] [-c class index]
     */
    public static void main(String[] argv) {
        runClassifier(new AdditiveRegression(), argv);
    }
}
15,706
28.304104
389
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/AttributeSelectedClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * AttributeSelectedClassifier.java * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.attributeSelection.ASEvaluation; import weka.attributeSelection.ASSearch; import weka.attributeSelection.AttributeSelection; import weka.classifiers.SingleClassifierEnhancer; import weka.core.AdditionalMeasureProducer; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Drawable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** <!-- globalinfo-start --> * Dimensionality of training and test data is reduced by attribute selection before being passed on to a classifier. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -E &lt;attribute evaluator specification&gt; * Full class name of attribute evaluator, followed * by its options. * eg: "weka.attributeSelection.CfsSubsetEval -L" * (default weka.attributeSelection.CfsSubsetEval)</pre> * * <pre> -S &lt;search method specification&gt; * Full class name of search method, followed * by its options. 
* eg: "weka.attributeSelection.BestFirst -D 1" * (default weka.attributeSelection.BestFirst)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.J48)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> -U * Use unpruned tree.</pre> * * <pre> -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25)</pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2)</pre> * * <pre> -R * Use reduced error pruning.</pre> * * <pre> -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. * (default 3)</pre> * * <pre> -B * Use binary splits only.</pre> * * <pre> -S * Don't perform subtree raising.</pre> * * <pre> -L * Do not clean up after the tree has been built.</pre> * * <pre> -A * Laplace smoothing for predicted probabilities.</pre> * * <pre> -Q &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 9186 $ */ public class AttributeSelectedClassifier extends SingleClassifierEnhancer implements OptionHandler, Drawable, AdditionalMeasureProducer, WeightedInstancesHandler { /** for serialization */ static final long serialVersionUID = -1151805453487947577L; /** The attribute selection object */ protected AttributeSelection m_AttributeSelection = null; /** The attribute evaluator to use */ protected ASEvaluation m_Evaluator = new weka.attributeSelection.CfsSubsetEval(); /** The search method to use */ protected ASSearch m_Search = new weka.attributeSelection.BestFirst(); /** The header of the dimensionally reduced data */ protected Instances m_ReducedHeader; /** The number of class vals in the training data (1 if class is numeric) */ protected int m_numClasses; 
/** The number of attributes selected by the attribute selection phase */ protected double m_numAttributesSelected; /** The time taken to select attributes in milliseconds */ protected double m_selectionTime; /** The time taken to select attributes AND build the classifier */ protected double m_totalTime; /** * String describing default classifier. * * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.trees.J48"; } /** * Default constructor. */ public AttributeSelectedClassifier() { m_Classifier = new weka.classifiers.trees.J48(); } /** * Returns a string describing this search method * @return a description of the search method suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Dimensionality of training and test data is reduced by " +"attribute selection before being passed on to a classifier."; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(3); newVector.addElement(new Option( "\tFull class name of attribute evaluator, followed\n" + "\tby its options.\n" + "\teg: \"weka.attributeSelection.CfsSubsetEval -L\"\n" + "\t(default weka.attributeSelection.CfsSubsetEval)", "E", 1, "-E <attribute evaluator specification>")); newVector.addElement(new Option( "\tFull class name of search method, followed\n" + "\tby its options.\n" + "\teg: \"weka.attributeSelection.BestFirst -D 1\"\n" + "\t(default weka.attributeSelection.BestFirst)", "S", 1, "-S <search method specification>")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -E &lt;attribute evaluator specification&gt; * Full class name of attribute evaluator, followed * by its options. * eg: "weka.attributeSelection.CfsSubsetEval -L" * (default weka.attributeSelection.CfsSubsetEval)</pre> * * <pre> -S &lt;search method specification&gt; * Full class name of search method, followed * by its options. * eg: "weka.attributeSelection.BestFirst -D 1" * (default weka.attributeSelection.BestFirst)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.J48)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> -U * Use unpruned tree.</pre> * * <pre> -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25)</pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2)</pre> * * <pre> -R * Use reduced error pruning.</pre> * * <pre> -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. 
* (default 3)</pre> * * <pre> -B * Use binary splits only.</pre> * * <pre> -S * Don't perform subtree raising.</pre> * * <pre> -L * Do not clean up after the tree has been built.</pre> * * <pre> -A * Laplace smoothing for predicted probabilities.</pre> * * <pre> -Q &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { // same for attribute evaluator String evaluatorString = Utils.getOption('E', options); if (evaluatorString.length() == 0) evaluatorString = weka.attributeSelection.CfsSubsetEval.class.getName(); String [] evaluatorSpec = Utils.splitOptions(evaluatorString); if (evaluatorSpec.length == 0) { throw new Exception("Invalid attribute evaluator specification string"); } String evaluatorName = evaluatorSpec[0]; evaluatorSpec[0] = ""; setEvaluator(ASEvaluation.forName(evaluatorName, evaluatorSpec)); // same for search method String searchString = Utils.getOption('S', options); if (searchString.length() == 0) searchString = weka.attributeSelection.BestFirst.class.getName(); String [] searchSpec = Utils.splitOptions(searchString); if (searchSpec.length == 0) { throw new Exception("Invalid search specification string"); } String searchName = searchSpec[0]; searchSpec[0] = ""; setSearch(ASSearch.forName(searchName, searchSpec)); super.setOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 4]; int current = 0; // same attribute evaluator options[current++] = "-E"; options[current++] = "" +getEvaluatorSpec(); // same for search options[current++] = "-S"; options[current++] = "" + getSearchSpec(); System.arraycopy(superOptions, 0, options, current, superOptions.length); return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String evaluatorTipText() { return "Set the attribute evaluator to use. This evaluator is used " +"during the attribute selection phase before the classifier is " +"invoked."; } /** * Sets the attribute evaluator * * @param evaluator the evaluator with all options set. */ public void setEvaluator(ASEvaluation evaluator) { m_Evaluator = evaluator; } /** * Gets the attribute evaluator used * * @return the attribute evaluator */ public ASEvaluation getEvaluator() { return m_Evaluator; } /** * Gets the evaluator specification string, which contains the class name of * the attribute evaluator and any options to it * * @return the evaluator string. */ protected String getEvaluatorSpec() { ASEvaluation e = getEvaluator(); if (e instanceof OptionHandler) { return e.getClass().getName() + " " + Utils.joinOptions(((OptionHandler)e).getOptions()); } return e.getClass().getName(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String searchTipText() { return "Set the search method. This search method is used " +"during the attribute selection phase before the classifier is " +"invoked."; } /** * Sets the search method * * @param search the search method with all options set. 
*/ public void setSearch(ASSearch search) { m_Search = search; } /** * Gets the search method used * * @return the search method */ public ASSearch getSearch() { return m_Search; } /** * Gets the search specification string, which contains the class name of * the search method and any options to it * * @return the search string. */ protected String getSearchSpec() { ASSearch s = getSearch(); if (s instanceof OptionHandler) { return s.getClass().getName() + " " + Utils.joinOptions(((OptionHandler)s).getOptions()); } return s.getClass().getName(); } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result; if (getEvaluator() == null) result = super.getCapabilities(); else result = getEvaluator().getCapabilities(); // set dependencies for (Capability cap: Capability.values()) result.enableDependency(cap); return result; } /** * Build the classifier on the dimensionally reduced data. * * @param data the training data * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { if (m_Classifier == null) { throw new Exception("No base classifier has been set!"); } if (m_Evaluator == null) { throw new Exception("No attribute evaluator has been set!"); } if (m_Search == null) { throw new Exception("No search method has been set!"); } // can classifier handle the data? 
getCapabilities().testWithFail(data); // remove instances with missing class Instances newData = new Instances(data); newData.deleteWithMissingClass(); if (newData.numInstances() == 0) { m_Classifier.buildClassifier(newData); return; } if (newData.classAttribute().isNominal()) { m_numClasses = newData.classAttribute().numValues(); } else { m_numClasses = 1; } Instances resampledData = null; // check to see if training data has all equal weights double weight = newData.instance(0).weight(); boolean ok = false; for (int i = 1; i < newData.numInstances(); i++) { if (newData.instance(i).weight() != weight) { ok = true; break; } } if (ok) { if (!(m_Evaluator instanceof WeightedInstancesHandler) || !(m_Classifier instanceof WeightedInstancesHandler)) { Random r = new Random(1); for (int i = 0; i < 10; i++) { r.nextDouble(); } resampledData = newData.resampleWithWeights(r); } } else { // all equal weights in the training data so just use as is resampledData = newData; } m_AttributeSelection = new AttributeSelection(); m_AttributeSelection.setEvaluator(m_Evaluator); m_AttributeSelection.setSearch(m_Search); long start = System.currentTimeMillis(); m_AttributeSelection. SelectAttributes((m_Evaluator instanceof WeightedInstancesHandler) ? newData : resampledData); long end = System.currentTimeMillis(); if (m_Classifier instanceof WeightedInstancesHandler) { newData = m_AttributeSelection.reduceDimensionality(newData); m_Classifier.buildClassifier(newData); } else { resampledData = m_AttributeSelection.reduceDimensionality(resampledData); m_Classifier.buildClassifier(resampledData); } long end2 = System.currentTimeMillis(); m_numAttributesSelected = m_AttributeSelection.numberAttributesSelected(); m_ReducedHeader = new Instances((m_Classifier instanceof WeightedInstancesHandler) ? 
newData : resampledData, 0); m_selectionTime = (double)(end - start); m_totalTime = (double)(end2 - start); } /** * Classifies a given instance after attribute selection * * @param instance the instance to be classified * @return the class distribution * @throws Exception if instance could not be classified * successfully */ public double [] distributionForInstance(Instance instance) throws Exception { Instance newInstance; if (m_AttributeSelection == null) { // throw new Exception("AttributeSelectedClassifier: No model built yet!"); newInstance = instance; } else { newInstance = m_AttributeSelection.reduceDimensionality(instance); } return m_Classifier.distributionForInstance(newInstance); } /** * Returns the type of graph this classifier * represents. * * @return the type of graph */ public int graphType() { if (m_Classifier instanceof Drawable) return ((Drawable)m_Classifier).graphType(); else return Drawable.NOT_DRAWABLE; } /** * Returns graph describing the classifier (if possible). 
* * @return the graph of the classifier in dotty format * @throws Exception if the classifier cannot be graphed */ public String graph() throws Exception { if (m_Classifier instanceof Drawable) return ((Drawable)m_Classifier).graph(); else throw new Exception("Classifier: " + getClassifierSpec() + " cannot be graphed"); } /** * Output a representation of this classifier * * @return a representation of this classifier */ public String toString() { if (m_AttributeSelection == null) { return "AttributeSelectedClassifier: No attribute selection possible.\n\n" +m_Classifier.toString(); } StringBuffer result = new StringBuffer(); result.append("AttributeSelectedClassifier:\n\n"); result.append(m_AttributeSelection.toResultsString()); result.append("\n\nHeader of reduced data:\n"+m_ReducedHeader.toString()); result.append("\n\nClassifier Model\n"+m_Classifier.toString()); return result.toString(); } /** * Additional measure --- number of attributes selected * @return the number of attributes selected */ public double measureNumAttributesSelected() { return m_numAttributesSelected; } /** * Additional measure --- time taken (milliseconds) to select the attributes * @return the time taken to select attributes */ public double measureSelectionTime() { return m_selectionTime; } /** * Additional measure --- time taken (milliseconds) to select attributes * and build the classifier * @return the total time (select attributes + build classifier) */ public double measureTime() { return m_totalTime; } /** * Returns an enumeration of the additional measure names * @return an enumeration of the measure names */ public Enumeration enumerateMeasures() { Vector newVector = new Vector(3); newVector.addElement("measureNumAttributesSelected"); newVector.addElement("measureSelectionTime"); newVector.addElement("measureTime"); if (m_Classifier instanceof AdditionalMeasureProducer) { Enumeration en = ((AdditionalMeasureProducer)m_Classifier). 
enumerateMeasures(); while (en.hasMoreElements()) { String mname = (String)en.nextElement(); newVector.addElement(mname); } } return newVector.elements(); } /** * Returns the value of the named measure * @param additionalMeasureName the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException if the named measure is not supported */ public double getMeasure(String additionalMeasureName) { if (additionalMeasureName.compareToIgnoreCase("measureNumAttributesSelected") == 0) { return measureNumAttributesSelected(); } else if (additionalMeasureName.compareToIgnoreCase("measureSelectionTime") == 0) { return measureSelectionTime(); } else if (additionalMeasureName.compareToIgnoreCase("measureTime") == 0) { return measureTime(); } else if (m_Classifier instanceof AdditionalMeasureProducer) { return ((AdditionalMeasureProducer)m_Classifier). getMeasure(additionalMeasureName); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (AttributeSelectedClassifier)"); } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9186 $"); } /** * Main method for testing this class. * * @param argv should contain the following arguments: * -t training file [-T test file] [-c class index] */ public static void main(String [] argv) { runClassifier(new AttributeSelectedClassifier(), argv); } }
20,382
28.799708
117
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/Bagging.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope hat it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Bagging.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.Arrays; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.Vector; import java.util.ArrayList; import weka.classifiers.Classifier; import weka.classifiers.RandomizableParallelIteratedSingleClassifierEnhancer; import weka.core.AdditionalMeasureProducer; import weka.core.Aggregateable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.Randomizable; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.PartitionGenerator; /** * <!-- globalinfo-start --> * Class for bagging a classifier to reduce variance. Can do classification and regression depending on the base learner. <br/> * <br/> * For more information, see<br/> * <br/> * Leo Breiman (1996). Bagging predictors. Machine Learning. 24(2):123-140. 
* <p/> * <!-- globalinfo-end --> * <p> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;article{Breiman1996, * author = {Leo Breiman}, * journal = {Machine Learning}, * number = {2}, * pages = {123-140}, * title = {Bagging predictors}, * volume = {24}, * year = {1996} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * <p> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P * Size of each bag, as a percentage of the * training set size. (default 100)</pre> * * <pre> -O * Calculate the out of bag error.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Number of iterations. * (default 10)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.REPTree)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.REPTree: * </pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf (default 2).</pre> * * <pre> -V &lt;minimum variance for split&gt; * Set minimum numeric class variance proportion * of train variance for split (default 1e-3).</pre> * * <pre> -N &lt;number of folds&gt; * Number of folds for reduced error pruning (default 3).</pre> * * <pre> -S &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * * <pre> -P * No pruning.</pre> * * <pre> -L * Maximum tree depth (default -1, no maximum)</pre> * <p> * <!-- options-end --> * <p> * Options after -- are passed to the designated classifier.<p> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (len@reeltwo.com) * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 9785 $ */ public class Bagging extends RandomizableParallelIteratedSingleClassifierEnhancer implements WeightedInstancesHandler, AdditionalMeasureProducer, TechnicalInformationHandler, PartitionGenerator, Aggregateable<Bagging> { /** * for 
serialization */ static final long serialVersionUID = -115879962237199703L; /** * The size of each bag sample, as a percentage of the training size */ protected int m_BagSizePercent = 100; /** * Whether to calculate the out of bag error */ protected boolean m_CalcOutOfBag = false; /** * The out of bag error that has been calculated */ protected double m_OutOfBagError; /** * Constructor. */ public Bagging() { m_Classifier = new weka.classifiers.trees.REPTree(); } /** * Returns a string describing classifier * * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for bagging a classifier to reduce variance. Can do classification " + "and regression depending on the base learner. \n\n" + "For more information, see\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Leo Breiman"); result.setValue(Field.YEAR, "1996"); result.setValue(Field.TITLE, "Bagging predictors"); result.setValue(Field.JOURNAL, "Machine Learning"); result.setValue(Field.VOLUME, "24"); result.setValue(Field.NUMBER, "2"); result.setValue(Field.PAGES, "123-140"); return result; } /** * String describing default classifier. * * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.trees.REPTree"; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector newVector = new Vector(2); newVector.addElement(new Option( "\tSize of each bag, as a percentage of the\n" + "\ttraining set size. (default 100)", "P", 1, "-P")); newVector.addElement(new Option( "\tCalculate the out of bag error.", "O", 0, "-O")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. <p/> * <p> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P * Size of each bag, as a percentage of the * training set size. (default 100)</pre> * * <pre> -O * Calculate the out of bag error.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Number of iterations. * (default 10)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.REPTree)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.REPTree: * </pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf (default 2).</pre> * * <pre> -V &lt;minimum variance for split&gt; * Set minimum numeric class variance proportion * of train variance for split (default 1e-3).</pre> * * <pre> -N &lt;number of folds&gt; * Number of folds for reduced error pruning (default 3).</pre> * * <pre> -S &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * * <pre> -P * No pruning.</pre> * * <pre> -L * Maximum tree depth (default -1, no maximum)</pre> * <p> * <!-- options-end --> * <p> * Options after -- are passed to the designated classifier.<p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String bagSize = Utils.getOption('P', options); if (bagSize.length() != 0) { 
setBagSizePercent(Integer.parseInt(bagSize)); } else { setBagSizePercent(100); } setCalcOutOfBag(Utils.getFlag('O', options)); super.setOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { String[] superOptions = super.getOptions(); String[] options = new String[superOptions.length + 3]; int current = 0; options[current++] = "-P"; options[current++] = "" + getBagSizePercent(); if (getCalcOutOfBag()) { options[current++] = "-O"; } System.arraycopy(superOptions, 0, options, current, superOptions.length); current += superOptions.length; while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String bagSizePercentTipText() { return "Size of each bag, as a percentage of the training set size."; } /** * Gets the size of each bag, as a percentage of the training set size. * * @return the bag size, as a percentage. */ public int getBagSizePercent() { return m_BagSizePercent; } /** * Sets the size of each bag, as a percentage of the training set size. * * @param newBagSizePercent the bag size, as a percentage. */ public void setBagSizePercent(int newBagSizePercent) { m_BagSizePercent = newBagSizePercent; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String calcOutOfBagTipText() { return "Whether the out-of-bag error is calculated."; } /** * Set whether the out of bag error is calculated. * * @param calcOutOfBag whether to calculate the out of bag error */ public void setCalcOutOfBag(boolean calcOutOfBag) { m_CalcOutOfBag = calcOutOfBag; } /** * Get whether the out of bag error is calculated. 
* * @return whether the out of bag error is calculated */ public boolean getCalcOutOfBag() { return m_CalcOutOfBag; } /** * Gets the out of bag error that was calculated as the classifier * was built. * * @return the out of bag error */ public double measureOutOfBagError() { return m_OutOfBagError; } /** * Returns an enumeration of the additional measure names. * * @return an enumeration of the measure names */ public Enumeration enumerateMeasures() { Vector newVector = new Vector(1); newVector.addElement("measureOutOfBagError"); return newVector.elements(); } /** * Returns the value of the named measure. * * @param additionalMeasureName the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException if the named measure is not supported */ public double getMeasure(String additionalMeasureName) { if (additionalMeasureName.equalsIgnoreCase("measureOutOfBagError")) { return measureOutOfBagError(); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (Bagging)"); } } protected Random m_random; protected boolean[][] m_inBag; protected Instances m_data; /** * Returns a training set for a particular iteration. * * @param iteration the number of the iteration for the requested training set. * @return the training set for the supplied iteration number * @throws Exception if something goes wrong when generating a training set. 
*/ protected synchronized Instances getTrainingSet(int iteration) throws Exception { int bagSize = m_data.numInstances() * m_BagSizePercent / 100; Instances bagData = null; Random r = new Random(m_Seed + iteration); // create the in-bag dataset if (m_CalcOutOfBag) { m_inBag[iteration] = new boolean[m_data.numInstances()]; bagData = m_data.resampleWithWeights(r, m_inBag[iteration]); } else { bagData = m_data.resampleWithWeights(r); if (bagSize < m_data.numInstances()) { bagData.randomize(r); Instances newBagData = new Instances(bagData, 0, bagSize); bagData = newBagData; } } return bagData; } /** * Bagging method. * * @param data the training data to be used for generating the * bagged classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(data); // remove instances with missing class m_data = new Instances(data); m_data.deleteWithMissingClass(); super.buildClassifier(m_data); if (m_CalcOutOfBag && (m_BagSizePercent != 100)) { throw new IllegalArgumentException("Bag size needs to be 100% if " + "out-of-bag error is to be calculated!"); } int bagSize = m_data.numInstances() * m_BagSizePercent / 100; m_random = new Random(m_Seed); m_inBag = null; if (m_CalcOutOfBag) m_inBag = new boolean[m_Classifiers.length][]; for (int j = 0; j < m_Classifiers.length; j++) { if (m_Classifier instanceof Randomizable) { ((Randomizable) m_Classifiers[j]).setSeed(m_random.nextInt()); } } buildClassifiers(); // calc OOB error? 
if (getCalcOutOfBag()) { double outOfBagCount = 0.0; double errorSum = 0.0; boolean numeric = m_data.classAttribute().isNumeric(); for (int i = 0; i < m_data.numInstances(); i++) { double vote; double[] votes; if (numeric) votes = new double[1]; else votes = new double[m_data.numClasses()]; // determine predictions for instance int voteCount = 0; for (int j = 0; j < m_Classifiers.length; j++) { if (m_inBag[j][i]) continue; voteCount++; // double pred = m_Classifiers[j].classifyInstance(m_data.instance(i)); if (numeric) { // votes[0] += pred; votes[0] += m_Classifiers[j].classifyInstance(m_data.instance(i)); } else { // votes[(int) pred]++; double[] newProbs = m_Classifiers[j].distributionForInstance(m_data.instance(i)); // average the probability estimates for (int k = 0; k < newProbs.length; k++) { votes[k] += newProbs[k]; } } } // "vote" if (numeric) { vote = votes[0]; if (voteCount > 0) { vote /= voteCount; // average } } else { if (Utils.eq(Utils.sum(votes), 0)) { } else { Utils.normalize(votes); } vote = Utils.maxIndex(votes); // predicted class } // error for instance outOfBagCount += m_data.instance(i).weight(); if (numeric) { errorSum += StrictMath.abs(vote - m_data.instance(i).classValue()) * m_data.instance(i).weight(); } else { if (vote != m_data.instance(i).classValue()) errorSum += m_data.instance(i).weight(); } } m_OutOfBagError = errorSum / outOfBagCount; } else { m_OutOfBagError = 0; } // save memory m_data = null; } /** * Calculates the class membership probabilities for the given test * instance. 
* * @param instance the instance to be classified * @return predicted class probability distribution * @throws Exception if distribution can't be computed successfully */ public double[] distributionForInstance(Instance instance) throws Exception { double[] sums = new double[instance.numClasses()], newProbs; for (int i = 0; i < m_NumIterations; i++) { if (instance.classAttribute().isNumeric() == true) { sums[0] += m_Classifiers[i].classifyInstance(instance); } else { newProbs = m_Classifiers[i].distributionForInstance(instance); for (int j = 0; j < newProbs.length; j++) sums[j] += newProbs[j]; } } if (instance.classAttribute().isNumeric() == true) { sums[0] /= (double) m_NumIterations; return sums; } else if (Utils.eq(Utils.sum(sums), 0)) { return sums; } else { Utils.normalize(sums); return sums; } } /** * Returns description of the bagged classifier. * * @return description of the bagged classifier as a string */ public String toString() { if (m_Classifiers == null) { return "Bagging: No model built yet."; } StringBuffer text = new StringBuffer(); text.append("All the base classifiers: \n\n"); for (int i = 0; i < m_Classifiers.length; i++) text.append(m_Classifiers[i].toString() + "\n\n"); if (m_CalcOutOfBag) { text.append("Out of bag error: " + Utils.doubleToString(m_OutOfBagError, 4) + "\n\n"); } return text.toString(); } /** * Builds the classifier to generate a partition. */ public void generatePartition(Instances data) throws Exception { if (m_Classifier instanceof PartitionGenerator) buildClassifier(data); else throw new Exception("Classifier: " + getClassifierSpec() + " cannot generate a partition"); } /** * Computes an array that indicates leaf membership */ public double[] getMembershipValues(Instance inst) throws Exception { if (m_Classifier instanceof PartitionGenerator) { ArrayList<double[]> al = new ArrayList<double[]>(); int size = 0; for (int i = 0; i < m_Classifiers.length; i++) { double[] r = ((PartitionGenerator) m_Classifiers[i]). 
getMembershipValues(inst); size += r.length; al.add(r); } double[] values = new double[size]; int pos = 0; for (double[] v : al) { System.arraycopy(v, 0, values, pos, v.length); pos += v.length; } return values; } else throw new Exception("Classifier: " + getClassifierSpec() + " cannot generate a partition"); } /** * Returns the number of elements in the partition. */ public int numElements() throws Exception { if (m_Classifier instanceof PartitionGenerator) { int size = 0; for (int i = 0; i < m_Classifiers.length; i++) { size += ((PartitionGenerator) m_Classifiers[i]).numElements(); } return size; } else throw new Exception("Classifier: " + getClassifierSpec() + " cannot generate a partition"); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9785 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String[] argv) { runClassifier(new Bagging(), argv); } protected List<Classifier> m_classifiersCache; /** * Aggregate an object with this one * * @param toAggregate the object to aggregate * @return the result of aggregation * @throws Exception if the supplied object can't be aggregated for some * reason */ @Override public Bagging aggregate(Bagging toAggregate) throws Exception { if (!m_Classifier.getClass().isAssignableFrom(toAggregate.m_Classifier.getClass())) { throw new Exception("Can't aggregate because base classifiers differ"); } if (m_classifiersCache == null) { m_classifiersCache = new ArrayList<Classifier>(); m_classifiersCache.addAll(Arrays.asList(m_Classifiers)); } m_classifiersCache.addAll(Arrays.asList(toAggregate.m_Classifiers)); return this; } /** * Call to complete the aggregation process. Allows implementers to do any * final processing based on how many objects were aggregated. 
* * @throws Exception if the aggregation can't be finalized for some reason */ @Override public void finalizeAggregation() throws Exception { m_Classifiers = m_classifiersCache.toArray(new Classifier[1]); m_classifiersCache = null; } }
22,782
29.829499
127
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/CVParameterSelection.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CVParameterSelection.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.io.Serializable; import java.io.StreamTokenizer; import java.io.StringReader; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.Evaluation; import weka.classifiers.RandomizableSingleClassifierEnhancer; import weka.core.Capabilities; import weka.core.Drawable; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Summarizable; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** <!-- globalinfo-start --> * Class for performing parameter selection by cross-validation for any classifier.<br/> * <br/> * For more information, see:<br/> * <br/> * R. Kohavi (1995). Wrappers for Performance Enhancement and Oblivious Decision Graphs. Department of Computer Science, Stanford University. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;phdthesis{Kohavi1995, * address = {Department of Computer Science, Stanford University}, * author = {R. Kohavi}, * school = {Stanford University}, * title = {Wrappers for Performance Enhancement and Oblivious Decision Graphs}, * year = {1995} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -X &lt;number of folds&gt; * Number of folds used for cross validation (default 10).</pre> * * <pre> -P &lt;classifier parameter&gt; * Classifier parameter options. * eg: "N 1 5 10" Sets an optimisation parameter for the * classifier with name -N, with lower bound 1, upper bound * 5, and 10 optimisation steps. The upper bound may be the * character 'A' or 'I' to substitute the number of * attributes or instances in the training data, * respectively. This parameter may be supplied more than * once to optimise over several classifier options * simultaneously.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated sub-classifier. 
<p> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision: 8181 $ */ public class CVParameterSelection extends RandomizableSingleClassifierEnhancer implements Drawable, Summarizable, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -6529603380876641265L; /** * A data structure to hold values associated with a single * cross-validation search parameter */ protected class CVParameter implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = -4668812017709421953L; /** Char used to identify the option of interest */ private String m_ParamChar; /** Lower bound for the CV search */ private double m_Lower; /** Upper bound for the CV search */ private double m_Upper; /** Number of steps during the search */ private double m_Steps; /** The parameter value with the best performance */ private double m_ParamValue; /** True if the parameter should be added at the end of the argument list */ private boolean m_AddAtEnd; /** True if the parameter should be rounded to an integer */ private boolean m_RoundParam; /** * Constructs a CVParameter. 
* * @param param the parameter definition * @throws Exception if construction of CVParameter fails */ public CVParameter(String param) throws Exception { String[] parts = param.split(" "); if (parts.length < 4 || parts.length > 5) { throw new Exception("CVParameter " + param + ": four or five components expected!"); } try { Double.parseDouble(parts[0]); throw new Exception("CVParameter " + param + ": Character parameter identifier expected"); } catch (NumberFormatException n) { m_ParamChar = parts[0]; } try { m_Lower = Double.parseDouble(parts[1]); } catch (NumberFormatException n) { throw new Exception("CVParameter " + param + ": Numeric lower bound expected"); } if (parts[2].equals("A")) { m_Upper = m_Lower - 1; } else if (parts[2].equals("I")) { m_Upper = m_Lower - 2; } else { try { m_Upper = Double.parseDouble(parts[2]); if (m_Upper < m_Lower) { throw new Exception("CVParameter " + param + ": Upper bound is less than lower bound"); } } catch (NumberFormatException n) { throw new Exception("CVParameter " + param + ": Upper bound must be numeric, or 'A' or 'N'"); } } try { m_Steps = Double.parseDouble(parts[3]); } catch (NumberFormatException n) { throw new Exception("CVParameter " + param + ": Numeric number of steps expected"); } if (parts.length == 5 && parts[4].equals("R")) { m_RoundParam = true; } } /** * Returns a CVParameter as a string. * * @return the CVParameter as string */ public String toString() { String result = m_ParamChar + " " + m_Lower + " "; switch ((int)(m_Lower - m_Upper + 0.5)) { case 1: result += "A"; break; case 2: result += "I"; break; default: result += m_Upper; break; } result += " " + m_Steps; if (m_RoundParam) { result += " R"; } return result; } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8181 $"); } } /** * The base classifier options (not including those being set * by cross-validation) */ protected String [] m_ClassifierOptions; /** The set of all classifier options as determined by cross-validation */ protected String [] m_BestClassifierOptions; /** The set of all options at initialization time. So that getOptions can return this. */ protected String [] m_InitOptions; /** The cross-validated performance of the best options */ protected double m_BestPerformance; /** The set of parameters to cross-validate over */ protected FastVector m_CVParams = new FastVector(); /** The number of attributes in the data */ protected int m_NumAttributes; /** The number of instances in a training fold */ protected int m_TrainFoldSize; /** The number of folds used in cross-validation */ protected int m_NumFolds = 10; /** * Create the options array to pass to the classifier. The parameter * values and positions are taken from m_ClassifierOptions and * m_CVParams. * * @return the options array */ protected String [] createOptions() { String [] options = new String [m_ClassifierOptions.length + 2 * m_CVParams.size()]; int start = 0, end = options.length; // Add the cross-validation parameters and their values for (int i = 0; i < m_CVParams.size(); i++) { CVParameter cvParam = (CVParameter)m_CVParams.elementAt(i); double paramValue = cvParam.m_ParamValue; if (cvParam.m_RoundParam) { // paramValue = (double)((int) (paramValue + 0.5)); paramValue = Math.rint(paramValue); } boolean isInt = ((paramValue - (int)paramValue) == 0); if (cvParam.m_AddAtEnd) { options[--end] = "" + ((cvParam.m_RoundParam || isInt) ? Utils.doubleToString(paramValue,4) : cvParam.m_ParamValue); //Utils.doubleToString(paramValue,4); options[--end] = "-" + cvParam.m_ParamChar; } else { options[start++] = "-" + cvParam.m_ParamChar; options[start++] = "" + ((cvParam.m_RoundParam || isInt) ? 
Utils.doubleToString(paramValue,4) : cvParam.m_ParamValue); //+ Utils.doubleToString(paramValue,4); } } // Add the static parameters System.arraycopy(m_ClassifierOptions, 0, options, start, m_ClassifierOptions.length); return options; } /** * Finds the best parameter combination. (recursive for each parameter * being optimised). * * @param depth the index of the parameter to be optimised at this level * @param trainData the data the search is based on * @param random a random number generator * @throws Exception if an error occurs */ protected void findParamsByCrossValidation(int depth, Instances trainData, Random random) throws Exception { if (depth < m_CVParams.size()) { CVParameter cvParam = (CVParameter)m_CVParams.elementAt(depth); double upper; switch ((int)(cvParam.m_Lower - cvParam.m_Upper + 0.5)) { case 1: upper = m_NumAttributes; break; case 2: upper = m_TrainFoldSize; break; default: upper = cvParam.m_Upper; break; } double increment = (upper - cvParam.m_Lower) / (cvParam.m_Steps - 1); for(cvParam.m_ParamValue = cvParam.m_Lower; cvParam.m_ParamValue <= upper; cvParam.m_ParamValue += increment) { findParamsByCrossValidation(depth + 1, trainData, random); } } else { Evaluation evaluation = new Evaluation(trainData); // Set the classifier options String [] options = createOptions(); if (m_Debug) { System.err.print("Setting options for " + m_Classifier.getClass().getName() + ":"); for (int i = 0; i < options.length; i++) { System.err.print(" " + options[i]); } System.err.println(""); } ((OptionHandler)m_Classifier).setOptions(options); for (int j = 0; j < m_NumFolds; j++) { // We want to randomize the data the same way for every // learning scheme. 
Instances train = trainData.trainCV(m_NumFolds, j, new Random(1)); Instances test = trainData.testCV(m_NumFolds, j); m_Classifier.buildClassifier(train); evaluation.setPriors(train); evaluation.evaluateModel(m_Classifier, test); } double error = evaluation.errorRate(); if (m_Debug) { System.err.println("Cross-validated error rate: " + Utils.doubleToString(error, 6, 4)); } if ((m_BestPerformance == -99) || (error < m_BestPerformance)) { m_BestPerformance = error; m_BestClassifierOptions = createOptions(); } } } /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for performing parameter selection by cross-validation " + "for any classifier.\n\n" + "For more information, see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.PHDTHESIS); result.setValue(Field.AUTHOR, "R. Kohavi"); result.setValue(Field.YEAR, "1995"); result.setValue(Field.TITLE, "Wrappers for Performance Enhancement and Oblivious Decision Graphs"); result.setValue(Field.SCHOOL, "Stanford University"); result.setValue(Field.ADDRESS, "Department of Computer Science, Stanford University"); return result; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector newVector = new Vector(2); newVector.addElement(new Option( "\tNumber of folds used for cross validation (default 10).", "X", 1, "-X <number of folds>")); newVector.addElement(new Option( "\tClassifier parameter options.\n" + "\teg: \"N 1 5 10\" Sets an optimisation parameter for the\n" + "\tclassifier with name -N, with lower bound 1, upper bound\n" + "\t5, and 10 optimisation steps. The upper bound may be the\n" + "\tcharacter 'A' or 'I' to substitute the number of\n" + "\tattributes or instances in the training data,\n" + "\trespectively. This parameter may be supplied more than\n" + "\tonce to optimise over several classifier options\n" + "\tsimultaneously.", "P", 1, "-P <classifier parameter>")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -X &lt;number of folds&gt; * Number of folds used for cross validation (default 10).</pre> * * <pre> -P &lt;classifier parameter&gt; * Classifier parameter options. * eg: "N 1 5 10" Sets an optimisation parameter for the * classifier with name -N, with lower bound 1, upper bound * 5, and 10 optimisation steps. The upper bound may be the * character 'A' or 'I' to substitute the number of * attributes or instances in the training data, * respectively. This parameter may be supplied more than * once to optimise over several classifier options * simultaneously.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated sub-classifier. <p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String foldsString = Utils.getOption('X', options); if (foldsString.length() != 0) { setNumFolds(Integer.parseInt(foldsString)); } else { setNumFolds(10); } String cvParam; m_CVParams = new FastVector(); do { cvParam = Utils.getOption('P', options); if (cvParam.length() != 0) { addCVParameter(cvParam); } } while (cvParam.length() != 0); super.setOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String[] superOptions; if (m_InitOptions != null) { try { ((OptionHandler)m_Classifier).setOptions((String[])m_InitOptions.clone()); superOptions = super.getOptions(); ((OptionHandler)m_Classifier).setOptions((String[])m_BestClassifierOptions.clone()); } catch (Exception e) { throw new RuntimeException("CVParameterSelection: could not set options " + "in getOptions()."); } } else { superOptions = super.getOptions(); } String [] options = new String [superOptions.length + m_CVParams.size() * 2 + 2]; int current = 0; for (int i = 0; i < m_CVParams.size(); i++) { options[current++] = "-P"; options[current++] = "" + getCVParameter(i); } options[current++] = "-X"; options[current++] = "" + getNumFolds(); System.arraycopy(superOptions, 0, options, current, superOptions.length); return options; } /** * Returns (a copy of) the best options found for the classifier. 
* * @return the best options */ public String[] getBestClassifierOptions() { return (String[]) m_BestClassifierOptions.clone(); } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.setMinimumNumberInstances(m_NumFolds); return result; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @throws Exception if the classifier has not been generated successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(instances); // remove instances with missing class Instances trainData = new Instances(instances); trainData.deleteWithMissingClass(); if (!(m_Classifier instanceof OptionHandler)) { throw new IllegalArgumentException("Base classifier should be OptionHandler."); } m_InitOptions = ((OptionHandler)m_Classifier).getOptions(); m_BestPerformance = -99; m_NumAttributes = trainData.numAttributes(); Random random = new Random(m_Seed); trainData.randomize(random); m_TrainFoldSize = trainData.trainCV(m_NumFolds, 0).numInstances(); // Check whether there are any parameters to optimize if (m_CVParams.size() == 0) { m_Classifier.buildClassifier(trainData); m_BestClassifierOptions = m_InitOptions; return; } if (trainData.classAttribute().isNominal()) { trainData.stratify(m_NumFolds); } m_BestClassifierOptions = null; // Set up m_ClassifierOptions -- take getOptions() and remove // those being optimised. 
m_ClassifierOptions = ((OptionHandler)m_Classifier).getOptions(); for (int i = 0; i < m_CVParams.size(); i++) { Utils.getOption(((CVParameter)m_CVParams.elementAt(i)).m_ParamChar, m_ClassifierOptions); } findParamsByCrossValidation(0, trainData, random); String [] options = (String [])m_BestClassifierOptions.clone(); ((OptionHandler)m_Classifier).setOptions(options); m_Classifier.buildClassifier(trainData); } /** * Predicts the class distribution for the given test instance. * * @param instance the instance to be classified * @return the predicted class value * @throws Exception if an error occurred during the prediction */ public double[] distributionForInstance(Instance instance) throws Exception { return m_Classifier.distributionForInstance(instance); } /** * Adds a scheme parameter to the list of parameters to be set * by cross-validation * * @param cvParam the string representation of a scheme parameter. The * format is: <br> * param_char lower_bound upper_bound number_of_steps <br> * eg to search a parameter -P from 1 to 10 by increments of 1: <br> * P 1 10 11 <br> * @throws Exception if the parameter specifier is of the wrong format */ public void addCVParameter(String cvParam) throws Exception { CVParameter newCV = new CVParameter(cvParam); m_CVParams.addElement(newCV); } /** * Gets the scheme paramter with the given index. 
* * @param index the index for the parameter * @return the scheme parameter */ public String getCVParameter(int index) { if (m_CVParams.size() <= index) { return ""; } return ((CVParameter)m_CVParams.elementAt(index)).toString(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String CVParametersTipText() { return "Sets the scheme parameters which are to be set "+ "by cross-validation.\n"+ "The format for each string should be:\n"+ "param_char lower_bound upper_bound number_of_steps\n"+ "eg to search a parameter -P from 1 to 10 by increments of 1:\n"+ " \"P 1 10 10\" "; } /** * Get method for CVParameters. * * @return the CVParameters */ public Object[] getCVParameters() { Object[] CVParams = m_CVParams.toArray(); String params[] = new String[CVParams.length]; for(int i=0; i<CVParams.length; i++) params[i] = CVParams[i].toString(); return params; } /** * Set method for CVParameters. * * @param params the CVParameters to use * @throws Exception if the setting of the CVParameters fails */ public void setCVParameters(Object[] params) throws Exception { FastVector backup = m_CVParams; m_CVParams = new FastVector(); for(int i=0; i<params.length; i++) { try{ addCVParameter((String)params[i]); } catch(Exception ex) { m_CVParams = backup; throw ex; } } } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numFoldsTipText() { return "Get the number of folds used for cross-validation."; } /** * Gets the number of folds for the cross-validation. * * @return the number of folds for the cross-validation */ public int getNumFolds() { return m_NumFolds; } /** * Sets the number of folds for the cross-validation. 
* * @param numFolds the number of folds for the cross-validation * @throws Exception if parameter illegal */ public void setNumFolds(int numFolds) throws Exception { if (numFolds < 0) { throw new IllegalArgumentException("Stacking: Number of cross-validation " + "folds must be positive."); } m_NumFolds = numFolds; } /** * Returns the type of graph this classifier * represents. * * @return the type of graph this classifier represents */ public int graphType() { if (m_Classifier instanceof Drawable) return ((Drawable)m_Classifier).graphType(); else return Drawable.NOT_DRAWABLE; } /** * Returns graph describing the classifier (if possible). * * @return the graph of the classifier in dotty format * @throws Exception if the classifier cannot be graphed */ public String graph() throws Exception { if (m_Classifier instanceof Drawable) return ((Drawable)m_Classifier).graph(); else throw new Exception("Classifier: " + m_Classifier.getClass().getName() + " " + Utils.joinOptions(m_BestClassifierOptions) + " cannot be graphed"); } /** * Returns description of the cross-validated classifier. 
* * @return description of the cross-validated classifier as a string */ public String toString() { if (m_InitOptions == null) return "CVParameterSelection: No model built yet."; String result = "Cross-validated Parameter selection.\n" + "Classifier: " + m_Classifier.getClass().getName() + "\n"; try { for (int i = 0; i < m_CVParams.size(); i++) { CVParameter cvParam = (CVParameter)m_CVParams.elementAt(i); result += "Cross-validation Parameter: '-" + cvParam.m_ParamChar + "'" + " ranged from " + cvParam.m_Lower + " to "; switch ((int)(cvParam.m_Lower - cvParam.m_Upper + 0.5)) { case 1: result += m_NumAttributes; break; case 2: result += m_TrainFoldSize; break; default: result += cvParam.m_Upper; break; } result += " with " + cvParam.m_Steps + " steps\n"; } } catch (Exception ex) { result += ex.getMessage(); } result += "Classifier Options: " + Utils.joinOptions(m_BestClassifierOptions) + "\n\n" + m_Classifier.toString(); return result; } /** * A concise description of the model. * * @return a concise description of the model */ public String toSummaryString() { String result = "Selected values: " + Utils.joinOptions(m_BestClassifierOptions); return result + '\n'; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8181 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new CVParameterSelection(), argv); } }
25,895
28.972222
141
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/ClassificationViaClustering.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * ClassificationViaClustering.java * Copyright (C) 2007 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.meta; import weka.classifiers.Classifier; import weka.classifiers.rules.ZeroR; import weka.clusterers.ClusterEvaluation; import weka.clusterers.Clusterer; import weka.clusterers.AbstractClusterer; import weka.clusterers.SimpleKMeans; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.Capabilities.Capability; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.core.DenseInstance; /** <!-- globalinfo-start --> * A simple meta-classifier that uses a clusterer for classification. 
For cluster algorithms that use a fixed number of clusterers, like SimpleKMeans, the user has to make sure that the number of clusters to generate are the same as the number of class labels in the dataset in order to obtain a useful model.<br/> * <br/> * Note: at prediction time, a missing value is returned if no cluster is found for the instance.<br/> * <br/> * The code is based on the 'clusters to classes' functionality of the weka.clusterers.ClusterEvaluation class by Mark Hall. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of clusterer. * (default: weka.clusterers.SimpleKMeans)</pre> * * <pre> * Options specific to clusterer weka.clusterers.SimpleKMeans: * </pre> * * <pre> -N &lt;num&gt; * number of clusters. * (default 2).</pre> * * <pre> -V * Display std. deviations for centroids. * </pre> * * <pre> -M * Replace missing values with mean/mode. * </pre> * * <pre> -S &lt;num&gt; * Random number seed. 
* (default 10)</pre> * <!-- options-end --> * * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision: 1.5 $ */ public class ClassificationViaClustering extends AbstractClassifier { /** for serialization */ private static final long serialVersionUID = -5687069451420259135L; /** the cluster algorithm used (template) */ protected Clusterer m_Clusterer; /** the actual cluster algorithm being used */ protected Clusterer m_ActualClusterer; /** the original training data header */ protected Instances m_OriginalHeader; /** the modified training data header */ protected Instances m_ClusteringHeader; /** the mapping between clusters and classes */ protected double[] m_ClustersToClasses; /** the default model */ protected Classifier m_ZeroR; /** * default constructor */ public ClassificationViaClustering() { super(); m_Clusterer = new SimpleKMeans(); } /** * Returns a string describing classifier * * @return a description suitable for displaying in the * explorer/experimenter gui */ public String globalInfo() { return "A simple meta-classifier that uses a clusterer for classification. " + "For cluster algorithms that use a fixed number of clusterers, like " + "SimpleKMeans, the user has to make sure that the number of clusters " + "to generate are the same as the number of class labels in the dataset " + "in order to obtain a useful model.\n" + "\n" + "Note: at prediction time, a missing value is returned if no cluster " + "is found for the instance.\n" + "\n" + "The code is based on the 'clusters to classes' functionality of the " + "weka.clusterers.ClusterEvaluation class by Mark Hall."; } /** * Gets an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/
  public Enumeration listOptions() {
    Vector result = new Vector();

    // options of the superclass come first
    Enumeration enm = super.listOptions();
    while (enm.hasMoreElements())
      result.addElement(enm.nextElement());

    result.addElement(new Option(
        "\tFull name of clusterer.\n"
        + "\t(default: " + defaultClustererString() +")",
        "W", 1, "-W"));

    result.addElement(new Option(
        "", "", 0,
        "\nOptions specific to clusterer "
        + m_Clusterer.getClass().getName() + ":"));

    // append the options of the currently configured clusterer
    enm = ((OptionHandler) m_Clusterer).listOptions();
    while (enm.hasMoreElements())
      result.addElement(enm.nextElement());

    return result.elements();
  }

  /**
   * Returns the options of the current setup: "-W &lt;clusterer class&gt;",
   * the superclass options, and (after a "--" separator) the options of the
   * wrapped clusterer, if it is an OptionHandler.
   *
   * @return the current options
   */
  public String[] getOptions() {
    Vector<String> result = new Vector<String>();

    result.add("-W");
    result.add("" + getClusterer().getClass().getName());

    String[] options = super.getOptions();
    for (int i = 0; i < options.length; i++)
      result.add(options[i]);

    if (getClusterer() instanceof OptionHandler) {
      result.add("--");
      options = ((OptionHandler) getClusterer()).getOptions();
      for (int i = 0; i < options.length; i++)
        result.add(options[i]);
    }

    return result.toArray(new String[result.size()]);
  }

  /**
   * Parses the options for this object. The "-W" option selects the clusterer
   * class; any options after "--" are passed on to that clusterer.
   *
   * @param options the options to use
   * @throws Exception if setting of options fails
   */
  public void setOptions(String[] options) throws Exception {
    super.setOptions(options);

    String tmpStr = Utils.getOption('W', options);
    if (tmpStr.length() > 0) {
      // The first call (with null options) is just to set the clusterer in
      // case the subsequent option parsing fails.
      setClusterer(AbstractClusterer.forName(tmpStr, null));
      setClusterer(AbstractClusterer.forName(tmpStr, Utils.partitionOptions(options)));
    }
    else {
      // Same safety net as above, but with the default clusterer.
      setClusterer(AbstractClusterer.forName(defaultClustererString(), null));
      setClusterer(AbstractClusterer.forName(defaultClustererString(), Utils.partitionOptions(options)));
    }
  }

  /**
   * String describing the default clusterer.
   *
   * @return the classname of the default clusterer
   */
  protected String defaultClustererString() {
    return SimpleKMeans.class.getName();
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String clustererTipText() {
    return "The clusterer to be used.";
  }

  /**
   * Set the base clusterer.
   *
   * @param value the clusterer to use.
   */
  public void setClusterer(Clusterer value) {
    m_Clusterer = value;
  }

  /**
   * Get the clusterer used as the base learner.
   *
   * @return the current clusterer
   */
  public Clusterer getClusterer() {
    return m_Clusterer;
  }

  /**
   * Classifies the given test instance. The instance is stripped of its class
   * attribute, clustered with the trained clusterer, and the cluster is
   * mapped to a class via the clusters-to-classes mapping determined during
   * training. Falls back to the ZeroR model when the data contained only the
   * class attribute.
   *
   * @param instance the instance to be classified
   * @return the predicted most likely class for the instance or
   *         Utils.missingValue() if no prediction is made
   * @throws Exception if an error occurred during the prediction
   */
  public double classifyInstance(Instance instance) throws Exception {
    double result;

    if (m_ZeroR != null) {
      result = m_ZeroR.classifyInstance(instance);
    }
    else if (m_ActualClusterer != null) {
      // build a new instance without the class attribute
      double[] values = new double[m_ClusteringHeader.numAttributes()];
      int n = 0;
      for (int i = 0; i < instance.numAttributes(); i++) {
        if (i == instance.classIndex())
          continue;
        values[n] = instance.value(i);
        n++;
      }
      Instance newInst = new DenseInstance(instance.weight(), values);
      newInst.setDataset(m_ClusteringHeader);

      // determine cluster/class; -1 in the mapping means the cluster has no
      // associated class
      result = m_ClustersToClasses[m_ActualClusterer.clusterInstance(newInst)];
      if (result == -1)
        result = Utils.missingValue();
    }
    else {
      result = Utils.missingValue();
    }

    return result;
  }

  /**
   * Returns default capabilities of the classifier, derived from the wrapped
   * clusterer but restricted to a nominal class.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = m_Clusterer.getCapabilities();

    // class
    result.disableAllClasses();
    result.disable(Capability.NO_CLASS);
    result.enable(Capability.NOMINAL_CLASS);

    return result;
  }

  /**
   * Builds the classifier: clusters the class-stripped training data, then
   * determines the best mapping from clusters to classes by counting, for
   * every training instance, its (cluster, class) combination and calling
   * ClusterEvaluation.mapClasses on the resulting contingency table.
   *
   * @param data the training instances
   * @throws Exception if something goes wrong
   */
  public void buildClassifier(Instances data) throws Exception {
    // can classifier handle the data?
    getCapabilities().testWithFail(data);

    // remove instances with missing class
    data = new Instances(data);
    data.deleteWithMissingClass();

    // save original header (needed for clusters to classes output)
    m_OriginalHeader = new Instances(data, 0);

    // remove class attribute for clusterer
    Instances clusterData = new Instances(data);
    clusterData.setClassIndex(-1);
    clusterData.deleteAttributeAt(m_OriginalHeader.classIndex());
    m_ClusteringHeader = new Instances(clusterData, 0);

    if (m_ClusteringHeader.numAttributes() == 0) {
      System.err.println("Data contains only class attribute, defaulting to ZeroR model.");
      m_ZeroR = new ZeroR();
      m_ZeroR.buildClassifier(data);
    }
    else {
      m_ZeroR = null;

      // build clusterer
      m_ActualClusterer = AbstractClusterer.makeCopy(m_Clusterer);
      m_ActualClusterer.buildClusterer(clusterData);

      // evaluate clusterer on training set
      ClusterEvaluation eval = new ClusterEvaluation();
      eval.setClusterer(m_ActualClusterer);
      eval.evaluateClusterer(clusterData);
      double[] clusterAssignments = eval.getClusterAssignments();

      // determine classes-to-clusters mapping
      int[][] counts = new int[eval.getNumClusters()][m_OriginalHeader.numClasses()];
      int[] clusterTotals = new int[eval.getNumClusters()];
      double[] best = new double[eval.getNumClusters() + 1];
      double[] current = new double[eval.getNumClusters() + 1];
      // BUG FIX: the loop body previously contained an extra "i++;" on top of
      // the for-header increment, so only every other training instance was
      // counted and the clusters-to-classes mapping was computed from half
      // the data. The loop index must advance exactly once per instance,
      // since clusterAssignments is indexed in lock-step with the data.
      for (int i = 0; i < data.numInstances(); i++) {
        Instance instance = data.instance(i);
        counts[(int) clusterAssignments[i]][(int) instance.classValue()]++;
        clusterTotals[(int) clusterAssignments[i]]++;
      }
      best[eval.getNumClusters()] = Double.MAX_VALUE;
      ClusterEvaluation.mapClasses(eval.getNumClusters(), 0, counts, clusterTotals,
          current, best, 0);
      m_ClustersToClasses = new double[best.length];
      System.arraycopy(best, 0, m_ClustersToClasses, 0, best.length);
    }
  }

  /**
   * Returns a string representation of the classifier: the underlying
   * clusterer's model plus the clusters-to-classes and classes-to-clusters
   * mappings.
   *
   * @return a string representation of the classifier.
   */
  public String toString() {
    StringBuffer result = new StringBuffer();

    // title (class name, underlined with '=' — the "." regex matches every
    // character of the name)
    result.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n");
    result.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n");

    // model
    if (m_ActualClusterer != null) {
      // output clusterer
      result.append(m_ActualClusterer + "\n");

      // clusters to classes
      result.append("Clusters to classes mapping:\n");
      for (int i = 0; i < m_ClustersToClasses.length - 1; i++) {
        result.append(" " + (i+1) + ". Cluster: ");
        if (m_ClustersToClasses[i] < 0)
          result.append("no class");
        else
          result.append(
              m_OriginalHeader.classAttribute().value((int) m_ClustersToClasses[i])
              + " (" + ((int) m_ClustersToClasses[i] + 1) + ")");
        result.append("\n");
      }
      result.append("\n");

      // classes to clusters
      result.append("Classes to clusters mapping:\n");
      for (int i = 0; i < m_OriginalHeader.numClasses(); i++) {
        result.append(
            " " + (i+1) + ". Class (" + m_OriginalHeader.classAttribute().value(i) + "): ");
        boolean found = false;
        for (int n = 0; n < m_ClustersToClasses.length - 1; n++) {
          if (((int) m_ClustersToClasses[n]) == i) {
            found = true;
            result.append((n+1) + ". Cluster");
            break;
          }
        }
        if (!found)
          result.append("no cluster");
        result.append("\n");
      }
      result.append("\n");
    }
    else {
      result.append("no model built yet\n");
    }

    return result.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 1.5 $");
  }

  /**
   * Runs the classifier with the given options
   *
   * @param args the commandline options
   */
  public static void main(String[] args) {
    runClassifier(new ClassificationViaClustering(), args);
  }
}
14,571
27.131274
314
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/ClassificationViaRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ClassificationViaRegression.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.SingleClassifierEnhancer; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.MakeIndicator; /** <!-- globalinfo-start --> * Class for doing classification using regression methods. Class is binarized and one regression model is built for each class value. For more information, see, for example<br/> * <br/> * E. Frank, Y. Wang, S. Inglis, G. Holmes, I.H. Witten (1998). Using model trees for classification. Machine Learning. 32(1):63-76. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;article{Frank1998, * author = {E. Frank and Y. Wang and S. Inglis and G. Holmes and I.H. 
Witten}, * journal = {Machine Learning}, * number = {1}, * pages = {63-76}, * title = {Using model trees for classification}, * volume = {32}, * year = {1998} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.M5P)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.M5P: * </pre> * * <pre> -N * Use unpruned tree/rules</pre> * * <pre> -U * Use unsmoothed predictions</pre> * * <pre> -R * Build regression tree/rule rather than a model tree/rule</pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf * (default 4)</pre> * * <pre> -L * Save instances at the nodes in * the tree (for visualization purposes)</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class ClassificationViaRegression extends SingleClassifierEnhancer implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 4500023123618669859L; /** The classifiers. (One for each class.) */ private Classifier[] m_Classifiers; /** The filters used to transform the class. */ private MakeIndicator[] m_ClassFilters; /** * Default constructor. */ public ClassificationViaRegression() { m_Classifier = new weka.classifiers.trees.M5P(); } /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for doing classification using regression methods. Class is " + "binarized and one regression model is built for each class value. 
For more " + "information, see, for example\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "E. Frank and Y. Wang and S. Inglis and G. Holmes and I.H. Witten"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "Using model trees for classification"); result.setValue(Field.JOURNAL, "Machine Learning"); result.setValue(Field.VOLUME, "32"); result.setValue(Field.NUMBER, "1"); result.setValue(Field.PAGES, "63-76"); return result; } /** * String describing default classifier. * * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.trees.M5P"; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NOMINAL_CLASS); return result; } /** * Builds the classifiers. * * @param insts the training data. * @throws Exception if a classifier can't be built */ public void buildClassifier(Instances insts) throws Exception { Instances newInsts; // can classifier handle the data? 
getCapabilities().testWithFail(insts); // remove instances with missing class insts = new Instances(insts); insts.deleteWithMissingClass(); m_Classifiers = AbstractClassifier.makeCopies(m_Classifier, insts.numClasses()); m_ClassFilters = new MakeIndicator[insts.numClasses()]; for (int i = 0; i < insts.numClasses(); i++) { m_ClassFilters[i] = new MakeIndicator(); m_ClassFilters[i].setAttributeIndex("" + (insts.classIndex() + 1)); m_ClassFilters[i].setValueIndex(i); m_ClassFilters[i].setNumeric(true); m_ClassFilters[i].setInputFormat(insts); newInsts = Filter.useFilter(insts, m_ClassFilters[i]); m_Classifiers[i].buildClassifier(newInsts); } } /** * Returns the distribution for an instance. * * @param inst the instance to get the distribution for * @return the computed distribution * @throws Exception if the distribution can't be computed successfully */ public double[] distributionForInstance(Instance inst) throws Exception { double[] probs = new double[inst.numClasses()]; Instance newInst; double sum = 0; for (int i = 0; i < inst.numClasses(); i++) { m_ClassFilters[i].input(inst); m_ClassFilters[i].batchFinished(); newInst = m_ClassFilters[i].output(); probs[i] = m_Classifiers[i].classifyInstance(newInst); if (probs[i] > 1) { probs[i] = 1; } if (probs[i] < 0){ probs[i] = 0; } sum += probs[i]; } if (sum != 0) { Utils.normalize(probs, sum); } return probs; } /** * Prints the classifiers. * * @return a string representation of the classifier */ public String toString() { if (m_Classifiers == null) { return "Classification via Regression: No model built yet."; } StringBuffer text = new StringBuffer(); text.append("Classification via Regression\n\n"); for (int i = 0; i < m_Classifiers.length; i++) { text.append("Classifier for class with index " + i + ":\n\n"); text.append(m_Classifiers[i].toString() + "\n\n"); } return text.toString(); } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method for testing this class. * * @param argv the options for the learner */ public static void main(String [] argv){ runClassifier(new ClassificationViaRegression(), argv); } }
8,314
28.381625
178
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/CostSensitiveClassifier.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * CostSensitiveClassifier.java
 * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.meta;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.classifiers.Classifier;
import weka.classifiers.CostMatrix;
import weka.classifiers.RandomizableSingleClassifierEnhancer;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Drawable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Tag;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;

/**
 * A metaclassifier that makes its base classifier cost-sensitive. Two
 * strategies are supported:
 * <ul>
 *   <li>reweighting the training instances according to the total cost
 *       assigned to each class (the default), or</li>
 *   <li>predicting the class with minimum expected misclassification cost
 *       rather than the most likely class (-M).</li>
 * </ul>
 * The cost matrix can be supplied explicitly (-C file or -cost-matrix in
 * Matlab single-line format) or loaded on demand from a directory (-N),
 * where the file name is the relation name of the training data plus
 * ".cost". Options after "--" are passed to the base classifier.
 *
 * @author Len Trigg (len@reeltwo.com)
 * @version $Revision: 9186 $
 */
public class CostSensitiveClassifier
  extends RandomizableSingleClassifierEnhancer
  implements OptionHandler, Drawable {

  /** for serialization */
  static final long serialVersionUID = -110658209263002404L;

  /** load cost matrix on demand */
  public static final int MATRIX_ON_DEMAND = 1;

  /** use explicit cost matrix */
  public static final int MATRIX_SUPPLIED = 2;

  /** Specify possible sources of the cost matrix */
  public static final Tag [] TAGS_MATRIX_SOURCE = {
    new Tag(MATRIX_ON_DEMAND, "Load cost matrix on demand"),
    new Tag(MATRIX_SUPPLIED, "Use explicit cost matrix")
  };

  /** Indicates the current cost matrix source */
  protected int m_MatrixSource = MATRIX_ON_DEMAND;

  /**
   * The directory used when loading cost files on demand, null indicates
   * current directory
   */
  protected File m_OnDemandDirectory = new File(System.getProperty("user.dir"));

  /** The name of the cost file, for command line options */
  protected String m_CostFile;

  /** The cost matrix */
  protected CostMatrix m_CostMatrix = new CostMatrix(1);

  /**
   * True if the costs should be used by selecting the minimum expected
   * cost (false means weight training data by the costs)
   */
  protected boolean m_MinimizeExpectedCost;

  /**
   * String describing the default classifier.
   *
   * @return the default classifier classname
   */
  protected String defaultClassifierString() {
    return "weka.classifiers.rules.ZeroR";
  }

  /**
   * Default constructor: uses ZeroR as the base classifier.
   */
  public CostSensitiveClassifier() {
    m_Classifier = new weka.classifiers.rules.ZeroR();
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector options = new Vector(5);

    options.addElement(new Option(
        "\tMinimize expected misclassification cost. Default is to\n"
        +"\treweight training instances according to costs per class",
        "M", 0, "-M"));
    options.addElement(new Option(
        "\tFile name of a cost matrix to use. If this is not supplied,\n"
        +"\ta cost matrix will be loaded on demand. The name of the\n"
        +"\ton-demand file is the relation name of the training data\n"
        +"\tplus \".cost\", and the path to the on-demand file is\n"
        +"\tspecified with the -N option.",
        "C", 1, "-C <cost file name>"));
    options.addElement(new Option(
        "\tName of a directory to search for cost files when loading\n"
        +"\tcosts on demand (default current directory).",
        "N", 1, "-N <directory>"));
    options.addElement(new Option(
        "\tThe cost matrix in Matlab single line format.",
        "cost-matrix", 1, "-cost-matrix <matrix>"));

    // append the superclass options
    Enumeration superOpts = super.listOptions();
    while (superOpts.hasMoreElements()) {
      options.addElement(superOpts.nextElement());
    }

    return options.elements();
  }

  /**
   * Parses a given list of options. Recognizes -M, -C &lt;cost file&gt;,
   * -N &lt;directory&gt; and -cost-matrix &lt;matrix&gt;; everything else is
   * handed to the superclass. Options after "--" are passed to the
   * designated classifier.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    setMinimizeExpectedCost(Utils.getFlag('M', options));

    String costFile = Utils.getOption('C', options);
    if (costFile.length() != 0) {
      try {
        setCostMatrix(new CostMatrix(new BufferedReader(
            new FileReader(costFile))));
      } catch (Exception ex) {
        // Possibly an old format cost matrix — flag it by clearing the
        // matrix and delay loading until buildClassifier is called.
        setCostMatrix(null);
      }
      setCostMatrixSource(new SelectedTag(MATRIX_SUPPLIED, TAGS_MATRIX_SOURCE));
      m_CostFile = costFile;
    } else {
      setCostMatrixSource(new SelectedTag(MATRIX_ON_DEMAND, TAGS_MATRIX_SOURCE));
    }

    String demandDir = Utils.getOption('N', options);
    if (demandDir.length() != 0) {
      setOnDemandDirectory(new File(demandDir));
    }

    String matlabMatrix = Utils.getOption("cost-matrix", options);
    if (matlabMatrix.length() != 0) {
      // round-trip through the Matlab parser to validate and normalize
      StringWriter writer = new StringWriter();
      CostMatrix.parseMatlab(matlabMatrix).write(writer);
      setCostMatrix(new CostMatrix(new StringReader(writer.toString())));
      setCostMatrixSource(new SelectedTag(MATRIX_SUPPLIED, TAGS_MATRIX_SOURCE));
    }

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {
    String [] superOptions = super.getOptions();
    String [] options = new String [superOptions.length + 7];
    int pos = 0;

    if (m_MatrixSource == MATRIX_SUPPLIED) {
      if (m_CostFile != null) {
        options[pos++] = "-C";
        options[pos++] = "" + m_CostFile;
      } else {
        options[pos++] = "-cost-matrix";
        options[pos++] = getCostMatrix().toMatlab();
      }
    } else {
      options[pos++] = "-N";
      options[pos++] = "" + getOnDemandDirectory();
    }
    if (getMinimizeExpectedCost()) {
      options[pos++] = "-M";
    }

    System.arraycopy(superOptions, 0, options, pos, superOptions.length);

    // replace any remaining null slots by empty strings
    for (; pos < options.length; pos++) {
      if (options[pos] == null) {
        options[pos] = "";
      }
    }
    return options;
  }

  /**
   * @return a description of the classifier suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "A metaclassifier that makes its base classifier cost-sensitive. "
      + "Two methods can be used to introduce cost-sensitivity: reweighting "
      + "training instances according to the total cost assigned to each "
      + "class; or predicting the class with minimum expected "
      + "misclassification cost (rather than the most likely class). "
      + "Performance can often be "
      + "improved by using a Bagged classifier to improve the probability "
      + "estimates of the base classifier.";
  }

  /**
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String costMatrixSourceTipText() {
    return "Sets where to get the cost matrix. The two options are"
      + "to use the supplied explicit cost matrix (the setting of the "
      + "costMatrix property), or to load a cost matrix from a file when "
      + "required (this file will be loaded from the directory set by the "
      + "onDemandDirectory property and will be named relation_name"
      + CostMatrix.FILE_EXTENSION + ").";
  }

  /**
   * Gets the source location method of the cost matrix. Will be one of
   * MATRIX_ON_DEMAND or MATRIX_SUPPLIED.
   *
   * @return the cost matrix source.
   */
  public SelectedTag getCostMatrixSource() {
    return new SelectedTag(m_MatrixSource, TAGS_MATRIX_SOURCE);
  }

  /**
   * Sets the source location of the cost matrix. Values other than
   * MATRIX_ON_DEMAND or MATRIX_SUPPLIED will be ignored.
   *
   * @param newMethod the cost matrix location method.
   */
  public void setCostMatrixSource(SelectedTag newMethod) {
    if (newMethod.getTags() == TAGS_MATRIX_SOURCE) {
      m_MatrixSource = newMethod.getSelectedTag().getID();
    }
  }

  /**
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String onDemandDirectoryTipText() {
    return "Sets the directory where cost files are loaded from. This option "
      + "is used when the costMatrixSource is set to \"On Demand\".";
  }

  /**
   * Returns the directory that will be searched for cost files when
   * loading on demand.
   *
   * @return The cost file search directory.
   */
  public File getOnDemandDirectory() {
    return m_OnDemandDirectory;
  }

  /**
   * Sets the directory that will be searched for cost files when
   * loading on demand. A non-directory argument is replaced by its parent;
   * setting this switches the matrix source to on-demand.
   *
   * @param newDir The cost file search directory.
   */
  public void setOnDemandDirectory(File newDir) {
    m_OnDemandDirectory = newDir.isDirectory()
      ? newDir
      : new File(newDir.getParent());
    m_MatrixSource = MATRIX_ON_DEMAND;
  }

  /**
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String minimizeExpectedCostTipText() {
    return "Sets whether the minimum expected cost criteria will be used. If "
      + "this is false, the training data will be reweighted according to the "
      + "costs assigned to each class. If true, the minimum expected cost "
      + "criteria will be used.";
  }

  /**
   * Gets the value of MinimizeExpectedCost.
   *
   * @return Value of MinimizeExpectedCost.
   */
  public boolean getMinimizeExpectedCost() {
    return m_MinimizeExpectedCost;
  }

  /**
   * Set the value of MinimizeExpectedCost.
   *
   * @param newMinimizeExpectedCost Value to assign to MinimizeExpectedCost.
   */
  public void setMinimizeExpectedCost(boolean newMinimizeExpectedCost) {
    m_MinimizeExpectedCost = newMinimizeExpectedCost;
  }

  /**
   * Gets the classifier specification string, which contains the class name
   * of the classifier and any options to the classifier.
   *
   * @return the classifier string.
   */
  protected String getClassifierSpec() {
    Classifier c = getClassifier();
    if (c instanceof OptionHandler) {
      return c.getClass().getName() + " "
        + Utils.joinOptions(((OptionHandler)c).getOptions());
    }
    return c.getClass().getName();
  }

  /**
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String costMatrixTipText() {
    return "Sets the cost matrix explicitly. This matrix is used if the "
      + "costMatrixSource property is set to \"Supplied\".";
  }

  /**
   * Gets the misclassification cost matrix.
   *
   * @return the cost matrix
   */
  public CostMatrix getCostMatrix() {
    return m_CostMatrix;
  }

  /**
   * Sets the misclassification cost matrix (and switches the matrix source
   * to "supplied").
   *
   * @param newCostMatrix the cost matrix
   */
  public void setCostMatrix(CostMatrix newCostMatrix) {
    m_CostMatrix = newCostMatrix;
    m_MatrixSource = MATRIX_SUPPLIED;
  }

  /**
   * Returns default capabilities of the classifier: those of the base
   * learner, restricted to a nominal class.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities caps = super.getCapabilities();

    // class
    caps.disableAllClasses();
    caps.disableAllClassDependencies();
    caps.enable(Capability.NOMINAL_CLASS);

    return caps;
  }

  /**
   * Builds the model of the base learner. Loads the cost matrix (on demand
   * or from an old-format file) if necessary, and — unless minimum expected
   * cost is selected — reweights/resamples the training data according to
   * the cost matrix before training.
   *
   * @param data the training data
   * @throws Exception if the classifier could not be built successfully
   */
  public void buildClassifier(Instances data) throws Exception {
    // can classifier handle the data?
    getCapabilities().testWithFail(data);

    // remove instances with missing class
    data = new Instances(data);
    data.deleteWithMissingClass();

    if (m_Classifier == null) {
      throw new Exception("No base classifier has been set!");
    }

    if (m_MatrixSource == MATRIX_ON_DEMAND) {
      String costName = data.relationName() + CostMatrix.FILE_EXTENSION;
      File costFile = new File(getOnDemandDirectory(), costName);
      if (!costFile.exists()) {
        throw new Exception("On-demand cost file doesn't exist: " + costFile);
      }
      setCostMatrix(new CostMatrix(new BufferedReader(
          new FileReader(costFile))));
    } else if (m_CostMatrix == null) {
      // try loading an old format cost file
      m_CostMatrix = new CostMatrix(data.numClasses());
      m_CostMatrix.readOldFormat(new BufferedReader(
          new FileReader(m_CostFile)));
    }

    if (!m_MinimizeExpectedCost) {
      // a random source is only needed when the base learner cannot use
      // instance weights directly (resampling instead of reweighting)
      Random random = (m_Classifier instanceof WeightedInstancesHandler)
        ? null
        : new Random(m_Seed);
      data = m_CostMatrix.applyCostMatrix(data, random);
    }
    m_Classifier.buildClassifier(data);
  }

  /**
   * Returns class probabilities. When the minimum expected cost approach is
   * chosen, returns probability one for the class with the minimum expected
   * misclassification cost. Otherwise it returns the probability
   * distribution returned by the base classifier.
   *
   * @param instance the instance to be classified
   * @return the computed distribution for the given instance
   * @throws Exception if instance could not be classified
   *                   successfully
   */
  public double[] distributionForInstance(Instance instance) throws Exception {
    if (!m_MinimizeExpectedCost) {
      return m_Classifier.distributionForInstance(instance);
    }

    double [] pred = m_Classifier.distributionForInstance(instance);
    double [] costs = m_CostMatrix.expectedCosts(pred, instance);

    // This is probably not ideal: collapse the distribution onto the single
    // minimum-expected-cost class.
    int classIndex = Utils.minIndex(costs);
    for (int i = 0; i < pred.length; i++) {
      pred[i] = (i == classIndex) ? 1.0 : 0.0;
    }
    return pred;
  }

  /**
   * Returns the type of graph this classifier
   * represents.
   *
   * @return the type of graph this classifier represents
   */
  public int graphType() {
    return (m_Classifier instanceof Drawable)
      ? ((Drawable)m_Classifier).graphType()
      : Drawable.NOT_DRAWABLE;
  }

  /**
   * Returns graph describing the classifier (if possible).
   *
   * @return the graph of the classifier in dotty format
   * @throws Exception if the classifier cannot be graphed
   */
  public String graph() throws Exception {
    if (m_Classifier instanceof Drawable) {
      return ((Drawable)m_Classifier).graph();
    }
    throw new Exception("Classifier: " + getClassifierSpec()
        + " cannot be graphed");
  }

  /**
   * Output a representation of this classifier
   *
   * @return a string representation of the classifier
   */
  public String toString() {
    if (m_Classifier == null) {
      return "CostSensitiveClassifier: No model built yet.";
    }

    String result = "CostSensitiveClassifier using ";
    result += m_MinimizeExpectedCost
      ? "minimized expected misclasification cost\n"
      : "reweighted training instances\n";
    result += "\n" + getClassifierSpec()
      + "\n\nClassifier Model\n"
      + m_Classifier.toString()
      + "\n\nCost Matrix\n"
      + m_CostMatrix.toString();

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 9186 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param argv should contain the following arguments:
   * -t training file [-T test file] [-c class index]
   */
  public static void main(String [] argv) {
    runClassifier(new CostSensitiveClassifier(), argv);
  }
}
29.546407
434
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/Dagging.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Dagging.java
 * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.meta;

import weka.classifiers.Classifier;
import weka.classifiers.RandomizableSingleClassifierEnhancer;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;

import java.util.Enumeration;
import java.util.Vector;

/**
 <!-- globalinfo-start -->
 * This meta classifier creates a number of disjoint, stratified folds out of
 * the data and feeds each chunk of data to a copy of the supplied base
 * classifier. Predictions are combined by the Vote meta classifier, which
 * holds all the generated base classifiers. <br/>
 * Useful for base classifiers that are quadratic or worse in time behavior,
 * regarding number of instances in the training data. <br/>
 * <br/>
 * For more information, see: <br/>
 * Ting, K. M., Witten, I. H.: Stacking Bagged and Dagged Models. In:
 * Fourteenth international Conference on Machine Learning, San Francisco, CA,
 * 367-375, 1997.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;inproceedings{Ting1997,
 *    address = {San Francisco, CA},
 *    author = {Ting, K. M. and Witten, I. H.},
 *    booktitle = {Fourteenth international Conference on Machine Learning},
 *    editor = {D. H. Fisher},
 *    pages = {367-375},
 *    publisher = {Morgan Kaufmann Publishers},
 *    title = {Stacking Bagged and Dagged Models},
 *    year = {1997}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -F &lt;folds&gt;
 *  The number of folds for splitting the training set into
 *  smaller chunks for the base classifier.
 *  (default 10)</pre>
 *
 * <pre> -verbose
 *  Whether to print some more information during building the
 *  classifier.
 *  (default is off)</pre>
 *
 * <pre> -S &lt;num&gt;
 *  Random number seed.
 *  (default 1)</pre>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 * <pre> -W
 *  Full name of base classifier.
 *  (default: weka.classifiers.functions.SMO)</pre>
 *
 <!-- options-end -->
 *
 * Options after -- are passed to the designated classifier.<p/>
 *
 * @author Bernhard Pfahringer (bernhard at cs dot waikato dot ac dot nz)
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 5306 $
 * @see Vote
 */
public class Dagging
  extends RandomizableSingleClassifierEnhancer
  implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 4560165876570074309L;

  /** the number of folds to use to split the training data */
  protected int m_NumFolds = 10;

  /** the Vote meta classifier that combines the per-fold base classifiers;
   *  null until buildClassifier has been run */
  protected Vote m_Vote = null;

  /** whether to output some progress information during building */
  protected boolean m_Verbose = false;

  /**
   * Returns a string describing classifier
   *
   * @return a description suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "This meta classifier creates a number of disjoint, stratified folds out "
      + "of the data and feeds each chunk of data to a copy of the supplied "
      + "base classifier. Predictions are made via averaging, since all the "
      + "generated base classifiers are put into the Vote meta classifier. \n"
      + "Useful for base classifiers that are quadratic or worse in time "
      + "behavior, regarding number of instances in the training data. \n"
      + "\n"
      + "For more information, see: \n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "Ting, K. M. and Witten, I. H.");
    result.setValue(Field.TITLE, "Stacking Bagged and Dagged Models");
    result.setValue(Field.BOOKTITLE, "Fourteenth international Conference on Machine Learning");
    result.setValue(Field.EDITOR, "D. H. Fisher");
    result.setValue(Field.YEAR, "1997");
    result.setValue(Field.PAGES, "367-375");
    result.setValue(Field.PUBLISHER, "Morgan Kaufmann Publishers");
    result.setValue(Field.ADDRESS, "San Francisco, CA");

    return result;
  }

  /**
   * Constructor. Sets the default base classifier (SMO).
   */
  public Dagging() {
    m_Classifier = new weka.classifiers.functions.SMO();
  }

  /**
   * String describing default classifier.
   *
   * @return the default classifier classname
   */
  protected String defaultClassifierString() {
    return weka.classifiers.functions.SMO.class.getName();
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector<Option> result = new Vector<Option>();

    result.addElement(new Option(
        "\tThe number of folds for splitting the training set into\n"
        + "\tsmaller chunks for the base classifier.\n"
        + "\t(default 10)",
        "F", 1, "-F <folds>"));

    result.addElement(new Option(
        "\tWhether to print some more information during building the\n"
        + "\tclassifier.\n"
        + "\t(default is off)",
        "verbose", 0, "-verbose"));

    // append the superclass options (seed, debug, base classifier, ...)
    Enumeration en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement((Option) en.nextElement());

    return result.elements();
  }

  /**
   * Parses a given list of options. Recognized options are -F (number of
   * folds, default 10) and -verbose; all remaining options are handled by
   * the superclass and options after -- are passed to the base classifier.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String tmpStr;

    tmpStr = Utils.getOption('F', options);
    if (tmpStr.length() != 0)
      setNumFolds(Integer.parseInt(tmpStr));
    else
      setNumFolds(10);

    setVerbose(Utils.getFlag("verbose", options));

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector<String> result = new Vector<String>();

    result.add("-F");
    result.add("" + getNumFolds());

    if (getVerbose())
      result.add("-verbose");

    for (String option : super.getOptions())
      result.add(option);

    return result.toArray(new String[result.size()]);
  }

  /**
   * Gets the number of folds to use for splitting the training set.
   *
   * @return the number of folds
   */
  public int getNumFolds() {
    return m_NumFolds;
  }

  /**
   * Sets the number of folds to use for splitting the training set.
   * Invalid values (&lt; 1) are rejected with a console warning and the
   * current value is kept (Weka GUI convention, no exception thrown).
   *
   * @param value the new number of folds
   */
  public void setNumFolds(int value) {
    if (value > 0)
      m_NumFolds = value;
    else
      System.out.println(
          "At least 1 fold is necessary (provided: " + value + ")!");
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String numFoldsTipText() {
    return "The number of folds to use for splitting the training set into smaller chunks for the base classifier.";
  }

  /**
   * Set the verbose state.
   *
   * @param value the verbose state
   */
  public void setVerbose(boolean value) {
    m_Verbose = value;
  }

  /**
   * Gets the verbose state
   *
   * @return the verbose state
   */
  public boolean getVerbose() {
    return m_Verbose;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String verboseTipText() {
    return "Whether to output some additional information during building.";
  }

  /**
   * Dagging method: splits the (stratified) training data into disjoint
   * folds, trains one copy of the base classifier per fold and stores all
   * of them in a Vote meta classifier.
   *
   * @param data the training data to be used for generating the
   *             bagged classifier.
   * @throws Exception if the classifier could not be built successfully
   */
  public void buildClassifier(Instances data) throws Exception {
    Classifier[] base;
    int i;
    Instances train;

    // can classifier handle the data?
    getCapabilities().testWithFail(data);

    // remove instances with missing class (work on a copy)
    data = new Instances(data);
    data.deleteWithMissingClass();

    m_Vote = new Vote();
    base = new Classifier[getNumFolds()];

    // stratify data so each chunk has a similar class distribution
    if (getNumFolds() > 1) {
      data.randomize(data.getRandomNumberGenerator(getSeed()));
      data.stratify(getNumFolds());
    }

    // generate <folds> classifiers, one per disjoint chunk
    for (i = 0; i < getNumFolds(); i++) {
      base[i] = makeCopy(getClassifier());

      // generate training data: testCV yields the i-th disjoint fold
      if (getNumFolds() > 1) {
        // some progress information
        if (getVerbose())
          System.out.print(".");

        train = data.testCV(getNumFolds(), i);
      }
      else {
        train = data;
      }

      // train classifier
      base[i].buildClassifier(train);
    }

    // init vote
    m_Vote.setClassifiers(base);

    if (getVerbose())
      System.out.println();
  }

  /**
   * Calculates the class membership probabilities for the given test
   * instance by delegating to the internal Vote classifier.
   *
   * @param instance the instance to be classified
   * @return predicted class probability distribution
   * @throws Exception if distribution can't be computed successfully,
   *                   e.g., if no model has been built yet
   */
  public double[] distributionForInstance(Instance instance) throws Exception {
    // explicit check instead of an uninformative NullPointerException
    if (m_Vote == null)
      throw new Exception("Dagging: no model built yet - call buildClassifier first!");

    return m_Vote.distributionForInstance(instance);
  }

  /**
   * Returns description of the classifier.
   *
   * @return description of the classifier as a string
   */
  public String toString() {
    if (m_Vote == null)
      return this.getClass().getName().replaceAll(".*\\.", "")
        + ": No model built yet.";
    else
      return m_Vote.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 5306 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param args the options
   */
  public static void main(String[] args) {
    runClassifier(new Dagging(), args);
  }
}
16,761
28
160
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/Decorate.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Decorate.java * Copyright (C) 2002 Prem Melville * */ package weka.classifiers.meta; import weka.classifiers.Classifier; import weka.classifiers.RandomizableIteratedSingleClassifierEnhancer; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.UnsupportedClassTypeException; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.core.DenseInstance; /** <!-- globalinfo-start --> * DECORATE is a meta-learner for building diverse ensembles of classifiers by using specially constructed artificial training examples. Comprehensive experiments have demonstrated that this technique is consistently more accurate than the base classifier, Bagging and Random Forests.Decorate also obtains higher accuracy than Boosting on small training sets, and achieves comparable performance on larger training sets. <br/> * <br/> * For more details see: <br/> * <br/> * P. Melville, R. J. 
Mooney: Constructing Diverse Classifier Ensembles Using Artificial Training Examples. In: Eighteenth International Joint Conference on Artificial Intelligence, 505-510, 2003.<br/> * <br/> * P. Melville, R. J. Mooney (2004). Creating Diversity in Ensembles Using Artificial Data. Information Fusion: Special Issue on Diversity in Multiclassifier Systems.. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Melville2003, * author = {P. Melville and R. J. Mooney}, * booktitle = {Eighteenth International Joint Conference on Artificial Intelligence}, * pages = {505-510}, * title = {Constructing Diverse Classifier Ensembles Using Artificial Training Examples}, * year = {2003} * } * * &#64;article{Melville2004, * author = {P. Melville and R. J. Mooney}, * journal = {Information Fusion: Special Issue on Diversity in Multiclassifier Systems}, * note = {submitted}, * title = {Creating Diversity in Ensembles Using Artificial Data}, * year = {2004} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -E * Desired size of ensemble. * (default 10)</pre> * * <pre> -R * Factor that determines number of artificial examples to generate. * Specified proportional to training set size. * (default 1.0)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Number of iterations. * (default 10)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.J48)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> -U * Use unpruned tree.</pre> * * <pre> -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25)</pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. 
* (default 2)</pre> * * <pre> -R * Use reduced error pruning.</pre> * * <pre> -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. * (default 3)</pre> * * <pre> -B * Use binary splits only.</pre> * * <pre> -S * Don't perform subtree raising.</pre> * * <pre> -L * Do not clean up after the tree has been built.</pre> * * <pre> -A * Laplace smoothing for predicted probabilities.</pre> * * <pre> -Q &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * <!-- options-end --> * * Options after -- are passed to the designated classifier.<p> * * @author Prem Melville (melville@cs.utexas.edu) * @version $Revision: 1.9 $ */ public class Decorate extends RandomizableIteratedSingleClassifierEnhancer implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -6020193348750269931L; /** Vector of classifiers that make up the committee/ensemble. */ protected Vector m_Committee = null; /** The desired ensemble size. */ protected int m_DesiredSize = 10; /** Amount of artificial/random instances to use - specified as a fraction of the training data size. */ protected double m_ArtSize = 1.0 ; /** The random number generator. */ protected Random m_Random = new Random(0); /** Attribute statistics - used for generating artificial examples. */ protected Vector m_AttributeStats = null; /** * Constructor. */ public Decorate() { m_Classifier = new weka.classifiers.trees.J48(); } /** * String describing default classifier. 
* * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.trees.J48"; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Vector newVector = new Vector(8); newVector.addElement(new Option( "\tDesired size of ensemble.\n" + "\t(default 10)", "E", 1, "-E")); newVector.addElement(new Option( "\tFactor that determines number of artificial examples to generate.\n" +"\tSpecified proportional to training set size.\n" + "\t(default 1.0)", "R", 1, "-R")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -E * Desired size of ensemble. * (default 10)</pre> * * <pre> -R * Factor that determines number of artificial examples to generate. * Specified proportional to training set size. * (default 1.0)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Number of iterations. * (default 10)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.J48)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> -U * Use unpruned tree.</pre> * * <pre> -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25)</pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2)</pre> * * <pre> -R * Use reduced error pruning.</pre> * * <pre> -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. 
* (default 3)</pre> * * <pre> -B * Use binary splits only.</pre> * * <pre> -S * Don't perform subtree raising.</pre> * * <pre> -L * Do not clean up after the tree has been built.</pre> * * <pre> -A * Laplace smoothing for predicted probabilities.</pre> * * <pre> -Q &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * <!-- options-end --> * * Options after -- are passed to the designated classifier.<p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String desiredSize = Utils.getOption('E', options); if (desiredSize.length() != 0) { setDesiredSize(Integer.parseInt(desiredSize)); } else { setDesiredSize(10); } String artSize = Utils.getOption('R', options); if (artSize.length() != 0) { setArtificialSize(Double.parseDouble(artSize)); } else { setArtificialSize(1.0); } super.setOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 4]; int current = 0; options[current++] = "-E"; options[current++] = "" + getDesiredSize(); options[current++] = "-R"; options[current++] = "" + getArtificialSize(); System.arraycopy(superOptions, 0, options, current, superOptions.length); current += superOptions.length; while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String desiredSizeTipText() { return "the desired number of member classifiers in the Decorate ensemble. Decorate may terminate " +"before this size is reached (depending on the value of numIterations). 
" +"Larger ensemble sizes usually lead to more accurate models, but increases " +"training time and model complexity."; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numIterationsTipText() { return "the maximum number of Decorate iterations to run. Each iteration generates a classifier, " +"but does not necessarily add it to the ensemble. Decorate stops when the desired ensemble " +"size is reached. This parameter should be greater than " +"equal to the desiredSize. If the desiredSize is not being reached it may help to " +"increase this value."; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String artificialSizeTipText() { return "determines the number of artificial examples to use during training. Specified as " +"a proportion of the training data. Higher values can increase ensemble diversity."; } /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "DECORATE is a meta-learner for building diverse ensembles of " +"classifiers by using specially constructed artificial training " +"examples. Comprehensive experiments have demonstrated that this " +"technique is consistently more accurate than the base classifier, Bagging and Random Forests." +"Decorate also obtains higher accuracy than Boosting on small training sets, and achieves " +"comparable performance on larger training sets. \n\n" +"For more details see: \n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "P. Melville and R. J. Mooney"); result.setValue(Field.TITLE, "Constructing Diverse Classifier Ensembles Using Artificial Training Examples"); result.setValue(Field.BOOKTITLE, "Eighteenth International Joint Conference on Artificial Intelligence"); result.setValue(Field.YEAR, "2003"); result.setValue(Field.PAGES, "505-510"); additional = result.add(Type.ARTICLE); additional.setValue(Field.AUTHOR, "P. Melville and R. J. Mooney"); additional.setValue(Field.TITLE, "Creating Diversity in Ensembles Using Artificial Data"); additional.setValue(Field.JOURNAL, "Information Fusion: Special Issue on Diversity in Multiclassifier Systems"); additional.setValue(Field.YEAR, "2004"); additional.setValue(Field.NOTE, "submitted"); return result; } /** * Factor that determines number of artificial examples to generate. * * @return factor that determines number of artificial examples to generate */ public double getArtificialSize() { return m_ArtSize; } /** * Sets factor that determines number of artificial examples to generate. * * @param newArtSize factor that determines number of artificial examples to generate */ public void setArtificialSize(double newArtSize) { m_ArtSize = newArtSize; } /** * Gets the desired size of the committee. * * @return the desired size of the committee */ public int getDesiredSize() { return m_DesiredSize; } /** * Sets the desired size of the committee. * * @param newDesiredSize the desired size of the committee */ public void setDesiredSize(int newDesiredSize) { m_DesiredSize = newDesiredSize; } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NOMINAL_CLASS); // instances result.setMinimumNumberInstances(m_DesiredSize); return result; } /** * Build Decorate classifier * * @param data the training data to be used for generating the classifier * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { if(m_Classifier == null) { throw new Exception("A base classifier has not been specified!"); } // can classifier handle the data? getCapabilities().testWithFail(data); // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); //initialize random number generator if(m_Seed==-1) m_Random = new Random(); else m_Random = new Random(m_Seed); int i = 1;//current committee size int numTrials = 1;//number of Decorate iterations Instances divData = new Instances(data);//local copy of data - diversity data Instances artData = null;//artificial data //compute number of artficial instances to add at each iteration int artSize = (int) (Math.abs(m_ArtSize)*divData.numInstances()); if(artSize==0) artSize=1;//atleast add one random example computeStats(data);//Compute training data stats for creating artificial examples //initialize new committee m_Committee = new Vector(); Classifier newClassifier = m_Classifier; newClassifier.buildClassifier(divData); m_Committee.add(newClassifier); double eComm = computeError(divData);//compute ensemble error if(m_Debug) System.out.println("Initialize:\tClassifier "+i+" added to ensemble. 
Ensemble error = "+eComm); //repeat till desired committee size is reached OR the max number of iterations is exceeded while(i<m_DesiredSize && numTrials<m_NumIterations){ //Generate artificial training examples artData = generateArtificialData(artSize, data); //Label artificial examples labelData(artData); addInstances(divData, artData);//Add new artificial data //Build new classifier Classifier tmp[] = AbstractClassifier.makeCopies(m_Classifier,1); newClassifier = tmp[0]; newClassifier.buildClassifier(divData); //Remove all the artificial data removeInstances(divData, artSize); //Test if the new classifier should be added to the ensemble m_Committee.add(newClassifier);//add new classifier to current committee double currError = computeError(divData); if(currError <= eComm){//adding the new member did not increase the error i++; eComm = currError; if(m_Debug) System.out.println("Iteration: "+(1+numTrials)+"\tClassifier "+i+" added to ensemble. Ensemble error = "+eComm); }else{//reject the current classifier because it increased the ensemble error m_Committee.removeElementAt(m_Committee.size()-1);//pop the last member } numTrials++; } } /** * Compute and store statistics required for generating artificial data. 
* * @param data training instances * @throws Exception if statistics could not be calculated successfully */ protected void computeStats(Instances data) throws Exception{ int numAttributes = data.numAttributes(); m_AttributeStats = new Vector(numAttributes);//use to map attributes to their stats for(int j=0; j<numAttributes; j++){ if(data.attribute(j).isNominal()){ //Compute the probability of occurence of each distinct value int []nomCounts = (data.attributeStats(j)).nominalCounts; double []counts = new double[nomCounts.length]; if(counts.length < 2) throw new Exception("Nominal attribute has less than two distinct values!"); //Perform Laplace smoothing for(int i=0; i<counts.length; i++) counts[i] = nomCounts[i] + 1; Utils.normalize(counts); double []stats = new double[counts.length - 1]; stats[0] = counts[0]; //Calculate cumulative probabilities for(int i=1; i<stats.length; i++) stats[i] = stats[i-1] + counts[i]; m_AttributeStats.add(j,stats); }else if(data.attribute(j).isNumeric()){ //Get mean and standard deviation from the training data double []stats = new double[2]; stats[0] = data.meanOrMode(j); stats[1] = Math.sqrt(data.variance(j)); m_AttributeStats.add(j,stats); }else System.err.println("Decorate can only handle numeric and nominal values."); } } /** * Generate artificial training examples. 
* @param artSize size of examples set to create * @param data training data * @return the set of unlabeled artificial examples */ protected Instances generateArtificialData(int artSize, Instances data){ int numAttributes = data.numAttributes(); Instances artData = new Instances(data, artSize); double []att; Instance artInstance; for(int i=0; i<artSize; i++){ att = new double[numAttributes]; for(int j=0; j<numAttributes; j++){ if(data.attribute(j).isNominal()){ //Select nominal value based on the frequency of occurence in the training data double []stats = (double [])m_AttributeStats.get(j); att[j] = (double) selectIndexProbabilistically(stats); } else if(data.attribute(j).isNumeric()){ //Generate numeric value from the Guassian distribution //defined by the mean and std dev of the attribute double []stats = (double [])m_AttributeStats.get(j); att[j] = (m_Random.nextGaussian()*stats[1])+stats[0]; }else System.err.println("Decorate can only handle numeric and nominal values."); } artInstance = new DenseInstance(1.0, att); artData.add(artInstance); } return artData; } /** * Labels the artificially generated data. * * @param artData the artificially generated instances * @throws Exception if instances cannot be labeled successfully */ protected void labelData(Instances artData) throws Exception { Instance curr; double []probs; for(int i=0; i<artData.numInstances(); i++){ curr = artData.instance(i); //compute the class membership probs predicted by the current ensemble probs = distributionForInstance(curr); //select class label inversely proportional to the ensemble predictions curr.setClassValue(inverseLabel(probs)); } } /** * Select class label such that the probability of selection is * inversely proportional to the ensemble's predictions. 
* * @param probs class membership probabilities of instance * @return index of class label selected * @throws Exception if instances cannot be labeled successfully */ protected int inverseLabel(double []probs) throws Exception{ double []invProbs = new double[probs.length]; //Produce probability distribution inversely proportional to the given for(int i=0; i<probs.length; i++){ if(probs[i]==0){ invProbs[i] = Double.MAX_VALUE/probs.length; //Account for probability values of 0 - to avoid divide-by-zero errors //Divide by probs.length to make sure normalizing works properly }else{ invProbs[i] = 1.0 / probs[i]; } } Utils.normalize(invProbs); double []cdf = new double[invProbs.length]; //Compute cumulative probabilities cdf[0] = invProbs[0]; for(int i=1; i<invProbs.length; i++){ cdf[i] = invProbs[i]+cdf[i-1]; } if(Double.isNaN(cdf[invProbs.length-1])) System.err.println("Cumulative class membership probability is NaN!"); return selectIndexProbabilistically(cdf); } /** * Given cumulative probabilities select a nominal attribute value index * * @param cdf array of cumulative probabilities * @return index of attribute selected based on the probability distribution */ protected int selectIndexProbabilistically(double []cdf){ double rnd = m_Random.nextDouble(); int index = 0; while(index < cdf.length && rnd > cdf[index]){ index++; } return index; } /** * Removes a specified number of instances from the given set of instances. * * @param data given instances * @param numRemove number of instances to delete from the given instances */ protected void removeInstances(Instances data, int numRemove){ int num = data.numInstances(); for(int i=num - 1; i>num - 1 - numRemove;i--){ data.delete(i); } } /** * Add new instances to the given set of instances. 
* * @param data given instances * @param newData set of instances to add to given instances */ protected void addInstances(Instances data, Instances newData){ for(int i=0; i<newData.numInstances(); i++) data.add(newData.instance(i)); } /** * Computes the error in classification on the given data. * * @param data the instances to be classified * @return classification error * @throws Exception if error can not be computed successfully */ protected double computeError(Instances data) throws Exception { double error = 0.0; int numInstances = data.numInstances(); Instance curr; for(int i=0; i<numInstances; i++){ curr = data.instance(i); //Check if the instance has been misclassified if(curr.classValue() != ((int) classifyInstance(curr))) error++; } return (error/numInstances); } /** * Calculates the class membership probabilities for the given test instance. * * @param instance the instance to be classified * @return predicted class probability distribution * @throws Exception if distribution can't be computed successfully */ public double[] distributionForInstance(Instance instance) throws Exception { if (instance.classAttribute().isNumeric()) { throw new UnsupportedClassTypeException("Decorate can't handle a numeric class!"); } double [] sums = new double [instance.numClasses()], newProbs; Classifier curr; for (int i = 0; i < m_Committee.size(); i++) { curr = (Classifier) m_Committee.get(i); newProbs = curr.distributionForInstance(instance); for (int j = 0; j < newProbs.length; j++) sums[j] += newProbs[j]; } if (Utils.eq(Utils.sum(sums), 0)) { return sums; } else { Utils.normalize(sums); return sums; } } /** * Returns description of the Decorate classifier. 
* * @return description of the Decorate classifier as a string */ public String toString() { if (m_Committee == null) { return "Decorate: No model built yet."; } StringBuffer text = new StringBuffer(); text.append("Decorate base classifiers: \n\n"); for (int i = 0; i < m_Committee.size(); i++) text.append(((Classifier) m_Committee.get(i)).toString() + "\n\n"); text.append("Number of classifier in the ensemble: "+m_Committee.size()+"\n"); return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.9 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new Decorate(), argv); } }
26,174
32.132911
426
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/END.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * END.java * Copyright (C) 2004-2005 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import weka.classifiers.Classifier; import weka.classifiers.RandomizableIteratedSingleClassifierEnhancer; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Randomizable; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.util.Hashtable; import java.util.Random; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * A meta classifier for handling multi-class datasets with 2-class classifiers by building an ensemble of nested dichotomies.<br/> * <br/> * For more info, check<br/> * <br/> * Lin Dong, Eibe Frank, Stefan Kramer: Ensembles of Balanced Nested Dichotomies for Multi-class Problems. In: PKDD, 84-95, 2005.<br/> * <br/> * Eibe Frank, Stefan Kramer: Ensembles of nested dichotomies for multi-class problems. In: Twenty-first International Conference on Machine Learning, 2004. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Dong2005, * author = {Lin Dong and Eibe Frank and Stefan Kramer}, * booktitle = {PKDD}, * pages = {84-95}, * publisher = {Springer}, * title = {Ensembles of Balanced Nested Dichotomies for Multi-class Problems}, * year = {2005} * } * * &#64;inproceedings{Frank2004, * author = {Eibe Frank and Stefan Kramer}, * booktitle = {Twenty-first International Conference on Machine Learning}, * publisher = {ACM}, * title = {Ensembles of nested dichotomies for multi-class problems}, * year = {2004} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Number of iterations. * (default 10)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.meta.nestedDichotomies.ND)</pre> * * <pre> * Options specific to classifier weka.classifiers.meta.nestedDichotomies.ND: * </pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.J48)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> -U * Use unpruned tree.</pre> * * <pre> -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25)</pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2)</pre> * * <pre> -R * Use reduced error pruning.</pre> * * <pre> -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. 
* (default 3)</pre> * * <pre> -B * Use binary splits only.</pre> * * <pre> -S * Don't perform subtree raising.</pre> * * <pre> -L * Do not clean up after the tree has been built.</pre> * * <pre> -A * Laplace smoothing for predicted probabilities.</pre> * * <pre> -Q &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * <!-- options-end --> * * Options after -- are passed to the designated classifier.<p> * * @author Eibe Frank * @author Lin Dong * @version $Revision: 1.8 $ */ public class END extends RandomizableIteratedSingleClassifierEnhancer implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -4143242362912214956L; /** * The hashtable containing the classifiers for the END. */ protected Hashtable m_hashtable = null; /** * Constructor. */ public END() { m_Classifier = new weka.classifiers.meta.nestedDichotomies.ND(); } /** * String describing default classifier. * * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.meta.nestedDichotomies.ND"; } /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "A meta classifier for handling multi-class datasets with 2-class " + "classifiers by building an ensemble of nested dichotomies.\n\n" + "For more info, check\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Lin Dong and Eibe Frank and Stefan Kramer"); result.setValue(Field.TITLE, "Ensembles of Balanced Nested Dichotomies for Multi-class Problems"); result.setValue(Field.BOOKTITLE, "PKDD"); result.setValue(Field.YEAR, "2005"); result.setValue(Field.PAGES, "84-95"); result.setValue(Field.PUBLISHER, "Springer"); additional = result.add(Type.INPROCEEDINGS); additional.setValue(Field.AUTHOR, "Eibe Frank and Stefan Kramer"); additional.setValue(Field.TITLE, "Ensembles of nested dichotomies for multi-class problems"); additional.setValue(Field.BOOKTITLE, "Twenty-first International Conference on Machine Learning"); additional.setValue(Field.YEAR, "2004"); additional.setValue(Field.PUBLISHER, "ACM"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // instances result.setMinimumNumberInstances(1); // at least 1 for the RandomNumberGenerator! return result; } /** * Builds the committee of randomizable classifiers. * * @param data the training data to be used for generating the * bagged classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(data); // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); if (!(m_Classifier instanceof weka.classifiers.meta.nestedDichotomies.ND) && !(m_Classifier instanceof weka.classifiers.meta.nestedDichotomies.ClassBalancedND) && !(m_Classifier instanceof weka.classifiers.meta.nestedDichotomies.DataNearBalancedND)) { throw new IllegalArgumentException("END only works with ND, ClassBalancedND " + "or DataNearBalancedND classifier"); } m_hashtable = new Hashtable(); m_Classifiers = AbstractClassifier.makeCopies(m_Classifier, m_NumIterations); Random random = data.getRandomNumberGenerator(m_Seed); for (int j = 0; j < m_Classifiers.length; j++) { // Set the random number seed for the current classifier. ((Randomizable) m_Classifiers[j]).setSeed(random.nextInt()); // Set the hashtable if (m_Classifier instanceof weka.classifiers.meta.nestedDichotomies.ND) ((weka.classifiers.meta.nestedDichotomies.ND)m_Classifiers[j]).setHashtable(m_hashtable); else if (m_Classifier instanceof weka.classifiers.meta.nestedDichotomies.ClassBalancedND) ((weka.classifiers.meta.nestedDichotomies.ClassBalancedND)m_Classifiers[j]).setHashtable(m_hashtable); else if (m_Classifier instanceof weka.classifiers.meta.nestedDichotomies.DataNearBalancedND) ((weka.classifiers.meta.nestedDichotomies.DataNearBalancedND)m_Classifiers[j]). setHashtable(m_hashtable); // Build the classifier. m_Classifiers[j].buildClassifier(data); } } /** * Calculates the class membership probabilities for the given test * instance. 
* * @param instance the instance to be classified * @return preedicted class probability distribution * @throws Exception if distribution can't be computed successfully */ public double[] distributionForInstance(Instance instance) throws Exception { double [] sums = new double [instance.numClasses()], newProbs; for (int i = 0; i < m_NumIterations; i++) { if (instance.classAttribute().isNumeric() == true) { sums[0] += m_Classifiers[i].classifyInstance(instance); } else { newProbs = m_Classifiers[i].distributionForInstance(instance); for (int j = 0; j < newProbs.length; j++) sums[j] += newProbs[j]; } } if (instance.classAttribute().isNumeric() == true) { sums[0] /= (double)m_NumIterations; return sums; } else if (Utils.eq(Utils.sum(sums), 0)) { return sums; } else { Utils.normalize(sums); return sums; } } /** * Returns description of the committee. * * @return description of the committee as a string */ public String toString() { if (m_Classifiers == null) { return "END: No model built yet."; } StringBuffer text = new StringBuffer(); text.append("All the base classifiers: \n\n"); for (int i = 0; i < m_Classifiers.length; i++) text.append(m_Classifiers[i].toString() + "\n\n"); return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.8 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new END(), argv); } }
11,092
29.899721
156
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/FilteredClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * FilteredClassifier.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.SingleClassifierEnhancer; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Drawable; import weka.core.PartitionGenerator; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.filters.Filter; /** <!-- globalinfo-start --> * Class for running an arbitrary classifier on data that has been passed through an arbitrary filter. Like the classifier, the structure of the filter is based exclusively on the training data and test instances will be processed by the filter without changing their structure. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -F &lt;filter specification&gt; * Full class name of filter to use, followed * by filter options. * eg: "weka.filters.unsupervised.attribute.Remove -V -R 1,2"</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.trees.J48)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> -U * Use unpruned tree.</pre> * * <pre> -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25)</pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2)</pre> * * <pre> -R * Use reduced error pruning.</pre> * * <pre> -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. * (default 3)</pre> * * <pre> -B * Use binary splits only.</pre> * * <pre> -S * Don't perform subtree raising.</pre> * * <pre> -L * Do not clean up after the tree has been built.</pre> * * <pre> -A * Laplace smoothing for predicted probabilities.</pre> * * <pre> -Q &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * <!-- options-end --> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision: 9117 $ */ public class FilteredClassifier extends SingleClassifierEnhancer implements Drawable, PartitionGenerator { /** for serialization */ static final long serialVersionUID = -4523450618538717400L; /** The filter */ protected Filter m_Filter = new weka.filters.supervised.attribute.AttributeSelection(); /** The instance structure of the filtered instances */ protected Instances m_FilteredInstances; /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for running an arbitrary classifier on data that has been passed " + "through an arbitrary filter. Like the classifier, the structure of the filter " + "is based exclusively on the training data and test instances will be processed " + "by the filter without changing their structure."; } /** * String describing default classifier. 
* * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.trees.J48"; } /** * Default constructor. */ public FilteredClassifier() { m_Classifier = new weka.classifiers.trees.J48(); m_Filter = new weka.filters.supervised.attribute.Discretize(); } /** * Returns the type of graph this classifier * represents. * * @return the graph type of this classifier */ public int graphType() { if (m_Classifier instanceof Drawable) return ((Drawable)m_Classifier).graphType(); else return Drawable.NOT_DRAWABLE; } /** * Returns graph describing the classifier (if possible). * * @return the graph of the classifier in dotty format * @throws Exception if the classifier cannot be graphed */ public String graph() throws Exception { if (m_Classifier instanceof Drawable) return ((Drawable)m_Classifier).graph(); else throw new Exception("Classifier: " + getClassifierSpec() + " cannot be graphed"); } /** * Builds the classifier to generate a partition. * (If the base classifier supports this.) */ public void generatePartition(Instances data) throws Exception { if (m_Classifier instanceof PartitionGenerator) buildClassifier(data); else throw new Exception("Classifier: " + getClassifierSpec() + " cannot generate a partition"); } /** * Computes an array that has a value for each element in the partition. * (If the base classifier supports this.) */ public double[] getMembershipValues(Instance inst) throws Exception { if (m_Classifier instanceof PartitionGenerator) { Instance newInstance = filterInstance(inst); if (newInstance == null) { double[] unclassified = new double[numElements()]; for (int i = 0; i < unclassified.length; i++) { unclassified[i] = Utils.missingValue(); } return unclassified; } else { return ((PartitionGenerator)m_Classifier).getMembershipValues(newInstance); } } else throw new Exception("Classifier: " + getClassifierSpec() + " cannot generate a partition"); } /** * Returns the number of elements in the partition. 
* (If the base classifier supports this.) */ public int numElements() throws Exception { if (m_Classifier instanceof PartitionGenerator) return ((PartitionGenerator)m_Classifier).numElements(); else throw new Exception("Classifier: " + getClassifierSpec() + " cannot generate a partition"); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(2); newVector.addElement(new Option( "\tFull class name of filter to use, followed\n" + "\tby filter options.\n" + "\teg: \"weka.filters.unsupervised.attribute.Remove -V -R 1,2\"", "F", 1, "-F <filter specification>")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -F &lt;filter specification&gt; * Full class name of filter to use, followed * by filter options. * eg: "weka.filters.unsupervised.attribute.Remove -V -R 1,2"</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.J48)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> -U * Use unpruned tree.</pre> * * <pre> -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25)</pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2)</pre> * * <pre> -R * Use reduced error pruning.</pre> * * <pre> -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. 
* (default 3)</pre> * * <pre> -B * Use binary splits only.</pre> * * <pre> -S * Don't perform subtree raising.</pre> * * <pre> -L * Do not clean up after the tree has been built.</pre> * * <pre> -A * Laplace smoothing for predicted probabilities.</pre> * * <pre> -Q &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { // Same for filter String filterString = Utils.getOption('F', options); if (filterString.length() > 0) { String [] filterSpec = Utils.splitOptions(filterString); if (filterSpec.length == 0) { throw new IllegalArgumentException("Invalid filter specification string"); } String filterName = filterSpec[0]; filterSpec[0] = ""; setFilter((Filter) Utils.forName(Filter.class, filterName, filterSpec)); } else { setFilter(new weka.filters.supervised.attribute.Discretize()); } super.setOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 2]; int current = 0; options[current++] = "-F"; options[current++] = "" + getFilterSpec(); System.arraycopy(superOptions, 0, options, current, superOptions.length); return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String filterTipText() { return "The filter to be used."; } /** * Sets the filter * * @param filter the filter with all options set. */ public void setFilter(Filter filter) { m_Filter = filter; } /** * Gets the filter used. 
* * @return the filter */ public Filter getFilter() { return m_Filter; } /** * Gets the filter specification string, which contains the class name of * the filter and any options to the filter * * @return the filter string. */ protected String getFilterSpec() { Filter c = getFilter(); if (c instanceof OptionHandler) { return c.getClass().getName() + " " + Utils.joinOptions(((OptionHandler)c).getOptions()); } return c.getClass().getName(); } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result; if (getFilter() == null) result = super.getCapabilities(); else result = getFilter().getCapabilities(); // the filtered classifier always needs a class result.disable(Capability.NO_CLASS); // set dependencies for (Capability cap: Capability.values()) result.enableDependency(cap); return result; } /** * Build the classifier on the filtered data. * * @param data the training data * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { if (m_Classifier == null) { throw new Exception("No base classifiers have been set!"); } // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); /* String fname = m_Filter.getClass().getName(); fname = fname.substring(fname.lastIndexOf('.') + 1); util.Timer t = util.Timer.getTimer("FilteredClassifier::" + fname); t.start(); */ m_Filter.setInputFormat(data); // filter capabilities are checked here data = Filter.useFilter(data, m_Filter); //t.stop(); // can classifier handle the data? getClassifier().getCapabilities().testWithFail(data); m_FilteredInstances = data.stringFreeStructure(); m_Classifier.buildClassifier(data); } /** * Filters the instance so that it can subsequently be classified. 
*/ protected Instance filterInstance(Instance instance) throws Exception { /* System.err.println("FilteredClassifier:: " + m_Filter.getClass().getName() + " in: " + instance); */ if (m_Filter.numPendingOutput() > 0) { throw new Exception("Filter output queue not empty!"); } /* String fname = m_Filter.getClass().getName(); fname = fname.substring(fname.lastIndexOf('.') + 1); util.Timer t = util.Timer.getTimer("FilteredClassifier::" + fname); t.start(); */ if (!m_Filter.input(instance)) { if (!m_Filter.mayRemoveInstanceAfterFirstBatchDone()) { throw new Exception("Filter didn't make the test instance" + " immediately available!"); } else { m_Filter.batchFinished(); return null; } } m_Filter.batchFinished(); return m_Filter.output(); //t.stop(); /* System.err.println("FilteredClassifier:: " + m_Filter.getClass().getName() + " out: " + newInstance); */ } /** * Classifies a given instance after filtering. * * @param instance the instance to be classified * @return the class distribution for the given instance * @throws Exception if instance could not be classified * successfully */ public double [] distributionForInstance(Instance instance) throws Exception { Instance newInstance = filterInstance(instance); if (newInstance == null) { // filter has consumed the instance (e.g. RemoveWithValues // may do this). 
We will indicate no prediction for this // instance double[] unclassified = null; if (instance.classAttribute().isNumeric()) { unclassified = new double[1]; unclassified[0] = Utils.missingValue(); } else { // all zeros unclassified = new double[instance.classAttribute().numValues()]; } return unclassified; } else { return m_Classifier.distributionForInstance(newInstance); } } /** * Output a representation of this classifier * * @return a representation of this classifier */ public String toString() { if (m_FilteredInstances == null) { return "FilteredClassifier: No model built yet."; } String result = "FilteredClassifier using " + getClassifierSpec() + " on data filtered through " + getFilterSpec() + "\n\nFiltered Header\n" + m_FilteredInstances.toString() + "\n\nClassifier Model\n" + m_Classifier.toString(); return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9117 $"); } /** * Main method for testing this class. * * @param argv should contain the following arguments: * -t training file [-T test file] [-c class index] */ public static void main(String [] argv) { runClassifier(new FilteredClassifier(), argv); } }
15,768
27.361511
278
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/Grading.java
/*
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 *    Grading.java
 *    Copyright (C) 2000 University of Waikato
 *
 */

package weka.classifiers.meta;

import weka.classifiers.Classifier;
import weka.core.Attribute;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;

import java.util.Random;

import weka.classifiers.AbstractClassifier;
import weka.core.DenseInstance;

/**
 <!-- globalinfo-start -->
 * Implements Grading. The base classifiers are "graded".<br/>
 * <br/>
 * For more information, see<br/>
 * <br/>
 * A.K. Seewald, J. Fuernkranz: An Evaluation of Grading Classifiers. In: Advances in Intelligent Data Analysis: 4th International Conference, Berlin/Heidelberg/New York/Tokyo, 115-124, 2001.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;inproceedings{Seewald2001,
 *    address = {Berlin/Heidelberg/New York/Tokyo},
 *    author = {A.K. Seewald and J. Fuernkranz},
 *    booktitle = {Advances in Intelligent Data Analysis: 4th International Conference},
 *    editor = {F. Hoffmann et al.},
 *    pages = {115-124},
 *    publisher = {Springer},
 *    title = {An Evaluation of Grading Classifiers},
 *    year = {2001}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -M &lt;scheme specification&gt;
 *  Full name of meta classifier, followed by options.
 *  (default: "weka.classifiers.rules.Zero")</pre>
 *
 * <pre> -X &lt;number of folds&gt;
 *  Sets the number of cross-validation folds.</pre>
 *
 * <pre> -S &lt;num&gt;
 *  Random number seed.
 *  (default 1)</pre>
 *
 * <pre> -B &lt;classifier specification&gt;
 *  Full class name of classifier to include, followed
 *  by scheme options. May be specified multiple times.
 *  (default: "weka.classifiers.rules.ZeroR")</pre>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 <!-- options-end -->
 *
 * @author Alexander K. Seewald (alex@seewald.at)
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 1.13 $
 */
public class Grading
  extends Stacking
  implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 5207837947890081170L;

  /** The meta classifiers, one for each base classifier. */
  protected Classifier [] m_MetaClassifiers = new Classifier[0];

  /** Number of training instances per class, used to break ties
   * between equally-supported class predictions. */
  protected double [] m_InstPerClass = null;

  /**
   * Returns a string describing classifier
   *
   * @return a description suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "Implements Grading. The base classifiers are \"graded\".\n\n"
      + "For more information, see\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "A.K. Seewald and J. Fuernkranz");
    result.setValue(Field.TITLE, "An Evaluation of Grading Classifiers");
    result.setValue(Field.BOOKTITLE, "Advances in Intelligent Data Analysis: 4th International Conference");
    result.setValue(Field.EDITOR, "F. Hoffmann et al.");
    result.setValue(Field.YEAR, "2001");
    result.setValue(Field.PAGES, "115-124");
    result.setValue(Field.PUBLISHER, "Springer");
    result.setValue(Field.ADDRESS, "Berlin/Heidelberg/New York/Tokyo");

    return result;
  }

  /**
   * Generates the meta data: one meta-level dataset per base classifier,
   * built via cross-validation on the training data, then trains one meta
   * classifier per base classifier on its meta data.
   *
   * @param newData the data to work on
   * @param random the random number generator used in the generation
   * @throws Exception if generation fails
   */
  protected void generateMetaLevel(Instances newData, Random random)
    throws Exception {

    m_MetaFormat = metaFormat(newData);
    Instances [] metaData = new Instances[m_Classifiers.length];
    for (int i = 0; i < m_Classifiers.length; i++) {
      metaData[i] = metaFormat(newData);
    }
    for (int j = 0; j < m_NumFolds; j++) {
      Instances train = newData.trainCV(m_NumFolds, j, random);
      Instances test = newData.testCV(m_NumFolds, j);

      // Build base classifiers on the training fold and grade them on
      // the held-out fold
      for (int i = 0; i < m_Classifiers.length; i++) {
	getClassifier(i).buildClassifier(train);
        for (int k = 0; k < test.numInstances(); k++) {
	  metaData[i].add(metaInstance(test.instance(k), i));
        }
      }
    }

    // calculate InstPerClass (class frequencies, used for tie-breaking
    // at prediction time)
    m_InstPerClass = new double[newData.numClasses()];
    for (int i = 0; i < newData.numClasses(); i++) {
      m_InstPerClass[i] = 0.0;
    }
    for (int i = 0; i < newData.numInstances(); i++) {
      m_InstPerClass[(int)newData.instance(i).classValue()]++;
    }

    m_MetaClassifiers = AbstractClassifier.makeCopies(m_MetaClassifier,
						      m_Classifiers.length);

    for (int i = 0; i < m_Classifiers.length; i++) {
      m_MetaClassifiers[i].buildClassifier(metaData[i]);
    }
  }

  /**
   * Returns class probabilities for a given instance using the stacked
   * classifier. One class will always get all the probability mass
   * (i.e. probability one).
   *
   * @param instance the instance to be classified
   * @throws Exception if instance could not be classified
   * successfully
   * @return the class distribution for the given instance
   */
  public double[] distributionForInstance(Instance instance) throws Exception {

    double maxPreds;
    int numPreds = 0;
    int numClassifiers = m_Classifiers.length;
    int idxPreds;
    double [] predConfs = new double[numClassifiers];
    double [] preds;

    for (int i = 0; i < numClassifiers; i++) {
      // build the meta-level instance once and reuse it for both the
      // distribution and the crisp meta prediction
      Instance metaInst = metaInstance(instance, i);
      preds = m_MetaClassifiers[i].distributionForInstance(metaInst);
      if (m_MetaClassifiers[i].classifyInstance(metaInst) == 1) {
        // meta classifier predicts "correct": positive confidence
        predConfs[i] = preds[1];
      } else {
        // predicted "incorrect": store as negative confidence
        predConfs[i] = -preds[0];
      }
    }
    if (predConfs[Utils.maxIndex(predConfs)] < 0.0) { // no correct classifiers
      for (int i = 0; i < numClassifiers; i++)   // use neg. confidences instead
	predConfs[i] = 1.0 + predConfs[i];
    } else {
      for (int i = 0; i < numClassifiers; i++)   // otherwise ignore neg. conf
	if (predConfs[i] < 0) predConfs[i] = 0.0;
    }

    // accumulate each base classifier's confidence onto the class it predicts
    preds = new double[instance.numClasses()];
    for (int i = 0; i < instance.numClasses(); i++) {
      preds[i] = 0.0;
    }
    for (int i = 0; i < numClassifiers; i++) {
      idxPreds = (int)(m_Classifiers[i].classifyInstance(instance));
      preds[idxPreds] += predConfs[i];
    }

    // on ties, prefer the class with more training instances
    maxPreds = preds[Utils.maxIndex(preds)];
    int maxInstPerClass = -100;
    int maxClass = -1;
    for (int i = 0; i < instance.numClasses(); i++) {
      if (preds[i] == maxPreds) {
	numPreds++;
	if (m_InstPerClass[i] > maxInstPerClass) {
	  maxInstPerClass = (int)m_InstPerClass[i];
	  maxClass = i;
	}
      }
    }

    int predictedIndex;
    if (numPreds == 1) {
      predictedIndex = Utils.maxIndex(preds);
    } else {
      predictedIndex = maxClass;
    }

    // all probability mass goes to the single predicted class
    double[] classProbs = new double[instance.numClasses()];
    classProbs[predictedIndex] = 1.0;
    return classProbs;
  }

  /**
   * Output a representation of this classifier
   *
   * @return a string representation of the classifier
   */
  public String toString() {

    if (m_Classifiers.length == 0) {
      return "Grading: No base schemes entered.";
    }
    if (m_MetaClassifiers.length == 0) {
      return "Grading: No meta scheme selected.";
    }
    if (m_MetaFormat == null) {
      return "Grading: No model built yet.";
    }
    // StringBuilder avoids quadratic string concatenation in the loops
    StringBuilder result = new StringBuilder("Grading\n\nBase classifiers\n\n");
    for (int i = 0; i < m_Classifiers.length; i++) {
      result.append(getClassifier(i).toString()).append("\n\n");
    }

    result.append("\n\nMeta classifiers\n\n");
    for (int i = 0; i < m_Classifiers.length; i++) {
      result.append(m_MetaClassifiers[i].toString()).append("\n\n");
    }

    return result.toString();
  }

  /**
   * Makes the format for the level-1 data: the original attributes
   * (minus the class) plus a binary "PredConf" class attribute that
   * records whether the base classifier's prediction was correct.
   *
   * @param instances the level-0 format
   * @return the format for the meta data
   * @throws Exception if an error occurs
   */
  protected Instances metaFormat(Instances instances) throws Exception {

    FastVector attributes = new FastVector();
    Instances metaFormat;

    for (int i = 0; i < instances.numAttributes(); i++) {
      if (i != instances.classIndex()) {
	attributes.addElement(instances.attribute(i));
      }
    }

    FastVector nomElements = new FastVector(2);
    nomElements.addElement("0");
    nomElements.addElement("1");
    attributes.addElement(new Attribute("PredConf", nomElements));

    metaFormat = new Instances("Meta format", attributes, 0);
    metaFormat.setClassIndex(metaFormat.numAttributes() - 1);
    return metaFormat;
  }

  /**
   * Makes a level-1 instance from the given instance: the instance's
   * non-class attribute values, followed by 1 if base classifier k
   * predicted the instance's class correctly and 0 otherwise.
   *
   * @param instance the instance to be transformed
   * @param k index of the classifier
   * @return the level-1 instance
   * @throws Exception if an error occurs
   */
  protected Instance metaInstance(Instance instance, int k) throws Exception {

    double[] values = new double[m_MetaFormat.numAttributes()];
    Instance metaInstance;
    double predConf;

    int idx = 0;
    for (int i = 0; i < instance.numAttributes(); i++) {
      if (i != instance.classIndex()) {
	values[idx] = instance.value(i);
	idx++;
      }
    }

    Classifier classifier = getClassifier(k);

    if (m_BaseFormat.classAttribute().isNumeric()) {
      throw new Exception("Class Attribute must not be numeric!");
    } else {
      double[] dist = classifier.distributionForInstance(instance);
      // Utils.maxIndex returns the first index of the maximum, matching
      // the previous hand-rolled argmax loop
      int maxIdx = Utils.maxIndex(dist);
      predConf = (instance.classValue() == maxIdx) ? 1 : 0;
    }

    values[idx] = predConf;
    metaInstance = new DenseInstance(1, values);
    metaInstance.setDataset(m_MetaFormat);
    return metaInstance;
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 1.13 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param argv should contain the following arguments:
   * -t training file [-T test file] [-c class index]
   */
  public static void main(String [] argv) {
    runClassifier(new Grading(), argv);
  }
}
12,016
29.577608
191
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/LogitBoost.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LogitBoost.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.Evaluation; import weka.classifiers.RandomizableIteratedSingleClassifierEnhancer; import weka.classifiers.Sourcable; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * <!-- globalinfo-start --> * Class for performing additive logistic regression. <br/> * This class performs classification using a regression scheme as the base learner, and can handle multi-class problems. For more information, see<br/> * <br/> * J. Friedman, T. Hastie, R. Tibshirani (1998). Additive Logistic Regression: a Statistical View of Boosting. 
Stanford University.<br/> * <br/> * Can do efficient internal cross-validation to determine appropriate number of iterations. * <p/> * <!-- globalinfo-end --> * <p> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;techreport{Friedman1998, * address = {Stanford University}, * author = {J. Friedman and T. Hastie and R. Tibshirani}, * title = {Additive Logistic Regression: a Statistical View of Boosting}, * year = {1998}, * PS = {http://www-stat.stanford.edu/\~jhf/ftp/boost.ps} * } * </pre> * <p/> * <!-- technical-bibtex-end --> * <p> * <!-- options-start --> * Valid options are: <p/> * * <pre> -Q * Use resampling instead of reweighting for boosting.</pre> * * <pre> -P &lt;percent&gt; * Percentage of weight mass to base training on. * (default 100, reduce to around 90 speed up)</pre> * * <pre> -F &lt;num&gt; * Number of folds for internal cross-validation. * (default 0 -- no cross-validation)</pre> * * <pre> -R &lt;num&gt; * Number of runs for internal cross-validation. * (default 1)</pre> * * <pre> -L &lt;num&gt; * Threshold on the improvement of the likelihood. * (default -Double.MAX_VALUE)</pre> * * <pre> -H &lt;num&gt; * Shrinkage parameter. * (default 1)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Number of iterations. * (default 10)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.trees.DecisionStump)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <p> * <!-- options-end --> * <p> * Options after -- are passed to the designated learner.<p> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 9186 $ */ public class LogitBoost extends RandomizableIteratedSingleClassifierEnhancer implements Sourcable, WeightedInstancesHandler, TechnicalInformationHandler { /** * for serialization */ static final long serialVersionUID = -1105660358715833753L; /** * Array for storing the generated base classifiers. * Note: we are hiding the variable from IteratedSingleClassifierEnhancer */ protected Classifier[][] m_Classifiers; /** * The number of classes */ protected int m_NumClasses; /** * The number of successfully generated base classifiers. */ protected int m_NumGenerated; /** * The number of folds for the internal cross-validation. */ protected int m_NumFolds = 0; /** * The number of runs for the internal cross-validation. */ protected int m_NumRuns = 1; /** * Weight thresholding. The percentage of weight mass used in training */ protected int m_WeightThreshold = 100; /** * A threshold for responses (Friedman suggests between 2 and 4) */ protected static final double Z_MAX = 3; /** * Dummy dataset with a numeric class */ protected Instances m_NumericClassData; /** * The actual class attribute (for getting class names) */ protected Attribute m_ClassAttribute; /** * Use boosting with reweighting? 
*/ protected boolean m_UseResampling; /** * The threshold on the improvement of the likelihood */ protected double m_Precision = -Double.MAX_VALUE; /** * The value of the shrinkage parameter */ protected double m_Shrinkage = 1; /** * The random number generator used */ protected Random m_RandomInstance = null; /** * The value by which the actual target value for the * true class is offset. */ protected double m_Offset = 0.0; /** * a ZeroR model in case no model can be built from the data */ protected Classifier m_ZeroR; /** * Returns a string describing classifier * * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for performing additive logistic regression. \n" + "This class performs classification using a regression scheme as the " + "base learner, and can handle multi-class problems. For more " + "information, see\n\n" + getTechnicalInformation().toString() + "\n\n" + "Can do efficient internal cross-validation to determine " + "appropriate number of iterations."; } /** * Constructor. */ public LogitBoost() { m_Classifier = new weka.classifiers.trees.DecisionStump(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.TECHREPORT); result.setValue(Field.AUTHOR, "J. Friedman and T. Hastie and R. Tibshirani"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "Additive Logistic Regression: a Statistical View of Boosting"); result.setValue(Field.ADDRESS, "Stanford University"); result.setValue(Field.PS, "http://www-stat.stanford.edu/~jhf/ftp/boost.ps"); return result; } /** * String describing default classifier. 
* * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.trees.DecisionStump"; } /** * Select only instances with weights that contribute to * the specified quantile of the weight distribution * * @param data the input instances * @param quantile the specified quantile eg 0.9 to select * 90% of the weight mass * @return the selected instances */ protected Instances selectWeightQuantile(Instances data, double quantile) { int numInstances = data.numInstances(); Instances trainData = new Instances(data, numInstances); double[] weights = new double[numInstances]; double sumOfWeights = 0; for (int i = 0; i < numInstances; i++) { weights[i] = data.instance(i).weight(); sumOfWeights += weights[i]; } double weightMassToSelect = sumOfWeights * quantile; int[] sortedIndices = Utils.sort(weights); // Select the instances sumOfWeights = 0; for (int i = numInstances - 1; i >= 0; i--) { Instance instance = (Instance) data.instance(sortedIndices[i]).copy(); trainData.add(instance); sumOfWeights += weights[sortedIndices[i]]; if ((sumOfWeights > weightMassToSelect) && (i > 0) && (weights[sortedIndices[i]] != weights[sortedIndices[i - 1]])) { break; } } if (m_Debug) { System.err.println("Selected " + trainData.numInstances() + " out of " + numInstances); } return trainData; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector newVector = new Vector(6); newVector.addElement(new Option( "\tUse resampling instead of reweighting for boosting.", "Q", 0, "-Q")); newVector.addElement(new Option( "\tPercentage of weight mass to base training on.\n" + "\t(default 100, reduce to around 90 speed up)", "P", 1, "-P <percent>")); newVector.addElement(new Option( "\tNumber of folds for internal cross-validation.\n" + "\t(default 0 -- no cross-validation)", "F", 1, "-F <num>")); newVector.addElement(new Option( "\tNumber of runs for internal cross-validation.\n" + "\t(default 1)", "R", 1, "-R <num>")); newVector.addElement(new Option( "\tThreshold on the improvement of the likelihood.\n" + "\t(default -Double.MAX_VALUE)", "L", 1, "-L <num>")); newVector.addElement(new Option( "\tShrinkage parameter.\n" + "\t(default 1)", "H", 1, "-H <num>")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. <p/> * <p> * <!-- options-start --> * Valid options are: <p/> * * <pre> -Q * Use resampling instead of reweighting for boosting.</pre> * * <pre> -P &lt;percent&gt; * Percentage of weight mass to base training on. * (default 100, reduce to around 90 speed up)</pre> * * <pre> -F &lt;num&gt; * Number of folds for internal cross-validation. * (default 0 -- no cross-validation)</pre> * * <pre> -R &lt;num&gt; * Number of runs for internal cross-validation. * (default 1)</pre> * * <pre> -L &lt;num&gt; * Threshold on the improvement of the likelihood. * (default -Double.MAX_VALUE)</pre> * * <pre> -H &lt;num&gt; * Shrinkage parameter. * (default 1)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Number of iterations. * (default 10)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.trees.DecisionStump)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <p> * <!-- options-end --> * <p> * Options after -- are passed to the designated learner.<p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String numFolds = Utils.getOption('F', options); if (numFolds.length() != 0) { setNumFolds(Integer.parseInt(numFolds)); } else { setNumFolds(0); } String numRuns = Utils.getOption('R', options); if (numRuns.length() != 0) { setNumRuns(Integer.parseInt(numRuns)); } else { setNumRuns(1); } String thresholdString = Utils.getOption('P', options); if (thresholdString.length() != 0) { setWeightThreshold(Integer.parseInt(thresholdString)); } else { setWeightThreshold(100); } String precisionString = Utils.getOption('L', options); if (precisionString.length() != 0) { setLikelihoodThreshold(new Double(precisionString). doubleValue()); } else { setLikelihoodThreshold(-Double.MAX_VALUE); } String shrinkageString = Utils.getOption('H', options); if (shrinkageString.length() != 0) { setShrinkage(new Double(shrinkageString). doubleValue()); } else { setShrinkage(1.0); } setUseResampling(Utils.getFlag('Q', options)); if (m_UseResampling && (thresholdString.length() != 0)) { throw new Exception("Weight pruning with resampling" + "not allowed."); } super.setOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { String[] superOptions = super.getOptions(); String[] options = new String[superOptions.length + 10]; int current = 0; if (getUseResampling()) { options[current++] = "-Q"; } else { options[current++] = "-P"; options[current++] = "" + getWeightThreshold(); } options[current++] = "-F"; options[current++] = "" + getNumFolds(); options[current++] = "-R"; options[current++] = "" + getNumRuns(); options[current++] = "-L"; options[current++] = "" + getLikelihoodThreshold(); options[current++] = "-H"; options[current++] = "" + getShrinkage(); System.arraycopy(superOptions, 0, options, current, superOptions.length); current += superOptions.length; while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String shrinkageTipText() { return "Shrinkage parameter (use small value like 0.1 to reduce " + "overfitting)."; } /** * Get the value of Shrinkage. * * @return Value of Shrinkage. */ public double getShrinkage() { return m_Shrinkage; } /** * Set the value of Shrinkage. * * @param newShrinkage Value to assign to Shrinkage. */ public void setShrinkage(double newShrinkage) { m_Shrinkage = newShrinkage; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String likelihoodThresholdTipText() { return "Threshold on improvement in likelihood."; } /** * Get the value of Precision. * * @return Value of Precision. */ public double getLikelihoodThreshold() { return m_Precision; } /** * Set the value of Precision. * * @param newPrecision Value to assign to Precision. 
*/ public void setLikelihoodThreshold(double newPrecision) { m_Precision = newPrecision; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numRunsTipText() { return "Number of runs for internal cross-validation."; } /** * Get the value of NumRuns. * * @return Value of NumRuns. */ public int getNumRuns() { return m_NumRuns; } /** * Set the value of NumRuns. * * @param newNumRuns Value to assign to NumRuns. */ public void setNumRuns(int newNumRuns) { m_NumRuns = newNumRuns; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numFoldsTipText() { return "Number of folds for internal cross-validation (default 0 " + "means no cross-validation is performed)."; } /** * Get the value of NumFolds. * * @return Value of NumFolds. */ public int getNumFolds() { return m_NumFolds; } /** * Set the value of NumFolds. * * @param newNumFolds Value to assign to NumFolds. 
*/ public void setNumFolds(int newNumFolds) { m_NumFolds = newNumFolds; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String useResamplingTipText() { return "Whether resampling is used instead of reweighting."; } /** * Set resampling mode * * @param r true if resampling should be done */ public void setUseResampling(boolean r) { m_UseResampling = r; } /** * Get whether resampling is turned on * * @return true if resampling output is on */ public boolean getUseResampling() { return m_UseResampling; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String weightThresholdTipText() { return "Weight threshold for weight pruning (reduce to 90 " + "for speeding up learning process)."; } /** * Set weight thresholding * * @param threshold the percentage of weight mass used for training */ public void setWeightThreshold(int threshold) { m_WeightThreshold = threshold; } /** * Get the degree of weight thresholding * * @return the percentage of weight mass used for training */ public int getWeightThreshold() { return m_WeightThreshold; } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NOMINAL_CLASS); return result; } /** * Builds the boosted classifier * * @param data the data to train the classifier with * @throws Exception if building fails, e.g., can't handle data */ public void buildClassifier(Instances data) throws Exception { m_RandomInstance = new Random(m_Seed); int classIndex = data.classIndex(); if (m_Classifier == null) { throw new Exception("A base classifier has not been specified!"); } if (!(m_Classifier instanceof WeightedInstancesHandler) && !m_UseResampling) { m_UseResampling = true; } // can classifier handle the data? getCapabilities().testWithFail(data); if (m_Debug) { System.err.println("Creating copy of the training data"); } // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); // only class? -> build ZeroR model if (data.numAttributes() == 1) { System.err.println( "Cannot build model (only class attribute present in data!), " + "using ZeroR model instead!"); m_ZeroR = new weka.classifiers.rules.ZeroR(); m_ZeroR.buildClassifier(data); return; } else { m_ZeroR = null; } m_NumClasses = data.numClasses(); m_ClassAttribute = data.classAttribute(); // Create the base classifiers if (m_Debug) { System.err.println("Creating base classifiers"); } m_Classifiers = new Classifier[m_NumClasses][]; for (int j = 0; j < m_NumClasses; j++) { m_Classifiers[j] = AbstractClassifier.makeCopies(m_Classifier, getNumIterations()); } // Do we want to select the appropriate number of iterations // using cross-validation? 
int bestNumIterations = getNumIterations(); if (m_NumFolds > 1) { if (m_Debug) { System.err.println("Processing first fold."); } // Array for storing the results double[] results = new double[getNumIterations()]; // Iterate throught the cv-runs for (int r = 0; r < m_NumRuns; r++) { // Stratify the data data.randomize(m_RandomInstance); data.stratify(m_NumFolds); // Perform the cross-validation for (int i = 0; i < m_NumFolds; i++) { // Get train and test folds Instances train = data.trainCV(m_NumFolds, i, m_RandomInstance); Instances test = data.testCV(m_NumFolds, i); // Make class numeric Instances trainN = new Instances(train); trainN.setClassIndex(-1); trainN.deleteAttributeAt(classIndex); trainN.insertAttributeAt(new Attribute("'pseudo class'"), classIndex); trainN.setClassIndex(classIndex); m_NumericClassData = new Instances(trainN, 0); // Get class values int numInstances = train.numInstances(); double[][] trainFs = new double[numInstances][m_NumClasses]; double[][] trainYs = new double[numInstances][m_NumClasses]; for (int j = 0; j < m_NumClasses; j++) { for (int k = 0; k < numInstances; k++) { trainYs[k][j] = (train.instance(k).classValue() == j) ? 
1.0 - m_Offset : 0.0 + (m_Offset / (double) m_NumClasses); } } // Perform iterations double[][] probs = initialProbs(numInstances); m_NumGenerated = 0; double sumOfWeights = train.sumOfWeights(); for (int j = 0; j < getNumIterations(); j++) { performIteration(trainYs, trainFs, probs, trainN, sumOfWeights); Evaluation eval = new Evaluation(train); eval.evaluateModel(this, test); results[j] += eval.correct(); } } } // Find the number of iterations with the lowest error double bestResult = -Double.MAX_VALUE; for (int j = 0; j < getNumIterations(); j++) { if (results[j] > bestResult) { bestResult = results[j]; bestNumIterations = j; } } if (m_Debug) { System.err.println("Best result for " + bestNumIterations + " iterations: " + bestResult); } } // Build classifier on all the data int numInstances = data.numInstances(); double[][] trainFs = new double[numInstances][m_NumClasses]; double[][] trainYs = new double[numInstances][m_NumClasses]; for (int j = 0; j < m_NumClasses; j++) { for (int i = 0, k = 0; i < numInstances; i++, k++) { trainYs[i][j] = (data.instance(k).classValue() == j) ? 1.0 - m_Offset : 0.0 + (m_Offset / (double) m_NumClasses); } } // Make class numeric data.setClassIndex(-1); data.deleteAttributeAt(classIndex); data.insertAttributeAt(new Attribute("'pseudo class'"), classIndex); data.setClassIndex(classIndex); m_NumericClassData = new Instances(data, 0); // Perform iterations double[][] probs = initialProbs(numInstances); double logLikelihood = logLikelihood(trainYs, probs); m_NumGenerated = 0; if (m_Debug) { System.err.println("Avg. log-likelihood: " + logLikelihood); } double sumOfWeights = data.sumOfWeights(); for (int j = 0; j < bestNumIterations; j++) { double previousLoglikelihood = logLikelihood; performIteration(trainYs, trainFs, probs, data, sumOfWeights); logLikelihood = logLikelihood(trainYs, probs); if (m_Debug) { System.err.println("Avg. 
log-likelihood: " + logLikelihood); } if (Math.abs(previousLoglikelihood - logLikelihood) < m_Precision) { return; } } } /** * Gets the intial class probabilities. * * @param numInstances the number of instances * @return the initial class probabilities */ private double[][] initialProbs(int numInstances) { double[][] probs = new double[numInstances][m_NumClasses]; for (int i = 0; i < numInstances; i++) { for (int j = 0; j < m_NumClasses; j++) { probs[i][j] = 1.0 / m_NumClasses; } } return probs; } /** * Computes loglikelihood given class values * and estimated probablities. * * @param trainYs class values * @param probs estimated probabilities * @return the computed loglikelihood */ private double logLikelihood(double[][] trainYs, double[][] probs) { double logLikelihood = 0; for (int i = 0; i < trainYs.length; i++) { for (int j = 0; j < m_NumClasses; j++) { if (trainYs[i][j] == 1.0 - m_Offset) { logLikelihood -= Math.log(probs[i][j]); } } } return logLikelihood / (double) trainYs.length; } /** * Performs one boosting iteration. 
* * @param trainYs class values * @param trainFs F scores * @param probs probabilities * @param data the data to run the iteration on * @param origSumOfWeights the original sum of weights * @throws Exception in case base classifiers run into problems */ private void performIteration(double[][] trainYs, double[][] trainFs, double[][] probs, Instances data, double origSumOfWeights) throws Exception { if (m_Debug) { System.err.println("Training classifier " + (m_NumGenerated + 1)); } // Build the new models for (int j = 0; j < m_NumClasses; j++) { if (m_Debug) { System.err.println("\t...for class " + (j + 1) + " (" + m_ClassAttribute.name() + "=" + m_ClassAttribute.value(j) + ")"); } // Make copy because we want to save the weights Instances boostData = new Instances(data); // Set instance pseudoclass and weights for (int i = 0; i < probs.length; i++) { // Compute response and weight double p = probs[i][j]; double z, actual = trainYs[i][j]; if (actual == 1 - m_Offset) { z = 1.0 / p; if (z > Z_MAX) { // threshold z = Z_MAX; } } else { z = -1.0 / (1.0 - p); if (z < -Z_MAX) { // threshold z = -Z_MAX; } } double w = (actual - p) / z; // Set values for instance Instance current = boostData.instance(i); current.setValue(boostData.classIndex(), z); current.setWeight(current.weight() * w); } // Scale the weights (helps with some base learners) double sumOfWeights = boostData.sumOfWeights(); double scalingFactor = (double) origSumOfWeights / sumOfWeights; for (int i = 0; i < probs.length; i++) { Instance current = boostData.instance(i); current.setWeight(current.weight() * scalingFactor); } // Select instances to train the classifier on Instances trainData = boostData; if (m_WeightThreshold < 100) { trainData = selectWeightQuantile(boostData, (double) m_WeightThreshold / 100); } else { if (m_UseResampling) { double[] weights = new double[boostData.numInstances()]; for (int kk = 0; kk < weights.length; kk++) { weights[kk] = boostData.instance(kk).weight(); } trainData = 
boostData.resampleWithWeights(m_RandomInstance, weights); } } // Build the classifier m_Classifiers[j][m_NumGenerated].buildClassifier(trainData); } // Evaluate / increment trainFs from the classifier for (int i = 0; i < trainFs.length; i++) { double[] pred = new double[m_NumClasses]; double predSum = 0; for (int j = 0; j < m_NumClasses; j++) { pred[j] = m_Shrinkage * m_Classifiers[j][m_NumGenerated] .classifyInstance(data.instance(i)); predSum += pred[j]; } predSum /= m_NumClasses; for (int j = 0; j < m_NumClasses; j++) { trainFs[i][j] += (pred[j] - predSum) * (m_NumClasses - 1) / m_NumClasses; } } m_NumGenerated++; // Compute the current probability estimates for (int i = 0; i < trainYs.length; i++) { probs[i] = probs(trainFs[i]); } } /** * Returns the array of classifiers that have been built. * * @return the built classifiers */ public Classifier[][] classifiers() { Classifier[][] classifiers = new Classifier[m_NumClasses][m_NumGenerated]; for (int j = 0; j < m_NumClasses; j++) { for (int i = 0; i < m_NumGenerated; i++) { classifiers[j][i] = m_Classifiers[j][i]; } } return classifiers; } /** * Computes probabilities from F scores * * @param Fs the F scores * @return the computed probabilities */ private double[] probs(double[] Fs) { double maxF = -Double.MAX_VALUE; for (int i = 0; i < Fs.length; i++) { if (Fs[i] > maxF) { maxF = Fs[i]; } } double sum = 0; double[] probs = new double[Fs.length]; for (int i = 0; i < Fs.length; i++) { probs[i] = Math.exp(Fs[i] - maxF); sum += probs[i]; } Utils.normalize(probs, sum); return probs; } /** * Calculates the class membership probabilities for the given test instance. * * @param instance the instance to be classified * @return predicted class probability distribution * @throws Exception if instance could not be classified * successfully */ public double[] distributionForInstance(Instance instance) throws Exception { // default model? 
if (m_ZeroR != null) { return m_ZeroR.distributionForInstance(instance); } instance = (Instance) instance.copy(); instance.setDataset(m_NumericClassData); double[] pred = new double[m_NumClasses]; double[] Fs = new double[m_NumClasses]; for (int i = 0; i < m_NumGenerated; i++) { double predSum = 0; for (int j = 0; j < m_NumClasses; j++) { pred[j] = m_Shrinkage * m_Classifiers[j][i].classifyInstance(instance); predSum += pred[j]; } predSum /= m_NumClasses; for (int j = 0; j < m_NumClasses; j++) { Fs[j] += (pred[j] - predSum) * (m_NumClasses - 1) / m_NumClasses; } } return probs(Fs); } /** * Returns the boosted model as Java source code. * * @param className the classname in the generated code * @return the tree as Java source code * @throws Exception if something goes wrong */ public String toSource(String className) throws Exception { if (m_NumGenerated == 0) { throw new Exception("No model built yet"); } if (!(m_Classifiers[0][0] instanceof Sourcable)) { throw new Exception("Base learner " + m_Classifier.getClass().getName() + " is not Sourcable"); } StringBuffer text = new StringBuffer("class "); text.append(className).append(" {\n\n"); text.append(" private static double RtoP(double []R, int j) {\n" + " double Rcenter = 0;\n" + " for (int i = 0; i < R.length; i++) {\n" + " Rcenter += R[i];\n" + " }\n" + " Rcenter /= R.length;\n" + " double Rsum = 0;\n" + " for (int i = 0; i < R.length; i++) {\n" + " Rsum += Math.exp(R[i] - Rcenter);\n" + " }\n" + " return Math.exp(R[j]) / Rsum;\n" + " }\n\n"); text.append(" public static double classify(Object[] i) {\n" + " double [] d = distribution(i);\n" + " double maxV = d[0];\n" + " int maxI = 0;\n" + " for (int j = 1; j < " + m_NumClasses + "; j++) {\n" + " if (d[j] > maxV) { maxV = d[j]; maxI = j; }\n" + " }\n return (double) maxI;\n }\n\n"); text.append(" public static double [] distribution(Object [] i) {\n"); text.append(" double [] Fs = new double [" + m_NumClasses + "];\n"); text.append(" double [] Fi = new double [" 
+ m_NumClasses + "];\n"); text.append(" double Fsum;\n"); for (int i = 0; i < m_NumGenerated; i++) { text.append(" Fsum = 0;\n"); for (int j = 0; j < m_NumClasses; j++) { text.append(" Fi[" + j + "] = " + className + '_' + j + '_' + i + ".classify(i); Fsum += Fi[" + j + "];\n"); } text.append(" Fsum /= " + m_NumClasses + ";\n"); text.append(" for (int j = 0; j < " + m_NumClasses + "; j++) {"); text.append(" Fs[j] += (Fi[j] - Fsum) * " + (m_NumClasses - 1) + " / " + m_NumClasses + "; }\n"); } text.append(" double [] dist = new double [" + m_NumClasses + "];\n" + " for (int j = 0; j < " + m_NumClasses + "; j++) {\n" + " dist[j] = RtoP(Fs, j);\n" + " }\n return dist;\n"); text.append(" }\n}\n"); for (int i = 0; i < m_Classifiers.length; i++) { for (int j = 0; j < m_Classifiers[i].length; j++) { text.append(((Sourcable) m_Classifiers[i][j]) .toSource(className + '_' + i + '_' + j)); } } return text.toString(); } /** * Returns description of the boosted classifier. * * @return description of the boosted classifier as a string */ public String toString() { // only ZeroR model? 
if (m_ZeroR != null) { StringBuffer buf = new StringBuffer(); buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n"); buf.append(m_ZeroR.toString()); return buf.toString(); } StringBuffer text = new StringBuffer(); if (m_NumGenerated == 0) { text.append("LogitBoost: No model built yet."); // text.append(m_Classifiers[0].toString()+"\n"); } else { text.append("LogitBoost: Base classifiers and their weights: \n"); for (int i = 0; i < m_NumGenerated; i++) { text.append("\nIteration " + (i + 1)); for (int j = 0; j < m_NumClasses; j++) { text.append("\n\tClass " + (j + 1) + " (" + m_ClassAttribute.name() + "=" + m_ClassAttribute.value(j) + ")\n\n" + m_Classifiers[j][i].toString() + "\n"); } } text.append("Number of performed iterations: " + m_NumGenerated + "\n"); } return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9186 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String[] argv) { runClassifier(new LogitBoost(), argv); } }
39,598
31.618616
153
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/MetaCost.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * MetaCost.java * Copyright (C) 2002 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import weka.classifiers.Classifier; import weka.classifiers.CostMatrix; import weka.classifiers.RandomizableSingleClassifierEnhancer; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.io.StringReader; import java.io.StringWriter; import java.util.Enumeration; import java.util.Vector; /** <!-- globalinfo-start --> * This metaclassifier makes its base classifier cost-sensitive using the method specified in<br/> * <br/> * Pedro Domingos: MetaCost: A general method for making classifiers cost-sensitive. 
In: Fifth International Conference on Knowledge Discovery and Data Mining, 155-164, 1999.<br/> * <br/> * This classifier should produce similar results to one created by passing the base learner to Bagging, which is in turn passed to a CostSensitiveClassifier operating on minimum expected cost. The difference is that MetaCost produces a single cost-sensitive classifier of the base learner, giving the benefits of fast classification and interpretable output (if the base learner itself is interpretable). This implementation uses all bagging iterations when reclassifying training data (the MetaCost paper reports a marginal improvement when only those iterations containing each training instance are used in reclassifying that instance). * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Domingos1999, * author = {Pedro Domingos}, * booktitle = {Fifth International Conference on Knowledge Discovery and Data Mining}, * pages = {155-164}, * title = {MetaCost: A general method for making classifiers cost-sensitive}, * year = {1999} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -I &lt;num&gt; * Number of bagging iterations. * (default 10)</pre> * * <pre> -C &lt;cost file name&gt; * File name of a cost matrix to use. If this is not supplied, * a cost matrix will be loaded on demand. The name of the * on-demand file is the relation name of the training data * plus ".cost", and the path to the on-demand file is * specified with the -N option.</pre> * * <pre> -N &lt;directory&gt; * Name of a directory to search for cost files when loading * costs on demand (default current directory).</pre> * * <pre> -cost-matrix &lt;matrix&gt; * The cost matrix in Matlab single line format.</pre> * * <pre> -P * Size of each bag, as a percentage of the * training set size. (default 100)</pre> * * <pre> -S &lt;num&gt; * Random number seed. 
* (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated classifier.<p> * * @author Len Trigg (len@reeltwo.com) * @version $Revision: 1.24 $ */ public class MetaCost extends RandomizableSingleClassifierEnhancer implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 1205317833344726855L; /** load cost matrix on demand */ public static final int MATRIX_ON_DEMAND = 1; /** use explicit matrix */ public static final int MATRIX_SUPPLIED = 2; /** Specify possible sources of the cost matrix */ public static final Tag [] TAGS_MATRIX_SOURCE = { new Tag(MATRIX_ON_DEMAND, "Load cost matrix on demand"), new Tag(MATRIX_SUPPLIED, "Use explicit cost matrix") }; /** Indicates the current cost matrix source */ protected int m_MatrixSource = MATRIX_ON_DEMAND; /** * The directory used when loading cost files on demand, null indicates * current directory */ protected File m_OnDemandDirectory = new File(System.getProperty("user.dir")); /** The name of the cost file, for command line options */ protected String m_CostFile; /** The cost matrix */ protected CostMatrix m_CostMatrix = new CostMatrix(1); /** The number of iterations. 
*/ protected int m_NumIterations = 10; /** The size of each bag sample, as a percentage of the training size */ protected int m_BagSizePercent = 100; /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "This metaclassifier makes its base classifier cost-sensitive using the " + "method specified in\n\n" + getTechnicalInformation().toString() + "\n\n" + "This classifier should produce similar results to one created by " + "passing the base learner to Bagging, which is in turn passed to a " + "CostSensitiveClassifier operating on minimum expected cost. The difference " + "is that MetaCost produces a single cost-sensitive classifier of the " + "base learner, giving the benefits of fast classification and interpretable " + "output (if the base learner itself is interpretable). This implementation " + "uses all bagging iterations when reclassifying training data (the MetaCost " + "paper reports a marginal improvement when only those iterations containing " + "each training instance are used in reclassifying that instance)."; } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Pedro Domingos"); result.setValue(Field.TITLE, "MetaCost: A general method for making classifiers cost-sensitive"); result.setValue(Field.BOOKTITLE, "Fifth International Conference on Knowledge Discovery and Data Mining"); result.setValue(Field.YEAR, "1999"); result.setValue(Field.PAGES, "155-164"); return result; } /** * Returns an enumeration describing the available options. 
* * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(6); newVector.addElement(new Option( "\tNumber of bagging iterations.\n" + "\t(default 10)", "I", 1, "-I <num>")); newVector.addElement(new Option( "\tFile name of a cost matrix to use. If this is not supplied,\n" +"\ta cost matrix will be loaded on demand. The name of the\n" +"\ton-demand file is the relation name of the training data\n" +"\tplus \".cost\", and the path to the on-demand file is\n" +"\tspecified with the -N option.", "C", 1, "-C <cost file name>")); newVector.addElement(new Option( "\tName of a directory to search for cost files when loading\n" +"\tcosts on demand (default current directory).", "N", 1, "-N <directory>")); newVector.addElement(new Option( "\tThe cost matrix in Matlab single line format.", "cost-matrix", 1, "-cost-matrix <matrix>")); newVector.addElement(new Option( "\tSize of each bag, as a percentage of the\n" + "\ttraining set size. (default 100)", "P", 1, "-P")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -I &lt;num&gt; * Number of bagging iterations. * (default 10)</pre> * * <pre> -C &lt;cost file name&gt; * File name of a cost matrix to use. If this is not supplied, * a cost matrix will be loaded on demand. The name of the * on-demand file is the relation name of the training data * plus ".cost", and the path to the on-demand file is * specified with the -N option.</pre> * * <pre> -N &lt;directory&gt; * Name of a directory to search for cost files when loading * costs on demand (default current directory).</pre> * * <pre> -cost-matrix &lt;matrix&gt; * The cost matrix in Matlab single line format.</pre> * * <pre> -P * Size of each bag, as a percentage of the * training set size. 
(default 100)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated classifier.<p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String bagIterations = Utils.getOption('I', options); if (bagIterations.length() != 0) { setNumIterations(Integer.parseInt(bagIterations)); } else { setNumIterations(10); } String bagSize = Utils.getOption('P', options); if (bagSize.length() != 0) { setBagSizePercent(Integer.parseInt(bagSize)); } else { setBagSizePercent(100); } String costFile = Utils.getOption('C', options); if (costFile.length() != 0) { setCostMatrix(new CostMatrix(new BufferedReader( new FileReader(costFile)))); setCostMatrixSource(new SelectedTag(MATRIX_SUPPLIED, TAGS_MATRIX_SOURCE)); m_CostFile = costFile; } else { setCostMatrixSource(new SelectedTag(MATRIX_ON_DEMAND, TAGS_MATRIX_SOURCE)); } String demandDir = Utils.getOption('N', options); if (demandDir.length() != 0) { setOnDemandDirectory(new File(demandDir)); } String cost_matrix= Utils.getOption("cost-matrix", options); if (cost_matrix.length() != 0) { StringWriter writer = new StringWriter(); CostMatrix.parseMatlab(cost_matrix).write(writer); setCostMatrix(new CostMatrix(new StringReader(writer.toString()))); setCostMatrixSource(new SelectedTag(MATRIX_SUPPLIED, TAGS_MATRIX_SOURCE)); } super.setOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options; options = new String [superOptions.length + 6]; int current = 0; if (m_MatrixSource == MATRIX_SUPPLIED) { if (m_CostFile != null) { options[current++] = "-C"; options[current++] = "" + m_CostFile; } else { options[current++] = "-cost-matrix"; options[current++] = getCostMatrix().toMatlab(); } } else { options[current++] = "-N"; options[current++] = "" + getOnDemandDirectory(); } options[current++] = "-I"; options[current++] = "" + getNumIterations(); options[current++] = "-P"; options[current++] = "" + getBagSizePercent(); System.arraycopy(superOptions, 0, options, current, superOptions.length); return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String costMatrixSourceTipText() { return "Gets the source location method of the cost matrix. Will " + "be one of MATRIX_ON_DEMAND or MATRIX_SUPPLIED."; } /** * Gets the source location method of the cost matrix. Will be one of * MATRIX_ON_DEMAND or MATRIX_SUPPLIED. * * @return the cost matrix source. */ public SelectedTag getCostMatrixSource() { return new SelectedTag(m_MatrixSource, TAGS_MATRIX_SOURCE); } /** * Sets the source location of the cost matrix. Values other than * MATRIX_ON_DEMAND or MATRIX_SUPPLIED will be ignored. * * @param newMethod the cost matrix location method. 
*/ public void setCostMatrixSource(SelectedTag newMethod) { if (newMethod.getTags() == TAGS_MATRIX_SOURCE) { m_MatrixSource = newMethod.getSelectedTag().getID(); } } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String onDemandDirectoryTipText() { return "Name of directory to search for cost files when loading " + "costs on demand."; } /** * Returns the directory that will be searched for cost files when * loading on demand. * * @return The cost file search directory. */ public File getOnDemandDirectory() { return m_OnDemandDirectory; } /** * Sets the directory that will be searched for cost files when * loading on demand. * * @param newDir The cost file search directory. */ public void setOnDemandDirectory(File newDir) { if (newDir.isDirectory()) { m_OnDemandDirectory = newDir; } else { m_OnDemandDirectory = new File(newDir.getParent()); } m_MatrixSource = MATRIX_ON_DEMAND; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String bagSizePercentTipText() { return "The size of each bag, as a percentage of the training set " + "size."; } /** * Gets the size of each bag, as a percentage of the training set size. * * @return the bag size, as a percentage. */ public int getBagSizePercent() { return m_BagSizePercent; } /** * Sets the size of each bag, as a percentage of the training set size. * * @param newBagSizePercent the bag size, as a percentage. 
*/ public void setBagSizePercent(int newBagSizePercent) { m_BagSizePercent = newBagSizePercent; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numIterationsTipText() { return "The number of bagging iterations."; } /** * Sets the number of bagging iterations * * @param numIterations the number of iterations to use */ public void setNumIterations(int numIterations) { m_NumIterations = numIterations; } /** * Gets the number of bagging iterations * * @return the maximum number of bagging iterations */ public int getNumIterations() { return m_NumIterations; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String costMatrixTipText() { return "A misclassification cost matrix."; } /** * Gets the misclassification cost matrix. * * @return the cost matrix */ public CostMatrix getCostMatrix() { return m_CostMatrix; } /** * Sets the misclassification cost matrix. * * @param newCostMatrix the cost matrix */ public void setCostMatrix(CostMatrix newCostMatrix) { m_CostMatrix = newCostMatrix; m_MatrixSource = MATRIX_SUPPLIED; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NOMINAL_CLASS); return result; } /** * Builds the model of the base learner. * * @param data the training data * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(data); // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); if (m_MatrixSource == MATRIX_ON_DEMAND) { String costName = data.relationName() + CostMatrix.FILE_EXTENSION; File costFile = new File(getOnDemandDirectory(), costName); if (!costFile.exists()) { throw new Exception("On-demand cost file doesn't exist: " + costFile); } setCostMatrix(new CostMatrix(new BufferedReader( new FileReader(costFile)))); } // Set up the bagger Bagging bagger = new Bagging(); bagger.setClassifier(getClassifier()); bagger.setSeed(getSeed()); bagger.setNumIterations(getNumIterations()); bagger.setBagSizePercent(getBagSizePercent()); bagger.buildClassifier(data); // Use the bagger to reassign class values according to minimum expected // cost Instances newData = new Instances(data); for (int i = 0; i < newData.numInstances(); i++) { Instance current = newData.instance(i); double [] pred = bagger.distributionForInstance(current); int minCostPred = Utils.minIndex(m_CostMatrix.expectedCosts(pred)); current.setClassValue(minCostPred); } // Build a classifier using the reassigned data m_Classifier.buildClassifier(newData); } /** * Classifies a given instance after filtering. * * @param instance the instance to be classified * @return the class distribution for the given instance * @throws Exception if instance could not be classified * successfully */ public double[] distributionForInstance(Instance instance) throws Exception { return m_Classifier.distributionForInstance(instance); } /** * Gets the classifier specification string, which contains the * class name of the classifier and any options to the classifier * * @return the classifier string. 
*/ protected String getClassifierSpec() { Classifier c = getClassifier(); return c.getClass().getName() + " " + Utils.joinOptions(((OptionHandler)c).getOptions()); } /** * Output a representation of this classifier * * @return a string representaiton of the classifier */ public String toString() { if (m_Classifier == null) { return "MetaCost: No model built yet."; } String result = "MetaCost cost sensitive classifier induction"; result += "\nOptions: " + Utils.joinOptions(getOptions()); result += "\nBase learner: " + getClassifierSpec() + "\n\nClassifier Model\n" + m_Classifier.toString() + "\n\nCost Matrix\n" + m_CostMatrix.toString(); return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.24 $"); } /** * Main method for testing this class. * * @param argv should contain the following arguments: * -t training file [-T test file] [-c class index] */ public static void main(String [] argv) { runClassifier(new MetaCost(), argv); } }
21,275
30.707899
641
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/MultiBoostAB.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * MultiBoostAB.java * * MultiBoosting is an extension to the highly successful AdaBoost * technique for forming decision committees. MultiBoosting can be * viewed as combining AdaBoost with wagging. It is able to harness * both AdaBoost's high bias and variance reduction with wagging's * superior variance reduction. Using C4.5 as the base learning * algorithm, Multi-boosting is demonstrated to produce decision * committees with lower error than either AdaBoost or wagging * significantly more often than the reverse over a large * representative cross-section of UCI data sets. It offers the * further advantage over AdaBoost of suiting parallel execution. * * For more info refer to : <!-- technical-plaintext-start --> * Geoffrey I. Webb (2000). MultiBoosting: A Technique for Combining Boosting and Wagging. Machine Learning. Vol.40(No.2). 
<!-- technical-plaintext-end --> * * Originally based on AdaBoostM1.java * * http://www.cm.deakin.edu.au/webb * * School of Computing and Mathematics * Deakin University * Geelong, Vic, 3217, Australia * Copyright (C) 2001 Deakin University * */ package weka.classifiers.meta; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.util.Enumeration; import java.util.Random; import java.util.Vector; /** <!-- globalinfo-start --> * Class for boosting a classifier using the MultiBoosting method.<br/> * <br/> * MultiBoosting is an extension to the highly successful AdaBoost technique for forming decision committees. MultiBoosting can be viewed as combining AdaBoost with wagging. It is able to harness both AdaBoost's high bias and variance reduction with wagging's superior variance reduction. Using C4.5 as the base learning algorithm, Multi-boosting is demonstrated to produce decision committees with lower error than either AdaBoost or wagging significantly more often than the reverse over a large representative cross-section of UCI data sets. It offers the further advantage over AdaBoost of suiting parallel execution.<br/> * <br/> * For more information, see<br/> * <br/> * Geoffrey I. Webb (2000). MultiBoosting: A Technique for Combining Boosting and Wagging. Machine Learning. Vol.40(No.2). * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;article{Webb2000, * address = {Boston}, * author = {Geoffrey I. 
Webb}, * journal = {Machine Learning}, * number = {No.2}, * publisher = {Kluwer Academic Publishers}, * title = {MultiBoosting: A Technique for Combining Boosting and Wagging}, * volume = {Vol.40}, * year = {2000} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -C &lt;num&gt; * Number of sub-committees. (Default 3)</pre> * * <pre> -P &lt;num&gt; * Percentage of weight mass to base training on. * (default 100, reduce to around 90 speed up)</pre> * * <pre> -Q * Use resampling for boosting.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Number of iterations. * (default 10)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.DecisionStump)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated classifier.<p> * * @author Shane Butler (sbutle@deakin.edu.au) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision: 1.16 $ */ public class MultiBoostAB extends AdaBoostM1 implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -6681619178187935148L; /** The number of sub-committees to use */ protected int m_NumSubCmtys = 3; /** Random number generator */ protected Random m_Random = null; /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for boosting a classifier using the MultiBoosting method.\n\n" + "MultiBoosting is an extension to the highly successful AdaBoost " + "technique 
for forming decision committees. MultiBoosting can be " + "viewed as combining AdaBoost with wagging. It is able to harness " + "both AdaBoost's high bias and variance reduction with wagging's " + "superior variance reduction. Using C4.5 as the base learning " + "algorithm, Multi-boosting is demonstrated to produce decision " + "committees with lower error than either AdaBoost or wagging " + "significantly more often than the reverse over a large " + "representative cross-section of UCI data sets. It offers the " + "further advantage over AdaBoost of suiting parallel execution.\n\n" + "For more information, see\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Geoffrey I. Webb"); result.setValue(Field.YEAR, "2000"); result.setValue(Field.TITLE, "MultiBoosting: A Technique for Combining Boosting and Wagging"); result.setValue(Field.JOURNAL, "Machine Learning"); result.setValue(Field.VOLUME, "Vol.40"); result.setValue(Field.NUMBER, "No.2"); result.setValue(Field.PUBLISHER, "Kluwer Academic Publishers"); result.setValue(Field.ADDRESS, "Boston"); return result; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Enumeration enu = super.listOptions(); Vector vec = new Vector(1); vec.addElement(new Option( "\tNumber of sub-committees. (Default 3)", "C", 1, "-C <num>")); while (enu.hasMoreElements()) { vec.addElement(enu.nextElement()); } return vec.elements(); } /** * Parses a given list of options. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -C &lt;num&gt; * Number of sub-committees. (Default 3)</pre> * * <pre> -P &lt;num&gt; * Percentage of weight mass to base training on. * (default 100, reduce to around 90 speed up)</pre> * * <pre> -Q * Use resampling for boosting.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Number of iterations. * (default 10)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.DecisionStump)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated classifier.<p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String subcmtyString = Utils.getOption('C', options); if (subcmtyString.length() != 0) { setNumSubCmtys(Integer.parseInt(subcmtyString)); } else { setNumSubCmtys(3); } super.setOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] ops = super.getOptions(); String [] options = new String[ops.length + 2]; options[0] = "-C"; options[1] = "" + getNumSubCmtys(); System.arraycopy(ops, 0, options, 2, ops.length); return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numSubCmtysTipText() { return "Sets the (approximate) number of subcommittees."; } /** * Set the number of sub committees to use * * @param subc the number of sub committees */ public void setNumSubCmtys(int subc) { m_NumSubCmtys = subc; } /** * Get the number of sub committees to use * * @return the seed for resampling */ public int getNumSubCmtys() { return m_NumSubCmtys; } /** * Method for building this classifier. * * @param training the data to train with * @throws Exception if the training fails */ public void buildClassifier(Instances training) throws Exception { m_Random = new Random(m_Seed); super.buildClassifier(training); m_Random = null; } /** * Sets the weights for the next iteration. 
* * @param training the data to train with * @param reweight the reweighting factor * @throws Exception in case of an error */ protected void setWeights(Instances training, double reweight) throws Exception { int subCmtySize = m_Classifiers.length / m_NumSubCmtys; if ((m_NumIterationsPerformed + 1) % subCmtySize == 0) { if (getDebug()) System.err.println(m_NumIterationsPerformed + " " + subCmtySize); double oldSumOfWeights = training.sumOfWeights(); // Randomly set the weights of the training instances to the poisson distributon for (int i = 0; i < training.numInstances(); i++) { training.instance(i).setWeight( - Math.log((m_Random.nextDouble() * 9999) / 10000) ); } // Renormailise weights double sumProbs = training.sumOfWeights(); for (int i = 0; i < training.numInstances(); i++) { training.instance(i).setWeight(training.instance(i).weight() * oldSumOfWeights / sumProbs); } } else { super.setWeights(training, reweight); } } /** * Returns description of the boosted classifier. * * @return description of the boosted classifier as a string */ public String toString() { // only ZeroR model? 
if (m_ZeroR != null) { StringBuffer buf = new StringBuffer(); buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n"); buf.append(m_ZeroR.toString()); return buf.toString(); } StringBuffer text = new StringBuffer(); if (m_NumIterations == 0) { text.append("MultiBoostAB: No model built yet.\n"); } else if (m_NumIterations == 1) { text.append("MultiBoostAB: No boosting possible, one classifier used!\n"); text.append(m_Classifiers[0].toString() + "\n"); } else { text.append("MultiBoostAB: Base classifiers and their weights: \n\n"); for (int i = 0; i < m_NumIterations ; i++) { if ( (m_Classifiers != null) && (m_Classifiers[i] != null) ) { text.append(m_Classifiers[i].toString() + "\n\n"); text.append("Weight: " + Utils.roundDouble(m_Betas[i], 2) + "\n\n"); } else { text.append("not yet initialized!\n\n"); } } text.append("Number of performed Iterations: " + m_NumIterations + "\n"); } return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.16 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new MultiBoostAB(), argv); } }
13,502
30.771765
626
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/MultiClassClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * MultiClassClassifier.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.io.Serializable; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.RandomizableSingleClassifierEnhancer; import weka.classifiers.rules.ZeroR; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Range; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.MakeIndicator; import weka.filters.unsupervised.instance.RemoveWithValues; /** <!-- globalinfo-start --> * A metaclassifier for handling multi-class datasets with 2-class classifiers. This classifier is also capable of applying error correcting output codes for increased accuracy. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M &lt;num&gt; * Sets the method to use. 
Valid values are 0 (1-against-all), * 1 (random codes), 2 (exhaustive code), and 3 (1-against-1). (default 0) * </pre> * * <pre> -R &lt;num&gt; * Sets the multiplier when using random codes. (default 2.0)</pre> * * <pre> -P * Use pairwise coupling (only has an effect for 1-against1)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.functions.Logistic)</pre> * * <pre> * Options specific to classifier weka.classifiers.functions.Logistic: * </pre> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -R &lt;ridge&gt; * Set the ridge in the log-likelihood.</pre> * * <pre> -M &lt;number&gt; * Set the maximum number of iterations (default -1, until convergence).</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (len@reeltwo.com) * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class MultiClassClassifier extends RandomizableSingleClassifierEnhancer implements OptionHandler { /** for serialization */ static final long serialVersionUID = -3879602011542849141L; /** The classifiers. */ protected Classifier [] m_Classifiers; /** Use pairwise coupling with 1-vs-1 */ protected boolean m_pairwiseCoupling = false; /** Needed for pairwise coupling */ protected double [] m_SumOfWeights; /** The filters used to transform the class. */ protected Filter[] m_ClassFilters; /** ZeroR classifier for when all base classifier return zero probability. */ private ZeroR m_ZeroR; /** Internal copy of the class attribute for output purposes */ protected Attribute m_ClassAttribute; /** A transformed dataset header used by the 1-against-1 method */ protected Instances m_TwoClassDataset; /** * The multiplier when generating random codes. 
Will generate * numClasses * m_RandomWidthFactor codes */ private double m_RandomWidthFactor = 2.0; /** The multiclass method to use */ protected int m_Method = METHOD_1_AGAINST_ALL; /** 1-against-all */ public static final int METHOD_1_AGAINST_ALL = 0; /** random correction code */ public static final int METHOD_ERROR_RANDOM = 1; /** exhaustive correction code */ public static final int METHOD_ERROR_EXHAUSTIVE = 2; /** 1-against-1 */ public static final int METHOD_1_AGAINST_1 = 3; /** The error correction modes */ public static final Tag [] TAGS_METHOD = { new Tag(METHOD_1_AGAINST_ALL, "1-against-all"), new Tag(METHOD_ERROR_RANDOM, "Random correction code"), new Tag(METHOD_ERROR_EXHAUSTIVE, "Exhaustive correction code"), new Tag(METHOD_1_AGAINST_1, "1-against-1") }; /** * Constructor. */ public MultiClassClassifier() { m_Classifier = new weka.classifiers.functions.Logistic(); } /** * String describing default classifier. * * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.functions.Logistic"; } /** * Interface for the code constructors */ private abstract class Code implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = 418095077487120846L; /** * Subclasses must allocate and fill these. * First dimension is number of codes. * Second dimension is number of classes. */ protected boolean [][]m_Codebits; /** * Returns the number of codes. * @return the number of codes */ public int size() { return m_Codebits.length; } /** * Returns the indices of the values set to true for this code, * using 1-based indexing (for input to Range). 
* * @param which the index * @return the 1-based indices */ public String getIndices(int which) { StringBuffer sb = new StringBuffer(); for (int i = 0; i < m_Codebits[which].length; i++) { if (m_Codebits[which][i]) { if (sb.length() != 0) { sb.append(','); } sb.append(i + 1); } } return sb.toString(); } /** * Returns a human-readable representation of the codes. * @return a string representation of the codes */ public String toString() { StringBuffer sb = new StringBuffer(); for(int i = 0; i < m_Codebits[0].length; i++) { for (int j = 0; j < m_Codebits.length; j++) { sb.append(m_Codebits[j][i] ? " 1" : " 0"); } sb.append('\n'); } return sb.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } /** * Constructs a code with no error correction */ private class StandardCode extends Code { /** for serialization */ static final long serialVersionUID = 3707829689461467358L; /** * constructor * * @param numClasses the number of classes */ public StandardCode(int numClasses) { m_Codebits = new boolean[numClasses][numClasses]; for (int i = 0; i < numClasses; i++) { m_Codebits[i][i] = true; } //System.err.println("Code:\n" + this); } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } /** * Constructs a random code assignment */ private class RandomCode extends Code { /** for serialization */ static final long serialVersionUID = 4413410540703926563L; /** random number generator */ Random r = null; /** * constructor * * @param numClasses the number of classes * @param numCodes the number of codes * @param data the data to use */ public RandomCode(int numClasses, int numCodes, Instances data) { r = data.getRandomNumberGenerator(m_Seed); numCodes = Math.max(2, numCodes); // Need at least two classes m_Codebits = new boolean[numCodes][numClasses]; int i = 0; do { randomize(); //System.err.println(this); } while (!good() && (i++ < 100)); //System.err.println("Code:\n" + this); } private boolean good() { boolean [] ninClass = new boolean[m_Codebits[0].length]; boolean [] ainClass = new boolean[m_Codebits[0].length]; for (int i = 0; i < ainClass.length; i++) { ainClass[i] = true; } for (int i = 0; i < m_Codebits.length; i++) { boolean ninCode = false; boolean ainCode = true; for (int j = 0; j < m_Codebits[i].length; j++) { boolean current = m_Codebits[i][j]; ninCode = ninCode || current; ainCode = ainCode && current; ninClass[j] = ninClass[j] || current; ainClass[j] = ainClass[j] && current; } if (!ninCode || ainCode) { return false; } } for (int j = 0; j < ninClass.length; j++) { if (!ninClass[j] || ainClass[j]) { return false; } } return true; } /** * randomizes */ private void randomize() { for (int i = 0; i < m_Codebits.length; i++) { for (int j = 0; j < m_Codebits[i].length; j++) { double temp = r.nextDouble(); m_Codebits[i][j] = (temp < 0.5) ? false : true; } } } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } /* * TODO: Constructs codes as per: * Bose, R.C., Ray Chaudhuri (1960), On a class of error-correcting * binary group codes, Information and Control, 3, 68-79. * Hocquenghem, A. (1959) Codes corecteurs d'erreurs, Chiffres, 2, 147-156. */ //private class BCHCode extends Code {...} /** Constructs an exhaustive code assignment */ private class ExhaustiveCode extends Code { /** for serialization */ static final long serialVersionUID = 8090991039670804047L; /** * constructor * * @param numClasses the number of classes */ public ExhaustiveCode(int numClasses) { int width = (int)Math.pow(2, numClasses - 1) - 1; m_Codebits = new boolean[width][numClasses]; for (int j = 0; j < width; j++) { m_Codebits[j][0] = true; } for (int i = 1; i < numClasses; i++) { int skip = (int) Math.pow(2, numClasses - (i + 1)); for(int j = 0; j < width; j++) { m_Codebits[j][i] = ((j / skip) % 2 != 0); } } //System.err.println("Code:\n" + this); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NOMINAL_CLASS); return result; } /** * Builds the classifiers. * * @param insts the training data. * @throws Exception if a classifier can't be built */ public void buildClassifier(Instances insts) throws Exception { Instances newInsts; // can classifier handle the data? 
getCapabilities().testWithFail(insts); // zero training instances - could be incremental boolean zeroTrainingInstances = insts.numInstances() == 0; // remove instances with missing class insts = new Instances(insts); insts.deleteWithMissingClass(); if (m_Classifier == null) { throw new Exception("No base classifier has been set!"); } m_ZeroR = new ZeroR(); m_ZeroR.buildClassifier(insts); m_TwoClassDataset = null; int numClassifiers = insts.numClasses(); if (numClassifiers <= 2) { m_Classifiers = AbstractClassifier.makeCopies(m_Classifier, 1); m_Classifiers[0].buildClassifier(insts); m_ClassFilters = null; } else if (m_Method == METHOD_1_AGAINST_1) { // generate fastvector of pairs FastVector pairs = new FastVector(); for (int i=0; i<insts.numClasses(); i++) { for (int j=0; j<insts.numClasses(); j++) { if (j<=i) continue; int[] pair = new int[2]; pair[0] = i; pair[1] = j; pairs.addElement(pair); } } numClassifiers = pairs.size(); m_Classifiers = AbstractClassifier.makeCopies(m_Classifier, numClassifiers); m_ClassFilters = new Filter[numClassifiers]; m_SumOfWeights = new double[numClassifiers]; // generate the classifiers for (int i=0; i<numClassifiers; i++) { RemoveWithValues classFilter = new RemoveWithValues(); classFilter.setAttributeIndex("" + (insts.classIndex() + 1)); classFilter.setModifyHeader(true); classFilter.setInvertSelection(true); classFilter.setNominalIndicesArr((int[])pairs.elementAt(i)); Instances tempInstances = new Instances(insts, 0); tempInstances.setClassIndex(-1); classFilter.setInputFormat(tempInstances); newInsts = Filter.useFilter(insts, classFilter); if (newInsts.numInstances() > 0 || zeroTrainingInstances) { newInsts.setClassIndex(insts.classIndex()); m_Classifiers[i].buildClassifier(newInsts); m_ClassFilters[i] = classFilter; m_SumOfWeights[i] = newInsts.sumOfWeights(); } else { m_Classifiers[i] = null; m_ClassFilters[i] = null; } } // construct a two-class header version of the dataset m_TwoClassDataset = new Instances(insts, 0); int 
classIndex = m_TwoClassDataset.classIndex(); m_TwoClassDataset.setClassIndex(-1); m_TwoClassDataset.deleteAttributeAt(classIndex); FastVector classLabels = new FastVector(); classLabels.addElement("class0"); classLabels.addElement("class1"); m_TwoClassDataset.insertAttributeAt(new Attribute("class", classLabels), classIndex); m_TwoClassDataset.setClassIndex(classIndex); } else { // use error correcting code style methods Code code = null; switch (m_Method) { case METHOD_ERROR_EXHAUSTIVE: code = new ExhaustiveCode(numClassifiers); break; case METHOD_ERROR_RANDOM: code = new RandomCode(numClassifiers, (int)(numClassifiers * m_RandomWidthFactor), insts); break; case METHOD_1_AGAINST_ALL: code = new StandardCode(numClassifiers); break; default: throw new Exception("Unrecognized correction code type"); } numClassifiers = code.size(); m_Classifiers = AbstractClassifier.makeCopies(m_Classifier, numClassifiers); m_ClassFilters = new MakeIndicator[numClassifiers]; for (int i = 0; i < m_Classifiers.length; i++) { m_ClassFilters[i] = new MakeIndicator(); MakeIndicator classFilter = (MakeIndicator) m_ClassFilters[i]; classFilter.setAttributeIndex("" + (insts.classIndex() + 1)); classFilter.setValueIndices(code.getIndices(i)); classFilter.setNumeric(false); classFilter.setInputFormat(insts); newInsts = Filter.useFilter(insts, m_ClassFilters[i]); m_Classifiers[i].buildClassifier(newInsts); } } m_ClassAttribute = insts.classAttribute(); } /** * Returns the individual predictions of the base classifiers * for an instance. Used by StackedMultiClassClassifier. * Returns the probability for the second "class" predicted * by each base classifier. 
* * @param inst the instance to get the prediction for * @return the individual predictions * @throws Exception if the predictions can't be computed successfully */ public double[] individualPredictions(Instance inst) throws Exception { double[] result = null; if (m_Classifiers.length == 1) { result = new double[1]; result[0] = m_Classifiers[0].distributionForInstance(inst)[1]; } else { result = new double[m_ClassFilters.length]; for(int i = 0; i < m_ClassFilters.length; i++) { if (m_Classifiers[i] != null) { if (m_Method == METHOD_1_AGAINST_1) { Instance tempInst = (Instance)inst.copy(); tempInst.setDataset(m_TwoClassDataset); result[i] = m_Classifiers[i].distributionForInstance(tempInst)[1]; } else { m_ClassFilters[i].input(inst); m_ClassFilters[i].batchFinished(); result[i] = m_Classifiers[i]. distributionForInstance(m_ClassFilters[i].output())[1]; } } } } return result; } /** * Returns the distribution for an instance. * * @param inst the instance to get the distribution for * @return the distribution * @throws Exception if the distribution can't be computed successfully */ public double[] distributionForInstance(Instance inst) throws Exception { if (m_Classifiers.length == 1) { return m_Classifiers[0].distributionForInstance(inst); } double[] probs = new double[inst.numClasses()]; if (m_Method == METHOD_1_AGAINST_1) { double[][] r = new double[inst.numClasses()][inst.numClasses()]; double[][] n = new double[inst.numClasses()][inst.numClasses()]; for(int i = 0; i < m_ClassFilters.length; i++) { if (m_Classifiers[i] != null) { Instance tempInst = (Instance)inst.copy(); tempInst.setDataset(m_TwoClassDataset); double [] current = m_Classifiers[i].distributionForInstance(tempInst); Range range = new Range(((RemoveWithValues)m_ClassFilters[i]) .getNominalIndices()); range.setUpper(m_ClassAttribute.numValues()); int[] pair = range.getSelection(); if (m_pairwiseCoupling && inst.numClasses() > 2) { r[pair[0]][pair[1]] = current[0]; n[pair[0]][pair[1]] = 
m_SumOfWeights[i]; } else { if (current[0] > current[1]) { probs[pair[0]] += 1.0; } else { probs[pair[1]] += 1.0; } } } } if (m_pairwiseCoupling && inst.numClasses() > 2) { return pairwiseCoupling(n, r); } } else { // error correcting style methods for(int i = 0; i < m_ClassFilters.length; i++) { m_ClassFilters[i].input(inst); m_ClassFilters[i].batchFinished(); double [] current = m_Classifiers[i]. distributionForInstance(m_ClassFilters[i].output()); for (int j = 0; j < m_ClassAttribute.numValues(); j++) { if (((MakeIndicator)m_ClassFilters[i]).getValueRange().isInRange(j)) { probs[j] += current[1]; } else { probs[j] += current[0]; } } } } if (Utils.gr(Utils.sum(probs), 0)) { Utils.normalize(probs); return probs; } else { return m_ZeroR.distributionForInstance(inst); } } /** * Prints the classifiers. * * @return a string representation of the classifier */ public String toString() { if (m_Classifiers == null) { return "MultiClassClassifier: No model built yet."; } StringBuffer text = new StringBuffer(); text.append("MultiClassClassifier\n\n"); for (int i = 0; i < m_Classifiers.length; i++) { text.append("Classifier ").append(i + 1); if (m_Classifiers[i] != null) { if ((m_ClassFilters != null) && (m_ClassFilters[i] != null)) { if (m_ClassFilters[i] instanceof RemoveWithValues) { Range range = new Range(((RemoveWithValues)m_ClassFilters[i]) .getNominalIndices()); range.setUpper(m_ClassAttribute.numValues()); int[] pair = range.getSelection(); text.append(", " + (pair[0]+1) + " vs " + (pair[1]+1)); } else if (m_ClassFilters[i] instanceof MakeIndicator) { text.append(", using indicator values: "); text.append(((MakeIndicator)m_ClassFilters[i]).getValueRange()); } } text.append('\n'); text.append(m_Classifiers[i].toString() + "\n\n"); } else { text.append(" Skipped (no training examples)\n"); } } return text.toString(); } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration 
listOptions() { Vector vec = new Vector(4); vec.addElement(new Option( "\tSets the method to use. Valid values are 0 (1-against-all),\n" +"\t1 (random codes), 2 (exhaustive code), and 3 (1-against-1). (default 0)\n", "M", 1, "-M <num>")); vec.addElement(new Option( "\tSets the multiplier when using random codes. (default 2.0)", "R", 1, "-R <num>")); vec.addElement(new Option( "\tUse pairwise coupling (only has an effect for 1-against1)", "P", 0, "-P")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { vec.addElement(enu.nextElement()); } return vec.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M &lt;num&gt; * Sets the method to use. Valid values are 0 (1-against-all), * 1 (random codes), 2 (exhaustive code), and 3 (1-against-1). (default 0) * </pre> * * <pre> -R &lt;num&gt; * Sets the multiplier when using random codes. (default 2.0)</pre> * * <pre> -P * Use pairwise coupling (only has an effect for 1-against1)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.functions.Logistic)</pre> * * <pre> * Options specific to classifier weka.classifiers.functions.Logistic: * </pre> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -R &lt;ridge&gt; * Set the ridge in the log-likelihood.</pre> * * <pre> -M &lt;number&gt; * Set the maximum number of iterations (default -1, until convergence).</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String errorString = Utils.getOption('M', options); if (errorString.length() != 0) { setMethod(new SelectedTag(Integer.parseInt(errorString), TAGS_METHOD)); } else { setMethod(new SelectedTag(METHOD_1_AGAINST_ALL, TAGS_METHOD)); } String rfactorString = Utils.getOption('R', options); if (rfactorString.length() != 0) { setRandomWidthFactor((new Double(rfactorString)).doubleValue()); } else { setRandomWidthFactor(2.0); } setUsePairwiseCoupling(Utils.getFlag('P', options)); super.setOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 5]; int current = 0; options[current++] = "-M"; options[current++] = "" + m_Method; if (getUsePairwiseCoupling()) { options[current++] = "-P"; } options[current++] = "-R"; options[current++] = "" + m_RandomWidthFactor; System.arraycopy(superOptions, 0, options, current, superOptions.length); current += superOptions.length; while (current < options.length) { options[current++] = ""; } return options; } /** * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "A metaclassifier for handling multi-class datasets with 2-class " + "classifiers. 
This classifier is also capable of " + "applying error correcting output codes for increased accuracy."; } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String randomWidthFactorTipText() { return "Sets the width multiplier when using random codes. The number " + "of codes generated will be thus number multiplied by the number of " + "classes."; } /** * Gets the multiplier when generating random codes. Will generate * numClasses * m_RandomWidthFactor codes. * * @return the width multiplier */ public double getRandomWidthFactor() { return m_RandomWidthFactor; } /** * Sets the multiplier when generating random codes. Will generate * numClasses * m_RandomWidthFactor codes. * * @param newRandomWidthFactor the new width multiplier */ public void setRandomWidthFactor(double newRandomWidthFactor) { m_RandomWidthFactor = newRandomWidthFactor; } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String methodTipText() { return "Sets the method to use for transforming the multi-class problem into " + "several 2-class ones."; } /** * Gets the method used. Will be one of METHOD_1_AGAINST_ALL, * METHOD_ERROR_RANDOM, METHOD_ERROR_EXHAUSTIVE, or METHOD_1_AGAINST_1. * * @return the current method. */ public SelectedTag getMethod() { return new SelectedTag(m_Method, TAGS_METHOD); } /** * Sets the method used. Will be one of METHOD_1_AGAINST_ALL, * METHOD_ERROR_RANDOM, METHOD_ERROR_EXHAUSTIVE, or METHOD_1_AGAINST_1. * * @param newMethod the new method. */ public void setMethod(SelectedTag newMethod) { if (newMethod.getTags() == TAGS_METHOD) { m_Method = newMethod.getSelectedTag().getID(); } } /** * Set whether to use pairwise coupling with 1-vs-1 * classification to improve probability estimates. 
* * @param p true if pairwise coupling is to be used */ public void setUsePairwiseCoupling(boolean p) { m_pairwiseCoupling = p; } /** * Gets whether to use pairwise coupling with 1-vs-1 * classification to improve probability estimates. * * @return true if pairwise coupling is to be used */ public boolean getUsePairwiseCoupling() { return m_pairwiseCoupling; } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String usePairwiseCouplingTipText() { return "Use pairwise coupling (only has an effect for 1-against-1)."; } /** * Implements pairwise coupling. * * @param n the sum of weights used to train each model * @param r the probability estimate from each model * @return the coupled estimates */ public static double[] pairwiseCoupling(double[][] n, double[][] r) { // Initialize p and u array double[] p = new double[r.length]; for (int i =0; i < p.length; i++) { p[i] = 1.0 / (double)p.length; } double[][] u = new double[r.length][r.length]; for (int i = 0; i < r.length; i++) { for (int j = i + 1; j < r.length; j++) { u[i][j] = 0.5; } } // firstSum doesn't change double[] firstSum = new double[p.length]; for (int i = 0; i < p.length; i++) { for (int j = i + 1; j < p.length; j++) { firstSum[i] += n[i][j] * r[i][j]; firstSum[j] += n[i][j] * (1 - r[i][j]); } } // Iterate until convergence boolean changed; do { changed = false; double[] secondSum = new double[p.length]; for (int i = 0; i < p.length; i++) { for (int j = i + 1; j < p.length; j++) { secondSum[i] += n[i][j] * u[i][j]; secondSum[j] += n[i][j] * (1 - u[i][j]); } } for (int i = 0; i < p.length; i++) { if ((firstSum[i] == 0) || (secondSum[i] == 0)) { if (p[i] > 0) { changed = true; } p[i] = 0; } else { double factor = firstSum[i] / secondSum[i]; double pOld = p[i]; p[i] *= factor; if (Math.abs(pOld - p[i]) > 1.0e-3) { changed = true; } } } Utils.normalize(p); for (int i = 0; i < r.length; i++) { for (int j = i + 1; j < r.length; j++) { u[i][j] = p[i] / 
(p[i] + p[j]); } } } while (changed); return p; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new MultiClassClassifier(), argv); } }
29,150
28.151
177
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/MultiClassClassifierUpdateable.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * MultiClassClassifierUpdateable.java * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import weka.classifiers.UpdateableClassifier; import weka.core.Instance; import weka.core.Instances; import weka.core.OptionHandler; import weka.core.Range; import weka.core.RevisionUtils; import weka.core.Utils; import weka.filters.unsupervised.instance.RemoveWithValues; /** <!-- globalinfo-start --> * A metaclassifier for handling multi-class datasets with 2-class classifiers. This classifier is also capable of applying error correcting output codes for increased accuracy. The base classifier must be an updateable classifier * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M &lt;num&gt; * Sets the method to use. Valid values are 0 (1-against-all), * 1 (random codes), 2 (exhaustive code), and 3 (1-against-1). (default 0) * </pre> * * <pre> -R &lt;num&gt; * Sets the multiplier when using random codes. (default 2.0)</pre> * * <pre> -P * Use pairwise coupling (only has an effect for 1-against1)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.functions.SGD)</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (len@reeltwo.com) * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * * @version $Revision: 9248 $ */ public class MultiClassClassifierUpdateable extends MultiClassClassifier implements OptionHandler, UpdateableClassifier { /** For serialization */ private static final long serialVersionUID = -1619685269774366430L; /** * Constructor */ public MultiClassClassifierUpdateable() { m_Classifier = new weka.classifiers.functions.SGD(); } /** * @return a description of the classifier suitable for displaying in the * explorer/experimenter gui */ @Override public String globalInfo() { return "A metaclassifier for handling multi-class datasets with 2-class " + "classifiers. This classifier is also capable of " + "applying error correcting output codes for increased accuracy. " + "The base classifier must be an updateable classifier"; } @Override public void buildClassifier(Instances insts) throws Exception { if (m_Classifier == null) { throw new Exception("No base classifier has been set!"); } if (!(m_Classifier instanceof UpdateableClassifier)) { throw new Exception("Base classifier must be updateable!"); } super.buildClassifier(insts); } /** * Updates the classifier with the given instance. * * @param instance the new training instance to include in the model * @exception Exception if the instance could not be incorporated in the * model. 
*/ @Override public void updateClassifier(Instance instance) throws Exception { if (!instance.classIsMissing()) { if (m_Classifiers.length == 1) { ((UpdateableClassifier) m_Classifiers[0]).updateClassifier(instance); return; } for (int i = 0; i < m_Classifiers.length; i++) { if (m_Classifiers[i] != null) { m_ClassFilters[i].input(instance); Instance converted = m_ClassFilters[i].output(); if (converted != null) { converted.dataset().setClassIndex(m_ClassAttribute.index()); ((UpdateableClassifier) m_Classifiers[i]) .updateClassifier(converted); if (m_Method == METHOD_1_AGAINST_1) { m_SumOfWeights[i] += converted.weight(); } } } } } } /** * Returns the distribution for an instance. * * @param inst the instance to get the distribution for * @return the distribution * @throws Exception if the distribution can't be computed successfully */ @Override public double[] distributionForInstance(Instance inst) throws Exception { if (m_Classifiers.length == 1) { return m_Classifiers[0].distributionForInstance(inst); } double[] probs = new double[inst.numClasses()]; if (m_Method == METHOD_1_AGAINST_1) { double[][] r = new double[inst.numClasses()][inst.numClasses()]; double[][] n = new double[inst.numClasses()][inst.numClasses()]; for (int i = 0; i < m_ClassFilters.length; i++) { if (m_Classifiers[i] != null && m_SumOfWeights[i] > 0) { Instance tempInst = (Instance) inst.copy(); tempInst.setDataset(m_TwoClassDataset); double[] current = m_Classifiers[i].distributionForInstance(tempInst); Range range = new Range( ((RemoveWithValues) m_ClassFilters[i]).getNominalIndices()); range.setUpper(m_ClassAttribute.numValues()); int[] pair = range.getSelection(); if (m_pairwiseCoupling && inst.numClasses() > 2) { r[pair[0]][pair[1]] = current[0]; n[pair[0]][pair[1]] = m_SumOfWeights[i]; } else { if (current[0] > current[1]) { probs[pair[0]] += 1.0; } else { probs[pair[1]] += 1.0; } } } } if (m_pairwiseCoupling && inst.numClasses() > 2) { try { return pairwiseCoupling(n, r); } catch 
(IllegalArgumentException ex) { } } if (Utils.gr(Utils.sum(probs), 0)) { Utils.normalize(probs); } return probs; } else { probs = super.distributionForInstance(inst); } /* * if (probs.length == 1) { // ZeroR made the prediction return new * double[m_ClassAttribute.numValues()]; } */ return probs; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9248 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String[] argv) { runClassifier(new MultiClassClassifierUpdateable(), argv); } }
7,038
29.737991
230
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/MultiScheme.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * MultiScheme.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.Classifier; import weka.classifiers.Evaluation; import weka.classifiers.RandomizableMultipleClassifiersCombiner; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** <!-- globalinfo-start --> * Class for selecting a classifier from among several using cross validation on the training data or the performance on the training data. Performance is measured based on percent correct (classification) or mean-squared error (regression). * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -X &lt;number of folds&gt; * Use cross validation for model selection using the * given number of folds. (default 0, is to * use training error)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -B &lt;classifier specification&gt; * Full class name of classifier to include, followed * by scheme options. May be specified multiple times. 
* (default: "weka.classifiers.rules.ZeroR")</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class MultiScheme extends RandomizableMultipleClassifiersCombiner { /** for serialization */ static final long serialVersionUID = 5710744346128957520L; /** The classifier that had the best performance on training data. */ protected Classifier m_Classifier; /** The index into the vector for the selected scheme */ protected int m_ClassifierIndex; /** * Number of folds to use for cross validation (0 means use training * error for selection) */ protected int m_NumXValFolds; /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for selecting a classifier from among several using cross " + "validation on the training data or the performance on the " + "training data. Performance is measured based on percent correct " + "(classification) or mean-squared error (regression)."; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(1); newVector.addElement(new Option( "\tUse cross validation for model selection using the\n" + "\tgiven number of folds. (default 0, is to\n" + "\tuse training error)", "X", 1, "-X <number of folds>")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -X &lt;number of folds&gt; * Use cross validation for model selection using the * given number of folds. 
(default 0, is to * use training error)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -B &lt;classifier specification&gt; * Full class name of classifier to include, followed * by scheme options. May be specified multiple times. * (default: "weka.classifiers.rules.ZeroR")</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String numFoldsString = Utils.getOption('X', options); if (numFoldsString.length() != 0) { setNumFolds(Integer.parseInt(numFoldsString)); } else { setNumFolds(0); } super.setOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 2]; int current = 0; options[current++] = "-X"; options[current++] = "" + getNumFolds(); System.arraycopy(superOptions, 0, options, current, superOptions.length); return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String classifiersTipText() { return "The classifiers to be chosen from."; } /** * Sets the list of possible classifers to choose from. * * @param classifiers an array of classifiers with all options set. */ public void setClassifiers(Classifier [] classifiers) { m_Classifiers = classifiers; } /** * Gets the list of possible classifers to choose from. * * @return the array of Classifiers */ public Classifier [] getClassifiers() { return m_Classifiers; } /** * Gets a single classifier from the set of available classifiers. 
* * @param index the index of the classifier wanted * @return the Classifier */ public Classifier getClassifier(int index) { return m_Classifiers[index]; } /** * Gets the classifier specification string, which contains the class name of * the classifier and any options to the classifier * * @param index the index of the classifier string to retrieve, starting from * 0. * @return the classifier string, or the empty string if no classifier * has been assigned (or the index given is out of range). */ protected String getClassifierSpec(int index) { if (m_Classifiers.length < index) { return ""; } Classifier c = getClassifier(index); if (c instanceof OptionHandler) { return c.getClass().getName() + " " + Utils.joinOptions(((OptionHandler)c).getOptions()); } return c.getClass().getName(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String seedTipText() { return "The seed used for randomizing the data " + "for cross-validation."; } /** * Sets the seed for random number generation. * * @param seed the random number seed */ public void setSeed(int seed) { m_Seed = seed;; } /** * Gets the random number seed. * * @return the random number seed */ public int getSeed() { return m_Seed; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numFoldsTipText() { return "The number of folds used for cross-validation (if 0, " + "performance on training data will be used)."; } /** * Gets the number of folds for cross-validation. A number less * than 2 specifies using training error rather than cross-validation. * * @return the number of folds for cross-validation */ public int getNumFolds() { return m_NumXValFolds; } /** * Sets the number of folds for cross-validation. A number less * than 2 specifies using training error rather than cross-validation. 
* * @param numFolds the number of folds for cross-validation */ public void setNumFolds(int numFolds) { m_NumXValFolds = numFolds; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String debugTipText() { return "Whether debug information is output to console."; } /** * Set debugging mode * * @param debug true if debug output should be printed */ public void setDebug(boolean debug) { m_Debug = debug; } /** * Get whether debugging is turned on * * @return true if debugging output is on */ public boolean getDebug() { return m_Debug; } /** * Get the index of the classifier that was determined as best during * cross-validation. * * @return the index in the classifier array */ public int getBestClassifierIndex() { return m_ClassifierIndex; } /** * Buildclassifier selects a classifier from the set of classifiers * by minimising error on the training data. * * @param data the training data to be used for generating the * boosted classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { if (m_Classifiers.length == 0) { throw new Exception("No base classifiers have been set!"); } // can classifier handle the data? 
getCapabilities().testWithFail(data); // remove instances with missing class Instances newData = new Instances(data); newData.deleteWithMissingClass(); Random random = new Random(m_Seed); newData.randomize(random); if (newData.classAttribute().isNominal() && (m_NumXValFolds > 1)) { newData.stratify(m_NumXValFolds); } Instances train = newData; // train on all data by default Instances test = newData; // test on training data by default Classifier bestClassifier = null; int bestIndex = -1; double bestPerformance = Double.NaN; int numClassifiers = m_Classifiers.length; for (int i = 0; i < numClassifiers; i++) { Classifier currentClassifier = getClassifier(i); Evaluation evaluation; if (m_NumXValFolds > 1) { evaluation = new Evaluation(newData); for (int j = 0; j < m_NumXValFolds; j++) { // We want to randomize the data the same way for every // learning scheme. train = newData.trainCV(m_NumXValFolds, j, new Random (1)); test = newData.testCV(m_NumXValFolds, j); currentClassifier.buildClassifier(train); evaluation.setPriors(train); evaluation.evaluateModel(currentClassifier, test); } } else { currentClassifier.buildClassifier(train); evaluation = new Evaluation(train); evaluation.evaluateModel(currentClassifier, test); } double error = evaluation.errorRate(); if (m_Debug) { System.err.println("Error rate: " + Utils.doubleToString(error, 6, 4) + " for classifier " + currentClassifier.getClass().getName()); } if ((i == 0) || (error < bestPerformance)) { bestClassifier = currentClassifier; bestPerformance = error; bestIndex = i; } } m_ClassifierIndex = bestIndex; if (m_NumXValFolds > 1) { bestClassifier.buildClassifier(newData); } m_Classifier = bestClassifier; } /** * Returns class probabilities. 
* * @param instance the instance to be classified * @return the distribution for the instance * @throws Exception if instance could not be classified * successfully */ public double[] distributionForInstance(Instance instance) throws Exception { return m_Classifier.distributionForInstance(instance); } /** * Output a representation of this classifier * @return a string representation of the classifier */ public String toString() { if (m_Classifier == null) { return "MultiScheme: No model built yet."; } String result = "MultiScheme selection using"; if (m_NumXValFolds > 1) { result += " cross validation error"; } else { result += " error on training data"; } result += " from the following:\n"; for (int i = 0; i < m_Classifiers.length; i++) { result += '\t' + getClassifierSpec(i) + '\n'; } result += "Selected scheme: " + getClassifierSpec(m_ClassifierIndex) + "\n\n" + m_Classifier.toString(); return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method for testing this class. * * @param argv should contain the following arguments: * -t training file [-T test file] [-c class index] */ public static void main(String [] argv) { runClassifier(new MultiScheme(), argv); } }
13,444
27.127615
241
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/OptimisedRotationForest.java
/* Wrapper for rotation forest that peforms a Naive model selection Based on findings from http://pages.bangor.ac.uk/~mas00a/papers/lkjrmcs07.pdf Search for Number of Feature Subsets, K= Number of features in a subset: M = 3; Number of classifiers in the ensemble, L= Maybe try get the OOB error? the feature set is split randomly into K subsets, principal component analysis (PCA) is run separately on each subset, and a new set of n linear extracted features is constructed by pooling all principal components. */ package weka.classifiers.meta; import utilities.ClassifierTools; import weka.core.Instances; /** * * @author ajb */ public class OptimisedRotationForest extends RotationForest{ static int[] MVALUES={1,2,3,4,5,6,7,8,9,10}; static int[] LVALUES={5,10,15,20,25,30,40,50,60,70}; int folds=10; @Override public void buildClassifier(Instances train) throws Exception{ int bestM=0; double bestAcc=0; int bestL=0; if(train.numInstances()<folds) folds=train.numInstances(); for(int m:MVALUES){ for( int l:LVALUES){ RotationForest trainer=new RotationForest(); trainer.setMaxGroup(m); trainer.setMinGroup(m); trainer.setNumIterations(l); double acc=ClassifierTools.stratifiedCrossValidation(train, trainer, folds, 0); if(acc>bestAcc){ bestM=m; bestL=l; bestAcc=acc; } } } setMaxGroup(bestM); setMinGroup(bestM); setNumIterations(bestL); super.buildClassifier(train); } }
1,704
27.898305
95
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/OrdinalClassClassifier.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * OrdinalClassClassifier.java * Copyright (C) 2001 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import weka.classifiers.Classifier; import weka.classifiers.SingleClassifierEnhancer; import weka.classifiers.rules.ZeroR; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.unsupervised.attribute.MakeIndicator; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * Meta classifier that allows standard classification algorithms to be applied to ordinal class problems.<br/> * <br/> * For more information see: <br/> * <br/> * Eibe Frank, Mark Hall: A Simple Approach to Ordinal Classification. In: 12th European Conference on Machine Learning, 145-156, 2001. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Frank2001, * author = {Eibe Frank and Mark Hall}, * booktitle = {12th European Conference on Machine Learning}, * pages = {145-156}, * publisher = {Springer}, * title = {A Simple Approach to Ordinal Classification}, * year = {2001} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.J48)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> -U * Use unpruned tree.</pre> * * <pre> -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25)</pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2)</pre> * * <pre> -R * Use reduced error pruning.</pre> * * <pre> -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. * (default 3)</pre> * * <pre> -B * Use binary splits only.</pre> * * <pre> -S * Don't perform subtree raising.</pre> * * <pre> -L * Do not clean up after the tree has been built.</pre> * * <pre> -A * Laplace smoothing for predicted probabilities.</pre> * * <pre> -Q &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * <!-- options-end --> * * @author <a href="mailto:mhall@cs.waikato.ac.nz">Mark Hall</a> * @version $Revision 1.0 $ * @see OptionHandler */ public class OrdinalClassClassifier extends SingleClassifierEnhancer implements OptionHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -3461971774059603636L; /** The classifiers. (One for each class.) */ private Classifier [] m_Classifiers; /** The filters used to transform the class. 
*/ private MakeIndicator[] m_ClassFilters; /** ZeroR classifier for when all base classifier return zero probability. */ private ZeroR m_ZeroR; /** * String describing default classifier. * * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.trees.J48"; } /** * Default constructor. */ public OrdinalClassClassifier() { m_Classifier = new weka.classifiers.trees.J48(); } /** * Returns a string describing this attribute evaluator * @return a description of the evaluator suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Meta classifier that allows standard classification algorithms " +"to be applied to ordinal class problems.\n\n" + "For more information see: \n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Eibe Frank and Mark Hall"); result.setValue(Field.TITLE, "A Simple Approach to Ordinal Classification"); result.setValue(Field.BOOKTITLE, "12th European Conference on Machine Learning"); result.setValue(Field.YEAR, "2001"); result.setValue(Field.PAGES, "145-156"); result.setValue(Field.PUBLISHER, "Springer"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NOMINAL_CLASS); return result; } /** * Builds the classifiers. * * @param insts the training data. 
* @throws Exception if a classifier can't be built */ public void buildClassifier(Instances insts) throws Exception { Instances newInsts; // can classifier handle the data? getCapabilities().testWithFail(insts); // remove instances with missing class insts = new Instances(insts); insts.deleteWithMissingClass(); if (m_Classifier == null) { throw new Exception("No base classifier has been set!"); } m_ZeroR = new ZeroR(); m_ZeroR.buildClassifier(insts); int numClassifiers = insts.numClasses() - 1; numClassifiers = (numClassifiers == 0) ? 1 : numClassifiers; if (numClassifiers == 1) { m_Classifiers = AbstractClassifier.makeCopies(m_Classifier, 1); m_Classifiers[0].buildClassifier(insts); } else { m_Classifiers = AbstractClassifier.makeCopies(m_Classifier, numClassifiers); m_ClassFilters = new MakeIndicator[numClassifiers]; for (int i = 0; i < m_Classifiers.length; i++) { m_ClassFilters[i] = new MakeIndicator(); m_ClassFilters[i].setAttributeIndex("" + (insts.classIndex() + 1)); m_ClassFilters[i].setValueIndices(""+(i+2)+"-last"); m_ClassFilters[i].setNumeric(false); m_ClassFilters[i].setInputFormat(insts); newInsts = Filter.useFilter(insts, m_ClassFilters[i]); m_Classifiers[i].buildClassifier(newInsts); } } } /** * Returns the distribution for an instance. * * @param inst the instance to compute the distribution for * @return the class distribution for the given instance * @throws Exception if the distribution can't be computed successfully */ public double [] distributionForInstance(Instance inst) throws Exception { if (m_Classifiers.length == 1) { return m_Classifiers[0].distributionForInstance(inst); } double [] probs = new double[inst.numClasses()]; double [][] distributions = new double[m_ClassFilters.length][0]; for(int i = 0; i < m_ClassFilters.length; i++) { m_ClassFilters[i].input(inst); m_ClassFilters[i].batchFinished(); distributions[i] = m_Classifiers[i]. 
distributionForInstance(m_ClassFilters[i].output()); } for (int i = 0; i < inst.numClasses(); i++) { if (i == 0) { probs[i] = distributions[0][0]; } else if (i == inst.numClasses() - 1) { probs[i] = distributions[i - 1][1]; } else { probs[i] = distributions[i - 1][1] - distributions[i][1]; if (!(probs[i] > 0)) { System.err.println("Warning: estimated probability " + probs[i] + ". Rounding to 0."); probs[i] = 0; } } } if (Utils.gr(Utils.sum(probs), 0)) { Utils.normalize(probs); return probs; } else { return m_ZeroR.distributionForInstance(inst); } } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector vec = new Vector(); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { vec.addElement(enu.nextElement()); } return vec.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.J48)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> -U * Use unpruned tree.</pre> * * <pre> -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25)</pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2)</pre> * * <pre> -R * Use reduced error pruning.</pre> * * <pre> -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. 
* (default 3)</pre> * * <pre> -B * Use binary splits only.</pre> * * <pre> -S * Don't perform subtree raising.</pre> * * <pre> -L * Do not clean up after the tree has been built.</pre> * * <pre> -A * Laplace smoothing for predicted probabilities.</pre> * * <pre> -Q &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { super.setOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { return super.getOptions(); } /** * Prints the classifiers. * * @return a string representation of this classifier */ public String toString() { if (m_Classifiers == null) { return "OrdinalClassClassifier: No model built yet."; } StringBuffer text = new StringBuffer(); text.append("OrdinalClassClassifier\n\n"); for (int i = 0; i < m_Classifiers.length; i++) { text.append("Classifier ").append(i + 1); if (m_Classifiers[i] != null) { if ((m_ClassFilters != null) && (m_ClassFilters[i] != null)) { text.append(", using indicator values: "); text.append(m_ClassFilters[i].getValueRange()); } text.append('\n'); text.append(m_Classifiers[i].toString() + "\n"); } else { text.append(" Skipped (no training examples)\n"); } } return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.18 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new OrdinalClassClassifier(), argv); } }
12,356
27.276888
135
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/RacedIncrementalLogitBoost.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * RacedIncrementalLogitBoost.java * Copyright (C) 2002 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import weka.classifiers.Classifier; import weka.classifiers.RandomizableSingleClassifierEnhancer; import weka.classifiers.UpdateableClassifier; import weka.classifiers.rules.ZeroR; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.io.Serializable; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * Classifier for incremental learning of large datasets by way of racing logit-boosted committees.<br/> * <br/> * For more information see:<br/> * <br/> * Eibe Frank, Geoffrey Holmes, Richard Kirkby, Mark Hall: Racing committees for large 
datasets. In: Proceedings of the 5th International Conferenceon Discovery Science, 153-164, 2002. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Frank2002, * author = {Eibe Frank and Geoffrey Holmes and Richard Kirkby and Mark Hall}, * booktitle = {Proceedings of the 5th International Conferenceon Discovery Science}, * pages = {153-164}, * publisher = {Springer}, * title = { Racing committees for large datasets}, * year = {2002} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -C &lt;num&gt; * Minimum size of chunks. * (default 500)</pre> * * <pre> -M &lt;num&gt; * Maximum size of chunks. * (default 2000)</pre> * * <pre> -V &lt;num&gt; * Size of validation set. * (default 1000)</pre> * * <pre> -P &lt;pruning type&gt; * Committee pruning to perform. * 0=none, 1=log likelihood (default)</pre> * * <pre> -Q * Use resampling for boosting.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.trees.DecisionStump)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated learner.<p> * * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 6477 $ */ public class RacedIncrementalLogitBoost extends RandomizableSingleClassifierEnhancer implements UpdateableClassifier, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 908598343772170052L; /** no pruning */ public static final int PRUNETYPE_NONE = 0; /** log likelihood pruning */ public static final int PRUNETYPE_LOGLIKELIHOOD = 1; /** The pruning types */ public static final Tag [] TAGS_PRUNETYPE = { new Tag(PRUNETYPE_NONE, "No pruning"), new Tag(PRUNETYPE_LOGLIKELIHOOD, "Log likelihood pruning") }; /** The committees */ protected FastVector m_committees; /** The pruning type used */ protected int m_PruningType = PRUNETYPE_LOGLIKELIHOOD; /** Whether to use resampling */ protected boolean m_UseResampling = false; /** The number of classes */ protected int m_NumClasses; /** A threshold for responses (Friedman suggests between 2 and 4) */ protected static final double Z_MAX = 4; /** Dummy dataset with a numeric class */ protected Instances m_NumericClassData; /** The actual class attribute (for getting class names) */ protected Attribute m_ClassAttribute; /** The minimum chunk size used for training */ protected int m_minChunkSize = 500; /** The maimum chunk size used for training */ protected int m_maxChunkSize = 2000; /** The size of the validation set */ protected int m_validationChunkSize = 1000; /** The number of instances consumed */ protected int m_numInstancesConsumed; /** The instances used for validation */ protected Instances m_validationSet; /** The 
instances currently in memory for training */ protected Instances m_currentSet; /** The current best committee */ protected Committee m_bestCommittee; /** The default scheme used when committees aren't ready */ protected ZeroR m_zeroR = null; /** Whether the validation set has recently been changed */ protected boolean m_validationSetChanged; /** The maximum number of instances required for processing */ protected int m_maxBatchSizeRequired; /** The random number generator used */ protected Random m_RandomInstance = null; /** * Constructor. */ public RacedIncrementalLogitBoost() { m_Classifier = new weka.classifiers.trees.DecisionStump(); } /** * String describing default classifier. * * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.trees.DecisionStump"; } /** * Class representing a committee of LogitBoosted models */ protected class Committee implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = 5559880306684082199L; protected int m_chunkSize; /** number eaten from m_currentSet */ protected int m_instancesConsumed; protected FastVector m_models; protected double m_lastValidationError; protected double m_lastLogLikelihood; protected boolean m_modelHasChanged; protected boolean m_modelHasChangedLL; protected double[][] m_validationFs; protected double[][] m_newValidationFs; /** * constructor * * @param chunkSize the size of the chunk */ public Committee(int chunkSize) { m_chunkSize = chunkSize; m_instancesConsumed = 0; m_models = new FastVector(); m_lastValidationError = 1.0; m_lastLogLikelihood = Double.MAX_VALUE; m_modelHasChanged = true; m_modelHasChangedLL = true; m_validationFs = new double[m_validationChunkSize][m_NumClasses]; m_newValidationFs = new double[m_validationChunkSize][m_NumClasses]; } /** * update the committee * * @return true if the committee has changed * @throws Exception if anything goes wrong */ public boolean update() throws 
Exception { boolean hasChanged = false; while (m_currentSet.numInstances() - m_instancesConsumed >= m_chunkSize) { Classifier[] newModel = boost(new Instances(m_currentSet, m_instancesConsumed, m_chunkSize)); for (int i=0; i<m_validationSet.numInstances(); i++) { m_newValidationFs[i] = updateFS(m_validationSet.instance(i), newModel, m_validationFs[i]); } m_models.addElement(newModel); m_instancesConsumed += m_chunkSize; hasChanged = true; } if (hasChanged) { m_modelHasChanged = true; m_modelHasChangedLL = true; } return hasChanged; } /** reset consumation counts */ public void resetConsumed() { m_instancesConsumed = 0; } /** remove the last model from the committee */ public void pruneLastModel() { if (m_models.size() > 0) { m_models.removeElementAt(m_models.size()-1); m_modelHasChanged = true; m_modelHasChangedLL = true; } } /** * decide to keep the last model in the committee * @throws Exception if anything goes wrong */ public void keepLastModel() throws Exception { m_validationFs = m_newValidationFs; m_newValidationFs = new double[m_validationChunkSize][m_NumClasses]; m_modelHasChanged = true; m_modelHasChangedLL = true; } /** * calculate the log likelihood on the validation data * @return the log likelihood * @throws Exception if computation fails */ public double logLikelihood() throws Exception { if (m_modelHasChangedLL) { Instance inst; double llsum = 0.0; for (int i=0; i<m_validationSet.numInstances(); i++) { inst = m_validationSet.instance(i); llsum += (logLikelihood(m_validationFs[i],(int) inst.classValue())); } m_lastLogLikelihood = llsum / (double) m_validationSet.numInstances(); m_modelHasChangedLL = false; } return m_lastLogLikelihood; } /** * calculate the log likelihood on the validation data after adding the last model * @return the log likelihood * @throws Exception if computation fails */ public double logLikelihoodAfter() throws Exception { Instance inst; double llsum = 0.0; for (int i=0; i<m_validationSet.numInstances(); i++) { inst = 
m_validationSet.instance(i); llsum += (logLikelihood(m_newValidationFs[i],(int) inst.classValue())); } return llsum / (double) m_validationSet.numInstances(); } /** * calculates the log likelihood of an instance * @param Fs the Fs values * @param classIndex the class index * @return the log likelihood * @throws Exception if computation fails */ private double logLikelihood(double[] Fs, int classIndex) throws Exception { return -Math.log(distributionForInstance(Fs)[classIndex]); } /** * calculates the validation error of the committee * @return the validation error * @throws Exception if computation fails */ public double validationError() throws Exception { if (m_modelHasChanged) { Instance inst; int numIncorrect = 0; for (int i=0; i<m_validationSet.numInstances(); i++) { inst = m_validationSet.instance(i); if (classifyInstance(m_validationFs[i]) != inst.classValue()) numIncorrect++; } m_lastValidationError = (double) numIncorrect / (double) m_validationSet.numInstances(); m_modelHasChanged = false; } return m_lastValidationError; } /** * returns the chunk size used by the committee * * @return the chunk size */ public int chunkSize() { return m_chunkSize; } /** * returns the number of models in the committee * * @return the committee size */ public int committeeSize() { return m_models.size(); } /** * classifies an instance (given Fs values) with the committee * * @param Fs the Fs values * @return the classification * @throws Exception if anything goes wrong */ public double classifyInstance(double[] Fs) throws Exception { double [] dist = distributionForInstance(Fs); double max = 0; int maxIndex = 0; for (int i = 0; i < dist.length; i++) { if (dist[i] > max) { maxIndex = i; max = dist[i]; } } if (max > 0) { return maxIndex; } else { return Utils.missingValue(); } } /** * classifies an instance with the committee * * @param instance the instance to classify * @return the classification * @throws Exception if anything goes wrong */ public double 
classifyInstance(Instance instance) throws Exception { double [] dist = distributionForInstance(instance); switch (instance.classAttribute().type()) { case Attribute.NOMINAL: double max = 0; int maxIndex = 0; for (int i = 0; i < dist.length; i++) { if (dist[i] > max) { maxIndex = i; max = dist[i]; } } if (max > 0) { return maxIndex; } else { return Utils.missingValue(); } case Attribute.NUMERIC: return dist[0]; default: return Utils.missingValue(); } } /** * returns the distribution the committee generates for an instance (given Fs values) * * @param Fs the Fs values * @return the distribution * @throws Exception if anything goes wrong */ public double[] distributionForInstance(double[] Fs) throws Exception { double [] distribution = new double [m_NumClasses]; for (int j = 0; j < m_NumClasses; j++) { distribution[j] = RtoP(Fs, j); } return distribution; } /** * updates the Fs values given a new model in the committee * * @param instance the instance to use * @param newModel the new model * @param Fs the Fs values to update * @return the updated Fs values * @throws Exception if anything goes wrong */ public double[] updateFS(Instance instance, Classifier[] newModel, double[] Fs) throws Exception { instance = (Instance)instance.copy(); instance.setDataset(m_NumericClassData); double [] Fi = new double [m_NumClasses]; double Fsum = 0; for (int j = 0; j < m_NumClasses; j++) { Fi[j] = newModel[j].classifyInstance(instance); Fsum += Fi[j]; } Fsum /= m_NumClasses; double[] newFs = new double[Fs.length]; for (int j = 0; j < m_NumClasses; j++) { newFs[j] = Fs[j] + ((Fi[j] - Fsum) * (m_NumClasses - 1) / m_NumClasses); } return newFs; } /** * returns the distribution the committee generates for an instance * * @param instance the instance to get the distribution for * @return the distribution * @throws Exception if anything goes wrong */ public double[] distributionForInstance(Instance instance) throws Exception { instance = (Instance)instance.copy(); 
instance.setDataset(m_NumericClassData); double [] Fs = new double [m_NumClasses]; for (int i = 0; i < m_models.size(); i++) { double [] Fi = new double [m_NumClasses]; double Fsum = 0; Classifier[] model = (Classifier[]) m_models.elementAt(i); for (int j = 0; j < m_NumClasses; j++) { Fi[j] = model[j].classifyInstance(instance); Fsum += Fi[j]; } Fsum /= m_NumClasses; for (int j = 0; j < m_NumClasses; j++) { Fs[j] += (Fi[j] - Fsum) * (m_NumClasses - 1) / m_NumClasses; } } double [] distribution = new double [m_NumClasses]; for (int j = 0; j < m_NumClasses; j++) { distribution[j] = RtoP(Fs, j); } return distribution; } /** * performs a boosting iteration, returning a new model for the committee * * @param data the data to boost on * @return the new model * @throws Exception if anything goes wrong */ protected Classifier[] boost(Instances data) throws Exception { Classifier[] newModel = AbstractClassifier.makeCopies(m_Classifier, m_NumClasses); // Create a copy of the data with the class transformed into numeric Instances boostData = new Instances(data); boostData.deleteWithMissingClass(); int numInstances = boostData.numInstances(); // Temporarily unset the class index int classIndex = data.classIndex(); boostData.setClassIndex(-1); boostData.deleteAttributeAt(classIndex); boostData.insertAttributeAt(new Attribute("'pseudo class'"), classIndex); boostData.setClassIndex(classIndex); double [][] trainFs = new double [numInstances][m_NumClasses]; double [][] trainYs = new double [numInstances][m_NumClasses]; for (int j = 0; j < m_NumClasses; j++) { for (int i = 0, k = 0; i < numInstances; i++, k++) { while (data.instance(k).classIsMissing()) k++; trainYs[i][j] = (data.instance(k).classValue() == j) ? 
1 : 0; } } // Evaluate / increment trainFs from the classifiers for (int x = 0; x < m_models.size(); x++) { for (int i = 0; i < numInstances; i++) { double [] pred = new double [m_NumClasses]; double predSum = 0; Classifier[] model = (Classifier[]) m_models.elementAt(x); for (int j = 0; j < m_NumClasses; j++) { pred[j] = model[j].classifyInstance(boostData.instance(i)); predSum += pred[j]; } predSum /= m_NumClasses; for (int j = 0; j < m_NumClasses; j++) { trainFs[i][j] += (pred[j] - predSum) * (m_NumClasses-1) / m_NumClasses; } } } for (int j = 0; j < m_NumClasses; j++) { // Set instance pseudoclass and weights for (int i = 0; i < numInstances; i++) { double p = RtoP(trainFs[i], j); Instance current = boostData.instance(i); double z, actual = trainYs[i][j]; if (actual == 1) { z = 1.0 / p; if (z > Z_MAX) { // threshold z = Z_MAX; } } else if (actual == 0) { z = -1.0 / (1.0 - p); if (z < -Z_MAX) { // threshold z = -Z_MAX; } } else { z = (actual - p) / (p * (1 - p)); } double w = (actual - p) / z; current.setValue(classIndex, z); current.setWeight(numInstances * w); } Instances trainData = boostData; if (m_UseResampling) { double[] weights = new double[boostData.numInstances()]; for (int kk = 0; kk < weights.length; kk++) { weights[kk] = boostData.instance(kk).weight(); } trainData = boostData.resampleWithWeights(m_RandomInstance, weights); } // Build the classifier newModel[j].buildClassifier(trainData); } return newModel; } /** * outputs description of the committee * * @return a string representation of the classifier */ public String toString() { StringBuffer text = new StringBuffer(); text.append("RacedIncrementalLogitBoost: Best committee on validation data\n"); text.append("Base classifiers: \n"); for (int i = 0; i < m_models.size(); i++) { text.append("\nModel "+(i+1)); Classifier[] cModels = (Classifier[]) m_models.elementAt(i); for (int j = 0; j < m_NumClasses; j++) { text.append("\n\tClass " + (j + 1) + " (" + m_ClassAttribute.name() + "=" + 
m_ClassAttribute.value(j) + ")\n\n" + cModels[j].toString() + "\n"); } } text.append("Number of models: " + m_models.size() + "\n"); text.append("Chunk size per model: " + m_chunkSize + "\n"); return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 6477 $"); } } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NOMINAL_CLASS); // instances result.setMinimumNumberInstances(0); return result; } /** * Builds the classifier. * * @param data the instances to train the classifier with * @throws Exception if something goes wrong */ public void buildClassifier(Instances data) throws Exception { m_RandomInstance = new Random(m_Seed); Instances boostData; int classIndex = data.classIndex(); // can classifier handle the data? 
getCapabilities().testWithFail(data); // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); if (m_Classifier == null) { throw new Exception("A base classifier has not been specified!"); } if (!(m_Classifier instanceof WeightedInstancesHandler) && !m_UseResampling) { m_UseResampling = true; } m_NumClasses = data.numClasses(); m_ClassAttribute = data.classAttribute(); // Create a copy of the data with the class transformed into numeric boostData = new Instances(data); // Temporarily unset the class index boostData.setClassIndex(-1); boostData.deleteAttributeAt(classIndex); boostData.insertAttributeAt(new Attribute("'pseudo class'"), classIndex); boostData.setClassIndex(classIndex); m_NumericClassData = new Instances(boostData, 0); data.randomize(m_RandomInstance); // create the committees int cSize = m_minChunkSize; m_committees = new FastVector(); while (cSize <= m_maxChunkSize) { m_committees.addElement(new Committee(cSize)); m_maxBatchSizeRequired = cSize; cSize *= 2; } // set up for consumption m_validationSet = new Instances(data, m_validationChunkSize); m_currentSet = new Instances(data, m_maxBatchSizeRequired); m_bestCommittee = null; m_numInstancesConsumed = 0; // start eating what we've been given for (int i=0; i<data.numInstances(); i++) updateClassifier(data.instance(i)); } /** * Updates the classifier. 
* * @param instance the next instance in the stream of training data * @throws Exception if something goes wrong */ public void updateClassifier(Instance instance) throws Exception { m_numInstancesConsumed++; if (m_validationSet.numInstances() < m_validationChunkSize) { m_validationSet.add(instance); m_validationSetChanged = true; } else { m_currentSet.add(instance); boolean hasChanged = false; // update each committee for (int i=0; i<m_committees.size(); i++) { Committee c = (Committee) m_committees.elementAt(i); if (c.update()) { hasChanged = true; if (m_PruningType == PRUNETYPE_LOGLIKELIHOOD) { double oldLL = c.logLikelihood(); double newLL = c.logLikelihoodAfter(); if (newLL >= oldLL && c.committeeSize() > 1) { c.pruneLastModel(); if (m_Debug) System.out.println("Pruning " + c.chunkSize()+ " committee (" + oldLL + " < " + newLL + ")"); } else c.keepLastModel(); } else c.keepLastModel(); // no pruning } } if (hasChanged) { if (m_Debug) System.out.println("After consuming " + m_numInstancesConsumed + " instances... 
(" + m_validationSet.numInstances() + " + " + m_currentSet.numInstances() + " instances currently in memory)"); // find best committee double lowestError = 1.0; for (int i=0; i<m_committees.size(); i++) { Committee c = (Committee) m_committees.elementAt(i); if (c.committeeSize() > 0) { double err = c.validationError(); double ll = c.logLikelihood(); if (m_Debug) System.out.println("Chunk size " + c.chunkSize() + " with " + c.committeeSize() + " models, has validation error of " + err + ", log likelihood of " + ll); if (err < lowestError) { lowestError = err; m_bestCommittee = c; } } } } if (m_currentSet.numInstances() >= m_maxBatchSizeRequired) { m_currentSet = new Instances(m_currentSet, m_maxBatchSizeRequired); // reset consumation counts for (int i=0; i<m_committees.size(); i++) { Committee c = (Committee) m_committees.elementAt(i); c.resetConsumed(); } } } } /** * Convert from function responses to probabilities * * @param Fs an array containing the responses from each function * @param j the class value of interest * @return the probability prediction for j * @throws Exception if can't normalize */ protected static double RtoP(double []Fs, int j) throws Exception { double maxF = -Double.MAX_VALUE; for (int i = 0; i < Fs.length; i++) { if (Fs[i] > maxF) { maxF = Fs[i]; } } double sum = 0; double[] probs = new double[Fs.length]; for (int i = 0; i < Fs.length; i++) { probs[i] = Math.exp(Fs[i] - maxF); sum += probs[i]; } if (sum == 0) { throw new Exception("Can't normalize"); } return probs[j] / sum; } /** * Computes class distribution of an instance using the best committee. 
* * @param instance the instance to get the distribution for * @return the distribution * @throws Exception if anything goes wrong */ public double[] distributionForInstance(Instance instance) throws Exception { if (m_bestCommittee != null) return m_bestCommittee.distributionForInstance(instance); else { if (m_validationSetChanged || m_zeroR == null) { m_zeroR = new ZeroR(); m_zeroR.buildClassifier(m_validationSet); m_validationSetChanged = false; } return m_zeroR.distributionForInstance(instance); } } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Vector newVector = new Vector(9); newVector.addElement(new Option( "\tMinimum size of chunks.\n" +"\t(default 500)", "C", 1, "-C <num>")); newVector.addElement(new Option( "\tMaximum size of chunks.\n" +"\t(default 2000)", "M", 1, "-M <num>")); newVector.addElement(new Option( "\tSize of validation set.\n" +"\t(default 1000)", "V", 1, "-V <num>")); newVector.addElement(new Option( "\tCommittee pruning to perform.\n" +"\t0=none, 1=log likelihood (default)", "P", 1, "-P <pruning type>")); newVector.addElement(new Option( "\tUse resampling for boosting.", "Q", 0, "-Q")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -C &lt;num&gt; * Minimum size of chunks. * (default 500)</pre> * * <pre> -M &lt;num&gt; * Maximum size of chunks. * (default 2000)</pre> * * <pre> -V &lt;num&gt; * Size of validation set. * (default 1000)</pre> * * <pre> -P &lt;pruning type&gt; * Committee pruning to perform. * 0=none, 1=log likelihood (default)</pre> * * <pre> -Q * Use resampling for boosting.</pre> * * <pre> -S &lt;num&gt; * Random number seed. 
* (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.DecisionStump)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.DecisionStump: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String minChunkSize = Utils.getOption('C', options); if (minChunkSize.length() != 0) { setMinChunkSize(Integer.parseInt(minChunkSize)); } else { setMinChunkSize(500); } String maxChunkSize = Utils.getOption('M', options); if (maxChunkSize.length() != 0) { setMaxChunkSize(Integer.parseInt(maxChunkSize)); } else { setMaxChunkSize(2000); } String validationChunkSize = Utils.getOption('V', options); if (validationChunkSize.length() != 0) { setValidationChunkSize(Integer.parseInt(validationChunkSize)); } else { setValidationChunkSize(1000); } String pruneType = Utils.getOption('P', options); if (pruneType.length() != 0) { setPruningType(new SelectedTag(Integer.parseInt(pruneType), TAGS_PRUNETYPE)); } else { setPruningType(new SelectedTag(PRUNETYPE_LOGLIKELIHOOD, TAGS_PRUNETYPE)); } setUseResampling(Utils.getFlag('Q', options)); super.setOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 9]; int current = 0; if (getUseResampling()) { options[current++] = "-Q"; } options[current++] = "-C"; options[current++] = "" + getMinChunkSize(); options[current++] = "-M"; options[current++] = "" + getMaxChunkSize(); options[current++] = "-V"; options[current++] = "" + getValidationChunkSize(); options[current++] = "-P"; options[current++] = "" + m_PruningType; System.arraycopy(superOptions, 0, options, current, superOptions.length); current += superOptions.length; while (current < options.length) { options[current++] = ""; } return options; } /** * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Classifier for incremental learning of large datasets by way of " + "racing logit-boosted committees.\n\nFor more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Eibe Frank and Geoffrey Holmes and Richard " + "Kirkby and Mark Hall"); result.setValue(Field.TITLE, " Racing committees for large datasets"); result.setValue(Field.BOOKTITLE, "Proceedings of the 5th International Conference" + "on Discovery Science"); result.setValue(Field.YEAR, "2002"); result.setValue(Field.PAGES, "153-164"); result.setValue(Field.PUBLISHER, "Springer"); return result; } /** * Set the base learner. * * @param newClassifier the classifier to use. 
* @throws IllegalArgumentException if base classifier cannot handle numeric * class */ public void setClassifier(Classifier newClassifier) { Capabilities cap = newClassifier.getCapabilities(); if (!cap.handles(Capability.NUMERIC_CLASS)) throw new IllegalArgumentException("Base classifier cannot handle numeric class!"); super.setClassifier(newClassifier); } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String minChunkSizeTipText() { return "The minimum number of instances to train the base learner with."; } /** * Set the minimum chunk size * * @param chunkSize the minimum chunk size */ public void setMinChunkSize(int chunkSize) { m_minChunkSize = chunkSize; } /** * Get the minimum chunk size * * @return the chunk size */ public int getMinChunkSize() { return m_minChunkSize; } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String maxChunkSizeTipText() { return "The maximum number of instances to train the base learner with. The chunk sizes used will start at minChunkSize and grow twice as large for as many times as they are less than or equal to the maximum size."; } /** * Set the maximum chunk size * * @param chunkSize the maximum chunk size */ public void setMaxChunkSize(int chunkSize) { m_maxChunkSize = chunkSize; } /** * Get the maximum chunk size * * @return the chunk size */ public int getMaxChunkSize() { return m_maxChunkSize; } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String validationChunkSizeTipText() { return "The number of instances to hold out for validation. 
These instances will be taken from the beginning of the stream, so learning will not start until these instances have been consumed first.";
  }

  /**
   * Sets the number of instances withheld from learning to serve as the
   * validation chunk.
   *
   * @param chunkSize the validation chunk size
   */
  public void setValidationChunkSize(int chunkSize) {
    m_validationChunkSize = chunkSize;
  }

  /**
   * Gets the number of instances withheld as the validation chunk.
   *
   * @return the chunk size
   */
  public int getValidationChunkSize() {
    return m_validationChunkSize;
  }

  /**
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String pruningTypeTipText() {
    return "The pruning method to use within each committee. Log likelihood pruning will discard new models if they have a negative effect on the log likelihood of the validation data.";
  }

  /**
   * Sets the committee pruning strategy.
   *
   * @param pruneType the pruning type
   */
  public void setPruningType(SelectedTag pruneType) {
    // Silently ignore tags that do not belong to the pruning-type group.
    if (pruneType.getTags() == TAGS_PRUNETYPE) {
      m_PruningType = pruneType.getSelectedTag().getID();
    }
  }

  /**
   * Gets the committee pruning strategy.
   *
   * @return the type
   */
  public SelectedTag getPruningType() {
    return new SelectedTag(m_PruningType, TAGS_PRUNETYPE);
  }

  /**
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String useResamplingTipText() {
    return "Force the use of resampling data rather than using the weight-handling capabilities of the base classifier. Resampling is always used if the base classifier cannot handle weighted instances.";
  }

  /**
   * Sets whether resampling is forced.
   *
   * @param r true if resampling should be done
   */
  public void setUseResampling(boolean r) {
    m_UseResampling = r;
  }

  /**
   * Gets whether resampling is forced.
   *
   * @return true if resampling output is on
   */
  public boolean getUseResampling() {
    return m_UseResampling;
  }

  /**
   * Reports the chunk size used by the best committee found so far.
   *
   * @return the best committee chunk size, or 0 if no committee has been built
   */
  public int getBestCommitteeChunkSize() {
    return (m_bestCommittee == null) ? 0 : m_bestCommittee.chunkSize();
  }

  /**
   * Reports the number of members in the best committee found so far.
   *
   * @return the number of members, or 0 if no committee has been built
   */
  public int getBestCommitteeSize() {
    return (m_bestCommittee == null) ? 0 : m_bestCommittee.committeeSize();
  }

  /**
   * Reports the best committee's percentage error on the validation data.
   *
   * @return the best committee's error, or 100 if it cannot be computed
   */
  public double getBestCommitteeErrorEstimate() {
    if (m_bestCommittee == null) {
      return 100.0;
    }
    try {
      return m_bestCommittee.validationError() * 100.0;
    } catch (Exception e) {
      System.err.println(e.getMessage());
      return 100.0;
    }
  }

  /**
   * Reports the best committee's log likelihood on the validation data.
   *
   * @return best committee's log likelihood, or Double.MAX_VALUE on failure
   */
  public double getBestCommitteeLLEstimate() {
    if (m_bestCommittee == null) {
      return Double.MAX_VALUE;
    }
    try {
      return m_bestCommittee.logLikelihood();
    } catch (Exception e) {
      System.err.println(e.getMessage());
      return Double.MAX_VALUE;
    }
  }

  /**
   * Returns description of the boosted classifier.
   *
   * @return description of the boosted classifier as a string
   */
  public String toString() {
    if (m_bestCommittee != null) {
      return m_bestCommittee.toString();
    }
    // No committee available: lazily (re)build a ZeroR fallback from the
    // validation set whenever it has changed since the last attempt.
    if ((m_validationSetChanged || m_zeroR == null) && m_validationSet != null
        && m_validationSet.numInstances() > 0) {
      m_zeroR = new ZeroR();
      try {
        m_zeroR.buildClassifier(m_validationSet);
      } catch (Exception e) {
        // best effort: fall through and report whatever model state exists
      }
      m_validationSetChanged = false;
    }
    if (m_zeroR != null) {
      return ("RacedIncrementalLogitBoost: insufficient data to build model, resorting to ZeroR:\n\n"
          + m_zeroR.toString());
    }
    return ("RacedIncrementalLogitBoost: no model built yet.");
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 6477 $");
  }

  /**
   * Main method for this class.
   *
   * @param argv the commandline parameters
   */
  public static void main(String[] argv) {
    runClassifier(new RacedIncrementalLogitBoost(), argv);
  }
}
37,430
26.891952
219
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/RandomCommittee.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RandomCommittee.java * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.Random; import java.util.ArrayList; import weka.classifiers.AbstractClassifier; import weka.classifiers.RandomizableParallelIteratedSingleClassifierEnhancer; import weka.core.Instance; import weka.core.Instances; import weka.core.Randomizable; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.PartitionGenerator; /** <!-- globalinfo-start --> * Class for building an ensemble of randomizable base classifiers. Each base classifiers is built using a different random number seed (but based one the same data). The final prediction is a straight average of the predictions generated by the individual base classifiers. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Number of iterations. * (default 10)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.trees.RandomTree)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.RandomTree: * </pre> * * <pre> -K &lt;number of attributes&gt; * Number of attributes to randomly investigate * (&lt;1 = int(log(#attributes)+1)).</pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf.</pre> * * <pre> -S &lt;num&gt; * Seed for random number generator. * (default 1)</pre> * * <pre> -depth &lt;num&gt; * The maximum depth of the tree, 0 for unlimited. * (default 0)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated classifier.<p> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 9317 $ */ public class RandomCommittee extends RandomizableParallelIteratedSingleClassifierEnhancer implements WeightedInstancesHandler, PartitionGenerator { /** for serialization */ static final long serialVersionUID = -9204394360557300093L; /** training data */ protected Instances m_data; /** * Constructor. */ public RandomCommittee() { m_Classifier = new weka.classifiers.trees.RandomTree(); } /** * String describing default classifier. * * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.trees.RandomTree"; } /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building an ensemble of randomizable base classifiers. Each " + "base classifiers is built using a different random number seed (but based " + "one the same data). The final prediction is a straight average of the " + "predictions generated by the individual base classifiers."; } /** * Builds the committee of randomizable classifiers. * * @param data the training data to be used for generating the * bagged classifier. 
* @exception Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(data); // remove instances with missing class m_data = new Instances(data); m_data.deleteWithMissingClass(); super.buildClassifier(m_data); if (!(m_Classifier instanceof Randomizable)) { throw new IllegalArgumentException("Base learner must implement Randomizable!"); } m_Classifiers = AbstractClassifier.makeCopies(m_Classifier, m_NumIterations); Random random = m_data.getRandomNumberGenerator(m_Seed); // Resample data based on weights if base learner can't handle weights if (!(m_Classifier instanceof WeightedInstancesHandler)) { m_data = m_data.resampleWithWeights(random); } for (int j = 0; j < m_Classifiers.length; j++) { // Set the random number seed for the current classifier. ((Randomizable) m_Classifiers[j]).setSeed(random.nextInt()); // Build the classifier. // m_Classifiers[j].buildClassifier(m_data); } buildClassifiers(); // save memory m_data = null; } /** * Returns a training set for a particular iteration. * * @param iteration the number of the iteration for the requested training set. * @return the training set for the supplied iteration number * @throws Exception if something goes wrong when generating a training set. */ protected synchronized Instances getTrainingSet(int iteration) throws Exception { // we don't manipulate the training data in any way. return m_data; } /** * Calculates the class membership probabilities for the given test * instance. 
* * @param instance the instance to be classified * @return preedicted class probability distribution * @exception Exception if distribution can't be computed successfully */ public double[] distributionForInstance(Instance instance) throws Exception { double [] sums = new double [instance.numClasses()], newProbs; for (int i = 0; i < m_NumIterations; i++) { if (instance.classAttribute().isNumeric() == true) { sums[0] += m_Classifiers[i].classifyInstance(instance); } else { newProbs = m_Classifiers[i].distributionForInstance(instance); for (int j = 0; j < newProbs.length; j++) sums[j] += newProbs[j]; } } if (instance.classAttribute().isNumeric() == true) { sums[0] /= (double)m_NumIterations; return sums; } else if (Utils.eq(Utils.sum(sums), 0)) { return sums; } else { Utils.normalize(sums); return sums; } } /** * Returns description of the committee. * * @return description of the committee as a string */ public String toString() { if (m_Classifiers == null) { return "RandomCommittee: No model built yet."; } StringBuffer text = new StringBuffer(); text.append("All the base classifiers: \n\n"); for (int i = 0; i < m_Classifiers.length; i++) text.append(m_Classifiers[i].toString() + "\n\n"); return text.toString(); } /** * Builds the classifier to generate a partition. */ public void generatePartition(Instances data) throws Exception { if (m_Classifier instanceof PartitionGenerator) buildClassifier(data); else throw new Exception("Classifier: " + getClassifierSpec() + " cannot generate a partition"); } /** * Computes an array that indicates leaf membership */ public double[] getMembershipValues(Instance inst) throws Exception { if (m_Classifier instanceof PartitionGenerator) { ArrayList<double[]> al = new ArrayList<double[]>(); int size = 0; for (int i = 0; i < m_Classifiers.length; i++) { double[] r = ((PartitionGenerator)m_Classifiers[i]). 
getMembershipValues(inst); size += r.length; al.add(r); } double[] values = new double[size]; int pos = 0; for (double[] v: al) { System.arraycopy(v, 0, values, pos, v.length); pos += v.length; } return values; } else throw new Exception("Classifier: " + getClassifierSpec() + " cannot generate a partition"); } /** * Returns the number of elements in the partition. */ public int numElements() throws Exception { if (m_Classifier instanceof PartitionGenerator) { int size = 0; for (int i = 0; i < m_Classifiers.length; i++) { size += ((PartitionGenerator)m_Classifiers[i]).numElements(); } return size; } else throw new Exception("Classifier: " + getClassifierSpec() + " cannot generate a partition"); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9317 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new RandomCommittee(), argv); } }
9,299
28.903537
274
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/RandomSubSpace.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RandomSubSpace.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.Arrays; import java.util.Collections; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.Classifier; import weka.classifiers.RandomizableParallelIteratedSingleClassifierEnhancer; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.Randomizable; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.filters.unsupervised.attribute.Remove; /** <!-- globalinfo-start --> * This method constructs a decision tree based classifier that maintains highest accuracy on training data and improves on generalization accuracy as it grows in complexity. The classifier consists of multiple trees constructed systematically by pseudorandomly selecting subsets of components of the feature vector, that is, trees constructed in randomly chosen subspaces.<br/> * <br/> * For more information, see<br/> * <br/> * Tin Kam Ho (1998). The Random Subspace Method for Constructing Decision Forests. 
IEEE Transactions on Pattern Analysis and Machine Intelligence. 20(8):832-844. URL http://citeseer.ist.psu.edu/ho98random.html. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;article{Ho1998, * author = {Tin Kam Ho}, * journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, * number = {8}, * pages = {832-844}, * title = {The Random Subspace Method for Constructing Decision Forests}, * volume = {20}, * year = {1998}, * ISSN = {0162-8828}, * URL = {http://citeseer.ist.psu.edu/ho98random.html} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P * Size of each subspace: * &lt; 1: percentage of the number of attributes * &gt;=1: absolute number of attributes * </pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Number of iterations. * (default 10)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.trees.REPTree)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.REPTree: * </pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf (default 2).</pre> * * <pre> -V &lt;minimum variance for split&gt; * Set minimum numeric class variance proportion * of train variance for split (default 1e-3).</pre> * * <pre> -N &lt;number of folds&gt; * Number of folds for reduced error pruning (default 3).</pre> * * <pre> -S &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * * <pre> -P * No pruning.</pre> * * <pre> -L * Maximum tree depth (default -1, no maximum)</pre> * <!-- options-end --> * * Options after -- are passed to the designated classifier.<p> * * @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz) * @author Peter Reutemann (fracpete@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class RandomSubSpace extends RandomizableParallelIteratedSingleClassifierEnhancer implements WeightedInstancesHandler, TechnicalInformationHandler { /** for serialization */ private static final long serialVersionUID = 1278172513912424947L; /** The size of each bag sample, as a percentage of the training size */ protected double m_SubSpaceSize = 0.5; /** a ZeroR model in case no model can be built from the data */ protected Classifier m_ZeroR; /** Training data */ protected Instances m_data; /** * Constructor. */ public RandomSubSpace() { super(); m_Classifier = new weka.classifiers.trees.REPTree(); } /** * Returns a string describing classifier * * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "This method constructs a decision tree based classifier that " + "maintains highest accuracy on training data and improves on " + "generalization accuracy as it grows in complexity. 
The classifier " + "consists of multiple trees constructed systematically by " + "pseudorandomly selecting subsets of components of the feature vector, " + "that is, trees constructed in randomly chosen subspaces.\n\n" + "For more information, see\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Tin Kam Ho"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "The Random Subspace Method for Constructing Decision Forests"); result.setValue(Field.JOURNAL, "IEEE Transactions on Pattern Analysis and Machine Intelligence"); result.setValue(Field.VOLUME, "20"); result.setValue(Field.NUMBER, "8"); result.setValue(Field.PAGES, "832-844"); result.setValue(Field.URL, "http://citeseer.ist.psu.edu/ho98random.html"); result.setValue(Field.ISSN, "0162-8828"); return result; } /** * String describing default classifier. * * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.trees.REPTree"; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tSize of each subspace:\n" + "\t\t< 1: percentage of the number of attributes\n" + "\t\t>=1: absolute number of attributes\n", "P", 1, "-P")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { result.addElement(enu.nextElement()); } return result.elements(); } /** * Parses a given list of options. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P * Size of each subspace: * &lt; 1: percentage of the number of attributes * &gt;=1: absolute number of attributes * </pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Number of iterations. * (default 10)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.REPTree)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.REPTree: * </pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf (default 2).</pre> * * <pre> -V &lt;minimum variance for split&gt; * Set minimum numeric class variance proportion * of train variance for split (default 1e-3).</pre> * * <pre> -N &lt;number of folds&gt; * Number of folds for reduced error pruning (default 3).</pre> * * <pre> -S &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * * <pre> -P * No pruning.</pre> * * <pre> -L * Maximum tree depth (default -1, no maximum)</pre> * <!-- options-end --> * * Options after -- are passed to the designated classifier.<p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('P', options); if (tmpStr.length() != 0) setSubSpaceSize(Double.parseDouble(tmpStr)); else setSubSpaceSize(0.5); super.setOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { Vector result; String[] options; int i; result = new Vector(); result.add("-P"); result.add("" + getSubSpaceSize()); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); return (String[]) result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String subSpaceSizeTipText() { return "Size of each subSpace: if less than 1 as a percentage of the " + "number of attributes, otherwise the absolute number of attributes."; } /** * Gets the size of each subSpace, as a percentage of the training set size. * * @return the subSpace size, as a percentage. */ public double getSubSpaceSize() { return m_SubSpaceSize; } /** * Sets the size of each subSpace, as a percentage of the training set size. * * @param value the subSpace size, as a percentage. */ public void setSubSpaceSize(double value) { m_SubSpaceSize = value; } /** * calculates the number of attributes * * @param total the available number of attributes * @param fraction the fraction - if less than 1 it represents the * percentage, otherwise the absolute number of attributes * @return the number of attributes to use */ protected int numberOfAttributes(int total, double fraction) { int k = (int) Math.round((fraction < 1.0) ? total*fraction : fraction); if (k > total) k = total; if (k < 1) k = 1; return k; } /** * generates an index string describing a random subspace, suitable for * the Remove filter. 
* * @param indices the attribute indices * @param subSpaceSize the size of the subspace * @param classIndex the class index * @param random the random number generator * @return the generated string describing the subspace */ protected String randomSubSpace(Integer[] indices, int subSpaceSize, int classIndex, Random random) { Collections.shuffle(Arrays.asList(indices), random); StringBuffer sb = new StringBuffer(""); for(int i = 0; i < subSpaceSize; i++) { sb.append(indices[i]+","); } sb.append(classIndex); if (getDebug()) System.out.println("subSPACE = " + sb); return sb.toString(); } /** * builds the classifier. * * @param data the training data to be used for generating the * classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(data); // remove instances with missing class m_data = new Instances(data); m_data.deleteWithMissingClass(); // only class? 
-> build ZeroR model if (m_data.numAttributes() == 1) { System.err.println( "Cannot build model (only class attribute present in data!), " + "using ZeroR model instead!"); m_ZeroR = new weka.classifiers.rules.ZeroR(); m_ZeroR.buildClassifier(m_data); return; } else { m_ZeroR = null; } super.buildClassifier(data); Integer[] indices = new Integer[data.numAttributes()-1]; int classIndex = data.classIndex(); int offset = 0; for(int i = 0; i < indices.length+1; i++) { if (i != classIndex) { indices[offset++] = i+1; } } int subSpaceSize = numberOfAttributes(indices.length, getSubSpaceSize()); Random random = data.getRandomNumberGenerator(m_Seed); for (int j = 0; j < m_Classifiers.length; j++) { if (m_Classifier instanceof Randomizable) { ((Randomizable) m_Classifiers[j]).setSeed(random.nextInt()); } FilteredClassifier fc = new FilteredClassifier(); fc.setClassifier(m_Classifiers[j]); m_Classifiers[j] = fc; Remove rm = new Remove(); rm.setOptions(new String[]{"-V", "-R", randomSubSpace(indices,subSpaceSize,classIndex+1,random)}); fc.setFilter(rm); // build the classifier //m_Classifiers[j].buildClassifier(m_data); } buildClassifiers(); // save memory m_data = null; } /** * Returns a training set for a particular iteration. * * @param iteration the number of the iteration for the requested training set. * @return the training set for the supplied iteration number * @throws Exception if something goes wrong when generating a training set. */ protected synchronized Instances getTrainingSet(int iteration) throws Exception { // We don't manipulate the training data in any way. The FilteredClassifiers // take care of generating the sub-spaces. return m_data; } /** * Calculates the class membership probabilities for the given test * instance. 
* * @param instance the instance to be classified * @return preedicted class probability distribution * @throws Exception if distribution can't be computed successfully */ public double[] distributionForInstance(Instance instance) throws Exception { // default model? if (m_ZeroR != null) { return m_ZeroR.distributionForInstance(instance); } double[] sums = new double [instance.numClasses()], newProbs; for (int i = 0; i < m_NumIterations; i++) { if (instance.classAttribute().isNumeric() == true) { sums[0] += m_Classifiers[i].classifyInstance(instance); } else { newProbs = m_Classifiers[i].distributionForInstance(instance); for (int j = 0; j < newProbs.length; j++) sums[j] += newProbs[j]; } } if (instance.classAttribute().isNumeric() == true) { sums[0] /= (double)m_NumIterations; return sums; } else if (Utils.eq(Utils.sum(sums), 0)) { return sums; } else { Utils.normalize(sums); return sums; } } /** * Returns description of the bagged classifier. * * @return description of the bagged classifier as a string */ public String toString() { // only ZeroR model? if (m_ZeroR != null) { StringBuffer buf = new StringBuffer(); buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n"); buf.append(m_ZeroR.toString()); return buf.toString(); } if (m_Classifiers == null) { return "RandomSubSpace: No model built yet."; } StringBuffer text = new StringBuffer(); text.append("All the base classifiers: \n\n"); for (int i = 0; i < m_Classifiers.length; i++) text.append(m_Classifiers[i].toString() + "\n\n"); return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method for testing this class. 
* * @param args the options */ public static void main(String[] args) { runClassifier(new RandomSubSpace(), args); } }
16,718
29.288043
378
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/RegressionByDiscretization.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RegressionByDiscretization.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.ConditionalDensityEstimator; import weka.classifiers.IntervalEstimator; import weka.classifiers.SingleClassifierEnhancer; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.Utils; import weka.estimators.UnivariateDensityEstimator; import weka.estimators.UnivariateEqualFrequencyHistogramEstimator; import weka.estimators.UnivariateIntervalEstimator; import weka.estimators.UnivariateKernelEstimator; import weka.estimators.UnivariateNormalEstimator; import weka.estimators.UnivariateQuantileEstimator; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Discretize; /** <!-- globalinfo-start --> * A regression scheme that employs any classifier on a copy of the data that has the class attribute (equal-width) discretized. 
The predicted value is the expected value of the mean class value for each discretized interval (based on the predicted probabilities for each interval). * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -B &lt;int&gt; * Number of bins for equal-width discretization * (default 10). * </pre> * * <pre> -E * Whether to delete empty bins after discretization * (default false). * </pre> * * <pre> -F * Use equal-frequency instead of equal-width discretization.</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.J48)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> -U * Use unpruned tree.</pre> * * <pre> -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25)</pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2)</pre> * * <pre> -R * Use reduced error pruning.</pre> * * <pre> -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. * (default 3)</pre> * * <pre> -B * Use binary splits only.</pre> * * <pre> -S * Don't perform subtree raising.</pre> * * <pre> -L * Do not clean up after the tree has been built.</pre> * * <pre> -A * Laplace smoothing for predicted probabilities.</pre> * * <pre> -Q &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * <!-- options-end --> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class RegressionByDiscretization extends SingleClassifierEnhancer implements IntervalEstimator, ConditionalDensityEstimator { /** for serialization */ static final long serialVersionUID = 5066426153134050378L; /** The discretization filter. 
*/ protected Discretize m_Discretizer = new Discretize(); /** The number of discretization intervals. */ protected int m_NumBins = 10; /** The mean values for each Discretized class interval. */ protected double [] m_ClassMeans; /** The class counts for each Discretized class interval. */ protected int [] m_ClassCounts; /** Whether to delete empty intervals. */ protected boolean m_DeleteEmptyBins; /** Mapping to convert indices in case empty bins are deleted. */ protected int[] m_OldIndexToNewIndex; /** Header of discretized data. */ protected Instances m_DiscretizedHeader = null; /** Use equal-frequency binning */ protected boolean m_UseEqualFrequency = false; /** Whether to minimize absolute error, rather than squared error. */ protected boolean m_MinimizeAbsoluteError = false; /** Use histogram estimator */ public static final int ESTIMATOR_HISTOGRAM = 0; /** filter: Standardize training data */ public static final int ESTIMATOR_KERNEL = 1; /** filter: No normalization/standardization */ public static final int ESTIMATOR_NORMAL = 2; /** The filter to apply to the training data */ public static final Tag [] TAGS_ESTIMATOR = { new Tag(ESTIMATOR_HISTOGRAM, "Histogram density estimator"), new Tag(ESTIMATOR_KERNEL, "Kernel density estimator"), new Tag(ESTIMATOR_NORMAL, "Normal density estimator"), }; /** Which estimator to use (default: histogram) */ protected int m_estimatorType = ESTIMATOR_HISTOGRAM; /** The original target values in the training data */ protected double[] m_OriginalTargetValues = null; /** The converted target values in the training data */ protected int[] m_NewTargetValues = null; /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "A regression scheme that employs any " + "classifier on a copy of the data that has the class attribute " + "discretized. 
The predicted value is the expected value of the " + "mean class value for each discretized interval (based on the " + "predicted probabilities for each interval). This class now " + "also supports conditional density estimation by building " + "a univariate density estimator from the target values in " + "the training data, weighted by the class probabilities. \n\n" + "For more information on this process, see\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Eibe Frank and Remco R. Bouckaert"); result.setValue(Field.TITLE, "Conditional Density Estimation with Class Probability Estimators"); result.setValue(Field.BOOKTITLE, "First Asian Conference on Machine Learning"); result.setValue(Field.YEAR, "2009"); result.setValue(Field.PAGES, "65-81"); result.setValue(Field.PUBLISHER, "Springer Verlag"); result.setValue(Field.ADDRESS, "Berlin"); return result; } /** * String describing default classifier. * * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.trees.J48"; } /** * Default constructor. */ public RegressionByDiscretization() { m_Classifier = new weka.classifiers.trees.J48(); } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.setMinimumNumberInstances(2); return result; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @throws Exception if the classifier has not been generated successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); // Discretize the training data m_Discretizer.setIgnoreClass(true); m_Discretizer.setAttributeIndices("" + (instances.classIndex() + 1)); m_Discretizer.setBins(getNumBins()); m_Discretizer.setUseEqualFrequency(getUseEqualFrequency()); m_Discretizer.setInputFormat(instances); Instances newTrain = Filter.useFilter(instances, m_Discretizer); // Should empty bins be deleted? 
m_OldIndexToNewIndex = null; if (m_DeleteEmptyBins) { // Figure out which classes are empty after discretization int numNonEmptyClasses = 0; boolean[] notEmptyClass = new boolean[newTrain.numClasses()]; for (int i = 0; i < newTrain.numInstances(); i++) { if (!notEmptyClass[(int)newTrain.instance(i).classValue()]) { numNonEmptyClasses++; notEmptyClass[(int)newTrain.instance(i).classValue()] = true; } } // Compute new list of non-empty classes and mapping of indices FastVector newClassVals = new FastVector(numNonEmptyClasses); m_OldIndexToNewIndex = new int[newTrain.numClasses()]; for (int i = 0; i < newTrain.numClasses(); i++) { if (notEmptyClass[i]) { m_OldIndexToNewIndex[i] = newClassVals.size(); newClassVals.addElement(newTrain.classAttribute().value(i)); } } // Compute new header information Attribute newClass = new Attribute(newTrain.classAttribute().name(), newClassVals); FastVector newAttributes = new FastVector(newTrain.numAttributes()); for (int i = 0; i < newTrain.numAttributes(); i++) { if (i != newTrain.classIndex()) { newAttributes.addElement(newTrain.attribute(i).copy()); } else { newAttributes.addElement(newClass); } } // Create new header and modify instances Instances newTrainTransformed = new Instances(newTrain.relationName(), newAttributes, newTrain.numInstances()); newTrainTransformed.setClassIndex(newTrain.classIndex()); for (int i = 0; i < newTrain.numInstances(); i++) { Instance inst = newTrain.instance(i); newTrainTransformed.add(inst); newTrainTransformed.lastInstance(). 
setClassValue(m_OldIndexToNewIndex[(int)inst.classValue()]); } newTrain = newTrainTransformed; } // Store target values, in case a prediction interval or computation of median is required m_OriginalTargetValues = new double[instances.numInstances()]; m_NewTargetValues = new int[instances.numInstances()]; for (int i = 0; i < m_OriginalTargetValues.length; i++) { m_OriginalTargetValues[i] = instances.instance(i).classValue(); m_NewTargetValues[i] = (int)newTrain.instance(i).classValue(); } m_DiscretizedHeader = new Instances(newTrain, 0); int numClasses = newTrain.numClasses(); // Calculate the mean value for each bin of the new class attribute m_ClassMeans = new double [numClasses]; m_ClassCounts = new int [numClasses]; for (int i = 0; i < instances.numInstances(); i++) { Instance inst = newTrain.instance(i); if (!inst.classIsMissing()) { int classVal = (int) inst.classValue(); m_ClassCounts[classVal]++; m_ClassMeans[classVal] += instances.instance(i).classValue(); } } for (int i = 0; i < numClasses; i++) { if (m_ClassCounts[i] > 0) { m_ClassMeans[i] /= m_ClassCounts[i]; } } if (m_Debug) { System.out.println("Bin Means"); System.out.println("=========="); for (int i = 0; i < m_ClassMeans.length; i++) { System.out.println(m_ClassMeans[i]); } System.out.println(); } // Train the sub-classifier m_Classifier.buildClassifier(newTrain); } /** * Get density estimator for given instance. 
* * @param inst the instance * @return the univariate density estimator * @exception Exception if the estimator can't be computed */ protected UnivariateDensityEstimator getDensityEstimator(Instance instance, boolean print) throws Exception { // Initialize estimator UnivariateDensityEstimator e; if (m_estimatorType == ESTIMATOR_KERNEL) { e = new UnivariateKernelEstimator(); } else if (m_estimatorType == ESTIMATOR_NORMAL) { e = new UnivariateNormalEstimator(); } else { e = new UnivariateEqualFrequencyHistogramEstimator(); // Set the number of bins appropriately ((UnivariateEqualFrequencyHistogramEstimator)e).setNumBins(getNumBins()); // Initialize boundaries of equal frequency estimator for (int i = 0; i < m_OriginalTargetValues.length; i++) { e.addValue(m_OriginalTargetValues[i], 1.0); } // Construct estimator, then initialize statistics, so that only boundaries will be kept ((UnivariateEqualFrequencyHistogramEstimator)e).initializeStatistics(); // Now that boundaries have been determined, we only need to update the bin weights ((UnivariateEqualFrequencyHistogramEstimator)e).setUpdateWeightsOnly(true); } // Make sure structure of class attribute correct m_Discretizer.input(instance); m_Discretizer.batchFinished(); Instance newInstance = m_Discretizer.output();//(Instance)instance.copy(); if (m_OldIndexToNewIndex != null) { newInstance.setClassValue(m_OldIndexToNewIndex[(int)newInstance.classValue()]); } newInstance.setDataset(m_DiscretizedHeader); double [] probs = m_Classifier.distributionForInstance(newInstance); // Add values to estimator for (int i = 0; i < m_OriginalTargetValues.length; i++) { e.addValue(m_OriginalTargetValues[i], probs[m_NewTargetValues[i]] * m_OriginalTargetValues.length / m_ClassCounts[m_NewTargetValues[i]]); } // Return estimator return e; } /** * Returns an N * 2 array, where N is the number of prediction * intervals. 
In each row, the first element contains the lower * boundary of the corresponding prediction interval and the second * element the upper boundary. * * @param inst the instance to make the prediction for. * @param confidenceLevel the percentage of cases that the interval should cover. * @return an array of prediction intervals * @exception Exception if the intervals can't be computed */ public double[][] predictIntervals(Instance instance, double confidenceLevel) throws Exception { // Get density estimator UnivariateIntervalEstimator e = (UnivariateIntervalEstimator)getDensityEstimator(instance, false); // Return intervals return e.predictIntervals(confidenceLevel); } /** * Returns natural logarithm of density estimate for given value based on given instance. * * @param inst the instance to make the prediction for. * @param the value to make the prediction for. * @return the natural logarithm of the density estimate * @exception Exception if the intervals can't be computed */ public double logDensity(Instance instance, double value) throws Exception { // Get density estimator UnivariateDensityEstimator e = getDensityEstimator(instance, true); // Return estimate return e.logDensity(value); } /** * Returns a predicted class for the test instance. 
* * @param instance the instance to be classified * @return predicted class value * @throws Exception if the prediction couldn't be made */ public double classifyInstance(Instance instance) throws Exception { // Make sure structure of class attribute correct m_Discretizer.input(instance); m_Discretizer.batchFinished(); Instance newInstance = m_Discretizer.output();//(Instance)instance.copy(); if (m_OldIndexToNewIndex != null) { newInstance.setClassValue(m_OldIndexToNewIndex[(int)newInstance.classValue()]); } newInstance.setDataset(m_DiscretizedHeader); double [] probs = m_Classifier.distributionForInstance(newInstance); if (!m_MinimizeAbsoluteError) { // Compute actual prediction double prediction = 0, probSum = 0; for (int j = 0; j < probs.length; j++) { prediction += probs[j] * m_ClassMeans[j]; probSum += probs[j]; } return prediction / probSum; } else { // Get density estimator UnivariateQuantileEstimator e = (UnivariateQuantileEstimator)getDensityEstimator(instance, true); // Return estimate return e.predictQuantile(0.5); } } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector newVector = new Vector(5); newVector.addElement(new Option( "\tNumber of bins for equal-width discretization\n" + "\t(default 10).\n", "B", 1, "-B <int>")); newVector.addElement(new Option( "\tWhether to delete empty bins after discretization\n" + "\t(default false).\n", "E", 0, "-E")); newVector.addElement(new Option( "\tWhether to minimize absolute error, rather than squared error.\n" + "\t(default false).\n", "A", 0, "-A")); newVector.addElement(new Option( "\tUse equal-frequency instead of equal-width discretization.", "F", 0, "-F")); newVector.addElement(new Option( "\tWhat type of density estimator to use: 0=histogram/1=kernel/2=normal (default: 0).", "K", 1, "-K")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String binsString = Utils.getOption('B', options); if (binsString.length() != 0) { setNumBins(Integer.parseInt(binsString)); } else { setNumBins(10); } setDeleteEmptyBins(Utils.getFlag('E', options)); setUseEqualFrequency(Utils.getFlag('F', options)); setMinimizeAbsoluteError(Utils.getFlag('A', options)); String tmpStr = Utils.getOption('K', options); if (tmpStr.length() != 0) setEstimatorType(new SelectedTag(Integer.parseInt(tmpStr), TAGS_ESTIMATOR)); else setEstimatorType(new SelectedTag(ESTIMATOR_HISTOGRAM, TAGS_ESTIMATOR)); super.setOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 7]; int current = 0; options[current++] = "-B"; options[current++] = "" + getNumBins(); if (getDeleteEmptyBins()) { options[current++] = "-E"; } if (getUseEqualFrequency()) { options[current++] = "-F"; } if (getMinimizeAbsoluteError()) { options[current++] = "-A"; } options[current++] = "-K"; options[current++] = "" + m_estimatorType; System.arraycopy(superOptions, 0, options, current, superOptions.length); current += superOptions.length; while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numBinsTipText() { return "Number of bins for discretization."; } /** * Gets the number of bins numeric attributes will be divided into * * @return the number of bins. */ public int getNumBins() { return m_NumBins; } /** * Sets the number of bins to divide each selected numeric attribute into * * @param numBins the number of bins */ public void setNumBins(int numBins) { m_NumBins = numBins; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String deleteEmptyBinsTipText() { return "Whether to delete empty bins after discretization."; } /** * Gets whether empty bins are deleted. * * @return true if empty bins get deleted. */ public boolean getDeleteEmptyBins() { return m_DeleteEmptyBins; } /** * Sets whether to delete empty bins. 
* * @param b if true, empty bins will be deleted */ public void setDeleteEmptyBins(boolean b) { m_DeleteEmptyBins = b; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String minimizeAbsoluteErrorTipText() { return "Whether to minimize absolute error."; } /** * Gets whether to min. abs. error * * @return true if abs. err. is to be minimized */ public boolean getMinimizeAbsoluteError() { return m_MinimizeAbsoluteError; } /** * Sets whether to min. abs. error. * * @param b if true, abs. err. is minimized */ public void setMinimizeAbsoluteError(boolean b) { m_MinimizeAbsoluteError = b; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String useEqualFrequencyTipText() { return "If set to true, equal-frequency binning will be used instead of" + " equal-width binning."; } /** * Get the value of UseEqualFrequency. * * @return Value of UseEqualFrequency. */ public boolean getUseEqualFrequency() { return m_UseEqualFrequency; } /** * Set the value of UseEqualFrequency. * * @param newUseEqualFrequency Value to assign to UseEqualFrequency. 
*/ public void setUseEqualFrequency(boolean newUseEqualFrequency) { m_UseEqualFrequency = newUseEqualFrequency; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String estimatorTypeTipText() { return "The density estimator to use."; } /** * Get the estimator type * * @return the estimator type */ public SelectedTag getEstimatorType() { return new SelectedTag(m_estimatorType, TAGS_ESTIMATOR); } /** * Set the estimator * * @param newEstimator the estimator to use */ public void setEstimatorType(SelectedTag newEstimator) { if (newEstimator.getTags() == TAGS_ESTIMATOR) { m_estimatorType = newEstimator.getSelectedTag().getID(); } } /** * Returns a description of the classifier. * * @return a description of the classifier as a string. */ public String toString() { StringBuffer text = new StringBuffer(); text.append("Regression by discretization"); if (m_ClassMeans == null) { text.append(": No model built yet."); } else { text.append("\n\nClass attribute discretized into " + m_ClassMeans.length + " values\n"); text.append("\nClassifier spec: " + getClassifierSpec() + "\n"); text.append(m_Classifier.toString()); } return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new RegressionByDiscretization(), argv); } }
24,893
29.358537
282
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/RotationForest.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * RotationForest.java * Copyright (C) 2008 Juan Jose Rodriguez * Copyright (C) 2008 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import weka.classifiers.RandomizableIteratedSingleClassifierEnhancer; import weka.core.Attribute; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Randomizable; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.WeightedInstancesHandler; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.PrincipalComponents; import weka.filters.unsupervised.attribute.RemoveUseless; import weka.filters.unsupervised.instance.RemovePercentage; import java.util.Enumeration; import java.util.LinkedList; import java.util.Random; import java.util.Vector; import weka.core.DenseInstance; /** <!-- globalinfo-start --> * Class for construction a Rotation Forest. Can do classification and regression depending on the base learner. 
<br/> * <br/> * For more information, see<br/> * <br/> * Juan J. Rodriguez, Ludmila I. Kuncheva, Carlos J. Alonso (2006). Rotation Forest: A new classifier ensemble method. IEEE Transactions on Pattern Analysis and Machine Intelligence. 28(10):1619-1630. URL http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.211. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;article{Rodriguez2006, * author = {Juan J. Rodriguez and Ludmila I. Kuncheva and Carlos J. Alonso}, * journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, * number = {10}, * pages = {1619-1630}, * title = {Rotation Forest: A new classifier ensemble method}, * volume = {28}, * year = {2006}, * ISSN = {0162-8828}, * URL = {http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.211} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N * Whether minGroup (-G) and maxGroup (-H) refer to * the number of groups or their size. * (default: false)</pre> * * <pre> -G &lt;num&gt; * Minimum size of a group of attributes: * if numberOfGroups is true, the minimum number * of groups. * (default: 3)</pre> * * <pre> -H &lt;num&gt; * Maximum size of a group of attributes: * if numberOfGroups is true, the maximum number * of groups. * (default: 3)</pre> * * <pre> -P &lt;num&gt; * Percentage of instances to be removed. * (default: 50)</pre> * * <pre> -F &lt;filter specification&gt; * Full class name of filter to use, followed * by filter options. * eg: "weka.filters.unsupervised.attribute.PrincipalComponents-R 1.0"</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Number of iterations. * (default 10)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.trees.J48)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> -U * Use unpruned tree.</pre> * * <pre> -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25)</pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2)</pre> * * <pre> -R * Use reduced error pruning.</pre> * * <pre> -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. * (default 3)</pre> * * <pre> -B * Use binary splits only.</pre> * * <pre> -S * Don't perform subtree raising.</pre> * * <pre> -L * Do not clean up after the tree has been built.</pre> * * <pre> -A * Laplace smoothing for predicted probabilities.</pre> * * <pre> -Q &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * <!-- options-end --> * * @author Juan Jose Rodriguez (jjrodriguez@ubu.es) * @version $Revision: 7012 $ */ public class RotationForest extends RandomizableIteratedSingleClassifierEnhancer implements WeightedInstancesHandler, TechnicalInformationHandler { // It implements WeightedInstancesHandler because the base classifier // can implement this interface, but in this method the weights are // not used /** for serialization */ static final long serialVersionUID = -3255631880798499936L; /** The minimum size of a group */ protected int m_MinGroup = 3; /** The maximum size of a group */ protected int m_MaxGroup = 3; /** * Whether minGroup and maxGroup refer to the number of groups or their * size */ protected boolean m_NumberOfGroups = false; /** The percentage of instances to be removed */ protected int m_RemovedPercentage = 50; /** The attributes of each group */ protected int [][][] m_Groups = null; /** The type of projection filter */ protected Filter m_ProjectionFilter = null; /** The projection filters */ protected Filter [][] m_ProjectionFilters = null; /** Headers of the transformed dataset */ 
protected Instances [] m_Headers = null; /** Headers of the reduced datasets */ protected Instances [][] m_ReducedHeaders = null; /** Filter that remove useless attributes */ protected RemoveUseless m_RemoveUseless = null; /** Filter that normalized the attributes */ protected Normalize m_Normalize = null; /** * Constructor. */ public RotationForest() { m_Classifier = new weka.classifiers.trees.J48(); m_ProjectionFilter = defaultFilter(); } /** * Default projection method. */ protected Filter defaultFilter() { PrincipalComponents filter = new PrincipalComponents(); //filter.setNormalize(false); filter.setVarianceCovered(1.0); return filter; } /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for construction a Rotation Forest. Can do classification " + "and regression depending on the base learner. \n\n" + "For more information, see\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Juan J. Rodriguez and Ludmila I. Kuncheva and Carlos J. 
Alonso"); result.setValue(Field.YEAR, "2006"); result.setValue(Field.TITLE, "Rotation Forest: A new classifier ensemble method"); result.setValue(Field.JOURNAL, "IEEE Transactions on Pattern Analysis and Machine Intelligence"); result.setValue(Field.VOLUME, "28"); result.setValue(Field.NUMBER, "10"); result.setValue(Field.PAGES, "1619-1630"); result.setValue(Field.ISSN, "0162-8828"); result.setValue(Field.URL, "http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.211"); return result; } /** * String describing default classifier. * * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.trees.J48"; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(5); newVector.addElement(new Option( "\tWhether minGroup (-G) and maxGroup (-H) refer to" + "\n\tthe number of groups or their size." + "\n\t(default: false)", "N", 0, "-N")); newVector.addElement(new Option( "\tMinimum size of a group of attributes:" + "\n\t\tif numberOfGroups is true, the minimum number" + "\n\t\tof groups." + "\n\t\t(default: 3)", "G", 1, "-G <num>")); newVector.addElement(new Option( "\tMaximum size of a group of attributes:" + "\n\t\tif numberOfGroups is true, the maximum number" + "\n\t\tof groups." + "\n\t\t(default: 3)", "H", 1, "-H <num>")); newVector.addElement(new Option( "\tPercentage of instances to be removed." + "\n\t\t(default: 50)", "P", 1, "-P <num>")); newVector.addElement(new Option( "\tFull class name of filter to use, followed\n" + "\tby filter options.\n" + "\teg: \"weka.filters.unsupervised.attribute.PrincipalComponents-R 1.0\"", "F", 1, "-F <filter specification>")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N * Whether minGroup (-G) and maxGroup (-H) refer to * the number of groups or their size. * (default: false)</pre> * * <pre> -G &lt;num&gt; * Minimum size of a group of attributes: * if numberOfGroups is true, the minimum number * of groups. * (default: 3)</pre> * * <pre> -H &lt;num&gt; * Maximum size of a group of attributes: * if numberOfGroups is true, the maximum number * of groups. * (default: 3)</pre> * * <pre> -P &lt;num&gt; * Percentage of instances to be removed. * (default: 50)</pre> * * <pre> -F &lt;filter specification&gt; * Full class name of filter to use, followed * by filter options. * eg: "weka.filters.unsupervised.attribute.PrincipalComponents-R 1.0"</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -I &lt;num&gt; * Number of iterations. * (default 10)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.trees.J48)</pre> * * <pre> * Options specific to classifier weka.classifiers.trees.J48: * </pre> * * <pre> -U * Use unpruned tree.</pre> * * <pre> -C &lt;pruning confidence&gt; * Set confidence threshold for pruning. * (default 0.25)</pre> * * <pre> -M &lt;minimum number of instances&gt; * Set minimum number of instances per leaf. * (default 2)</pre> * * <pre> -R * Use reduced error pruning.</pre> * * <pre> -N &lt;number of folds&gt; * Set number of folds for reduced error * pruning. One fold is used as pruning set. 
* (default 3)</pre> * * <pre> -B * Use binary splits only.</pre> * * <pre> -S * Don't perform subtree raising.</pre> * * <pre> -L * Do not clean up after the tree has been built.</pre> * * <pre> -A * Laplace smoothing for predicted probabilities.</pre> * * <pre> -Q &lt;seed&gt; * Seed for random data shuffling (default 1).</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { /* Taken from FilteredClassifier */ String filterString = Utils.getOption('F', options); if (filterString.length() > 0) { String [] filterSpec = Utils.splitOptions(filterString); if (filterSpec.length == 0) { throw new IllegalArgumentException("Invalid filter specification string"); } String filterName = filterSpec[0]; filterSpec[0] = ""; setProjectionFilter((Filter) Utils.forName(Filter.class, filterName, filterSpec)); } else { setProjectionFilter(defaultFilter()); } String tmpStr; tmpStr = Utils.getOption('G', options); if (tmpStr.length() != 0) setMinGroup(Integer.parseInt(tmpStr)); else setMinGroup(3); tmpStr = Utils.getOption('H', options); if (tmpStr.length() != 0) setMaxGroup(Integer.parseInt(tmpStr)); else setMaxGroup(3); tmpStr = Utils.getOption('P', options); if (tmpStr.length() != 0) setRemovedPercentage(Integer.parseInt(tmpStr)); else setRemovedPercentage(50); setNumberOfGroups(Utils.getFlag('N', options)); super.setOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 9]; int current = 0; if (getNumberOfGroups()) { options[current++] = "-N"; } options[current++] = "-G"; options[current++] = "" + getMinGroup(); options[current++] = "-H"; options[current++] = "" + getMaxGroup(); options[current++] = "-P"; options[current++] = "" + getRemovedPercentage(); options[current++] = "-F"; options[current++] = getProjectionFilterSpec(); System.arraycopy(superOptions, 0, options, current, superOptions.length); current += superOptions.length; while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numberOfGroupsTipText() { return "Whether minGroup and maxGroup refer to the number of groups or their size."; } /** * Set whether minGroup and maxGroup refer to the number of groups or their * size * * @param numberOfGroups whether minGroup and maxGroup refer to the number * of groups or their size */ public void setNumberOfGroups(boolean numberOfGroups) { m_NumberOfGroups = numberOfGroups; } /** * Get whether minGroup and maxGroup refer to the number of groups or their * size * * @return whether minGroup and maxGroup refer to the number of groups or * their size */ public boolean getNumberOfGroups() { return m_NumberOfGroups; } /** * Returns the tip text for this property * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String minGroupTipText() { return "Minimum size of a group (if numberOfGrups is true, the minimum number of groups."; } /** * Sets the minimum size of a group. * * @param minGroup the minimum value. * of attributes. 
*/ public void setMinGroup( int minGroup ) throws IllegalArgumentException { if( minGroup <= 0 ) throw new IllegalArgumentException( "MinGroup has to be positive." ); m_MinGroup = minGroup; } /** * Gets the minimum size of a group. * * @return the minimum value. */ public int getMinGroup() { return m_MinGroup; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String maxGroupTipText() { return "Maximum size of a group (if numberOfGrups is true, the maximum number of groups."; } /** * Sets the maximum size of a group. * * @param maxGroup the maximum value. * of attributes. */ public void setMaxGroup( int maxGroup ) throws IllegalArgumentException { if( maxGroup <= 0 ) throw new IllegalArgumentException( "MaxGroup has to be positive." ); m_MaxGroup = maxGroup; } /** * Gets the maximum size of a group. * * @return the maximum value. */ public int getMaxGroup() { return m_MaxGroup; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String removedPercentageTipText() { return "The percentage of instances to be removed."; } /** * Sets the percentage of instance to be removed * * @param removedPercentage the percentage. */ public void setRemovedPercentage( int removedPercentage ) throws IllegalArgumentException { if( removedPercentage < 0 ) throw new IllegalArgumentException( "RemovedPercentage has to be >=0." ); if( removedPercentage >= 100 ) throw new IllegalArgumentException( "RemovedPercentage has to be <100." ); m_RemovedPercentage = removedPercentage; } /** * Gets the percentage of instances to be removed * * @return the percentage. 
 */
public int getRemovedPercentage() {
  return m_RemovedPercentage;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String projectionFilterTipText() {
  return "The filter used to project the data (e.g., PrincipalComponents).";
}

/**
 * Sets the filter used to project the data.
 *
 * @param projectionFilter the filter.
 */
public void setProjectionFilter( Filter projectionFilter ) {
  m_ProjectionFilter = projectionFilter;
}

/**
 * Gets the filter used to project the data.
 *
 * @return the filter.
 */
public Filter getProjectionFilter() {
  return m_ProjectionFilter;
}

/**
 * Gets the filter specification string, which contains the class name of
 * the filter and any options to the filter
 *
 * @return the filter string.
 */
/* Taken from FilteredClassifier */
protected String getProjectionFilterSpec() {
  Filter c = getProjectionFilter();
  if (c instanceof OptionHandler) {
    return c.getClass().getName() + " "
      + Utils.joinOptions(((OptionHandler)c).getOptions());
  }
  return c.getClass().getName();
}

/**
 * Returns description of the Rotation Forest classifier.
 *
 * @return description of the Rotation Forest classifier as a string
 */
public String toString() {
  if (m_Classifiers == null) {
    return "RotationForest: No model built yet.";
  }
  StringBuffer text = new StringBuffer();
  text.append("All the base classifiers: \n\n");
  for (int i = 0; i < m_Classifiers.length; i++)
    text.append(m_Classifiers[i].toString() + "\n\n");
  return text.toString();
}

/**
 * Returns the revision string.
 *
 * @return the revision
 */
public String getRevision() {
  return RevisionUtils.extract("$Revision: 7012 $");
}

/**
 * builds the classifier.
 *
 * @param data the training data to be used for generating the
 * classifier.
 * @throws Exception if the classifier could not be built successfully
 */
public void buildClassifier(Instances data) throws Exception {

  // can classifier handle the data?
  getCapabilities().testWithFail(data);

  // Work on a copy: the data is filtered and randomized below.
  data = new Instances( data );
  super.buildClassifier(data);
  checkMinMax(data);

  Random random;
  if( data.numInstances() > 0 ) {
    // This function fails if there are 0 instances
    random = data.getRandomNumberGenerator(m_Seed);
  }
  else {
    random = new Random(m_Seed);
  }

  m_RemoveUseless = new RemoveUseless();
  m_RemoveUseless.setInputFormat(data);
  data = Filter.useFilter(data, m_RemoveUseless);

  m_Normalize = new Normalize();
  m_Normalize.setInputFormat(data);
  data = Filter.useFilter(data, m_Normalize);

  // Build one random attribute partition per base classifier.
  if(m_NumberOfGroups) {
    generateGroupsFromNumbers(data, random);
  }
  else {
    generateGroupsFromSizes(data, random);
  }

  // One projection filter per attribute group of each base classifier.
  m_ProjectionFilters = new Filter[m_Groups.length][];
  for(int i = 0; i < m_ProjectionFilters.length; i++ ) {
    m_ProjectionFilters[i] = Filter.makeCopies( m_ProjectionFilter,
        m_Groups[i].length );
  }

  int numClasses = data.numClasses();

  // Split the instances according to their class
  Instances [] instancesOfClass = new Instances[numClasses + 1];
  if( data.classAttribute().isNumeric() ) {
    instancesOfClass = new Instances[numClasses];
    instancesOfClass[0] = data;
  }
  else {
    // The extra last slot collects instances with a missing class value.
    instancesOfClass = new Instances[numClasses+1];
    for( int i = 0; i < instancesOfClass.length; i++ ) {
      instancesOfClass[ i ] = new Instances( data, 0 );
    }
    Enumeration enu = data.enumerateInstances();
    while( enu.hasMoreElements() ) {
      Instance instance = (Instance)enu.nextElement();
      if( instance.classIsMissing() ) {
        instancesOfClass[numClasses].add( instance );
      }
      else {
        int c = (int)instance.classValue();
        instancesOfClass[c].add( instance );
      }
    }
    // If there are no instances with a missing class, we do not need to
    // consider them
    if( instancesOfClass[numClasses].numInstances() == 0 ) {
      Instances [] tmp = instancesOfClass;
      instancesOfClass = new Instances[ numClasses ];
      System.arraycopy( tmp, 0, instancesOfClass, 0, numClasses );
    }
  }

  // These arrays keep the information of the transformed data set
  m_Headers = new Instances[ m_Classifiers.length ];
  m_ReducedHeaders = new Instances[ m_Classifiers.length ][];

  // Construction of the base classifiers
  for(int i = 0; i < m_Classifiers.length; i++) {
    m_ReducedHeaders[i] = new Instances[ m_Groups[i].length ];
    FastVector transformedAttributes = new FastVector( data.numAttributes() );

    // Construction of the dataset for each group of attributes
    for( int j = 0; j < m_Groups[ i ].length; j++ ) {
      FastVector fv = new FastVector( m_Groups[i][j].length + 1 );
      for( int k = 0; k < m_Groups[i][j].length; k++ ) {
        String newName = data.attribute( m_Groups[i][j][k] ).name()
          + "_" + k;
        fv.addElement( data.attribute( m_Groups[i][j][k] ).copy(newName) );
      }
      fv.addElement( data.classAttribute( ).copy() );
      Instances dataSubSet = new Instances( "rotated-" + i + "-" + j + "-",
          fv, 0);
      dataSubSet.setClassIndex( dataSubSet.numAttributes() - 1 );

      // Select instances for the dataset
      m_ReducedHeaders[i][j] = new Instances( dataSubSet, 0 );
      boolean [] selectedClasses = selectClasses( instancesOfClass.length,
          random );
      for( int c = 0; c < selectedClasses.length; c++ ) {
        if( !selectedClasses[c] )
          continue;
        Enumeration enu = instancesOfClass[c].enumerateInstances();
        while( enu.hasMoreElements() ) {
          Instance instance = (Instance)enu.nextElement();
          Instance newInstance = new DenseInstance(dataSubSet.numAttributes());
          newInstance.setDataset( dataSubSet );
          for( int k = 0; k < m_Groups[i][j].length; k++ ) {
            newInstance.setValue( k, instance.value( m_Groups[i][j][k] ) );
          }
          newInstance.setClassValue( instance.classValue( ) );
          dataSubSet.add( newInstance );
        }
      }
      dataSubSet.randomize(random);

      // Remove a percentage of the instances
      Instances originalDataSubSet = dataSubSet;
      dataSubSet.randomize(random);
      RemovePercentage rp = new RemovePercentage();
      rp.setPercentage( m_RemovedPercentage );
      rp.setInputFormat( dataSubSet );
      dataSubSet = Filter.useFilter( dataSubSet, rp );
      if( dataSubSet.numInstances() < 2 ) {
        // Too few instances survived the removal; fall back to the full set.
        dataSubSet = originalDataSubSet;
      }

      // Project the data
      m_ProjectionFilters[i][j].setInputFormat( dataSubSet );
      Instances projectedData = null;
      do {
        try {
          projectedData = Filter.useFilter( dataSubSet,
              m_ProjectionFilters[i][j] );
        } catch ( Exception e ) {
          // The data could not be projected, we add some random instances
          addRandomInstances( dataSubSet, 10, random );
        }
      } while( projectedData == null );

      // Include the projected attributes in the attributes of the
      // transformed dataset
      for( int a = 0; a < projectedData.numAttributes() - 1; a++ ) {
        String newName = projectedData.attribute(a).name() + "_" + j;
        transformedAttributes.addElement(
            projectedData.attribute(a).copy(newName));
      }
    }

    transformedAttributes.addElement( data.classAttribute().copy() );
    Instances buildClas = new Instances( "rotated-" + i + "-",
        transformedAttributes, 0 );
    buildClas.setClassIndex( buildClas.numAttributes() - 1 );
    m_Headers[ i ] = new Instances( buildClas, 0 );

    // Project all the training data
    Enumeration enu = data.enumerateInstances();
    while( enu.hasMoreElements() ) {
      Instance instance = (Instance)enu.nextElement();
      Instance newInstance = convertInstance( instance, i );
      buildClas.add( newInstance );
    }

    // Build the base classifier
    if (m_Classifier instanceof Randomizable) {
      ((Randomizable) m_Classifiers[i]).setSeed(random.nextInt());
    }
    m_Classifiers[i].buildClassifier( buildClas );
  }

  if(m_Debug){
    printGroups();
  }
}

/**
 * Adds random instances to the dataset.
 *
 * @param dataset the dataset
 * @param numInstances the number of instances
 * @param random a random number generator
 */
protected void addRandomInstances( Instances dataset, int numInstances,
                                   Random random ) {
  int n = dataset.numAttributes();
  double [] v = new double[ n ];
  for( int i = 0; i < numInstances; i++ ) {
    for( int j = 0; j < n; j++ ) {
      Attribute att = dataset.attribute( j );
      if( att.isNumeric() ) {
        v[ j ] = random.nextDouble();
      }
      else if ( att.isNominal() ) {
        v[ j ] = random.nextInt( att.numValues() );
      }
    }
    dataset.add( new DenseInstance( 1, v ) );
  }
}

/**
 * Checks m_MinGroup and m_MaxGroup
 *
 * @param data the dataset
 */
protected void checkMinMax(Instances data) {
  // Swap if min > max so the range is always valid.
  if( m_MinGroup > m_MaxGroup ) {
    int tmp = m_MaxGroup;
    m_MaxGroup = m_MinGroup;
    m_MinGroup = tmp;
  }

  // Neither bound may reach the total number of attributes.
  int n = data.numAttributes();
  if( m_MaxGroup >= n )
    m_MaxGroup = n - 1;
  if( m_MinGroup >= n )
    m_MinGroup = n - 1;
}

/**
 * Selects a non-empty subset of the classes
 *
 * @param numClasses the number of classes
 * @param random the random number generator.
 * @return a random subset of classes
 */
protected boolean [] selectClasses( int numClasses, Random random ) {

  int numSelected = 0;
  boolean selected[] = new boolean[ numClasses ];

  for( int i = 0; i < selected.length; i++ ) {
    if(random.nextBoolean()) {
      selected[i] = true;
      numSelected++;
    }
  }
  if( numSelected == 0 ) {
    // Guarantee the subset is non-empty.
    selected[random.nextInt( selected.length )] = true;
  }
  return selected;
}

/**
 * generates the groups of attributes, given their minimum and maximum
 * sizes.
 *
 * @param data the training data to be used for generating the
 * groups.
 * @param random the random number generator.
 */
protected void generateGroupsFromSizes(Instances data, Random random) {
  m_Groups = new int[m_Classifiers.length][][];
  for( int i = 0; i < m_Classifiers.length; i++ ) {
    int [] permutation = attributesPermutation(data.numAttributes(),
        data.classIndex(), random);

    // The number of groups that have a given size
    int [] numGroupsOfSize = new int[m_MaxGroup - m_MinGroup + 1];

    int numAttributes = 0;
    int numGroups;

    // Select the size of each group
    for( numGroups = 0; numAttributes < permutation.length; numGroups++ ) {
      int n = random.nextInt( numGroupsOfSize.length );
      numGroupsOfSize[n]++;
      numAttributes += m_MinGroup + n;
    }

    m_Groups[i] = new int[numGroups][];
    int currentAttribute = 0;
    int currentSize = 0;
    for( int j = 0; j < numGroups; j++ ) {
      while( numGroupsOfSize[ currentSize ] == 0 )
        currentSize++;
      numGroupsOfSize[ currentSize ]--;

      int n = m_MinGroup + currentSize;
      m_Groups[i][j] = new int[n];
      for( int k = 0; k < n; k++ ) {
        if( currentAttribute < permutation.length )
          m_Groups[i][j][k] = permutation[ currentAttribute ];
        else
          // For the last group, it can be necessary to reuse some attributes
          m_Groups[i][j][k] = permutation[ random.nextInt(
              permutation.length ) ];
        currentAttribute++;
      }
    }
  }
}

/**
 * generates the groups of attributes, given their minimum and maximum
 * numbers.
 *
 * @param data the training data to be used for generating the
 * groups.
 * @param random the random number generator.
 */
protected void generateGroupsFromNumbers(Instances data, Random random) {
  m_Groups = new int[m_Classifiers.length][][];
  for( int i = 0; i < m_Classifiers.length; i++ ) {
    int [] permutation = attributesPermutation(data.numAttributes(),
        data.classIndex(), random);
    int numGroups = m_MinGroup + random.nextInt(m_MaxGroup - m_MinGroup + 1);
    m_Groups[i] = new int[numGroups][];
    int groupSize = permutation.length / numGroups;

    // Some groups will have an additional attribute
    int numBiggerGroups = permutation.length % numGroups;

    // Distribute the attributes in the groups
    int currentAttribute = 0;
    for( int j = 0; j < numGroups; j++ ) {
      if( j < numBiggerGroups ) {
        m_Groups[i][j] = new int[groupSize + 1];
      }
      else {
        m_Groups[i][j] = new int[groupSize];
      }
      for( int k = 0; k < m_Groups[i][j].length; k++ ) {
        m_Groups[i][j][k] = permutation[currentAttribute++];
      }
    }
  }
}

/**
 * generates a permutation of the attributes (the class attribute is
 * excluded).
 *
 * @param numAttributes the number of attributes.
 * @param classAttribute the index of the class attribute.
 * @param random the random number generator.
 * @return a permutation of the attributes
 */
protected int [] attributesPermutation(int numAttributes, int classAttribute,
                                       Random random) {
  int [] permutation = new int[numAttributes-1];
  int i = 0;
  // Fill with all attribute indices, skipping the class attribute.
  for(; i < classAttribute; i++){
    permutation[i] = i;
  }
  for(; i < permutation.length; i++){
    permutation[i] = i + 1;
  }

  permute( permutation, random );

  return permutation;
}

/**
 * permutes the elements of a given array.
 *
 * @param v the array to permute
 * @param random the random number generator.
 */
protected void permute( int v[], Random random ) {

  // Fisher-Yates shuffle.
  for(int i = v.length - 1; i > 0; i-- ) {
    int j = random.nextInt( i + 1 );
    if( i != j ) {
      int tmp = v[i];
      v[i] = v[j];
      v[j] = tmp;
    }
  }
}

/**
 * prints the groups.
 */
protected void printGroups( ) {
  for( int i = 0; i < m_Groups.length; i++ ) {
    for( int j = 0; j < m_Groups[i].length; j++ ) {
      System.err.print( "( " );
      for( int k = 0; k < m_Groups[i][j].length; k++ ) {
        System.err.print( m_Groups[i][j][k] );
        System.err.print( " " );
      }
      System.err.print( ") " );
    }
    System.err.println( );
  }
}

/**
 * Transforms an instance for the i-th classifier.
 *
 * @param instance the instance to be transformed
 * @param i the base classifier number
 * @return the transformed instance
 * @throws Exception if the instance can't be converted successfully
 */
protected Instance convertInstance( Instance instance, int i )
  throws Exception {

  Instance newInstance = new DenseInstance( m_Headers[ i ].numAttributes( ) );
  newInstance.setWeight(instance.weight());
  newInstance.setDataset( m_Headers[ i ] );
  int currentAttribute = 0;

  // Project the data for each group
  for( int j = 0; j < m_Groups[i].length; j++ ) {
    Instance auxInstance = new DenseInstance( m_Groups[i][j].length + 1 );
    int k;
    for( k = 0; k < m_Groups[i][j].length; k++ ) {
      auxInstance.setValue( k, instance.value( m_Groups[i][j][k] ) );
    }
    auxInstance.setValue( k, instance.classValue( ) );
    auxInstance.setDataset( m_ReducedHeaders[ i ][ j ] );

    m_ProjectionFilters[i][j].input( auxInstance );
    auxInstance = m_ProjectionFilters[i][j].output( );
    m_ProjectionFilters[i][j].batchFinished();

    for( int a = 0; a < auxInstance.numAttributes() - 1; a++ ) {
      newInstance.setValue( currentAttribute++, auxInstance.value( a ) );
    }
  }

  newInstance.setClassValue( instance.classValue() );
  return newInstance;
}

/**
 * Calculates the class membership probabilities for the given test
 * instance.
 *
 * @param instance the instance to be classified
 * @return predicted class probability distribution
 * @throws Exception if distribution can't be computed successfully
 */
public double[] distributionForInstance(Instance instance) throws Exception {

  // Apply the same preprocessing filters that were fitted at training time.
  m_RemoveUseless.input(instance);
  instance = m_RemoveUseless.output();
  m_RemoveUseless.batchFinished();

  m_Normalize.input(instance);
  instance = m_Normalize.output();
  m_Normalize.batchFinished();

  double [] sums = new double [instance.numClasses()], newProbs;

  for (int i = 0; i < m_Classifiers.length; i++) {
    Instance convertedInstance = convertInstance(instance, i);
    if (instance.classAttribute().isNumeric() == true) {
      sums[0] += m_Classifiers[i].classifyInstance(convertedInstance);
    } else {
      newProbs = m_Classifiers[i].distributionForInstance(convertedInstance);
      for (int j = 0; j < newProbs.length; j++)
        sums[j] += newProbs[j];
    }
  }
  if (instance.classAttribute().isNumeric() == true) {
    // Regression: average the ensemble members' predictions.
    sums[0] /= (double)m_NumIterations;
    return sums;
  } else if (Utils.eq(Utils.sum(sums), 0)) {
    return sums;
  } else {
    Utils.normalize(sums);
    return sums;
  }
}

/**
 * Main method for testing this class.
 *
 * @param argv the options
 */
public static void main(String [] argv) {
  runClassifier(new RotationForest(), argv);
}
}
35,688
29.270568
263
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/Stacking.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Stacking.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import java.util.ArrayList; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.RandomizableParallelMultipleClassifiersCombiner; import weka.classifiers.rules.ZeroR; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.DenseInstance; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** <!-- globalinfo-start --> * Combines several classifiers using the stacking method. Can do classification or regression.<br/> * <br/> * For more information, see<br/> * <br/> * David H. Wolpert (1992). Stacked generalization. Neural Networks. 5:241-259. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;article{Wolpert1992, * author = {David H. 
Wolpert}, * journal = {Neural Networks}, * pages = {241-259}, * publisher = {Pergamon Press}, * title = {Stacked generalization}, * volume = {5}, * year = {1992} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M &lt;scheme specification&gt; * Full name of meta classifier, followed by options. * (default: "weka.classifiers.rules.Zero")</pre> * * <pre> -X &lt;number of folds&gt; * Sets the number of cross-validation folds.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -B &lt;classifier specification&gt; * Full class name of classifier to include, followed * by scheme options. May be specified multiple times. * (default: "weka.classifiers.rules.ZeroR")</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class Stacking extends RandomizableParallelMultipleClassifiersCombiner implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 5134738557155845452L; /** The meta classifier */ protected Classifier m_MetaClassifier = new ZeroR(); /** Format for meta data */ protected Instances m_MetaFormat = null; /** Format for base data */ protected Instances m_BaseFormat = null; /** Set the number of folds for the cross-validation */ protected int m_NumFolds = 10; /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Combines several classifiers using the stacking method. 
" +
    "Can do classification or regression.\n\n" +
    "For more information, see\n\n" +
    getTechnicalInformation().toString();
}

/**
 * Returns an instance of a TechnicalInformation object, containing
 * detailed information about the technical background of this class,
 * e.g., paper reference or book this class is based on.
 *
 * @return the technical information about this class
 */
public TechnicalInformation getTechnicalInformation() {
  TechnicalInformation result;

  result = new TechnicalInformation(Type.ARTICLE);
  result.setValue(Field.AUTHOR, "David H. Wolpert");
  result.setValue(Field.YEAR, "1992");
  result.setValue(Field.TITLE, "Stacked generalization");
  result.setValue(Field.JOURNAL, "Neural Networks");
  result.setValue(Field.VOLUME, "5");
  result.setValue(Field.PAGES, "241-259");
  result.setValue(Field.PUBLISHER, "Pergamon Press");

  return result;
}

/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
public Enumeration listOptions() {

  Vector newVector = new Vector(2);
  newVector.addElement(new Option(
      metaOption(),
      "M", 0, "-M <scheme specification>"));
  newVector.addElement(new Option(
      "\tSets the number of cross-validation folds.",
      "X", 1, "-X <number of folds>"));

  Enumeration enu = super.listOptions();
  while (enu.hasMoreElements()) {
    newVector.addElement(enu.nextElement());
  }
  return newVector.elements();
}

/**
 * String describing option for setting meta classifier
 *
 * @return the string describing the option
 */
protected String metaOption() {

  return "\tFull name of meta classifier, followed by options.\n"
    + "\t(default: \"weka.classifiers.rules.Zero\")";
}

/**
 * Parses a given list of options. <p/>
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -M &lt;scheme specification&gt;
 *  Full name of meta classifier, followed by options.
 *  (default: "weka.classifiers.rules.Zero")</pre>
 *
 * <pre> -X &lt;number of folds&gt;
 *  Sets the number of cross-validation folds.</pre>
 *
 * <pre> -S &lt;num&gt;
 *  Random number seed.
 *  (default 1)</pre>
 *
 * <pre> -B &lt;classifier specification&gt;
 *  Full class name of classifier to include, followed
 *  by scheme options. May be specified multiple times.
 *  (default: "weka.classifiers.rules.ZeroR")</pre>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 <!-- options-end -->
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
public void setOptions(String[] options) throws Exception {

  String numFoldsString = Utils.getOption('X', options);
  if (numFoldsString.length() != 0) {
    setNumFolds(Integer.parseInt(numFoldsString));
  } else {
    // Default number of cross-validation folds.
    setNumFolds(10);
  }
  processMetaOptions(options);
  super.setOptions(options);
}

/**
 * Process options setting meta classifier.
 *
 * @param options the options to parse
 * @throws Exception if the parsing fails
 */
protected void processMetaOptions(String[] options) throws Exception {

  String classifierString = Utils.getOption('M', options);
  String [] classifierSpec = Utils.splitOptions(classifierString);
  String classifierName;
  if (classifierSpec.length == 0) {
    // No -M option given: fall back to ZeroR as meta classifier.
    classifierName = "weka.classifiers.rules.ZeroR";
  } else {
    classifierName = classifierSpec[0];
    classifierSpec[0] = "";
  }
  setMetaClassifier(AbstractClassifier.forName(classifierName, classifierSpec));
}

/**
 * Gets the current settings of the Classifier.
* * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 4]; int current = 0; options[current++] = "-X"; options[current++] = "" + getNumFolds(); options[current++] = "-M"; options[current++] = getMetaClassifier().getClass().getName() + " " + Utils.joinOptions(((OptionHandler)getMetaClassifier()).getOptions()); System.arraycopy(superOptions, 0, options, current, superOptions.length); return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numFoldsTipText() { return "The number of folds used for cross-validation."; } /** * Gets the number of folds for the cross-validation. * * @return the number of folds for the cross-validation */ public int getNumFolds() { return m_NumFolds; } /** * Sets the number of folds for the cross-validation. * * @param numFolds the number of folds for the cross-validation * @throws Exception if parameter illegal */ public void setNumFolds(int numFolds) throws Exception { if (numFolds < 0) { throw new IllegalArgumentException("Stacking: Number of cross-validation " + "folds must be positive."); } m_NumFolds = numFolds; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String metaClassifierTipText() { return "The meta classifiers to be used."; } /** * Adds meta classifier * * @param classifier the classifier with all options set. */ public void setMetaClassifier(Classifier classifier) { m_MetaClassifier = classifier; } /** * Gets the meta classifier. * * @return the meta classifier */ public Classifier getMetaClassifier() { return m_MetaClassifier; } /** * Returns combined capabilities of the base classifiers, i.e., the * capabilities all of them have in common. 
 *
 * @return the capabilities of the base classifiers
 */
public Capabilities getCapabilities() {
  Capabilities result;

  result = super.getCapabilities();
  // Cross-validation requires at least as many instances as folds.
  result.setMinimumNumberInstances(getNumFolds());

  return result;
}

/**
 * Buildclassifier selects a classifier from the set of classifiers
 * by minimising error on the training data.
 *
 * @param data the training data to be used for generating the
 * boosted classifier.
 * @throws Exception if the classifier could not be built successfully
 */
public void buildClassifier(Instances data) throws Exception {

  if (m_MetaClassifier == null) {
    throw new IllegalArgumentException("No meta classifier has been set");
  }

  // can classifier handle the data?
  getCapabilities().testWithFail(data);

  // remove instances with missing class
  Instances newData = new Instances(data);
  m_BaseFormat = new Instances(data, 0);
  newData.deleteWithMissingClass();

  Random random = new Random(m_Seed);
  newData.randomize(random);
  if (newData.classAttribute().isNominal()) {
    newData.stratify(m_NumFolds);
  }

  // Create meta level
  generateMetaLevel(newData, random);

  // restart the executor pool because at the end of processing
  // a set of classifiers it gets shutdown to prevent the program
  // executing as a server
  super.buildClassifier(newData);

  // Rebuild all the base classifiers on the full training data
  buildClassifiers(newData);
}

/**
 * Generates the meta data
 *
 * @param newData the data to work on
 * @param random the random number generator to use for cross-validation
 * @throws Exception if generation fails
 */
protected void generateMetaLevel(Instances newData, Random random)
  throws Exception {

  Instances metaData = metaFormat(newData);
  m_MetaFormat = new Instances(metaData, 0);
  for (int j = 0; j < m_NumFolds; j++) {
    Instances train = newData.trainCV(m_NumFolds, j, random);

    // start the executor pool (if necessary)
    // has to be done after each set of classifiers as the
    // executor pool gets shut down in order to prevent the
    // program executing as a server (and not returning to
    // the command prompt when run from the command line)
    super.buildClassifier(train);

    // construct the actual classifiers
    buildClassifiers(train);

    // Classify test instances and add to meta data
    Instances test = newData.testCV(m_NumFolds, j);
    for (int i = 0; i < test.numInstances(); i++) {
      metaData.add(metaInstance(test.instance(i)));
    }
  }

  m_MetaClassifier.buildClassifier(metaData);
}

/**
 * Returns class probabilities.
 *
 * @param instance the instance to be classified
 * @return the distribution
 * @throws Exception if instance could not be classified
 * successfully
 */
public double[] distributionForInstance(Instance instance) throws Exception {

  return m_MetaClassifier.distributionForInstance(metaInstance(instance));
}

/**
 * Output a representation of this classifier
 *
 * @return a string representation of the classifier
 */
public String toString() {

  if (m_Classifiers.length == 0) {
    return "Stacking: No base schemes entered.";
  }
  if (m_MetaClassifier == null) {
    return "Stacking: No meta scheme selected.";
  }
  if (m_MetaFormat == null) {
    return "Stacking: No model built yet.";
  }
  String result = "Stacking\n\nBase classifiers\n\n";
  for (int i = 0; i < m_Classifiers.length; i++) {
    result += getClassifier(i).toString() +"\n\n";
  }
  result += "\n\nMeta classifier\n\n";
  result += m_MetaClassifier.toString();
  return result;
}

/**
 * Makes the format for the level-1 data.
 *
 * @param instances the level-0 format
 * @return the format for the meta data
 * @throws Exception if the format generation fails
 */
protected Instances metaFormat(Instances instances) throws Exception {

  ArrayList<Attribute> attributes = new ArrayList<Attribute>();
  Instances metaFormat;

  // One attribute per base classifier (regression), or one per class
  // value of each base classifier (classification).
  for (int k = 0; k < m_Classifiers.length; k++) {
    Classifier classifier = (Classifier) getClassifier(k);
    String name = classifier.getClass().getName() + "-" + (k+1);
    if (m_BaseFormat.classAttribute().isNumeric()) {
      attributes.add(new Attribute(name));
    } else {
      for (int j = 0; j < m_BaseFormat.classAttribute().numValues(); j++) {
        attributes.add(
            new Attribute(
                name + ":" + m_BaseFormat.classAttribute().value(j)));
      }
    }
  }
  attributes.add((Attribute) m_BaseFormat.classAttribute().copy());
  metaFormat = new Instances("Meta format", attributes, 0);
  metaFormat.setClassIndex(metaFormat.numAttributes() - 1);
  return metaFormat;
}

/**
 * Makes a level-1 instance from the given instance.
 *
 * @param instance the instance to be transformed
 * @return the level-1 instance
 * @throws Exception if the instance generation fails
 */
protected Instance metaInstance(Instance instance) throws Exception {

  double[] values = new double[m_MetaFormat.numAttributes()];
  Instance metaInstance;
  int i = 0;
  for (int k = 0; k < m_Classifiers.length; k++) {
    Classifier classifier = getClassifier(k);
    if (m_BaseFormat.classAttribute().isNumeric()) {
      values[i++] = classifier.classifyInstance(instance);
    } else {
      double[] dist = classifier.distributionForInstance(instance);
      for (int j = 0; j < dist.length; j++) {
        values[i++] = dist[j];
      }
    }
  }
  values[i] = instance.classValue();
  metaInstance = new DenseInstance(1, values);
  metaInstance.setDataset(m_MetaFormat);
  return metaInstance;
}

/**
 * Returns the revision string.
 *
 * @return the revision
 */
public String getRevision() {
  return RevisionUtils.extract("$Revision: 8034 $");
}

/**
 * Main method for testing this class.
 *
 * @param argv should contain the following arguments:
 * -t training file [-T test file] [-c class index]
 */
public static void main(String [] argv) {
  runClassifier(new Stacking(), argv);
}
}
16,110
28.67035
100
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/StackingC.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * StackingC.java * Copyright (C) 1999 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import weka.classifiers.Classifier; import weka.classifiers.functions.LinearRegression; import weka.core.Instance; import weka.core.Instances; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.unsupervised.attribute.MakeIndicator; import weka.filters.unsupervised.attribute.Remove; import java.util.Random; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * Implements StackingC (more efficient version of stacking).<br/> * <br/> * For more information, see<br/> * <br/> * A.K. Seewald: How to Make Stacking Better and Faster While Also Taking Care of an Unknown Weakness. In: Nineteenth International Conference on Machine Learning, 554-561, 2002.<br/> * <br/> * Note: requires meta classifier to be a numeric prediction scheme. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Seewald2002, * author = {A.K. 
Seewald}, * booktitle = {Nineteenth International Conference on Machine Learning}, * editor = {C. Sammut and A. Hoffmann}, * pages = {554-561}, * publisher = {Morgan Kaufmann Publishers}, * title = {How to Make Stacking Better and Faster While Also Taking Care of an Unknown Weakness}, * year = {2002} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M &lt;scheme specification&gt; * Full name of meta classifier, followed by options. * Must be a numeric prediction scheme. Default: Linear Regression.</pre> * * <pre> -X &lt;number of folds&gt; * Sets the number of cross-validation folds.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -B &lt;classifier specification&gt; * Full class name of classifier to include, followed * by scheme options. May be specified multiple times. * (default: "weka.classifiers.rules.ZeroR")</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Alexander K. Seewald (alex@seewald.at) * @version $Revision: 1.15 $ */ public class StackingC extends Stacking implements OptionHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -6717545616603725198L; /** The meta classifiers (one for each class, like in ClassificationViaRegression) */ protected Classifier [] m_MetaClassifiers = null; /** Filter to transform metaData - Remove */ protected Remove m_attrFilter = null; /** Filter to transform metaData - MakeIndicator */ protected MakeIndicator m_makeIndicatorFilter = null; /** * The constructor. */ public StackingC() { m_MetaClassifier = new weka.classifiers.functions.LinearRegression(); ((LinearRegression)(getMetaClassifier())). 
setAttributeSelectionMethod(new weka.core.SelectedTag(1, LinearRegression.TAGS_SELECTION)); } /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Implements StackingC (more efficient version of stacking).\n\n" + "For more information, see\n\n" + getTechnicalInformation().toString() + "\n\n" + "Note: requires meta classifier to be a numeric prediction scheme."; } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "A.K. Seewald"); result.setValue(Field.TITLE, "How to Make Stacking Better and Faster While Also Taking Care of an Unknown Weakness"); result.setValue(Field.BOOKTITLE, "Nineteenth International Conference on Machine Learning"); result.setValue(Field.EDITOR, "C. Sammut and A. Hoffmann"); result.setValue(Field.YEAR, "2002"); result.setValue(Field.PAGES, "554-561"); result.setValue(Field.PUBLISHER, "Morgan Kaufmann Publishers"); return result; } /** * String describing option for setting meta classifier * * @return string describing the option */ protected String metaOption() { return "\tFull name of meta classifier, followed by options.\n" + "\tMust be a numeric prediction scheme. Default: Linear Regression."; } /** * Process options setting meta classifier. 
* * @param options the meta options to parse * @throws Exception if parsing fails */ protected void processMetaOptions(String[] options) throws Exception { String classifierString = Utils.getOption('M', options); String [] classifierSpec = Utils.splitOptions(classifierString); if (classifierSpec.length != 0) { String classifierName = classifierSpec[0]; classifierSpec[0] = ""; setMetaClassifier(AbstractClassifier.forName(classifierName, classifierSpec)); } else { ((LinearRegression)(getMetaClassifier())). setAttributeSelectionMethod(new weka.core.SelectedTag(1,LinearRegression.TAGS_SELECTION)); } } /** * Method that builds meta level. * * @param newData the data to work with * @param random the random number generator to use for cross-validation * @throws Exception if generation fails */ protected void generateMetaLevel(Instances newData, Random random) throws Exception { Instances metaData = metaFormat(newData); m_MetaFormat = new Instances(metaData, 0); for (int j = 0; j < m_NumFolds; j++) { Instances train = newData.trainCV(m_NumFolds, j, random); // Build base classifiers for (int i = 0; i < m_Classifiers.length; i++) { getClassifier(i).buildClassifier(train); } // Classify test instances and add to meta data Instances test = newData.testCV(m_NumFolds, j); for (int i = 0; i < test.numInstances(); i++) { metaData.add(metaInstance(test.instance(i))); } } m_MetaClassifiers = AbstractClassifier.makeCopies(m_MetaClassifier, m_BaseFormat.numClasses()); int [] arrIdc = new int[m_Classifiers.length + 1]; arrIdc[m_Classifiers.length] = metaData.numAttributes() - 1; Instances newInsts; for (int i = 0; i < m_MetaClassifiers.length; i++) { for (int j = 0; j < m_Classifiers.length; j++) { arrIdc[j] = m_BaseFormat.numClasses() * j + i; } m_makeIndicatorFilter = new weka.filters.unsupervised.attribute.MakeIndicator(); m_makeIndicatorFilter.setAttributeIndex("" + (metaData.classIndex() + 1)); m_makeIndicatorFilter.setNumeric(true); m_makeIndicatorFilter.setValueIndex(i); 
m_makeIndicatorFilter.setInputFormat(metaData); newInsts = Filter.useFilter(metaData,m_makeIndicatorFilter); m_attrFilter = new weka.filters.unsupervised.attribute.Remove(); m_attrFilter.setInvertSelection(true); m_attrFilter.setAttributeIndicesArray(arrIdc); m_attrFilter.setInputFormat(m_makeIndicatorFilter.getOutputFormat()); newInsts = Filter.useFilter(newInsts,m_attrFilter); newInsts.setClassIndex(newInsts.numAttributes()-1); m_MetaClassifiers[i].buildClassifier(newInsts); } } /** * Classifies a given instance using the stacked classifier. * * @param instance the instance to be classified * @return the distribution * @throws Exception if instance could not be classified * successfully */ public double[] distributionForInstance(Instance instance) throws Exception { int [] arrIdc = new int[m_Classifiers.length+1]; arrIdc[m_Classifiers.length] = m_MetaFormat.numAttributes() - 1; double [] classProbs = new double[m_BaseFormat.numClasses()]; Instance newInst; double sum = 0; for (int i = 0; i < m_MetaClassifiers.length; i++) { for (int j = 0; j < m_Classifiers.length; j++) { arrIdc[j] = m_BaseFormat.numClasses() * j + i; } m_makeIndicatorFilter.setAttributeIndex("" + (m_MetaFormat.classIndex() + 1)); m_makeIndicatorFilter.setNumeric(true); m_makeIndicatorFilter.setValueIndex(i); m_makeIndicatorFilter.setInputFormat(m_MetaFormat); m_makeIndicatorFilter.input(metaInstance(instance)); m_makeIndicatorFilter.batchFinished(); newInst = m_makeIndicatorFilter.output(); m_attrFilter.setAttributeIndicesArray(arrIdc); m_attrFilter.setInvertSelection(true); m_attrFilter.setInputFormat(m_makeIndicatorFilter.getOutputFormat()); m_attrFilter.input(newInst); m_attrFilter.batchFinished(); newInst = m_attrFilter.output(); classProbs[i]=m_MetaClassifiers[i].classifyInstance(newInst); if (classProbs[i] > 1) { classProbs[i] = 1; } if (classProbs[i] < 0) { classProbs[i] = 0; } sum += classProbs[i]; } if (sum!=0) Utils.normalize(classProbs,sum); return classProbs; } /** * Output a 
representation of this classifier * * @return a string representation of the classifier */ public String toString() { if (m_MetaFormat == null) { return "StackingC: No model built yet."; } String result = "StackingC\n\nBase classifiers\n\n"; for (int i = 0; i < m_Classifiers.length; i++) { result += getClassifier(i).toString() +"\n\n"; } result += "\n\nMeta classifiers (one for each class)\n\n"; for (int i = 0; i< m_MetaClassifiers.length; i++) { result += m_MetaClassifiers[i].toString() +"\n\n"; } return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.15 $"); } /** * Main method for testing this class. * * @param argv should contain the following arguments: * -t training file [-T test file] [-c class index] */ public static void main(String [] argv) { runClassifier(new StackingC(), argv); } }
11,415
32.97619
183
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/ThresholdSelector.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * ThresholdSelector.java * Copyright (C) 1999 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.meta; import weka.classifiers.RandomizableSingleClassifierEnhancer; import weka.classifiers.evaluation.EvaluationUtils; import weka.classifiers.evaluation.ThresholdCurve; import weka.core.Attribute; import weka.core.AttributeStats; import weka.core.Capabilities; import weka.core.Drawable; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; import weka.core.Capabilities.Capability; import java.util.Enumeration; import java.util.Random; import java.util.Vector; /** <!-- globalinfo-start --> * A metaclassifier that selecting a mid-point threshold on the probability output by a Classifier. The midpoint threshold is set so that a given performance measure is optimized. Currently this is the F-measure. Performance is measured either on the training data, a hold-out set or using cross-validation. 
In addition, the probabilities returned by the base learner can have their range expanded so that the output probabilities will reside between 0 and 1 (this is useful if the scheme normally produces probabilities in a very narrow range). * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -C &lt;integer&gt; * The class for which threshold is determined. Valid values are: * 1, 2 (for first and second classes, respectively), 3 (for whichever * class is least frequent), and 4 (for whichever class value is most * frequent), and 5 (for the first class named any of "yes","pos(itive)" * "1", or method 3 if no matches). (default 5).</pre> * * <pre> -X &lt;number of folds&gt; * Number of folds used for cross validation. If just a * hold-out set is used, this determines the size of the hold-out set * (default 3).</pre> * * <pre> -R &lt;integer&gt; * Sets whether confidence range correction is applied. This * can be used to ensure the confidences range from 0 to 1. * Use 0 for no range correction, 1 for correction based on * the min/max values seen during threshold selection * (default 0).</pre> * * <pre> -E &lt;integer&gt; * Sets the evaluation mode. Use 0 for * evaluation using cross-validation, * 1 for evaluation using hold-out set, * and 2 for evaluation on the * training data (default 1).</pre> * * <pre> -M [FMEASURE|ACCURACY|TRUE_POS|TRUE_NEG|TP_RATE|PRECISION|RECALL] * Measure used for evaluation (default is FMEASURE). * </pre> * * <pre> -manual &lt;real&gt; * Set a manual threshold to use. This option overrides * automatic selection and options pertaining to * automatic selection will be ignored. * (default -1, i.e. do not use a manual threshold).</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.functions.Logistic)</pre> * * <pre> * Options specific to classifier weka.classifiers.functions.Logistic: * </pre> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -R &lt;ridge&gt; * Set the ridge in the log-likelihood.</pre> * * <pre> -M &lt;number&gt; * Set the maximum number of iterations (default -1, until convergence).</pre> * <!-- options-end --> * * Options after -- are passed to the designated sub-classifier. <p> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 1.43 $ */ public class ThresholdSelector extends RandomizableSingleClassifierEnhancer implements OptionHandler, Drawable { /** for serialization */ static final long serialVersionUID = -1795038053239867444L; /** no range correction */ public static final int RANGE_NONE = 0; /** Correct based on min/max observed */ public static final int RANGE_BOUNDS = 1; /** Type of correction applied to threshold range */ public static final Tag [] TAGS_RANGE = { new Tag(RANGE_NONE, "No range correction"), new Tag(RANGE_BOUNDS, "Correct based on min/max observed") }; /** entire training set */ public static final int EVAL_TRAINING_SET = 2; /** single tuned fold */ public static final int EVAL_TUNED_SPLIT = 1; /** n-fold cross-validation */ public static final int EVAL_CROSS_VALIDATION = 0; /** The evaluation modes */ public static final Tag [] TAGS_EVAL = { new Tag(EVAL_TRAINING_SET, "Entire training set"), new Tag(EVAL_TUNED_SPLIT, "Single tuned fold"), new Tag(EVAL_CROSS_VALIDATION, "N-Fold cross validation") }; /** first class value */ public static final int OPTIMIZE_0 = 0; /** second class value */ public static final int OPTIMIZE_1 = 1; /** least frequent class value */ public static final int OPTIMIZE_LFREQ = 2; /** most frequent class value */ public static final int OPTIMIZE_MFREQ = 3; /** class value name, either 'yes' or 'pos(itive)' */ public static final int OPTIMIZE_POS_NAME = 4; /** How to determine which class value to optimize for */ public 
static final Tag [] TAGS_OPTIMIZE = { new Tag(OPTIMIZE_0, "First class value"), new Tag(OPTIMIZE_1, "Second class value"), new Tag(OPTIMIZE_LFREQ, "Least frequent class value"), new Tag(OPTIMIZE_MFREQ, "Most frequent class value"), new Tag(OPTIMIZE_POS_NAME, "Class value named: \"yes\", \"pos(itive)\",\"1\"") }; /** F-measure */ public static final int FMEASURE = 1; /** accuracy */ public static final int ACCURACY = 2; /** true-positive */ public static final int TRUE_POS = 3; /** true-negative */ public static final int TRUE_NEG = 4; /** true-positive rate */ public static final int TP_RATE = 5; /** precision */ public static final int PRECISION = 6; /** recall */ public static final int RECALL = 7; /** the measure to use */ public static final Tag[] TAGS_MEASURE = { new Tag(FMEASURE, "FMEASURE"), new Tag(ACCURACY, "ACCURACY"), new Tag(TRUE_POS, "TRUE_POS"), new Tag(TRUE_NEG, "TRUE_NEG"), new Tag(TP_RATE, "TP_RATE"), new Tag(PRECISION, "PRECISION"), new Tag(RECALL, "RECALL") }; /** The upper threshold used as the basis of correction */ protected double m_HighThreshold = 1; /** The lower threshold used as the basis of correction */ protected double m_LowThreshold = 0; /** The threshold that lead to the best performance */ protected double m_BestThreshold = -Double.MAX_VALUE; /** The best value that has been observed */ protected double m_BestValue = - Double.MAX_VALUE; /** The number of folds used in cross-validation */ protected int m_NumXValFolds = 3; /** Designated class value, determined during building */ protected int m_DesignatedClass = 0; /** Method to determine which class to optimize for */ protected int m_ClassMode = OPTIMIZE_POS_NAME; /** The evaluation mode */ protected int m_EvalMode = EVAL_TUNED_SPLIT; /** The range correction mode */ protected int m_RangeMode = RANGE_NONE; /** evaluation measure used for determining threshold **/ int m_nMeasure = FMEASURE; /** True if a manually set threshold is being used */ protected boolean m_manualThreshold = 
false; /** -1 = not used by default */ protected double m_manualThresholdValue = -1; /** The minimum value for the criterion. If threshold adjustment yields less than that, the default threshold of 0.5 is used. */ protected static final double MIN_VALUE = 0.05; /** * Constructor. */ public ThresholdSelector() { m_Classifier = new weka.classifiers.functions.Logistic(); } /** * String describing default classifier. * * @return the default classifier classname */ protected String defaultClassifierString() { return "weka.classifiers.functions.Logistic"; } /** * Collects the classifier predictions using the specified evaluation method. * * @param instances the set of <code>Instances</code> to generate * predictions for. * @param mode the evaluation mode. * @param numFolds the number of folds to use if not evaluating on the * full training set. * @return a <code>FastVector</code> containing the predictions. * @throws Exception if an error occurs generating the predictions. */ protected FastVector getPredictions(Instances instances, int mode, int numFolds) throws Exception { EvaluationUtils eu = new EvaluationUtils(); eu.setSeed(m_Seed); switch (mode) { case EVAL_TUNED_SPLIT: Instances trainData = null, evalData = null; Instances data = new Instances(instances); Random random = new Random(m_Seed); data.randomize(random); data.stratify(numFolds); // Make sure that both subsets contain at least one positive instance for (int subsetIndex = 0; subsetIndex < numFolds; subsetIndex++) { trainData = data.trainCV(numFolds, subsetIndex, random); evalData = data.testCV(numFolds, subsetIndex); if (checkForInstance(trainData) && checkForInstance(evalData)) { break; } } return eu.getTrainTestPredictions(m_Classifier, trainData, evalData); case EVAL_TRAINING_SET: return eu.getTrainTestPredictions(m_Classifier, instances, instances); case EVAL_CROSS_VALIDATION: return eu.getCVPredictions(m_Classifier, instances, numFolds); default: throw new RuntimeException("Unrecognized evaluation 
mode"); } } /** * Tooltip for this property. * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String measureTipText() { return "Sets the measure for determining the threshold."; } /** * set measure used for determining threshold * * @param newMeasure Tag representing measure to be used */ public void setMeasure(SelectedTag newMeasure) { if (newMeasure.getTags() == TAGS_MEASURE) { m_nMeasure = newMeasure.getSelectedTag().getID(); } } /** * get measure used for determining threshold * * @return Tag representing measure used */ public SelectedTag getMeasure() { return new SelectedTag(m_nMeasure, TAGS_MEASURE); } /** * Finds the best threshold, this implementation searches for the * highest FMeasure. If no FMeasure higher than MIN_VALUE is found, * the default threshold of 0.5 is used. * * @param predictions a <code>FastVector</code> containing the predictions. */ protected void findThreshold(FastVector predictions) { Instances curve = (new ThresholdCurve()).getCurve(predictions, m_DesignatedClass); double low = 1.0; double high = 0.0; //System.err.println(curve); if (curve.numInstances() > 0) { Instance maxInst = curve.instance(0); double maxValue = 0; int index1 = 0; int index2 = 0; switch (m_nMeasure) { case FMEASURE: index1 = curve.attribute(ThresholdCurve.FMEASURE_NAME).index(); maxValue = maxInst.value(index1); break; case TRUE_POS: index1 = curve.attribute(ThresholdCurve.TRUE_POS_NAME).index(); maxValue = maxInst.value(index1); break; case TRUE_NEG: index1 = curve.attribute(ThresholdCurve.TRUE_NEG_NAME).index(); maxValue = maxInst.value(index1); break; case TP_RATE: index1 = curve.attribute(ThresholdCurve.TP_RATE_NAME).index(); maxValue = maxInst.value(index1); break; case PRECISION: index1 = curve.attribute(ThresholdCurve.PRECISION_NAME).index(); maxValue = maxInst.value(index1); break; case RECALL: index1 = curve.attribute(ThresholdCurve.RECALL_NAME).index(); maxValue = maxInst.value(index1); break; 
case ACCURACY: index1 = curve.attribute(ThresholdCurve.TRUE_POS_NAME).index(); index2 = curve.attribute(ThresholdCurve.TRUE_NEG_NAME).index(); maxValue = maxInst.value(index1) + maxInst.value(index2); break; } int indexThreshold = curve.attribute(ThresholdCurve.THRESHOLD_NAME).index(); for (int i = 1; i < curve.numInstances(); i++) { Instance current = curve.instance(i); double currentValue = 0; if (m_nMeasure == ACCURACY) { currentValue= current.value(index1) + current.value(index2); } else { currentValue= current.value(index1); } if (currentValue> maxValue) { maxInst = current; maxValue = currentValue; } if (m_RangeMode == RANGE_BOUNDS) { double thresh = current.value(indexThreshold); if (thresh < low) { low = thresh; } if (thresh > high) { high = thresh; } } } if (maxValue > MIN_VALUE) { m_BestThreshold = maxInst.value(indexThreshold); m_BestValue = maxValue; //System.err.println("maxFM: " + maxFM); } if (m_RangeMode == RANGE_BOUNDS) { m_LowThreshold = low; m_HighThreshold = high; //System.err.println("Threshold range: " + low + " - " + high); } } } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(5); newVector.addElement(new Option( "\tThe class for which threshold is determined. Valid values are:\n" + "\t1, 2 (for first and second classes, respectively), 3 (for whichever\n" + "\tclass is least frequent), and 4 (for whichever class value is most\n" + "\tfrequent), and 5 (for the first class named any of \"yes\",\"pos(itive)\"\n" + "\t\"1\", or method 3 if no matches). (default 5).", "C", 1, "-C <integer>")); newVector.addElement(new Option( "\tNumber of folds used for cross validation. If just a\n" + "\thold-out set is used, this determines the size of the hold-out set\n" + "\t(default 3).", "X", 1, "-X <number of folds>")); newVector.addElement(new Option( "\tSets whether confidence range correction is applied. 
This\n" + "\tcan be used to ensure the confidences range from 0 to 1.\n" + "\tUse 0 for no range correction, 1 for correction based on\n" + "\tthe min/max values seen during threshold selection\n"+ "\t(default 0).", "R", 1, "-R <integer>")); newVector.addElement(new Option( "\tSets the evaluation mode. Use 0 for\n" + "\tevaluation using cross-validation,\n" + "\t1 for evaluation using hold-out set,\n" + "\tand 2 for evaluation on the\n" + "\ttraining data (default 1).", "E", 1, "-E <integer>")); newVector.addElement(new Option( "\tMeasure used for evaluation (default is FMEASURE).\n", "M", 1, "-M [FMEASURE|ACCURACY|TRUE_POS|TRUE_NEG|TP_RATE|PRECISION|RECALL]")); newVector.addElement(new Option( "\tSet a manual threshold to use. This option overrides\n" + "\tautomatic selection and options pertaining to\n" + "\tautomatic selection will be ignored.\n" + "\t(default -1, i.e. do not use a manual threshold).", "manual", 1, "-manual <real>")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -C &lt;integer&gt; * The class for which threshold is determined. Valid values are: * 1, 2 (for first and second classes, respectively), 3 (for whichever * class is least frequent), and 4 (for whichever class value is most * frequent), and 5 (for the first class named any of "yes","pos(itive)" * "1", or method 3 if no matches). (default 5).</pre> * * <pre> -X &lt;number of folds&gt; * Number of folds used for cross validation. If just a * hold-out set is used, this determines the size of the hold-out set * (default 3).</pre> * * <pre> -R &lt;integer&gt; * Sets whether confidence range correction is applied. This * can be used to ensure the confidences range from 0 to 1. 
* Use 0 for no range correction, 1 for correction based on * the min/max values seen during threshold selection * (default 0).</pre> * * <pre> -E &lt;integer&gt; * Sets the evaluation mode. Use 0 for * evaluation using cross-validation, * 1 for evaluation using hold-out set, * and 2 for evaluation on the * training data (default 1).</pre> * * <pre> -M [FMEASURE|ACCURACY|TRUE_POS|TRUE_NEG|TP_RATE|PRECISION|RECALL] * Measure used for evaluation (default is FMEASURE). * </pre> * * <pre> -manual &lt;real&gt; * Set a manual threshold to use. This option overrides * automatic selection and options pertaining to * automatic selection will be ignored. * (default -1, i.e. do not use a manual threshold).</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.functions.Logistic)</pre> * * <pre> * Options specific to classifier weka.classifiers.functions.Logistic: * </pre> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -R &lt;ridge&gt; * Set the ridge in the log-likelihood.</pre> * * <pre> -M &lt;number&gt; * Set the maximum number of iterations (default -1, until convergence).</pre> * <!-- options-end --> * * Options after -- are passed to the designated sub-classifier. 
<p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String manualS = Utils.getOption("manual", options); if (manualS.length() > 0) { double val = Double.parseDouble(manualS); if (val >= 0.0) { setManualThresholdValue(val); } } String classString = Utils.getOption('C', options); if (classString.length() != 0) { setDesignatedClass(new SelectedTag(Integer.parseInt(classString) - 1, TAGS_OPTIMIZE)); } else { setDesignatedClass(new SelectedTag(OPTIMIZE_POS_NAME, TAGS_OPTIMIZE)); } String modeString = Utils.getOption('E', options); if (modeString.length() != 0) { setEvaluationMode(new SelectedTag(Integer.parseInt(modeString), TAGS_EVAL)); } else { setEvaluationMode(new SelectedTag(EVAL_TUNED_SPLIT, TAGS_EVAL)); } String rangeString = Utils.getOption('R', options); if (rangeString.length() != 0) { setRangeCorrection(new SelectedTag(Integer.parseInt(rangeString), TAGS_RANGE)); } else { setRangeCorrection(new SelectedTag(RANGE_NONE, TAGS_RANGE)); } String measureString = Utils.getOption('M', options); if (measureString.length() != 0) { setMeasure(new SelectedTag(measureString, TAGS_MEASURE)); } else { setMeasure(new SelectedTag(FMEASURE, TAGS_MEASURE)); } String foldsString = Utils.getOption('X', options); if (foldsString.length() != 0) { setNumXValFolds(Integer.parseInt(foldsString)); } else { setNumXValFolds(3); } super.setOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 12]; int current = 0; if (m_manualThreshold) { options[current++] = "-manual"; options[current++] = "" + getManualThresholdValue(); } options[current++] = "-C"; options[current++] = "" + (m_ClassMode + 1); options[current++] = "-X"; options[current++] = "" + getNumXValFolds(); options[current++] = "-E"; options[current++] = "" + m_EvalMode; options[current++] = "-R"; options[current++] = "" + m_RangeMode; options[current++] = "-M"; options[current++] = "" + getMeasure().getSelectedTag().getReadable(); System.arraycopy(superOptions, 0, options, current, superOptions.length); current += superOptions.length; while (current < options.length) { options[current++] = ""; } return options; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.BINARY_CLASS); return result; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @throws Exception if the classifier has not been generated successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); AttributeStats stats = instances.attributeStats(instances.classIndex()); if (m_manualThreshold) { m_BestThreshold = m_manualThresholdValue; } else { m_BestThreshold = 0.5; } m_BestValue = MIN_VALUE; m_HighThreshold = 1; m_LowThreshold = 0; // If data contains only one instance of positive data // optimize on training data if (stats.distinctCount != 2) { System.err.println("Couldn't find examples of both classes. No adjustment."); m_Classifier.buildClassifier(instances); } else { // Determine which class value to look for switch (m_ClassMode) { case OPTIMIZE_0: m_DesignatedClass = 0; break; case OPTIMIZE_1: m_DesignatedClass = 1; break; case OPTIMIZE_POS_NAME: Attribute cAtt = instances.classAttribute(); boolean found = false; for (int i = 0; i < cAtt.numValues() && !found; i++) { String name = cAtt.value(i).toLowerCase(); if (name.startsWith("yes") || name.equals("1") || name.startsWith("pos")) { found = true; m_DesignatedClass = i; } } if (found) { break; } // No named class found, so fall through to default of least frequent case OPTIMIZE_LFREQ: m_DesignatedClass = (stats.nominalCounts[0] > stats.nominalCounts[1]) ? 1 : 0; break; case OPTIMIZE_MFREQ: m_DesignatedClass = (stats.nominalCounts[0] > stats.nominalCounts[1]) ? 
0 : 1; break; default: throw new Exception("Unrecognized class value selection mode"); } /* System.err.println("ThresholdSelector: Using mode=" + TAGS_OPTIMIZE[m_ClassMode].getReadable()); System.err.println("ThresholdSelector: Optimizing using class " + m_DesignatedClass + "/" + instances.classAttribute().value(m_DesignatedClass)); */ if (m_manualThreshold) { m_Classifier.buildClassifier(instances); return; } if (stats.nominalCounts[m_DesignatedClass] == 1) { System.err.println("Only 1 positive found: optimizing on training data"); findThreshold(getPredictions(instances, EVAL_TRAINING_SET, 0)); } else { int numFolds = Math.min(m_NumXValFolds, stats.nominalCounts[m_DesignatedClass]); //System.err.println("Number of folds for threshold selector: " + numFolds); findThreshold(getPredictions(instances, m_EvalMode, numFolds)); if (m_EvalMode != EVAL_TRAINING_SET) { m_Classifier.buildClassifier(instances); } } } } /** * Checks whether instance of designated class is in subset. * * @param data the data to check for instance * @return true if the instance is in the subset * @throws Exception if checking fails */ private boolean checkForInstance(Instances data) throws Exception { for (int i = 0; i < data.numInstances(); i++) { if (((int)data.instance(i).classValue()) == m_DesignatedClass) { return true; } } return false; } /** * Calculates the class membership probabilities for the given test instance. 
* * @param instance the instance to be classified * @return predicted class probability distribution * @throws Exception if instance could not be classified * successfully */ public double [] distributionForInstance(Instance instance) throws Exception { double [] pred = m_Classifier.distributionForInstance(instance); double prob = pred[m_DesignatedClass]; // Warp probability if (prob > m_BestThreshold) { prob = 0.5 + (prob - m_BestThreshold) / ((m_HighThreshold - m_BestThreshold) * 2); } else { prob = (prob - m_LowThreshold) / ((m_BestThreshold - m_LowThreshold) * 2); } if (prob < 0) { prob = 0.0; } else if (prob > 1) { prob = 1.0; } // Alter the distribution pred[m_DesignatedClass] = prob; if (pred.length == 2) { // Handle case when there's only one class pred[(m_DesignatedClass + 1) % 2] = 1.0 - prob; } return pred; } /** * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "A metaclassifier that selecting a mid-point threshold on the " + "probability output by a Classifier. The midpoint " + "threshold is set so that a given performance measure is optimized. " + "Currently this is the F-measure. Performance is measured either on " + "the training data, a hold-out set or using cross-validation. In " + "addition, the probabilities returned by the base learner can " + "have their range expanded so that the output probabilities will " + "reside between 0 and 1 (this is useful if the scheme normally " + "produces probabilities in a very narrow range)."; } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String designatedClassTipText() { return "Sets the class value for which the optimization is performed. 
" + "The options are: pick the first class value; pick the second " + "class value; pick whichever class is least frequent; pick whichever " + "class value is most frequent; pick the first class named any of " + "\"yes\",\"pos(itive)\", \"1\", or the least frequent if no matches)."; } /** * Gets the method to determine which class value to optimize. Will * be one of OPTIMIZE_0, OPTIMIZE_1, OPTIMIZE_LFREQ, OPTIMIZE_MFREQ, * OPTIMIZE_POS_NAME. * * @return the class selection mode. */ public SelectedTag getDesignatedClass() { return new SelectedTag(m_ClassMode, TAGS_OPTIMIZE); } /** * Sets the method to determine which class value to optimize. Will * be one of OPTIMIZE_0, OPTIMIZE_1, OPTIMIZE_LFREQ, OPTIMIZE_MFREQ, * OPTIMIZE_POS_NAME. * * @param newMethod the new class selection mode. */ public void setDesignatedClass(SelectedTag newMethod) { if (newMethod.getTags() == TAGS_OPTIMIZE) { m_ClassMode = newMethod.getSelectedTag().getID(); } } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String evaluationModeTipText() { return "Sets the method used to determine the threshold/performance " + "curve. The options are: perform optimization based on the entire " + "training set (may result in overfitting); perform an n-fold " + "cross-validation (may be time consuming); perform one fold of " + "an n-fold cross-validation (faster but likely less accurate)."; } /** * Sets the evaluation mode used. Will be one of * EVAL_TRAINING, EVAL_TUNED_SPLIT, or EVAL_CROSS_VALIDATION * * @param newMethod the new evaluation mode. */ public void setEvaluationMode(SelectedTag newMethod) { if (newMethod.getTags() == TAGS_EVAL) { m_EvalMode = newMethod.getSelectedTag().getID(); } } /** * Gets the evaluation mode used. Will be one of * EVAL_TRAINING, EVAL_TUNED_SPLIT, or EVAL_CROSS_VALIDATION * * @return the evaluation mode. 
*/ public SelectedTag getEvaluationMode() { return new SelectedTag(m_EvalMode, TAGS_EVAL); } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String rangeCorrectionTipText() { return "Sets the type of prediction range correction performed. " + "The options are: do not do any range correction; " + "expand predicted probabilities so that the minimum probability " + "observed during the optimization maps to 0, and the maximum " + "maps to 1 (values outside this range are clipped to 0 and 1)."; } /** * Sets the confidence range correction mode used. Will be one of * RANGE_NONE, or RANGE_BOUNDS * * @param newMethod the new correciton mode. */ public void setRangeCorrection(SelectedTag newMethod) { if (newMethod.getTags() == TAGS_RANGE) { m_RangeMode = newMethod.getSelectedTag().getID(); } } /** * Gets the confidence range correction mode used. Will be one of * RANGE_NONE, or RANGE_BOUNDS * * @return the confidence correction mode. */ public SelectedTag getRangeCorrection() { return new SelectedTag(m_RangeMode, TAGS_RANGE); } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numXValFoldsTipText() { return "Sets the number of folds used during full cross-validation " + "and tuned fold evaluation. This number will be automatically " + "reduced if there are insufficient positive examples."; } /** * Get the number of folds used for cross-validation. * * @return the number of folds used for cross-validation. */ public int getNumXValFolds() { return m_NumXValFolds; } /** * Set the number of folds used for cross-validation. * * @param newNumFolds the number of folds used for cross-validation. */ public void setNumXValFolds(int newNumFolds) { if (newNumFolds < 2) { throw new IllegalArgumentException("Number of folds must be greater than 1"); } m_NumXValFolds = newNumFolds; } /** * Returns the type of graph this classifier * represents. 
* * @return the type of graph this classifier represents */ public int graphType() { if (m_Classifier instanceof Drawable) return ((Drawable)m_Classifier).graphType(); else return Drawable.NOT_DRAWABLE; } /** * Returns graph describing the classifier (if possible). * * @return the graph of the classifier in dotty format * @throws Exception if the classifier cannot be graphed */ public String graph() throws Exception { if (m_Classifier instanceof Drawable) return ((Drawable)m_Classifier).graph(); else throw new Exception("Classifier: " + getClassifierSpec() + " cannot be graphed"); } /** * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String manualThresholdValueTipText() { return "Sets a manual threshold value to use. " + "If this is set (non-negative value between 0 and 1), then " + "all options pertaining to automatic threshold selection are " + "ignored. "; } /** * Sets the value for a manual threshold. If this option * is set (non-negative value between 0 and 1), then options * pertaining to automatic threshold selection are ignored. * * @param threshold the manual threshold to use */ public void setManualThresholdValue(double threshold) throws Exception { m_manualThresholdValue = threshold; if (threshold >= 0.0 && threshold <= 1.0) { m_manualThreshold = true; } else { m_manualThreshold = false; if (threshold >= 0) { throw new IllegalArgumentException("Threshold must be in the " + "range 0..1."); } } } /** * Returns the value of the manual threshold. (a negative * value indicates that no manual threshold is being used. * * @return the value of the manual threshold. */ public double getManualThresholdValue() { return m_manualThresholdValue; } /** * Returns description of the cross-validated classifier. 
* * @return description of the cross-validated classifier as a string */ public String toString() { if (m_BestValue == -Double.MAX_VALUE) return "ThresholdSelector: No model built yet."; String result = "Threshold Selector.\n" + "Classifier: " + m_Classifier.getClass().getName() + "\n"; result += "Index of designated class: " + m_DesignatedClass + "\n"; if (m_manualThreshold) { result += "User supplied threshold: " + m_BestThreshold + "\n"; } else { result += "Evaluation mode: "; switch (m_EvalMode) { case EVAL_CROSS_VALIDATION: result += m_NumXValFolds + "-fold cross-validation"; break; case EVAL_TUNED_SPLIT: result += "tuning on 1/" + m_NumXValFolds + " of the data"; break; case EVAL_TRAINING_SET: default: result += "tuning on the training data"; } result += "\n"; result += "Threshold: " + m_BestThreshold + "\n"; result += "Best value: " + m_BestValue + "\n"; if (m_RangeMode == RANGE_BOUNDS) { result += "Expanding range [" + m_LowThreshold + "," + m_HighThreshold + "] to [0, 1]\n"; } result += "Measure: " + getMeasure().getSelectedTag().getReadable() + "\n"; } result += m_Classifier.toString(); return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.43 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new ThresholdSelector(), argv); } }
36,038
32.032997
545
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/Vote.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Vote.java * Copyright (C) 2000-2012 University of Waikato * */ package weka.classifiers.meta; import java.io.BufferedInputStream; import java.io.File; import java.io.FileInputStream; import java.io.ObjectInputStream; import java.util.ArrayList; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.Vector; import weka.classifiers.Classifier; import weka.classifiers.RandomizableMultipleClassifiersCombiner; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Aggregateable; import weka.core.Environment; import weka.core.EnvironmentHandler; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** <!-- globalinfo-start --> * Class for combining classifiers. Different * combinations of probability estimates for classification are available.<br/> * <br/> * For more information see:<br/> * <br/> * Ludmila I. Kuncheva (2004). Combining Pattern Classifiers: Methods and * Algorithms. John Wiley and Sons, Inc..<br/> * <br/> * J. 
Kittler, M. Hatef, Robert P.W. Duin, J. Matas (1998). On combining * classifiers. IEEE Transactions on Pattern Analysis and Machine Intelligence. * 20(3):226-239. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: * <p/> * * <pre> * -S &lt;num&gt; * Random number seed. * (default 1) * </pre> * * <pre> * -B &lt;classifier specification&gt; * Full class name of classifier to include, followed * by scheme options. May be specified multiple times. * (default: "weka.classifiers.rules.ZeroR") * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -P &lt;path to serialized classifier&gt; * Full path to serialized classifier to include. * May be specified multiple times to include * multiple serialized classifiers. Note: it does * not make sense to use pre-built classifiers in * a cross-validation. * </pre> * * <pre> * -R &lt;AVG|PROD|MAJ|MIN|MAX|MED&gt; * The combination rule to use * (default: AVG) * </pre> * <!-- options-end --> * <!-- technical-bibtex-start --> * BibTeX: * * <pre> * &#64;book{Kuncheva2004, * author = {Ludmila I. Kuncheva}, * publisher = {John Wiley and Sons, Inc.}, * title = {Combining Pattern Classifiers: Methods and Algorithms}, * year = {2004} * } * * &#64;article{Kittler1998, * author = {J. Kittler and M. Hatef and Robert P.W. Duin and J. Matas}, * journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, * number = {3}, * pages = {226-239}, * title = {On combining classifiers}, * volume = {20}, * year = {1998} * } * </pre> * <p/> <!-- technical-bibtex-end --> * * @author Alexander K. 
Seewald (alex@seewald.at) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Roberto Perdisci (roberto.perdisci@gmail.com) * @version $Revision: 9785 $ */ public class Vote extends RandomizableMultipleClassifiersCombiner implements TechnicalInformationHandler, EnvironmentHandler, Aggregateable<Classifier> { /** for serialization */ static final long serialVersionUID = -637891196294399624L; /** combination rule: Average of Probabilities */ public static final int AVERAGE_RULE = 1; /** combination rule: Product of Probabilities (only nominal classes) */ public static final int PRODUCT_RULE = 2; /** combination rule: Majority Voting (only nominal classes) */ public static final int MAJORITY_VOTING_RULE = 3; /** combination rule: Minimum Probability */ public static final int MIN_RULE = 4; /** combination rule: Maximum Probability */ public static final int MAX_RULE = 5; /** combination rule: Median Probability (only numeric class) */ public static final int MEDIAN_RULE = 6; /** combination rules */ public static final Tag[] TAGS_RULES = { new Tag(AVERAGE_RULE, "AVG", "Average of Probabilities"), new Tag(PRODUCT_RULE, "PROD", "Product of Probabilities"), new Tag(MAJORITY_VOTING_RULE, "MAJ", "Majority Voting"), new Tag(MIN_RULE, "MIN", "Minimum Probability"), new Tag(MAX_RULE, "MAX", "Maximum Probability"), new Tag(MEDIAN_RULE, "MED", "Median") }; /** Combination Rule variable */ protected int m_CombinationRule = AVERAGE_RULE; /** * the random number generator used for breaking ties in majority voting * * @see #distributionForInstanceMajorityVoting(Instance) */ protected Random m_Random; /** List of file paths to serialized models to load */ protected List<String> m_classifiersToLoad = new ArrayList<String>(); /** List of de-serialized pre-built classifiers to include in the ensemble */ protected List<Classifier> m_preBuiltClassifiers = new ArrayList<Classifier>(); /** Environment variables */ protected transient Environment m_env = Environment.getSystemWide(); /** 
Structure of the training data */ protected Instances m_structure; /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter * gui */ public String globalInfo() { return "Class for combining classifiers. Different combinations of " + "probability estimates for classification are available.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration listOptions() { Enumeration enm; Vector result; result = new Vector(); enm = super.listOptions(); while (enm.hasMoreElements()) result.addElement(enm.nextElement()); result.addElement(new Option( "\tFull path to serialized classifier to include.\n" + "\tMay be specified multiple times to include\n" + "\tmultiple serialized classifiers. Note: it does\n" + "\tnot make sense to use pre-built classifiers in\n" + "\ta cross-validation.", "P", 1, "-P <path to serialized " + "classifier>")); result.addElement(new Option("\tThe combination rule to use\n" + "\t(default: AVG)", "R", 1, "-R " + Tag.toOptionList(TAGS_RULES))); return result.elements(); } /** * Gets the current settings of Vote. * * @return an array of strings suitable for passing to setOptions() */ @Override public String[] getOptions() { int i; Vector result; String[] options; result = new Vector(); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); result.add("-R"); result.add("" + getCombinationRule()); for (i = 0; i < m_classifiersToLoad.size(); i++) { result.add("-P"); result.add(m_classifiersToLoad.get(i)); } return (String[]) result.toArray(new String[result.size()]); } /** * Parses a given list of options. * <p/> * <!-- options-start --> * Valid options are: * <p/> * * <pre> * -S &lt;num&gt; * Random number seed. 
* (default 1) * </pre> * * <pre> * -B &lt;classifier specification&gt; * Full class name of classifier to include, followed * by scheme options. May be specified multiple times. * (default: "weka.classifiers.rules.ZeroR") * </pre> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -P &lt;path to serialized classifier&gt; * Full path to serialized classifier to include. * May be specified multiple times to include * multiple serialized classifiers. Note: it does * not make sense to use pre-built classifiers in * a cross-validation. * </pre> * * <pre> * -R &lt;AVG|PROD|MAJ|MIN|MAX|MED&gt; * The combination rule to use * (default: AVG) * </pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('R', options); if (tmpStr.length() != 0) setCombinationRule(new SelectedTag(tmpStr, TAGS_RULES)); else setCombinationRule(new SelectedTag(AVERAGE_RULE, TAGS_RULES)); m_classifiersToLoad.clear(); while (true) { String loadString = Utils.getOption('P', options); if (loadString.length() == 0) { break; } m_classifiersToLoad.add(loadString); } super.setOptions(options); } /** * Returns an instance of a TechnicalInformation object, containing detailed * information about the technical background of this class, e.g., paper * reference or book this class is based on. * * @return the technical information about this class */ @Override public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.BOOK); result.setValue(Field.AUTHOR, "Ludmila I. 
Kuncheva"); result.setValue(Field.TITLE, "Combining Pattern Classifiers: Methods and Algorithms"); result.setValue(Field.YEAR, "2004"); result.setValue(Field.PUBLISHER, "John Wiley and Sons, Inc."); additional = result.add(Type.ARTICLE); additional.setValue(Field.AUTHOR, "J. Kittler and M. Hatef and Robert P.W. Duin and J. Matas"); additional.setValue(Field.YEAR, "1998"); additional.setValue(Field.TITLE, "On combining classifiers"); additional.setValue(Field.JOURNAL, "IEEE Transactions on Pattern Analysis and Machine Intelligence"); additional.setValue(Field.VOLUME, "20"); additional.setValue(Field.NUMBER, "3"); additional.setValue(Field.PAGES, "226-239"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); if (m_preBuiltClassifiers.size() == 0 && m_classifiersToLoad.size() > 0) { try { loadClassifiers(null); } catch (Exception e) { e.printStackTrace(); } } if (m_preBuiltClassifiers.size() > 0) { if (m_Classifiers.length == 0) { result = (Capabilities) m_preBuiltClassifiers.get(0).getCapabilities() .clone(); } for (int i = 1; i < m_preBuiltClassifiers.size(); i++) { result.and(m_preBuiltClassifiers.get(i).getCapabilities()); } for (Capability cap : Capability.values()) { result.enableDependency(cap); } } // class if ((m_CombinationRule == PRODUCT_RULE) || (m_CombinationRule == MAJORITY_VOTING_RULE)) { result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NOMINAL_CLASS); result.enableDependency(Capability.NOMINAL_CLASS); } else if (m_CombinationRule == MEDIAN_RULE) { result.disableAllClasses(); result.disableAllClassDependencies(); result.enable(Capability.NUMERIC_CLASS); result.enableDependency(Capability.NUMERIC_CLASS); } return result; } /** * Buildclassifier selects a classifier from the set of classifiers by * minimising error on the training data. 
* * @param data the training data to be used for generating the boosted * classifier. * @throws Exception if the classifier could not be built successfully */ @Override public void buildClassifier(Instances data) throws Exception { // remove instances with missing class Instances newData = new Instances(data); newData.deleteWithMissingClass(); m_structure = new Instances(newData, 0); m_Random = new Random(getSeed()); if (m_classifiersToLoad.size() > 0) { m_preBuiltClassifiers.clear(); loadClassifiers(data); int index = 0; if (m_Classifiers.length == 1 && m_Classifiers[0] instanceof weka.classifiers.rules.ZeroR) { // remove the single ZeroR m_Classifiers = new Classifier[0]; } } // can classifier handle the data? getCapabilities().testWithFail(data); for (int i = 0; i < m_Classifiers.length; i++) { getClassifier(i).buildClassifier(newData); } } /** * Load serialized models to include in the ensemble * * @param data training instances (used in a header compatibility check with * each of the loaded models) * * @throws Exception if there is a problem de-serializing a model */ private void loadClassifiers(Instances data) throws Exception { for (String path : m_classifiersToLoad) { if (Environment.containsEnvVariables(path)) { try { path = m_env.substitute(path); } catch (Exception ex) { } } File toLoad = new File(path); if (!toLoad.isFile()) { throw new Exception("\"" + path + "\" does not seem to be a valid file!"); } ObjectInputStream is = new ObjectInputStream(new BufferedInputStream( new FileInputStream(toLoad))); Object c = is.readObject(); if (!(c instanceof Classifier)) { throw new Exception("\"" + path + "\" does not contain a classifier!"); } Object header = null; header = is.readObject(); if (header instanceof Instances) { if (data != null && !data.equalHeaders((Instances) header)) { throw new Exception("\"" + path + "\" was trained with data that is " + "of a differnet structure than the incoming training data"); } } if (header == null) { 
System.out.println("[Vote] warning: no header instances for \"" + path + "\""); } addPreBuiltClassifier((Classifier) c); } } /** * Add a prebuilt classifier to the list for use in the ensemble * * @param c a prebuilt Classifier to add. */ public void addPreBuiltClassifier(Classifier c) { m_preBuiltClassifiers.add(c); } /** * Remove a prebuilt classifier from the list to use in the ensemble * * @param c the classifier to remove */ public void removePreBuiltClassifier(Classifier c) { m_preBuiltClassifiers.remove(c); } /** * Classifies the given test instance. * * @param instance the instance to be classified * @return the predicted most likely class for the instance or * Utils.missingValue() if no prediction is made * @throws Exception if an error occurred during the prediction */ @Override public double classifyInstance(Instance instance) throws Exception { double result; double[] dist; int index; switch (m_CombinationRule) { case AVERAGE_RULE: case PRODUCT_RULE: case MAJORITY_VOTING_RULE: case MIN_RULE: case MAX_RULE: dist = distributionForInstance(instance); if (instance.classAttribute().isNominal()) { index = Utils.maxIndex(dist); if (dist[index] == 0) result = Utils.missingValue(); else result = index; } else if (instance.classAttribute().isNumeric()) { result = dist[0]; } else { result = Utils.missingValue(); } break; case MEDIAN_RULE: result = classifyInstanceMedian(instance); break; default: throw new IllegalStateException("Unknown combination rule '" + m_CombinationRule + "'!"); } return result; } /** * Classifies the given test instance, returning the median from all * classifiers. 
* * @param instance the instance to be classified * @return the predicted most likely class for the instance or * Utils.missingValue() if no prediction is made * @throws Exception if an error occurred during the prediction */ protected double classifyInstanceMedian(Instance instance) throws Exception { double[] results = new double[m_Classifiers.length + m_preBuiltClassifiers.size()]; double result; for (int i = 0; i < m_Classifiers.length; i++) results[i] = m_Classifiers[i].classifyInstance(instance); for (int i = 0; i < m_preBuiltClassifiers.size(); i++) { results[i + m_Classifiers.length] = m_preBuiltClassifiers.get(i) .classifyInstance(instance); } if (results.length == 0) result = 0; else if (results.length == 1) result = results[0]; else result = Utils.kthSmallestValue(results, results.length / 2); return result; } /** * Classifies a given instance using the selected combination rule. * * @param instance the instance to be classified * @return the distribution * @throws Exception if instance could not be classified successfully */ @Override public double[] distributionForInstance(Instance instance) throws Exception { double[] result = new double[instance.numClasses()]; switch (m_CombinationRule) { case AVERAGE_RULE: result = distributionForInstanceAverage(instance); break; case PRODUCT_RULE: result = distributionForInstanceProduct(instance); break; case MAJORITY_VOTING_RULE: result = distributionForInstanceMajorityVoting(instance); break; case MIN_RULE: result = distributionForInstanceMin(instance); break; case MAX_RULE: result = distributionForInstanceMax(instance); break; case MEDIAN_RULE: result[0] = classifyInstance(instance); break; default: throw new IllegalStateException("Unknown combination rule '" + m_CombinationRule + "'!"); } if (!instance.classAttribute().isNumeric() && (Utils.sum(result) > 0)) Utils.normalize(result); return result; } /** * Classifies a given instance using the Average of Probabilities combination * rule. 
* * @param instance the instance to be classified * @return the distribution * @throws Exception if instance could not be classified successfully */ protected double[] distributionForInstanceAverage(Instance instance) throws Exception { double[] probs = (m_Classifiers.length > 0) ? getClassifier(0) .distributionForInstance(instance) : m_preBuiltClassifiers.get(0) .distributionForInstance(instance); probs = probs.clone(); for (int i = 1; i < m_Classifiers.length; i++) { double[] dist = getClassifier(i).distributionForInstance(instance); for (int j = 0; j < dist.length; j++) { probs[j] += dist[j]; } } int index = (m_Classifiers.length > 0) ? 0 : 1; for (int i = index; i < m_preBuiltClassifiers.size(); i++) { double[] dist = m_preBuiltClassifiers.get(i).distributionForInstance( instance); for (int j = 0; j < dist.length; j++) { probs[j] += dist[j]; } } for (int j = 0; j < probs.length; j++) { probs[j] /= (m_Classifiers.length + m_preBuiltClassifiers.size()); } return probs; } /** * Classifies a given instance using the Product of Probabilities combination * rule. * * @param instance the instance to be classified * @return the distribution * @throws Exception if instance could not be classified successfully */ protected double[] distributionForInstanceProduct(Instance instance) throws Exception { double[] probs = (m_Classifiers.length > 0) ? getClassifier(0) .distributionForInstance(instance) : m_preBuiltClassifiers.get(0) .distributionForInstance(instance); probs = probs.clone(); for (int i = 1; i < m_Classifiers.length; i++) { double[] dist = getClassifier(i).distributionForInstance(instance); for (int j = 0; j < dist.length; j++) { probs[j] *= dist[j]; } } int index = (m_Classifiers.length > 0) ? 
0 : 1; for (int i = index; i < m_preBuiltClassifiers.size(); i++) { double[] dist = m_preBuiltClassifiers.get(i).distributionForInstance( instance); for (int j = 0; j < dist.length; j++) { probs[j] *= dist[j]; } } return probs; } /** * Classifies a given instance using the Majority Voting combination rule. * * @param instance the instance to be classified * @return the distribution * @throws Exception if instance could not be classified successfully */ protected double[] distributionForInstanceMajorityVoting(Instance instance) throws Exception { double[] probs = new double[instance.classAttribute().numValues()]; double[] votes = new double[probs.length]; for (int i = 0; i < m_Classifiers.length; i++) { probs = getClassifier(i).distributionForInstance(instance); int maxIndex = 0; for (int j = 0; j < probs.length; j++) { if (probs[j] > probs[maxIndex]) maxIndex = j; } // Consider the cases when multiple classes happen to have the same // probability for (int j = 0; j < probs.length; j++) { if (probs[j] == probs[maxIndex]) votes[j]++; } } for (int i = 0; i < m_preBuiltClassifiers.size(); i++) { probs = m_preBuiltClassifiers.get(i).distributionForInstance(instance); int maxIndex = 0; for (int j = 0; j < probs.length; j++) { if (probs[j] > probs[maxIndex]) maxIndex = j; } // Consider the cases when multiple classes happen to have the same // probability for (int j = 0; j < probs.length; j++) { if (probs[j] == probs[maxIndex]) votes[j]++; } } int tmpMajorityIndex = 0; for (int k = 1; k < votes.length; k++) { if (votes[k] > votes[tmpMajorityIndex]) tmpMajorityIndex = k; } // Consider the cases when multiple classes receive the same amount of votes Vector<Integer> majorityIndexes = new Vector<Integer>(); for (int k = 0; k < votes.length; k++) { if (votes[k] == votes[tmpMajorityIndex]) majorityIndexes.add(k); } // Resolve the ties according to a uniform random distribution int majorityIndex = majorityIndexes.get(m_Random.nextInt(majorityIndexes .size())); // set probs to 0 
probs = new double[probs.length]; probs[majorityIndex] = 1; // the class that have been voted the most // receives 1 return probs; } /** * Classifies a given instance using the Maximum Probability combination rule. * * @param instance the instance to be classified * @return the distribution * @throws Exception if instance could not be classified successfully */ protected double[] distributionForInstanceMax(Instance instance) throws Exception { double[] max = (m_Classifiers.length > 0) ? getClassifier(0) .distributionForInstance(instance) : m_preBuiltClassifiers.get(0) .distributionForInstance(instance); max = max.clone(); for (int i = 1; i < m_Classifiers.length; i++) { double[] dist = getClassifier(i).distributionForInstance(instance); for (int j = 0; j < dist.length; j++) { if (max[j] < dist[j]) max[j] = dist[j]; } } int index = (m_Classifiers.length > 0) ? 0 : 1; for (int i = index; i < m_preBuiltClassifiers.size(); i++) { double[] dist = m_preBuiltClassifiers.get(i).distributionForInstance( instance); for (int j = 0; j < dist.length; j++) { if (max[j] < dist[j]) max[j] = dist[j]; } } return max; } /** * Classifies a given instance using the Minimum Probability combination rule. * * @param instance the instance to be classified * @return the distribution * @throws Exception if instance could not be classified successfully */ protected double[] distributionForInstanceMin(Instance instance) throws Exception { double[] min = (m_Classifiers.length > 0) ? getClassifier(0) .distributionForInstance(instance) : m_preBuiltClassifiers.get(0) .distributionForInstance(instance); min = min.clone(); for (int i = 1; i < m_Classifiers.length; i++) { double[] dist = getClassifier(i).distributionForInstance(instance); for (int j = 0; j < dist.length; j++) { if (dist[j] < min[j]) min[j] = dist[j]; } } int index = (m_Classifiers.length > 0) ? 
0 : 1; for (int i = index; i < m_preBuiltClassifiers.size(); i++) { double[] dist = m_preBuiltClassifiers.get(i).distributionForInstance( instance); for (int j = 0; j < dist.length; j++) { if (dist[j] < min[j]) min[j] = dist[j]; } } return min; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String combinationRuleTipText() { return "The combination rule used."; } /** * Gets the combination rule used * * @return the combination rule used */ public SelectedTag getCombinationRule() { return new SelectedTag(m_CombinationRule, TAGS_RULES); } /** * Sets the combination rule to use. Values other than * * @param newRule the combination rule method to use */ public void setCombinationRule(SelectedTag newRule) { if (newRule.getTags() == TAGS_RULES) m_CombinationRule = newRule.getSelectedTag().getID(); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String preBuiltClassifiersTipText() { return "The pre-built serialized classifiers to include. Multiple " + "serialized classifiers can be included alongside those " + "that are built from scratch when this classifier runs. 
" + "Note that it does not make sense to include pre-built " + "classifiers in a cross-validation since they are static " + "and their models do not change from fold to fold."; } /** * Set the paths to pre-built serialized classifiers to load and include in * the ensemble * * @param preBuilt an array of File paths to serialized models */ public void setPreBuiltClassifiers(File[] preBuilt) { m_classifiersToLoad.clear(); if (preBuilt != null && preBuilt.length > 0) { for (int i = 0; i < preBuilt.length; i++) { String path = preBuilt[i].toString(); m_classifiersToLoad.add(path); } } } /** * Get the paths to pre-built serialized classifiers to load and include in * the ensemble * * @return an array of File paths to serialized models */ public File[] getPreBuiltClassifiers() { File[] result = new File[m_classifiersToLoad.size()]; for (int i = 0; i < m_classifiersToLoad.size(); i++) { result[i] = new File(m_classifiersToLoad.get(i)); } return result; } /** * Output a representation of this classifier * * @return a string representation of the classifier */ @Override public String toString() { if (m_Classifiers == null) { return "Vote: No model built yet."; } String result = "Vote combines"; result += " the probability distributions of these base learners:\n"; for (int i = 0; i < m_Classifiers.length; i++) { result += '\t' + getClassifierSpec(i) + '\n'; } for (Classifier c : m_preBuiltClassifiers) { result += "\t" + c.getClass().getName() + Utils.joinOptions(((OptionHandler) c).getOptions()) + "\n"; } result += "using the '"; switch (m_CombinationRule) { case AVERAGE_RULE: result += "Average of Probabilities"; break; case PRODUCT_RULE: result += "Product of Probabilities"; break; case MAJORITY_VOTING_RULE: result += "Majority Voting"; break; case MIN_RULE: result += "Minimum Probability"; break; case MAX_RULE: result += "Maximum Probability"; break; case MEDIAN_RULE: result += "Median Probability"; break; default: throw new IllegalStateException("Unknown combination rule 
'" + m_CombinationRule + "'!"); } result += "' combination rule \n"; return result; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9785 $"); } /** * Set environment variable values to substitute in the paths of serialized * models to load * * @param env the environment variables to use */ @Override public void setEnvironment(Environment env) { m_env = env; } /** * Aggregate an object with this one * * @param toAggregate the object to aggregate * @return the result of aggregation * @throws Exception if the supplied object can't be aggregated for some * reason */ @Override public Classifier aggregate(Classifier toAggregate) throws Exception { if (m_structure == null && m_Classifiers.length == 1 && (m_Classifiers[0] instanceof weka.classifiers.rules.ZeroR)) { // remove the single untrained ZeroR setClassifiers(new Classifier[0]); } // Can't do any training data compatibility checks unfortunately addPreBuiltClassifier(toAggregate); return this; } /** * Call to complete the aggregation process. Allows implementers to do any * final processing based on how many objects were aggregated. * * @throws Exception if the aggregation can't be finalized for some reason */ @Override public void finalizeAggregation() throws Exception { // nothing to do } /** * Main method for testing this class. * * @param argv should contain the following arguments: -t training file [-T * test file] [-c class index] */ public static void main(String[] argv) { runClassifier(new Vote(), argv); } }
31,320
28.829524
81
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/nestedDichotomies/ClassBalancedND.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * ClassBalancedND.java
 * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers.meta.nestedDichotomies;

import weka.classifiers.Classifier;
import weka.classifiers.RandomizableSingleClassifierEnhancer;
import weka.classifiers.meta.FilteredClassifier;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Range;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.MakeIndicator;
import weka.filters.unsupervised.instance.RemoveWithValues;

import java.util.Hashtable;
import java.util.Random;

import weka.classifiers.AbstractClassifier;

/**
 <!-- globalinfo-start -->
 * A meta classifier for handling multi-class datasets with 2-class classifiers
 * by building a random class-balanced tree structure.<br/>
 * <br/>
 * For more info, check<br/>
 * <br/>
 * Lin Dong, Eibe Frank, Stefan Kramer: Ensembles of Balanced Nested Dichotomies
 * for Multi-class Problems. In: PKDD, 84-95, 2005.<br/>
 * <br/>
 * Eibe Frank, Stefan Kramer: Ensembles of nested dichotomies for multi-class
 * problems. In: Twenty-first International Conference on Machine Learning, 2004.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;inproceedings{Dong2005,
 *    author = {Lin Dong and Eibe Frank and Stefan Kramer},
 *    booktitle = {PKDD},
 *    pages = {84-95},
 *    publisher = {Springer},
 *    title = {Ensembles of Balanced Nested Dichotomies for Multi-class Problems},
 *    year = {2005}
 * }
 *
 * &#64;inproceedings{Frank2004,
 *    author = {Eibe Frank and Stefan Kramer},
 *    booktitle = {Twenty-first International Conference on Machine Learning},
 *    publisher = {ACM},
 *    title = {Ensembles of nested dichotomies for multi-class problems},
 *    year = {2004}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -S &lt;num&gt;
 *  Random number seed.
 *  (default 1)</pre>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 * <pre> -W
 *  Full name of base classifier.
 *  (default: weka.classifiers.trees.J48)</pre>
 *
 <!-- options-end -->
 *
 * @author Lin Dong
 * @author Eibe Frank
 */
public class ClassBalancedND
  extends RandomizableSingleClassifierEnhancer
  implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 5944063630650811903L;

  /** The filtered classifier in which the base classifier is wrapped. */
  protected FilteredClassifier m_FilteredClassifier;

  /** The hashtable for this node, mapping "firstInds|secondInds" keys to
      the classifier trained for that dichotomy (shared across an ensemble). */
  protected Hashtable m_classifiers;

  /** The first successor */
  protected ClassBalancedND m_FirstSuccessor = null;

  /** The second successor */
  protected ClassBalancedND m_SecondSuccessor = null;

  /** The classes that are grouped together at the current node */
  protected Range m_Range = null;

  /** Is Hashtable given from END? */
  protected boolean m_hashtablegiven = false;

  /**
   * Constructor. Uses J48 as the default base classifier.
   */
  public ClassBalancedND() {
    m_Classifier = new weka.classifiers.trees.J48();
  }

  /**
   * String describing default classifier.
   *
   * @return the default classifier classname
   */
  protected String defaultClassifierString() {
    return "weka.classifiers.trees.J48";
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;
    TechnicalInformation additional;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "Lin Dong and Eibe Frank and Stefan Kramer");
    result.setValue(Field.TITLE, "Ensembles of Balanced Nested Dichotomies for Multi-class Problems");
    result.setValue(Field.BOOKTITLE, "PKDD");
    result.setValue(Field.YEAR, "2005");
    result.setValue(Field.PAGES, "84-95");
    result.setValue(Field.PUBLISHER, "Springer");

    additional = result.add(Type.INPROCEEDINGS);
    additional.setValue(Field.AUTHOR, "Eibe Frank and Stefan Kramer");
    additional.setValue(Field.TITLE, "Ensembles of nested dichotomies for multi-class problems");
    additional.setValue(Field.BOOKTITLE, "Twenty-first International Conference on Machine Learning");
    additional.setValue(Field.YEAR, "2004");
    additional.setValue(Field.PUBLISHER, "ACM");

    return result;
  }

  /**
   * Set hashtable from END.
   *
   * @param table the hashtable to use
   */
  public void setHashtable(Hashtable table) {
    m_hashtablegiven = true;
    m_classifiers = table;
  }

  /**
   * Generates a classifier for the current node and proceeds recursively.
   * The class set is shuffled and split into two halves of (near) equal
   * cardinality; a two-class problem distinguishing the halves is built via
   * MakeIndicator, and trained classifiers are cached in the hashtable keyed
   * by the sorted index lists so identical dichotomies are reused.
   *
   * @param data contains the (multi-class) instances
   * @param classes contains the indices of the classes that are present
   * @param rand the random number generator to use
   * @param classifier the classifier to use
   * @param table the Hashtable to use
   * @throws Exception if anything goes wrong
   */
  private void generateClassifierForNode(Instances data, Range classes,
                                         Random rand, Classifier classifier,
                                         Hashtable table) throws Exception {

    // Get the indices of the classes present at this node
    int[] indices = classes.getSelection();

    // Fisher-Yates shuffle so that the dichotomy is chosen at random
    for (int j = indices.length - 1; j > 0; j--) {
      int randPos = rand.nextInt(j + 1);
      int temp = indices[randPos];
      indices[randPos] = indices[j];
      indices[j] = temp;
    }

    // Pick the classes for the current split: two halves of near-equal size
    int first = indices.length / 2;
    int second = indices.length - first;
    int[] firstInds = new int[first];
    int[] secondInds = new int[second];
    System.arraycopy(indices, 0, firstInds, 0, first);
    System.arraycopy(indices, first, secondInds, 0, second);

    // Sort the indices (important for hash key)!
    int[] sortedFirst = Utils.sort(firstInds);
    int[] sortedSecond = Utils.sort(secondInds);
    int[] firstCopy = new int[first];
    int[] secondCopy = new int[second];
    for (int i = 0; i < sortedFirst.length; i++) {
      firstCopy[i] = firstInds[sortedFirst[i]];
    }
    firstInds = firstCopy;
    for (int i = 0; i < sortedSecond.length; i++) {
      secondCopy[i] = secondInds[sortedSecond[i]];
    }
    secondInds = secondCopy;

    // Unify indices to improve hashing: the subset containing the smallest
    // class index always comes first in the key
    if (firstInds[0] > secondInds[0]) {
      int[] help = secondInds;
      secondInds = firstInds;
      firstInds = help;
      int help2 = second;
      second = first;
      first = help2;
    }

    m_Range = new Range(Range.indicesToRangeList(firstInds));
    m_Range.setUpper(data.numClasses() - 1);

    Range secondRange = new Range(Range.indicesToRangeList(secondInds));
    secondRange.setUpper(data.numClasses() - 1);

    // Change the class labels and build the classifier
    MakeIndicator filter = new MakeIndicator();
    filter.setAttributeIndex("" + (data.classIndex() + 1));
    filter.setValueIndices(m_Range.getRanges());
    filter.setNumeric(false);
    filter.setInputFormat(data);
    m_FilteredClassifier = new FilteredClassifier();
    if (data.numInstances() > 0) {
      m_FilteredClassifier.setClassifier(AbstractClassifier.makeCopies(classifier, 1)[0]);
    } else {
      // No training data left at this node: fall back to ZeroR
      m_FilteredClassifier.setClassifier(new weka.classifiers.rules.ZeroR());
    }
    m_FilteredClassifier.setFilter(filter);

    // Save reference to hash table at current node
    m_classifiers = table;

    String key = getString(firstInds) + "|" + getString(secondInds);
    if (!m_classifiers.containsKey(key)) {
      m_FilteredClassifier.buildClassifier(data);
      m_classifiers.put(key, m_FilteredClassifier);
    } else {
      // Reuse the classifier trained for this dichotomy elsewhere in the ensemble
      m_FilteredClassifier = (FilteredClassifier) m_classifiers.get(key);
    }

    // Create two successors if necessary
    m_FirstSuccessor = new ClassBalancedND();
    if (first == 1) {
      m_FirstSuccessor.m_Range = m_Range;
    } else {
      RemoveWithValues rwv = new RemoveWithValues();
      rwv.setInvertSelection(true);
      rwv.setNominalIndices(m_Range.getRanges());
      rwv.setAttributeIndex("" + (data.classIndex() + 1));
      rwv.setInputFormat(data);
      Instances firstSubset = Filter.useFilter(data, rwv);
      m_FirstSuccessor.generateClassifierForNode(firstSubset, m_Range,
                                                 rand, classifier, m_classifiers);
    }
    m_SecondSuccessor = new ClassBalancedND();
    if (second == 1) {
      m_SecondSuccessor.m_Range = secondRange;
    } else {
      RemoveWithValues rwv = new RemoveWithValues();
      rwv.setInvertSelection(true);
      rwv.setNominalIndices(secondRange.getRanges());
      rwv.setAttributeIndex("" + (data.classIndex() + 1));
      rwv.setInputFormat(data);
      Instances secondSubset = Filter.useFilter(data, rwv);
      // NOTE(fix): the original re-assigned m_SecondSuccessor here a second
      // time, discarding the instance created just above; one instance suffices.
      m_SecondSuccessor.generateClassifierForNode(secondSubset, secondRange,
                                                  rand, classifier, m_classifiers);
    }
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();

    // class
    result.disableAllClasses();
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    // instances
    result.setMinimumNumberInstances(1);

    return result;
  }

  /**
   * Builds tree recursively.
   *
   * @param data contains the (multi-class) instances
   * @throws Exception if the building fails
   */
  public void buildClassifier(Instances data) throws Exception {

    // can classifier handle the data?
    getCapabilities().testWithFail(data);

    // remove instances with missing class
    data = new Instances(data);
    data.deleteWithMissingClass();

    Random random = data.getRandomNumberGenerator(m_Seed);

    if (!m_hashtablegiven) {
      m_classifiers = new Hashtable();
    }

    // Check which classes are present in the
    // data and construct initial list of classes
    boolean[] present = new boolean[data.numClasses()];
    for (int i = 0; i < data.numInstances(); i++) {
      present[(int) data.instance(i).classValue()] = true;
    }
    StringBuffer list = new StringBuffer();
    for (int i = 0; i < present.length; i++) {
      if (present[i]) {
        if (list.length() > 0) {
          list.append(",");
        }
        // Range uses 1-based indices, hence the + 1
        list.append(i + 1);
      }
    }

    Range newRange = new Range(list.toString());
    newRange.setUpper(data.numClasses() - 1);

    generateClassifierForNode(data, newRange, random, m_Classifier, m_classifiers);
  }

  /**
   * Predicts the class distribution for a given instance
   *
   * @param inst the (multi-class) instance to be classified
   * @return the class distribution
   * @throws Exception if computing fails
   */
  public double[] distributionForInstance(Instance inst) throws Exception {

    double[] newDist = new double[inst.numClasses()];
    if (m_FirstSuccessor == null) {
      // Leaf node: all probability mass on the classes in this node's range
      for (int i = 0; i < inst.numClasses(); i++) {
        if (m_Range.isInRange(i)) {
          newDist[i] = 1;
        }
      }
      return newDist;
    } else {
      double[] firstDist = m_FirstSuccessor.distributionForInstance(inst);
      double[] secondDist = m_SecondSuccessor.distributionForInstance(inst);
      double[] dist = m_FilteredClassifier.distributionForInstance(inst);
      for (int i = 0; i < inst.numClasses(); i++) {
        // A class can only appear on one side of the dichotomy
        if ((firstDist[i] > 0) && (secondDist[i] > 0)) {
          System.err.println("Panik!!");
        }
        if (m_Range.isInRange(i)) {
          newDist[i] = dist[1] * firstDist[i];
        } else {
          newDist[i] = dist[0] * secondDist[i];
        }
      }
      return newDist;
    }
  }

  /**
   * Returns the list of indices as a string.
   *
   * @param indices the indices to return as string
   * @return the indices as string
   */
  public String getString(int[] indices) {

    StringBuffer string = new StringBuffer();
    for (int i = 0; i < indices.length; i++) {
      if (i > 0) {
        string.append(',');
      }
      string.append(indices[i]);
    }
    return string.toString();
  }

  /**
   * @return a description of the classifier suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {

    return "A meta classifier for handling multi-class datasets with 2-class "
      + "classifiers by building a random class-balanced tree structure.\n\n"
      + "For more info, check\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Outputs the classifier as a string.
   *
   * @return a string representation of the classifier
   */
  public String toString() {

    if (m_classifiers == null) {
      return "ClassBalancedND: No model built yet.";
    }
    StringBuffer text = new StringBuffer();
    text.append("ClassBalancedND");
    treeToString(text, 0);

    return text.toString();
  }

  /**
   * Returns string description of the tree.
   *
   * @param text the buffer to add the node to
   * @param nn the node number
   * @return the next node number
   */
  private int treeToString(StringBuffer text, int nn) {

    nn++;
    text.append("\n\nNode number: " + nn + "\n\n");
    if (m_FilteredClassifier != null) {
      text.append(m_FilteredClassifier);
    } else {
      text.append("null");
    }
    if (m_FirstSuccessor != null) {
      nn = m_FirstSuccessor.treeToString(text, nn);
      nn = m_SecondSuccessor.treeToString(text, nn);
    }
    return nn;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 1.8 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param argv the options
   */
  public static void main(String[] argv) {
    runClassifier(new ClassBalancedND(), argv);
  }
}
16,333
29.877127
156
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/nestedDichotomies/DataNearBalancedND.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * DataNearBalancedND.java
 * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers.meta.nestedDichotomies;

import weka.classifiers.Classifier;
import weka.classifiers.RandomizableSingleClassifierEnhancer;
import weka.classifiers.meta.FilteredClassifier;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Range;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.MakeIndicator;
import weka.filters.unsupervised.instance.RemoveWithValues;

import java.util.Hashtable;
import java.util.Random;

import weka.classifiers.AbstractClassifier;

/**
 <!-- globalinfo-start -->
 * A meta classifier for handling multi-class datasets with 2-class classifiers
 * by building a random data-balanced tree structure.<br/>
 * <br/>
 * For more info, check<br/>
 * <br/>
 * Lin Dong, Eibe Frank, Stefan Kramer: Ensembles of Balanced Nested Dichotomies
 * for Multi-class Problems. In: PKDD, 84-95, 2005.<br/>
 * <br/>
 * Eibe Frank, Stefan Kramer: Ensembles of nested dichotomies for multi-class
 * problems. In: Twenty-first International Conference on Machine Learning, 2004.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;inproceedings{Dong2005,
 *    author = {Lin Dong and Eibe Frank and Stefan Kramer},
 *    booktitle = {PKDD},
 *    pages = {84-95},
 *    publisher = {Springer},
 *    title = {Ensembles of Balanced Nested Dichotomies for Multi-class Problems},
 *    year = {2005}
 * }
 *
 * &#64;inproceedings{Frank2004,
 *    author = {Eibe Frank and Stefan Kramer},
 *    booktitle = {Twenty-first International Conference on Machine Learning},
 *    publisher = {ACM},
 *    title = {Ensembles of nested dichotomies for multi-class problems},
 *    year = {2004}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -S &lt;num&gt;
 *  Random number seed.
 *  (default 1)</pre>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 * <pre> -W
 *  Full name of base classifier.
 *  (default: weka.classifiers.trees.J48)</pre>
 *
 <!-- options-end -->
 *
 * @author Lin Dong
 * @author Eibe Frank
 */
public class DataNearBalancedND
  extends RandomizableSingleClassifierEnhancer
  implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 5117477294209496368L;

  /** The filtered classifier in which the base classifier is wrapped. */
  protected FilteredClassifier m_FilteredClassifier;

  /** The hashtable for this node, mapping "firstInds|secondInds" keys to
      the classifier trained for that dichotomy (shared across an ensemble). */
  protected Hashtable m_classifiers = new Hashtable();

  /** The first successor */
  protected DataNearBalancedND m_FirstSuccessor = null;

  /** The second successor */
  protected DataNearBalancedND m_SecondSuccessor = null;

  /** The classes that are grouped together at the current node */
  protected Range m_Range = null;

  /** Is Hashtable given from END? */
  protected boolean m_hashtablegiven = false;

  /**
   * Constructor. Uses J48 as the default base classifier.
   */
  public DataNearBalancedND() {
    m_Classifier = new weka.classifiers.trees.J48();
  }

  /**
   * String describing default classifier.
   *
   * @return the default classifier classname
   */
  protected String defaultClassifierString() {
    return "weka.classifiers.trees.J48";
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;
    TechnicalInformation additional;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "Lin Dong and Eibe Frank and Stefan Kramer");
    result.setValue(Field.TITLE, "Ensembles of Balanced Nested Dichotomies for Multi-class Problems");
    result.setValue(Field.BOOKTITLE, "PKDD");
    result.setValue(Field.YEAR, "2005");
    result.setValue(Field.PAGES, "84-95");
    result.setValue(Field.PUBLISHER, "Springer");

    additional = result.add(Type.INPROCEEDINGS);
    additional.setValue(Field.AUTHOR, "Eibe Frank and Stefan Kramer");
    additional.setValue(Field.TITLE, "Ensembles of nested dichotomies for multi-class problems");
    additional.setValue(Field.BOOKTITLE, "Twenty-first International Conference on Machine Learning");
    additional.setValue(Field.YEAR, "2004");
    additional.setValue(Field.PUBLISHER, "ACM");

    return result;
  }

  /**
   * Set hashtable from END.
   *
   * @param table the hashtable to use
   */
  public void setHashtable(Hashtable table) {
    m_hashtablegiven = true;
    m_classifiers = table;
  }

  /**
   * Generates a classifier for the current node and proceeds recursively.
   * The class set is shuffled and split so that the total instance WEIGHT on
   * each side is as close to equal as possible (data-balanced, in contrast to
   * ClassBalancedND which balances class counts). Trained classifiers are
   * cached in the hashtable keyed by the sorted index lists so identical
   * dichotomies are reused.
   *
   * @param data contains the (multi-class) instances
   * @param classes contains the indices of the classes that are present
   * @param rand the random number generator to use
   * @param classifier the classifier to use
   * @param table the Hashtable to use
   * @param instsNumAllClasses the total instance weight per class
   * @throws Exception if anything goes wrong
   */
  private void generateClassifierForNode(Instances data, Range classes,
                                         Random rand, Classifier classifier,
                                         Hashtable table,
                                         double[] instsNumAllClasses) throws Exception {

    // Get the indices of the classes present at this node
    int[] indices = classes.getSelection();

    // Fisher-Yates shuffle so that the dichotomy is chosen at random
    for (int j = indices.length - 1; j > 0; j--) {
      int randPos = rand.nextInt(j + 1);
      int temp = indices[randPos];
      indices[randPos] = indices[j];
      indices[j] = temp;
    }

    // Pick the classes for the current split based on instance weight
    double total = 0;
    for (int j = 0; j < indices.length; j++) {
      total += instsNumAllClasses[indices[j]];
    }
    double halfOfTotal = total / 2;

    // Grow the left and right subsets from both ends of the shuffled list
    // until one of them reaches half the total weight
    double sumLeft = 0, sumRight = 0;
    int i = 0, j = indices.length - 1;
    do {
      if (i == j) {
        // Single class left in the middle: assign it to a random side
        if (rand.nextBoolean()) {
          sumLeft += instsNumAllClasses[indices[i++]];
        } else {
          sumRight += instsNumAllClasses[indices[j--]];
        }
      } else {
        sumLeft += instsNumAllClasses[indices[i++]];
        sumRight += instsNumAllClasses[indices[j--]];
      }
    } while (Utils.sm(sumLeft, halfOfTotal) && Utils.sm(sumRight, halfOfTotal));

    int first = 0, second = 0;
    if (!Utils.sm(sumLeft, halfOfTotal)) {
      first = i;
    } else {
      first = j + 1;
    }
    second = indices.length - first;

    int[] firstInds = new int[first];
    int[] secondInds = new int[second];
    System.arraycopy(indices, 0, firstInds, 0, first);
    System.arraycopy(indices, first, secondInds, 0, second);

    // Sort the indices (important for hash key)!
    int[] sortedFirst = Utils.sort(firstInds);
    int[] sortedSecond = Utils.sort(secondInds);
    int[] firstCopy = new int[first];
    int[] secondCopy = new int[second];
    for (int k = 0; k < sortedFirst.length; k++) {
      firstCopy[k] = firstInds[sortedFirst[k]];
    }
    firstInds = firstCopy;
    for (int k = 0; k < sortedSecond.length; k++) {
      secondCopy[k] = secondInds[sortedSecond[k]];
    }
    secondInds = secondCopy;

    // Unify indices to improve hashing: the subset containing the smallest
    // class index always comes first in the key
    if (firstInds[0] > secondInds[0]) {
      int[] help = secondInds;
      secondInds = firstInds;
      firstInds = help;
      int help2 = second;
      second = first;
      first = help2;
    }

    m_Range = new Range(Range.indicesToRangeList(firstInds));
    m_Range.setUpper(data.numClasses() - 1);

    Range secondRange = new Range(Range.indicesToRangeList(secondInds));
    secondRange.setUpper(data.numClasses() - 1);

    // Change the class labels and build the classifier
    MakeIndicator filter = new MakeIndicator();
    filter.setAttributeIndex("" + (data.classIndex() + 1));
    filter.setValueIndices(m_Range.getRanges());
    filter.setNumeric(false);
    filter.setInputFormat(data);
    m_FilteredClassifier = new FilteredClassifier();
    if (data.numInstances() > 0) {
      m_FilteredClassifier.setClassifier(AbstractClassifier.makeCopies(classifier, 1)[0]);
    } else {
      // No training data left at this node: fall back to ZeroR
      m_FilteredClassifier.setClassifier(new weka.classifiers.rules.ZeroR());
    }
    m_FilteredClassifier.setFilter(filter);

    // Save reference to hash table at current node
    m_classifiers = table;

    String key = getString(firstInds) + "|" + getString(secondInds);
    if (!m_classifiers.containsKey(key)) {
      m_FilteredClassifier.buildClassifier(data);
      m_classifiers.put(key, m_FilteredClassifier);
    } else {
      // Reuse the classifier trained for this dichotomy elsewhere in the ensemble
      m_FilteredClassifier = (FilteredClassifier) m_classifiers.get(key);
    }

    // Create two successors if necessary
    m_FirstSuccessor = new DataNearBalancedND();
    if (first == 1) {
      m_FirstSuccessor.m_Range = m_Range;
    } else {
      RemoveWithValues rwv = new RemoveWithValues();
      rwv.setInvertSelection(true);
      rwv.setNominalIndices(m_Range.getRanges());
      rwv.setAttributeIndex("" + (data.classIndex() + 1));
      rwv.setInputFormat(data);
      Instances firstSubset = Filter.useFilter(data, rwv);
      m_FirstSuccessor.generateClassifierForNode(firstSubset, m_Range,
                                                 rand, classifier, m_classifiers,
                                                 instsNumAllClasses);
    }
    m_SecondSuccessor = new DataNearBalancedND();
    if (second == 1) {
      m_SecondSuccessor.m_Range = secondRange;
    } else {
      RemoveWithValues rwv = new RemoveWithValues();
      rwv.setInvertSelection(true);
      rwv.setNominalIndices(secondRange.getRanges());
      rwv.setAttributeIndex("" + (data.classIndex() + 1));
      rwv.setInputFormat(data);
      Instances secondSubset = Filter.useFilter(data, rwv);
      // NOTE(fix): the original re-assigned m_SecondSuccessor here a second
      // time, discarding the instance created just above; one instance suffices.
      m_SecondSuccessor.generateClassifierForNode(secondSubset, secondRange,
                                                  rand, classifier, m_classifiers,
                                                  instsNumAllClasses);
    }
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();

    // class
    result.disableAllClasses();
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    // instances
    result.setMinimumNumberInstances(1);

    return result;
  }

  /**
   * Builds tree recursively.
   *
   * @param data contains the (multi-class) instances
   * @throws Exception if the building fails
   */
  public void buildClassifier(Instances data) throws Exception {

    // can classifier handle the data?
    getCapabilities().testWithFail(data);

    // remove instances with missing class
    data = new Instances(data);
    data.deleteWithMissingClass();

    Random random = data.getRandomNumberGenerator(m_Seed);

    if (!m_hashtablegiven) {
      m_classifiers = new Hashtable();
    }

    // Check which classes are present in the
    // data and construct initial list of classes
    boolean[] present = new boolean[data.numClasses()];
    for (int i = 0; i < data.numInstances(); i++) {
      present[(int) data.instance(i).classValue()] = true;
    }
    StringBuffer list = new StringBuffer();
    for (int i = 0; i < present.length; i++) {
      if (present[i]) {
        if (list.length() > 0) {
          list.append(",");
        }
        // Range uses 1-based indices, hence the + 1
        list.append(i + 1);
      }
    }

    // Determine the total instance weight in each class
    double[] instsNum = new double[data.numClasses()];
    for (int i = 0; i < data.numInstances(); i++) {
      instsNum[(int) data.instance(i).classValue()] += data.instance(i).weight();
    }

    Range newRange = new Range(list.toString());
    newRange.setUpper(data.numClasses() - 1);

    generateClassifierForNode(data, newRange, random, m_Classifier,
                              m_classifiers, instsNum);
  }

  /**
   * Predicts the class distribution for a given instance
   *
   * @param inst the (multi-class) instance to be classified
   * @return the class distribution
   * @throws Exception if computing fails
   */
  public double[] distributionForInstance(Instance inst) throws Exception {

    double[] newDist = new double[inst.numClasses()];
    if (m_FirstSuccessor == null) {
      // Leaf node: all probability mass on the classes in this node's range
      for (int i = 0; i < inst.numClasses(); i++) {
        if (m_Range.isInRange(i)) {
          newDist[i] = 1;
        }
      }
      return newDist;
    } else {
      double[] firstDist = m_FirstSuccessor.distributionForInstance(inst);
      double[] secondDist = m_SecondSuccessor.distributionForInstance(inst);
      double[] dist = m_FilteredClassifier.distributionForInstance(inst);
      for (int i = 0; i < inst.numClasses(); i++) {
        // A class can only appear on one side of the dichotomy
        if ((firstDist[i] > 0) && (secondDist[i] > 0)) {
          System.err.println("Panik!!");
        }
        if (m_Range.isInRange(i)) {
          newDist[i] = dist[1] * firstDist[i];
        } else {
          newDist[i] = dist[0] * secondDist[i];
        }
      }
      // Sanity check: the combined distribution must sum to one
      if (!Utils.eq(Utils.sum(newDist), 1)) {
        System.err.println(Utils.sum(newDist));
        for (int j = 0; j < dist.length; j++) {
          System.err.print(dist[j] + " ");
        }
        System.err.println();
        for (int j = 0; j < newDist.length; j++) {
          System.err.print(newDist[j] + " ");
        }
        System.err.println();
        System.err.println(inst);
        System.err.println(m_FilteredClassifier);
        //System.err.println(m_Data);
        System.err.println("bad");
      }
      return newDist;
    }
  }

  /**
   * Returns the list of indices as a string.
   *
   * @param indices the indices to return as string
   * @return the indices as string
   */
  public String getString(int[] indices) {

    StringBuffer string = new StringBuffer();
    for (int i = 0; i < indices.length; i++) {
      if (i > 0) {
        string.append(',');
      }
      string.append(indices[i]);
    }
    return string.toString();
  }

  /**
   * @return a description of the classifier suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {

    return "A meta classifier for handling multi-class datasets with 2-class "
      + "classifiers by building a random data-balanced tree structure.\n\n"
      + "For more info, check\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Outputs the classifier as a string.
   *
   * @return a string representation of the classifier
   */
  public String toString() {

    if (m_classifiers == null) {
      return "DataNearBalancedND: No model built yet.";
    }
    StringBuffer text = new StringBuffer();
    text.append("DataNearBalancedND");
    treeToString(text, 0);

    return text.toString();
  }

  /**
   * Returns string description of the tree.
   *
   * @param text the buffer to add the node to
   * @param nn the node number
   * @return the next node number
   */
  private int treeToString(StringBuffer text, int nn) {

    nn++;
    text.append("\n\nNode number: " + nn + "\n\n");
    if (m_FilteredClassifier != null) {
      text.append(m_FilteredClassifier);
    } else {
      text.append("null");
    }
    if (m_FirstSuccessor != null) {
      nn = m_FirstSuccessor.treeToString(text, nn);
      nn = m_SecondSuccessor.treeToString(text, nn);
    }
    return nn;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 1.8 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param argv the options
   */
  public static void main(String[] argv) {
    runClassifier(new DataNearBalancedND(), argv);
  }
}
18,271
30.341338
156
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/meta/nestedDichotomies/ND.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * ND.java
 * Copyright (C) 2003-2005 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.meta.nestedDichotomies;

import weka.classifiers.Classifier;
import weka.classifiers.RandomizableSingleClassifierEnhancer;
import weka.classifiers.meta.FilteredClassifier;
import weka.classifiers.rules.ZeroR;
import weka.core.Capabilities;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.MakeIndicator;
import weka.filters.unsupervised.instance.RemoveWithValues;

import java.io.Serializable;
import java.util.Hashtable;
import java.util.Random;

import weka.classifiers.AbstractClassifier;

/**
 <!-- globalinfo-start -->
 * A meta classifier for handling multi-class datasets with 2-class classifiers by building a random tree structure.<br/>
 * <br/>
 * For more info, check<br/>
 * <br/>
 * Lin Dong, Eibe Frank, Stefan Kramer: Ensembles of Balanced Nested Dichotomies for Multi-class Problems. In: PKDD, 84-95, 2005.<br/>
 * <br/>
 * Eibe Frank, Stefan Kramer: Ensembles of nested dichotomies for multi-class problems. In: Twenty-first International Conference on Machine Learning, 2004.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;inproceedings{Dong2005,
 *    author = {Lin Dong and Eibe Frank and Stefan Kramer},
 *    booktitle = {PKDD},
 *    pages = {84-95},
 *    publisher = {Springer},
 *    title = {Ensembles of Balanced Nested Dichotomies for Multi-class Problems},
 *    year = {2005}
 * }
 *
 * &#64;inproceedings{Frank2004,
 *    author = {Eibe Frank and Stefan Kramer},
 *    booktitle = {Twenty-first International Conference on Machine Learning},
 *    publisher = {ACM},
 *    title = {Ensembles of nested dichotomies for multi-class problems},
 *    year = {2004}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -S &lt;num&gt;
 *  Random number seed.
 *  (default 1)</pre>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 * <pre> -W
 *  Full name of base classifier.
 *  (default: weka.classifiers.trees.J48)</pre>
 *
 * <pre>
 * Options specific to classifier weka.classifiers.trees.J48:
 * </pre>
 *
 * <pre> -U
 *  Use unpruned tree.</pre>
 *
 * <pre> -C &lt;pruning confidence&gt;
 *  Set confidence threshold for pruning.
 *  (default 0.25)</pre>
 *
 * <pre> -M &lt;minimum number of instances&gt;
 *  Set minimum number of instances per leaf.
 *  (default 2)</pre>
 *
 * <pre> -R
 *  Use reduced error pruning.</pre>
 *
 * <pre> -N &lt;number of folds&gt;
 *  Set number of folds for reduced error
 *  pruning. One fold is used as pruning set.
 *  (default 3)</pre>
 *
 * <pre> -B
 *  Use binary splits only.</pre>
 *
 * <pre> -S
 *  Don't perform subtree raising.</pre>
 *
 * <pre> -L
 *  Do not clean up after the tree has been built.</pre>
 *
 * <pre> -A
 *  Laplace smoothing for predicted probabilities.</pre>
 *
 * <pre> -Q &lt;seed&gt;
 *  Seed for random data shuffling (default 1).</pre>
 *
 <!-- options-end -->
 *
 * @author Eibe Frank
 * @author Lin Dong
 */
public class ND
  extends RandomizableSingleClassifierEnhancer
  implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = -6355893369855683820L;

  /**
   * A node of the nested-dichotomy class hierarchy.
   *
   * Each node stores the set of class indices handled by the subtree rooted
   * at it; an inner node splits that set between its two children, and a
   * leaf holds exactly one class index.
   */
  protected class NDTree
    implements Serializable, RevisionHandler {

    /** for serialization */
    private static final long serialVersionUID = 4284655952754474880L;

    /** The indices associated with this node.
     *  Kept sorted ascending; the last element is always the sentinel
     *  Integer.MAX_VALUE (see constructor), so iteration over the real
     *  indices runs to size() - 1. */
    protected FastVector m_indices = null;

    /** The parent */
    protected NDTree m_parent = null;

    /** The left successor */
    protected NDTree m_left = null;

    /** The right successor */
    protected NDTree m_right = null;

    /**
     * Constructor.
     *
     * Seeds the index list with the sentinel Integer.MAX_VALUE so that
     * insertClassIndexAtNode()'s scan loop always terminates without an
     * explicit bounds check.
     */
    protected NDTree() {

      m_indices = new FastVector(1);
      m_indices.addElement(new Integer(Integer.MAX_VALUE));
    }

    /**
     * Locates the node with the given index (depth-first traversal).
     *
     * currentIndex is a one-element array used as a mutable counter of
     * visited nodes, incremented once per node as the traversal proceeds.
     */
    protected NDTree locateNode(int nodeIndex, int[] currentIndex) {

      if (nodeIndex == currentIndex[0]) {
        return this;
      } else if (m_left == null) {
        // leaf node that is not the one searched for
        return null;
      } else {
        currentIndex[0]++;
        NDTree leftresult = m_left.locateNode(nodeIndex, currentIndex);
        if (leftresult != null) {
          return leftresult;
        } else {
          currentIndex[0]++;
          return m_right.locateNode(nodeIndex, currentIndex);
        }
      }
    }

    /**
     * Inserts a class index into the tree.
     *
     * The node's existing subtree (if any) is pushed down under a new right
     * child, a new leaf for classIndex becomes the left child, and the index
     * is then propagated upward so every ancestor's index set contains it.
     *
     * @param classIndex the class index to insert
     */
    protected void insertClassIndex(int classIndex) {

      // Create new nodes
      NDTree right = new NDTree();
      if (m_left != null) {
        // re-parent the current children under the new right node
        m_right.m_parent = right;
        m_left.m_parent = right;
        right.m_right = m_right;
        right.m_left = m_left;
      }
      m_right = right;
      // right child inherits this node's current index set
      m_right.m_indices = (FastVector)m_indices.copy();
      m_right.m_parent = this;
      m_left = new NDTree();
      m_left.insertClassIndexAtNode(classIndex);
      m_left.m_parent = this;

      // Propagate class Index
      propagateClassIndex(classIndex);
    }

    /**
     * Propagates class index to the root.
     *
     * @param classIndex the index to propagate to the root
     */
    protected void propagateClassIndex(int classIndex) {

      insertClassIndexAtNode(classIndex);
      if (m_parent != null) {
        m_parent.propagateClassIndex(classIndex);
      }
    }

    /**
     * Inserts the class index at a given node, keeping the list sorted.
     * Relies on the Integer.MAX_VALUE sentinel to stop the scan.
     *
     * @param classIndex the classIndex to insert
     */
    protected void insertClassIndexAtNode(int classIndex) {

      int i = 0;
      while (classIndex > ((Integer)m_indices.elementAt(i)).intValue()) {
        i++;
      }
      m_indices.insertElementAt(new Integer(classIndex), i);
    }

    /**
     * Gets the indices in an array of ints (the trailing sentinel is
     * excluded).
     *
     * @return the indices
     */
    protected int[] getIndices() {

      int[] ints = new int[m_indices.size() - 1];
      for (int i = 0; i < m_indices.size() - 1; i++) {
        ints[i] = ((Integer)m_indices.elementAt(i)).intValue();
      }
      return ints;
    }

    /**
     * Checks whether an index is in the array (sentinel excluded).
     *
     * @param index the index to check
     * @return true of the index is in the array
     */
    protected boolean contains(int index) {

      for (int i = 0; i < m_indices.size() - 1; i++) {
        if (index == ((Integer)m_indices.elementAt(i)).intValue()) {
          return true;
        }
      }
      return false;
    }

    /**
     * Returns the list of indices as a string of comma-separated 1-based
     * values (used both as hashtable key material and as filter ranges).
     *
     * @return the indices as string
     */
    protected String getString() {

      StringBuffer string = new StringBuffer();
      for (int i = 0; i < m_indices.size() - 1; i++) {
        if (i > 0) {
          string.append(',');
        }
        string.append(((Integer)m_indices.elementAt(i)).intValue() + 1);
      }
      return string.toString();
    }

    /**
     * Unifies tree for improve hashing: swaps children so the child with
     * the smaller leading class index is always on the left, making
     * structurally-equal dichotomies produce identical keys.
     */
    protected void unifyTree() {

      if (m_left != null) {
        if (((Integer)m_left.m_indices.elementAt(0)).intValue() >
            ((Integer)m_right.m_indices.elementAt(0)).intValue()) {
          NDTree temp = m_left;
          m_left = m_right;
          m_right = temp;
        }
        m_left.unifyTree();
        m_right.unifyTree();
      }
    }

    /**
     * Returns a description of the tree rooted at this node.
     *
     * @param text the buffer to add the node to
     * @param id the node id (one-element array used as a mutable counter)
     * @param level the level of the tree
     */
    protected void toString(StringBuffer text, int[] id, int level) {

      for (int i = 0; i < level; i++) {
        text.append("   | ");
      }
      text.append(id[0] + ": " + getString() + "\n");
      if (m_left != null) {
        id[0]++;
        m_left.toString(text, id, level + 1);
        id[0]++;
        m_right.toString(text, id, level + 1);
      }
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 1.9 $");
    }
  }

  /** The tree of classes */
  protected NDTree m_ndtree = null;

  /** The hashtable containing all the classifiers, keyed by
   *  "leftIndices|rightIndices" strings so identical dichotomies are
   *  built only once */
  protected Hashtable m_classifiers = null;

  /** Is Hashtable given from END? (if so, classifiers may be shared
   *  across ensemble members) */
  protected boolean m_hashtablegiven = false;

  /**
   * Constructor.
   */
  public ND() {

    m_Classifier = new weka.classifiers.trees.J48();
  }

  /**
   * String describing default classifier.
   *
   * @return the default classifier classname
   */
  protected String defaultClassifierString() {

    return "weka.classifiers.trees.J48";
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation 	result;
    TechnicalInformation 	additional;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "Lin Dong and Eibe Frank and Stefan Kramer");
    result.setValue(Field.TITLE, "Ensembles of Balanced Nested Dichotomies for Multi-class Problems");
    result.setValue(Field.BOOKTITLE, "PKDD");
    result.setValue(Field.YEAR, "2005");
    result.setValue(Field.PAGES, "84-95");
    result.setValue(Field.PUBLISHER, "Springer");

    additional = result.add(Type.INPROCEEDINGS);
    additional.setValue(Field.AUTHOR, "Eibe Frank and Stefan Kramer");
    additional.setValue(Field.TITLE, "Ensembles of nested dichotomies for multi-class problems");
    additional.setValue(Field.BOOKTITLE, "Twenty-first International Conference on Machine Learning");
    additional.setValue(Field.YEAR, "2004");
    additional.setValue(Field.PUBLISHER, "ACM");

    return result;
  }

  /**
   * Set hashtable from END.
   *
   * @param table the hashtable to use
   */
  public void setHashtable(Hashtable table) {

    m_hashtablegiven = true;
    m_classifiers = table;
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();

    // class
    result.disableAllClasses();
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    // instances
    result.setMinimumNumberInstances(1);

    return result;
  }

  /**
   * Builds the classifier: generates a random nested-dichotomy tree over
   * the class values, then trains one binary base classifier per inner node.
   *
   * @param data the data to train the classifier with
   * @throws Exception if anything goes wrong
   */
  public void buildClassifier(Instances data) throws Exception {

    // can classifier handle the data?
    getCapabilities().testWithFail(data);

    // remove instances with missing class
    data = new Instances(data);
    data.deleteWithMissingClass();

    Random random = data.getRandomNumberGenerator(m_Seed);

    if (!m_hashtablegiven) {
      m_classifiers = new Hashtable();
    }

    // Generate random class hierarchy
    int[] indices = new int[data.numClasses()];
    for (int i = 0; i < indices.length; i++) {
      indices[i] = i;
    }

    // Randomize list of class indices (Fisher-Yates shuffle)
    for (int i = indices.length - 1; i > 0; i--) {
      int help = indices[i];
      int index = random.nextInt(i + 1);
      indices[i] = indices[index];
      indices[index] = help;
    }

    // Insert random class index at randomly chosen node
    // (a binary tree with i leaves has 2*i - 1 nodes, hence the bound)
    m_ndtree = new NDTree();
    m_ndtree.insertClassIndexAtNode(indices[0]);
    for (int i = 1; i < indices.length; i++) {
      int nodeIndex = random.nextInt(2 * i - 1);
      NDTree node = m_ndtree.locateNode(nodeIndex, new int[1]);
      node.insertClassIndex(indices[i]);
    }
    m_ndtree.unifyTree();

    // Build classifiers
    buildClassifierForNode(m_ndtree, data);
  }

  /**
   * Builds the classifier for one node: trains (or reuses from the shared
   * hashtable) a binary classifier separating the left child's classes
   * from the right child's, then recurses into non-leaf children on the
   * corresponding data subsets.
   *
   * @param node the node to build the classifier for
   * @param data the data to work with
   * @throws Exception if anything goes wrong
   */
  public void buildClassifierForNode(NDTree node, Instances data) throws Exception {

    // Are we at a leaf node ?
    if (node.m_left != null) {

      // Create classifier: MakeIndicator turns the multi-class problem into
      // a binary one where class 1 = the right child's classes
      MakeIndicator filter = new MakeIndicator();
      filter.setAttributeIndex("" + (data.classIndex() + 1));
      filter.setValueIndices(node.m_right.getString());
      filter.setNumeric(false);
      filter.setInputFormat(data);
      FilteredClassifier classifier = new FilteredClassifier();
      if (data.numInstances() > 0) {
        classifier.setClassifier(AbstractClassifier.makeCopies(m_Classifier, 1)[0]);
      } else {
        // no data at this node: fall back to ZeroR
        classifier.setClassifier(new ZeroR());
      }
      classifier.setFilter(filter);

      // reuse an already-built classifier for this exact dichotomy if present
      if (!m_classifiers.containsKey(node.m_left.getString() + "|" + node.m_right.getString())) {
        classifier.buildClassifier(data);
        m_classifiers.put(node.m_left.getString() + "|" + node.m_right.getString(), classifier);
      } else {
        classifier=(FilteredClassifier)m_classifiers.get(node.m_left.getString() + "|" + node.m_right.getString());
      }

      // Generate successors
      if (node.m_left.m_left != null) {
        // keep only instances whose class falls in the left child's set
        RemoveWithValues rwv = new RemoveWithValues();
        rwv.setInvertSelection(true);
        rwv.setNominalIndices(node.m_left.getString());
        rwv.setAttributeIndex("" + (data.classIndex() + 1));
        rwv.setInputFormat(data);
        Instances firstSubset = Filter.useFilter(data, rwv);
        buildClassifierForNode(node.m_left, firstSubset);
      }
      if (node.m_right.m_left != null) {
        // keep only instances whose class falls in the right child's set
        RemoveWithValues rwv = new RemoveWithValues();
        rwv.setInvertSelection(true);
        rwv.setNominalIndices(node.m_right.getString());
        rwv.setAttributeIndex("" + (data.classIndex() + 1));
        rwv.setInputFormat(data);
        Instances secondSubset = Filter.useFilter(data, rwv);
        buildClassifierForNode(node.m_right, secondSubset);
      }
    }
  }

  /**
   * Predicts the class distribution for a given instance
   *
   * @param inst the (multi-class) instance to be classified
   * @return the class distribution
   * @throws Exception if computing fails
   */
  public double[] distributionForInstance(Instance inst) throws Exception {

    return distributionForInstance(inst, m_ndtree);
  }

  /**
   * Predicts the class distribution for a given instance by recursively
   * multiplying each node's binary-classifier probabilities down the tree:
   * a leaf contributes probability 1 to its single class, and an inner node
   * weights its children's distributions by dist[0] (left) / dist[1] (right)
   * of its binary classifier.
   *
   * @param inst the (multi-class) instance to be classified
   * @param node the node to do get the distribution for
   * @return the class distribution
   * @throws Exception if computing fails
   */
  protected double[] distributionForInstance(Instance inst, NDTree node) throws Exception {

    double[] newDist = new double[inst.numClasses()];
    if (node.m_left == null) {

      // leaf: all mass on this node's single class
      newDist[node.getIndices()[0]] = 1.0;
      return newDist;
    } else {
      Classifier classifier = (Classifier)m_classifiers.get(node.m_left.getString() + "|" + node.m_right.getString());
      double[] leftDist = distributionForInstance(inst, node.m_left);
      double[] rightDist = distributionForInstance(inst, node.m_right);
      double[] dist = classifier.distributionForInstance(inst);

      for (int i = 0; i < inst.numClasses(); i++) {
        // dist[1] is P(right branch) because MakeIndicator mapped the right
        // child's classes to indicator value 1 during training
        if (node.m_right.contains(i)) {
          newDist[i] = dist[1] * rightDist[i];
        } else {
          newDist[i] = dist[0] * leftDist[i];
        }
      }
      return newDist;
    }
  }

  /**
   * Outputs the classifier as a string.
   *
   * @return a string representation of the classifier
   */
  public String toString() {

    if (m_classifiers == null) {
      return "ND: No model built yet.";
    }
    StringBuffer text = new StringBuffer();
    text.append("ND\n\n");
    m_ndtree.toString(text, new int[1], 0);
    return text.toString();
  }

  /**
   * @return a description of the classifier suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {

    return "A meta classifier for handling multi-class datasets with 2-class "
      + "classifiers by building a random tree structure.\n\n"
      + "For more info, check\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 1.9 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param argv the options
   */
  public static void main(String [] argv) {
    runClassifier(new ND(), argv);
  }
}
17,867
27.227488
156
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/mi/CitationKNN.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * CitationKNN.java * Copyright (C) 2005 Miguel Garcia Torres */ package weka.classifiers.mi; import weka.classifiers.Classifier; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.io.Serializable; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * Modified version of the Citation kNN multi instance classifier.<br/> * <br/> * For more information see:<br/> * <br/> * Jun Wang, Zucker, Jean-Daniel: Solving Multiple-Instance Problem: A Lazy Learning Approach. In: 17th International Conference on Machine Learning, 1119-1125, 2000. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Wang2000, * author = {Jun Wang and Zucker and Jean-Daniel}, * booktitle = {17th International Conference on Machine Learning}, * editor = {Pat Langley}, * pages = {1119-1125}, * title = {Solving Multiple-Instance Problem: A Lazy Learning Approach}, * year = {2000} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -R &lt;number of references&gt; * Number of Nearest References (default 1)</pre> * * <pre> -C &lt;number of citers&gt; * Number of Nearest Citers (default 1)</pre> * * <pre> -H &lt;rank&gt; * Rank of the Hausdorff Distance (default 1)</pre> * <!-- options-end --> * * @author Miguel Garcia Torres (mgarciat@ull.es) * @version $Revision: 5527 $ */ public class CitationKNN extends AbstractClassifier implements OptionHandler, MultiInstanceCapabilitiesHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -8435377743874094852L; /** The index of the class attribute */ protected int m_ClassIndex; /** The number of the class labels */ protected int m_NumClasses; /** */ protected int m_IdIndex; /** Debugging output */ protected boolean m_Debug; /** Class labels for each bag */ protected int[] m_Classes; /** attribute name structure of the relational attribute*/ protected Instances m_Attributes; /** Number of references */ protected int m_NumReferences = 1; /** Number of citers*/ protected int m_NumCiters = 1; /** Training bags*/ protected Instances m_TrainBags; /** Different debugging output */ protected boolean m_CNNDebug = false; protected boolean m_CitersDebug = false; protected boolean m_ReferencesDebug = false; protected boolean m_HDistanceDebug = false; protected boolean m_NeighborListDebug = false; /** C nearest neighbors considering all the bags*/ protected NeighborList[] m_CNN; /** C nearest citers */ protected int[] m_Citers; /** R nearest 
references */ protected int[] m_References; /** Rank associated to the Hausdorff distance*/ protected int m_HDRank = 1; /** Normalization of the euclidean distance */ private double[] m_Diffs; private double[] m_Min; private double m_MinNorm = 0.95; private double[] m_Max; private double m_MaxNorm = 1.05; /** * Returns a string describing this filter * * @return a description of the filter suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Modified version of the Citation kNN multi instance classifier.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Jun Wang and Zucker and Jean-Daniel"); result.setValue(Field.TITLE, "Solving Multiple-Instance Problem: A Lazy Learning Approach"); result.setValue(Field.BOOKTITLE, "17th International Conference on Machine Learning"); result.setValue(Field.EDITOR, "Pat Langley"); result.setValue(Field.YEAR, "2000"); result.setValue(Field.PAGES, "1119-1125"); return result; } /** * Calculates the normalization of each attribute. 
*/ public void preprocessData(){ int i,j, k; double min, max; Instances instances; Instance instance; // compute the min/max of each feature for (i=0;i<m_Attributes.numAttributes();i++) { min=Double.POSITIVE_INFINITY ; max=Double.NEGATIVE_INFINITY ; for(j = 0; j < m_TrainBags.numInstances(); j++){ instances = m_TrainBags.instance(j).relationalValue(1); for (k=0;k<instances.numInstances();k++) { instance = instances.instance(k); if(instance.value(i) < min) min= instance.value(i); if(instance.value(i) > max) max= instance.value(i); } } m_Min[i] = min * m_MinNorm; m_Max[i] = max * m_MaxNorm; m_Diffs[i]= max * m_MaxNorm - min * m_MinNorm; } } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String HDRankTipText() { return "The rank associated to the Hausdorff distance."; } /** * Sets the rank associated to the Hausdorff distance * @param hDRank the rank of the Hausdorff distance */ public void setHDRank(int hDRank){ m_HDRank = hDRank; } /** * Returns the rank associated to the Hausdorff distance * @return the rank number */ public int getHDRank(){ return m_HDRank; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numReferencesTipText() { return "The number of references considered to estimate the class " + "prediction of tests bags."; } /** * Sets the number of references considered to estimate * the class prediction of tests bags * @param numReferences the number of references */ public void setNumReferences(int numReferences){ m_NumReferences = numReferences; } /** * Returns the number of references considered to estimate * the class prediction of tests bags * @return the number of references */ public int getNumReferences(){ return m_NumReferences; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * 
displaying in the explorer/experimenter gui */ public String numCitersTipText() { return "The number of citers considered to estimate the class " + "prediction of test bags."; } /** * Sets the number of citers considered to estimate * the class prediction of tests bags * @param numCiters the number of citers */ public void setNumCiters(int numCiters){ m_NumCiters = numCiters; } /** * Returns the number of citers considered to estimate * the class prediction of tests bags * @return the number of citers */ public int getNumCiters(){ return m_NumCiters; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.RELATIONAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // other result.enable(Capability.ONLY_MULTIINSTANCE); return result; } /** * Returns the capabilities of this multi-instance classifier for the * relational data. * * @return the capabilities of this object * @see Capabilities */ public Capabilities getMultiInstanceCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.disableAllClasses(); result.enable(Capability.NO_CLASS); return result; } /** * Builds the classifier * * @param train the training data to be used for generating the * boosted classifier. 
* @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances train) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(train); // remove instances with missing class train = new Instances(train); train.deleteWithMissingClass(); m_TrainBags = train; m_ClassIndex = train.classIndex(); m_IdIndex = 0; m_NumClasses = train.numClasses(); m_Classes = new int [train.numInstances()]; // Class values m_Attributes = train.instance(0).relationalValue(1).stringFreeStructure(); m_Citers = new int[train.numClasses()]; m_References = new int[train.numClasses()]; m_Diffs = new double[m_Attributes.numAttributes()]; m_Min = new double[m_Attributes.numAttributes()]; m_Max = new double[m_Attributes.numAttributes()]; preprocessData(); buildCNN(); if(m_CNNDebug){ System.out.println("########################################### "); System.out.println("###########CITATION######################## "); System.out.println("########################################### "); for(int i = 0; i < m_CNN.length; i++){ System.out.println("Bag: " + i); m_CNN[i].printReducedList(); } } } /** * generates all the variables associated to the citation * classifier * * @throws Exception if generation fails */ public void buildCNN() throws Exception { int numCiters = 0; if((m_NumCiters >= m_TrainBags.numInstances()) || (m_NumCiters < 0)) throw new Exception("Number of citers is out of the range [0, numInstances)"); else numCiters = m_NumCiters; m_CNN = new NeighborList[m_TrainBags.numInstances()]; Instance bag; for(int i = 0; i< m_TrainBags.numInstances(); i++){ bag = m_TrainBags.instance(i); //first we find its neighbors NeighborList neighborList = findNeighbors(bag, numCiters, m_TrainBags); m_CNN[i] = neighborList; } } /** * calculates the citers associated to a bag * @param bag the bag cited */ public void countBagCiters(Instance bag){ //Initialization of the vector for(int i = 0; i < m_TrainBags.numClasses(); i++) m_Citers[i] 
= 0; // if(m_CitersDebug == true) System.out.println("-------CITERS--------"); NeighborList neighborList; NeighborNode current; boolean stopSearch = false; int index; // compute the distance between the test bag and each training bag. Update // the bagCiter count in case it be a neighbour double bagDistance = 0; for(int i = 0; i < m_TrainBags.numInstances(); i++){ //measure the distance bagDistance = distanceSet(bag, m_TrainBags.instance(i)); if(m_CitersDebug == true){ System.out.print("bag - bag(" + i + "): " + bagDistance); System.out.println(" <" + m_TrainBags.instance(i).classValue() + ">"); } //compare the distance to see if it would belong to the // neighborhood of each training exemplar neighborList = m_CNN[i]; current = neighborList.mFirst; while((current != null) && (!stopSearch)) { if(m_CitersDebug == true) System.out.println("\t\tciter Distance: " + current.mDistance); if(current.mDistance < bagDistance){ current = current.mNext; } else{ stopSearch = true; if(m_CitersDebug == true){ System.out.println("\t***"); } } } if(stopSearch == true){ stopSearch = false; index = (int)(m_TrainBags.instance(i)).classValue(); m_Citers[index] += 1; } } if(m_CitersDebug == true){ for(int i= 0; i < m_Citers.length; i++){ System.out.println("[" + i + "]: " + m_Citers[i]); } } } /** * Calculates the references of the exemplar bag * @param bag the exemplar to which the nearest references * will be calculated */ public void countBagReferences(Instance bag){ int index = 0, referencesIndex = 0; if(m_TrainBags.numInstances() < m_NumReferences) referencesIndex = m_TrainBags.numInstances() - 1; else referencesIndex = m_NumReferences; if(m_CitersDebug == true){ System.out.println("-------References (" + referencesIndex+ ")--------"); } //Initialization of the vector for(int i = 0; i < m_References.length; i++) m_References[i] = 0; if(referencesIndex > 0){ //first we find its neighbors NeighborList neighborList = findNeighbors(bag, referencesIndex, m_TrainBags); 
if(m_ReferencesDebug == true){ System.out.println("Bag: " + bag + " Neighbors: "); neighborList.printReducedList(); } NeighborNode current = neighborList.mFirst; while(current != null){ index = (int) current.mBag.classValue(); m_References[index] += 1; current = current.mNext; } } if(m_ReferencesDebug == true){ System.out.println("References:"); for(int j = 0; j < m_References.length; j++) System.out.println("[" + j + "]: " + m_References[j]); } } /** * Build the list of nearest k neighbors to the given test instance. * @param bag the bag to search for neighbors of * @param kNN the number of nearest neighbors * @param bags the data * @return a list of neighbors */ protected NeighborList findNeighbors(Instance bag, int kNN, Instances bags){ double distance; int index = 0; if(kNN > bags.numInstances()) kNN = bags.numInstances() - 1; NeighborList neighborList = new NeighborList(kNN); for(int i = 0; i < bags.numInstances(); i++){ if(bag != bags.instance(i)){ // for hold-one-out cross-validation distance = distanceSet(bag, bags.instance(i)) ; //mDistanceSet.distance(bag, mInstances, bags.exemplar(i), mInstances); if(m_NeighborListDebug) System.out.println("distance(bag, " + i + "): " + distance); if(neighborList.isEmpty() || (index < kNN) || (distance <= neighborList.mLast.mDistance)) neighborList.insertSorted(distance, bags.instance(i), i); index++; } } if(m_NeighborListDebug){ System.out.println("bag neighbors:"); neighborList.printReducedList(); } return neighborList; } /** * Calculates the distance between two instances * @param first instance * @param second instance * @return the distance value */ public double distanceSet(Instance first, Instance second){ double[] h_f = new double[first.relationalValue(1).numInstances()]; double distance; //initilization for(int i = 0; i < h_f.length; i++) h_f[i] = Double.MAX_VALUE; int rank; if(m_HDRank >= first.relationalValue(1).numInstances()) rank = first.relationalValue(1).numInstances(); else if(m_HDRank < 1) rank = 1; 
else rank = m_HDRank; if(m_HDistanceDebug){ System.out.println("-------HAUSDORFF DISTANCE--------"); System.out.println("rank: " + rank + "\nset of instances:"); System.out.println("\tset 1:"); for(int i = 0; i < first.relationalValue(1).numInstances(); i++) System.out.println(first.relationalValue(1).instance(i)); System.out.println("\n\tset 2:"); for(int i = 0; i < second.relationalValue(1).numInstances(); i++) System.out.println(second.relationalValue(1).instance(i)); System.out.println("\n"); } //for each instance in bag first for(int i = 0; i < first.relationalValue(1).numInstances(); i++){ // calculate the distance to each instance in // bag second if(m_HDistanceDebug){ System.out.println("\nDistances:"); } for(int j = 0; j < second.relationalValue(1).numInstances(); j++){ distance = distance(first.relationalValue(1).instance(i), second.relationalValue(1).instance(j)); if(distance < h_f[i]) h_f[i] = distance; if(m_HDistanceDebug){ System.out.println("\tdist(" + i + ", "+ j + "): " + distance + " --> h_f[" + i + "]: " + h_f[i]); } } } int[] index_f = Utils.stableSort(h_f); if(m_HDistanceDebug){ System.out.println("\nRanks:\n"); for(int i = 0; i < index_f.length; i++) System.out.println("\trank " + (i + 1) + ": " + h_f[index_f[i]]); System.out.println("\n\t\t>>>>> rank " + rank + ": " + h_f[index_f[rank - 1]] + " <<<<<"); } return h_f[index_f[rank - 1]]; } /** * distance between two instances * @param first the first instance * @param second the other instance * @return the distance in double precision */ public double distance(Instance first, Instance second){ double sum = 0, diff; for(int i = 0; i < m_Attributes.numAttributes(); i++){ diff = (first.value(i) - m_Min[i])/ m_Diffs[i] - (second.value(i) - m_Min[i])/ m_Diffs[i]; sum += diff * diff; } return sum = Math.sqrt(sum); } /** * Computes the distribution for a given exemplar * * @param bag the exemplar for which distribution is computed * @return the distribution * @throws Exception if the distribution 
can't be computed successfully */ public double[] distributionForInstance(Instance bag) throws Exception { if(m_TrainBags.numInstances() == 0) throw new Exception("No training bags!"); updateNormalization(bag); //build references (R nearest neighbors) countBagReferences(bag); //build citers countBagCiters(bag); return makeDistribution(); } /** * Updates the normalization of each attribute. * * @param bag the exemplar to update the normalization for */ public void updateNormalization(Instance bag){ int i, k; double min, max; Instances instances; Instance instance; // compute the min/max of each feature for (i = 0; i < m_TrainBags.attribute(1).relation().numAttributes(); i++) { min = m_Min[i] / m_MinNorm; max = m_Max[i] / m_MaxNorm; instances = bag.relationalValue(1); for (k=0;k<instances.numInstances();k++) { instance = instances.instance(k); if(instance.value(i) < min) min = instance.value(i); if(instance.value(i) > max) max = instance.value(i); } m_Min[i] = min * m_MinNorm; m_Max[i] = max * m_MaxNorm; m_Diffs[i]= max * m_MaxNorm - min * m_MinNorm; } } /** * Wether the instances of two exemplars are or are not equal * @param exemplar1 first exemplar * @param exemplar2 second exemplar * @return if the instances of the exemplars are equal or not */ public boolean equalExemplars(Instance exemplar1, Instance exemplar2){ if(exemplar1.relationalValue(1).numInstances() == exemplar2.relationalValue(1).numInstances()){ Instances instances1 = exemplar1.relationalValue(1); Instances instances2 = exemplar2.relationalValue(1); for(int i = 0; i < instances1.numInstances(); i++){ Instance instance1 = instances1.instance(i); Instance instance2 = instances2.instance(i); for(int j = 0; j < instance1.numAttributes(); j++){ if(instance1.value(j) != instance2.value(j)){ return false; } } } return true; } return false; } /** * Turn the references and citers list into a probability distribution * * @return the probability distribution * @throws Exception if computation of distribution 
fails */ protected double[] makeDistribution() throws Exception { double total = 0; double[] distribution = new double[m_TrainBags.numClasses()]; boolean debug = false; total = (double)m_TrainBags.numClasses() / Math.max(1, m_TrainBags.numInstances()); for(int i = 0; i < m_TrainBags.numClasses(); i++){ distribution[i] = 1.0 / Math.max(1, m_TrainBags.numInstances()); if(debug) System.out.println("distribution[" + i + "]: " + distribution[i]); } if(debug)System.out.println("total: " + total); for(int i = 0; i < m_TrainBags.numClasses(); i++){ distribution[i] += m_References[i]; distribution[i] += m_Citers[i]; } total = 0; //total for(int i = 0; i < m_TrainBags.numClasses(); i++){ total += distribution[i]; if(debug)System.out.println("distribution[" + i + "]: " + distribution[i]); } for(int i = 0; i < m_TrainBags.numClasses(); i++){ distribution[i] = distribution[i] / total; if(debug)System.out.println("distribution[" + i + "]: " + distribution[i]); } return distribution; } /** * Returns an enumeration of all the available options.. * * @return an enumeration of all available options. */ public Enumeration listOptions(){ Vector result = new Vector(); result.addElement(new Option( "\tNumber of Nearest References (default 1)", "R", 0, "-R <number of references>")); result.addElement(new Option( "\tNumber of Nearest Citers (default 1)", "C", 0, "-C <number of citers>")); result.addElement(new Option( "\tRank of the Hausdorff Distance (default 1)", "H", 0, "-H <rank>")); return result.elements(); } /** * Sets the OptionHandler's options using the given list. All options * will be set (or reset) during this call (i.e. incremental setting * of options is not possible). 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -R &lt;number of references&gt; * Number of Nearest References (default 1)</pre> * * <pre> -C &lt;number of citers&gt; * Number of Nearest Citers (default 1)</pre> * * <pre> -H &lt;rank&gt; * Rank of the Hausdorff Distance (default 1)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception{ setDebug(Utils.getFlag('D', options)); String option = Utils.getOption('R', options); if(option.length() != 0) setNumReferences(Integer.parseInt(option)); else setNumReferences(1); option = Utils.getOption('C', options); if(option.length() != 0) setNumCiters(Integer.parseInt(option)); else setNumCiters(1); option = Utils.getOption('H', options); if(option.length() != 0) setHDRank(Integer.parseInt(option)); else setHDRank(1); } /** * Gets the current option settings for the OptionHandler. * * @return the list of current option settings as an array of strings */ public String[] getOptions() { Vector result; result = new Vector(); if (getDebug()) result.add("-D"); result.add("-R"); result.add("" + getNumReferences()); result.add("-C"); result.add("" + getNumCiters()); result.add("-H"); result.add("" + getHDRank()); return (String[]) result.toArray(new String[result.size()]); } /** * returns a string representation of the classifier * * @return the string representation */ public String toString() { StringBuffer result; int i; result = new StringBuffer(); // title result.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); result.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); if (m_Citers == null) { result.append("no model built yet!\n"); } else { // internal representation result.append("Citers....: " + Utils.arrayToString(m_Citers) + "\n"); result.append("References: " + Utils.arrayToString(m_References) + "\n"); 
result.append("Min.......: "); for (i = 0; i < m_Min.length; i++) { if (i > 0) result.append(","); result.append(Utils.doubleToString(m_Min[i], 3)); } result.append("\n"); result.append("Max.......: "); for (i = 0; i < m_Max.length; i++) { if (i > 0) result.append(","); result.append(Utils.doubleToString(m_Max[i], 3)); } result.append("\n"); result.append("Diffs.....: "); for (i = 0; i < m_Diffs.length; i++) { if (i > 0) result.append(","); result.append(Utils.doubleToString(m_Diffs[i], 3)); } result.append("\n"); } return result.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5527 $"); } /** * Main method for testing this class. * * @param argv should contain the command line arguments to the * scheme (see Evaluation) */ public static void main(String[] argv) { runClassifier(new CitationKNN(), argv); } //######################################################################## //######################################################################## //######################################################################## //######################################################################## //######################################################################## /** * A class for storing data about a neighboring instance */ private class NeighborNode implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = -3947320761906511289L; /** The neighbor bag */ private Instance mBag; /** The distance from the current instance to this neighbor */ private double mDistance; /** A link to the next neighbor instance */ private NeighborNode mNext; /** the position in the bag */ private int mBagPosition; /** * Create a new neighbor node. 
* * @param distance the distance to the neighbor * @param bag the bag instance * @param position the position in the bag * @param next the next neighbor node */ public NeighborNode(double distance, Instance bag, int position, NeighborNode next){ mDistance = distance; mBag = bag; mNext = next; mBagPosition = position; } /** * Create a new neighbor node that doesn't link to any other nodes. * * @param distance the distance to the neighbor * @param bag the neighbor instance * @param position the position in the bag */ public NeighborNode(double distance, Instance bag, int position) { this(distance, bag, position, null); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5527 $"); } } //################################################## /** * A class for a linked list to store the nearest k neighbours * to an instance. We use a list so that we can take care of * cases where multiple neighbours are the same distance away. * i.e. the minimum length of the list is k. */ private class NeighborList implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = 3432555644456217394L; /** The first node in the list */ private NeighborNode mFirst; /** The last node in the list */ private NeighborNode mLast; /** The number of nodes to attempt to maintain in the list */ private int mLength = 1; /** * Creates the neighborlist with a desired length * * @param length the length of list to attempt to maintain */ public NeighborList(int length) { mLength = length; } /** * Gets whether the list is empty. * * @return true if so */ public boolean isEmpty() { return (mFirst == null); } /** * Gets the current length of the list. 
* * @return the current length of the list */ public int currentLength() { int i = 0; NeighborNode current = mFirst; while (current != null) { i++; current = current.mNext; } return i; } /** * Inserts an instance neighbor into the list, maintaining the list * sorted by distance. * * @param distance the distance to the instance * @param bag the neighboring instance * @param position the position in the bag */ public void insertSorted(double distance, Instance bag, int position) { if (isEmpty()) { mFirst = mLast = new NeighborNode(distance, bag, position); } else { NeighborNode current = mFirst; if (distance < mFirst.mDistance) {// Insert at head mFirst = new NeighborNode(distance, bag, position, mFirst); } else { // Insert further down the list for( ;(current.mNext != null) && (current.mNext.mDistance < distance); current = current.mNext); current.mNext = new NeighborNode(distance, bag, position, current.mNext); if (current.equals(mLast)) { mLast = current.mNext; } } // Trip down the list until we've got k list elements (or more if the // distance to the last elements is the same). int valcount = 0; for(current = mFirst; current.mNext != null; current = current.mNext) { valcount++; if ((valcount >= mLength) && (current.mDistance != current.mNext.mDistance)) { mLast = current; current.mNext = null; break; } } } } /** * Prunes the list to contain the k nearest neighbors. If there are * multiple neighbors at the k'th distance, all will be kept. * * @param k the number of neighbors to keep in the list. 
*/ public void pruneToK(int k) { if (isEmpty()) return; if (k < 1) k = 1; int currentK = 0; double currentDist = mFirst.mDistance; NeighborNode current = mFirst; for(; current.mNext != null; current = current.mNext) { currentK++; currentDist = current.mDistance; if ((currentK >= k) && (currentDist != current.mNext.mDistance)) { mLast = current; current.mNext = null; break; } } } /** * Prints out the contents of the neighborlist */ public void printList() { if (isEmpty()) { System.out.println("Empty list"); } else { NeighborNode current = mFirst; while (current != null) { System.out.print("Node: instance " + current.mBagPosition + "\n"); System.out.println(current.mBag); System.out.println(", distance " + current.mDistance); current = current.mNext; } System.out.println(); } } /** * Prints out the contents of the neighborlist */ public void printReducedList() { if (isEmpty()) { System.out.println("Empty list"); } else { NeighborNode current = mFirst; while (current != null) { System.out.print("Node: bag " + current.mBagPosition + " (" + current.mBag.relationalValue(1).numInstances() +"): "); //for(int i = 0; i < current.mBag.getInstances().numInstances(); i++){ //System.out.print(" " + (current.mBag).getInstances().instance(i)); //} System.out.print(" <" + current.mBag.classValue() + ">"); System.out.println(" (d: " + current.mDistance + ")"); current = current.mNext; } System.out.println(); } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5527 $"); } } }
34,106
28.251286
166
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/mi/MDD.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * MDD.java * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.mi; import weka.classifiers.Classifier; import weka.core.Capabilities; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Optimization; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import weka.filters.unsupervised.attribute.Standardize; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * Modified Diverse Density algorithm, with collective assumption.<br/> * <br/> * More information about DD:<br/> * <br/> * Oded Maron (1998). Learning from ambiguity.<br/> * <br/> * O. Maron, T. Lozano-Perez (1998). 
A Framework for Multiple Instance Learning. Neural Information Processing Systems. 10. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;phdthesis{Maron1998, * author = {Oded Maron}, * school = {Massachusetts Institute of Technology}, * title = {Learning from ambiguity}, * year = {1998} * } * * &#64;article{Maron1998, * author = {O. Maron and T. Lozano-Perez}, * journal = {Neural Information Processing Systems}, * title = {A Framework for Multiple Instance Learning}, * volume = {10}, * year = {1998} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -N &lt;num&gt; * Whether to 0=normalize/1=standardize/2=neither. * (default 1=standardize)</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Xin Xu (xx5@cs.waikato.ac.nz) * @version $Revision: 5545 $ */ public class MDD extends AbstractClassifier implements OptionHandler, MultiInstanceCapabilitiesHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -7273119490545290581L; /** The index of the class attribute */ protected int m_ClassIndex; protected double[] m_Par; /** The number of the class labels */ protected int m_NumClasses; /** Class labels for each bag */ protected int[] m_Classes; /** MI data */ protected double[][][] m_Data; /** All attribute names */ protected Instances m_Attributes; /** The filter used to standardize/normalize all values. 
*/ protected Filter m_Filter =null; /** Whether to normalize/standardize/neither, default:standardize */ protected int m_filterType = FILTER_STANDARDIZE; /** Normalize training data */ public static final int FILTER_NORMALIZE = 0; /** Standardize training data */ public static final int FILTER_STANDARDIZE = 1; /** No normalization/standardization */ public static final int FILTER_NONE = 2; /** The filter to apply to the training data */ public static final Tag [] TAGS_FILTER = { new Tag(FILTER_NORMALIZE, "Normalize training data"), new Tag(FILTER_STANDARDIZE, "Standardize training data"), new Tag(FILTER_NONE, "No normalization/standardization"), }; /** The filter used to get rid of missing values. */ protected ReplaceMissingValues m_Missing = new ReplaceMissingValues(); /** * Returns a string describing this filter * * @return a description of the filter suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Modified Diverse Density algorithm, with collective assumption.\n\n" + "More information about DD:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.PHDTHESIS); result.setValue(Field.AUTHOR, "Oded Maron"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "Learning from ambiguity"); result.setValue(Field.SCHOOL, "Massachusetts Institute of Technology"); additional = result.add(Type.ARTICLE); additional.setValue(Field.AUTHOR, "O. Maron and T. 
Lozano-Perez"); additional.setValue(Field.YEAR, "1998"); additional.setValue(Field.TITLE, "A Framework for Multiple Instance Learning"); additional.setValue(Field.JOURNAL, "Neural Information Processing Systems"); additional.setValue(Field.VOLUME, "10"); return result; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tTurn on debugging output.", "D", 0, "-D")); result.addElement(new Option( "\tWhether to 0=normalize/1=standardize/2=neither.\n" + "\t(default 1=standardize)", "N", 1, "-N <num>")); return result.elements(); } /** * Parses a given list of options. * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setDebug(Utils.getFlag('D', options)); String nString = Utils.getOption('N', options); if (nString.length() != 0) { setFilterType(new SelectedTag(Integer.parseInt(nString), TAGS_FILTER)); } else { setFilterType(new SelectedTag(FILTER_STANDARDIZE, TAGS_FILTER)); } } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector result; result = new Vector(); if (getDebug()) result.add("-D"); result.add("-N"); result.add("" + m_filterType); return (String[]) result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String filterTypeTipText() { return "The filter type for transforming the training data."; } /** * Gets how the training data will be transformed. Will be one of * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE. 
* * @return the filtering mode */ public SelectedTag getFilterType() { return new SelectedTag(m_filterType, TAGS_FILTER); } /** * Sets how the training data will be transformed. Should be one of * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE. * * @param newType the new filtering mode */ public void setFilterType(SelectedTag newType) { if (newType.getTags() == TAGS_FILTER) { m_filterType = newType.getSelectedTag().getID(); } } private class OptEng extends Optimization { /** * Evaluate objective function * @param x the current values of variables * @return the value of the objective function */ protected double objectiveFunction(double[] x){ double nll = 0; // -LogLikelihood for(int i=0; i<m_Classes.length; i++){ // ith bag int nI = m_Data[i][0].length; // numInstances in ith bag double bag = 0; // NLL of each bag for(int j=0; j<nI; j++){ double ins=0.0; for(int k=0; k<m_Data[i].length; k++) { ins += (m_Data[i][k][j]-x[k*2])*(m_Data[i][k][j]-x[k*2])/ (x[k*2+1]*x[k*2+1]); } ins = Math.exp(-ins); if(m_Classes[i] == 1) bag += ins/(double)nI; else bag += (1.0-ins)/(double)nI; } if(bag<=m_Zero) bag=m_Zero; nll -= Math.log(bag); } return nll; } /** * Evaluate Jacobian vector * @param x the current values of variables * @return the gradient vector */ protected double[] evaluateGradient(double[] x){ double[] grad = new double[x.length]; for(int i=0; i<m_Classes.length; i++){ // ith bag int nI = m_Data[i][0].length; // numInstances in ith bag double denom=0.0; double[] numrt = new double[x.length]; for(int j=0; j<nI; j++){ double exp=0.0; for(int k=0; k<m_Data[i].length; k++) exp += (m_Data[i][k][j]-x[k*2])*(m_Data[i][k][j]-x[k*2])/ (x[k*2+1]*x[k*2+1]); exp = Math.exp(-exp); if(m_Classes[i]==1) denom += exp; else denom += (1.0-exp); // Instance-wise update for(int p=0; p<m_Data[i].length; p++){ // pth variable numrt[2*p] += exp*2.0*(x[2*p]-m_Data[i][p][j])/ (x[2*p+1]*x[2*p+1]); numrt[2*p+1] += exp*(x[2*p]-m_Data[i][p][j])*(x[2*p]-m_Data[i][p][j])/ 
(x[2*p+1]*x[2*p+1]*x[2*p+1]); } } if(denom <= m_Zero){ denom = m_Zero; } // Bag-wise update for(int q=0; q<m_Data[i].length; q++){ if(m_Classes[i]==1){ grad[2*q] += numrt[2*q]/denom; grad[2*q+1] -= numrt[2*q+1]/denom; }else{ grad[2*q] -= numrt[2*q]/denom; grad[2*q+1] += numrt[2*q+1]/denom; } } } return grad; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5545 $"); } } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.RELATIONAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.BINARY_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // other result.enable(Capability.ONLY_MULTIINSTANCE); return result; } /** * Returns the capabilities of this multi-instance classifier for the * relational data. * * @return the capabilities of this object * @see Capabilities */ public Capabilities getMultiInstanceCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.disableAllClasses(); result.enable(Capability.NO_CLASS); return result; } /** * Builds the classifier * * @param train the training data to be used for generating the * boosted classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances train) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(train); // remove instances with missing class train = new Instances(train); train.deleteWithMissingClass(); m_ClassIndex = train.classIndex(); m_NumClasses = train.numClasses(); int nR = train.attribute(1).relation().numAttributes(); int nC = train.numInstances(); int [] bagSize=new int [nC]; Instances datasets= new Instances(train.attribute(1).relation(),0); m_Data = new double [nC][nR][]; // Data values m_Classes = new int [nC]; // Class values m_Attributes = datasets.stringFreeStructure(); double sY1=0, sY0=0; // Number of classes if (m_Debug) { System.out.println("Extracting data..."); } FastVector maxSzIdx=new FastVector(); int maxSz=0; for(int h=0; h<nC; h++){ Instance current = train.instance(h); m_Classes[h] = (int)current.classValue(); // Class value starts from 0 Instances currInsts = current.relationalValue(1); int nI = currInsts.numInstances(); bagSize[h]=nI; for (int i=0; i<nI;i++){ Instance inst=currInsts.instance(i); datasets.add(inst); } if(m_Classes[h]==1){ if(nI>maxSz){ maxSz=nI; maxSzIdx=new FastVector(1); maxSzIdx.addElement(new Integer(h)); } else if(nI == maxSz) maxSzIdx.addElement(new Integer(h)); } } /* filter the training data */ if (m_filterType == FILTER_STANDARDIZE) m_Filter = new Standardize(); else if (m_filterType == FILTER_NORMALIZE) m_Filter = new Normalize(); else m_Filter = null; if (m_Filter!=null) { m_Filter.setInputFormat(datasets); datasets = Filter.useFilter(datasets, m_Filter); } m_Missing.setInputFormat(datasets); datasets = Filter.useFilter(datasets, m_Missing); int instIndex=0; int start=0; for(int h=0; h<nC; h++) { for (int i = 0; i < datasets.numAttributes(); i++) { // initialize m_data[][][] m_Data[h][i] = new double[bagSize[h]]; instIndex=start; for (int k=0; k<bagSize[h]; k++){ m_Data[h][i][k]=datasets.instance(instIndex).value(i); instIndex ++; } } start=instIndex; // Class count if (m_Classes[h] == 1) sY1++; else sY0++; } if (m_Debug) { System.out.println("\nIteration History..." 
); } double[] x = new double[nR*2], tmp = new double[x.length]; double[][] b = new double[2][x.length]; OptEng opt; double nll, bestnll = Double.MAX_VALUE; for (int t=0; t<x.length; t++){ b[0][t] = Double.NaN; b[1][t] = Double.NaN; } // Largest positive exemplar for(int s=0; s<maxSzIdx.size(); s++){ int exIdx = ((Integer)maxSzIdx.elementAt(s)).intValue(); for(int p=0; p<m_Data[exIdx][0].length; p++){ for (int q=0; q < nR;q++){ x[2*q] = m_Data[exIdx][q][p]; // pick one instance x[2*q+1] = 1.0; } opt = new OptEng(); tmp = opt.findArgmin(x, b); while(tmp==null){ tmp = opt.getVarbValues(); if (m_Debug) System.out.println("200 iterations finished, not enough!"); tmp = opt.findArgmin(tmp, b); } nll = opt.getMinFunction(); if(nll < bestnll){ bestnll = nll; m_Par = tmp; if (m_Debug) System.out.println("!!!!!!!!!!!!!!!!Smaller NLL found: "+nll); } if (m_Debug) System.out.println(exIdx+": -------------<Converged>--------------"); } } } /** * Computes the distribution for a given exemplar * * @param exmp the exemplar for which distribution is computed * @return the distribution * @throws Exception if the distribution can't be computed successfully */ public double[] distributionForInstance(Instance exmp) throws Exception { // Extract the data Instances ins = exmp.relationalValue(1); if(m_Filter!=null) ins = Filter.useFilter(ins, m_Filter); ins = Filter.useFilter(ins, m_Missing); int nI = ins.numInstances(), nA = ins.numAttributes(); double[][] dat = new double [nI][nA]; for(int j=0; j<nI; j++){ for(int k=0; k<nA; k++){ dat[j][k] = ins.instance(j).value(k); } } // Compute the probability of the bag double [] distribution = new double[2]; distribution[1]=0.0; // Prob. for class 1 for(int i=0; i<nI; i++){ double exp = 0.0; for(int r=0; r<nA; r++) exp += (m_Par[r*2]-dat[i][r])*(m_Par[r*2]-dat[i][r])/ ((m_Par[r*2+1])*(m_Par[r*2+1])); exp = Math.exp(-exp); // Prob. 
updated for one instance distribution[1] += exp/(double)nI; distribution[0] += (1.0-exp)/(double)nI; } return distribution; } /** * Gets a string describing the classifier. * * @return a string describing the classifer built. */ public String toString() { String result = "Modified Logistic Regression"; if (m_Par == null) { return result + ": No model built yet."; } result += "\nCoefficients...\n" + "Variable Coeff.\n"; for (int j = 0, idx=0; j < m_Par.length/2; j++, idx++) { result += m_Attributes.attribute(idx).name(); result += " "+Utils.doubleToString(m_Par[j*2], 12, 4); result += " "+Utils.doubleToString(m_Par[j*2+1], 12, 4)+"\n"; } return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5545 $"); } /** * Main method for testing this class. * * @param argv should contain the command line arguments to the * scheme (see Evaluation) */ public static void main(String[] argv) { runClassifier(new MDD(), argv); } }
18,712
27.83359
123
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/mi/MIBoost.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * MIBoost.java * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.mi; import weka.classifiers.Classifier; import weka.classifiers.SingleClassifierEnhancer; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Optimization; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Discretize; import weka.filters.unsupervised.attribute.MultiInstanceToPropositional; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * MI AdaBoost method, considers the geometric mean of posterior of instances inside a bag (arithmatic mean of log-posterior) and the expectation for a bag is taken inside the loss function.<br/> * <br/> * For more information about Adaboost, see:<br/> * <br/> * Yoav Freund, Robert 
E. Schapire: Experiments with a new boosting algorithm. In: Thirteenth International Conference on Machine Learning, San Francisco, 148-156, 1996. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Freund1996, * address = {San Francisco}, * author = {Yoav Freund and Robert E. Schapire}, * booktitle = {Thirteenth International Conference on Machine Learning}, * pages = {148-156}, * publisher = {Morgan Kaufmann}, * title = {Experiments with a new boosting algorithm}, * year = {1996} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -B &lt;num&gt; * The number of bins in discretization * (default 0, no discretization)</pre> * * <pre> -R &lt;num&gt; * Maximum number of boost iterations. * (default 10)</pre> * * <pre> -W &lt;class name&gt; * Full name of classifier to boost. * eg: weka.classifiers.bayes.NaiveBayes</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Xin Xu (xx5@cs.waikato.ac.nz) * @version $Revision: 1.6 $ */ public class MIBoost extends SingleClassifierEnhancer implements OptionHandler, MultiInstanceCapabilitiesHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -3808427225599279539L; /** the models for the iterations */ protected Classifier[] m_Models; /** The number of the class labels */ protected int m_NumClasses; /** Class labels for each bag */ protected int[] m_Classes; /** attributes name for the new dataset used to build the model */ protected Instances m_Attributes; /** Number of iterations */ private int m_NumIterations = 100; /** Voting weights of models */ protected double[] m_Beta; /** the maximum number of boost iterations */ protected int m_MaxIterations = 10; /** the number of 
discretization bins */ protected int m_DiscretizeBin = 0; /** filter used for discretization */ protected Discretize m_Filter = null; /** filter used to convert the MI dataset into single-instance dataset */ protected MultiInstanceToPropositional m_ConvertToSI = new MultiInstanceToPropositional(); /** * Returns a string describing this filter * * @return a description of the filter suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "MI AdaBoost method, considers the geometric mean of posterior " + "of instances inside a bag (arithmatic mean of log-posterior) and " + "the expectation for a bag is taken inside the loss function.\n\n" + "For more information about Adaboost, see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Yoav Freund and Robert E. 
Schapire"); result.setValue(Field.TITLE, "Experiments with a new boosting algorithm"); result.setValue(Field.BOOKTITLE, "Thirteenth International Conference on Machine Learning"); result.setValue(Field.YEAR, "1996"); result.setValue(Field.PAGES, "148-156"); result.setValue(Field.PUBLISHER, "Morgan Kaufmann"); result.setValue(Field.ADDRESS, "San Francisco"); return result; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tTurn on debugging output.", "D", 0, "-D")); result.addElement(new Option( "\tThe number of bins in discretization\n" + "\t(default 0, no discretization)", "B", 1, "-B <num>")); result.addElement(new Option( "\tMaximum number of boost iterations.\n" + "\t(default 10)", "R", 1, "-R <num>")); result.addElement(new Option( "\tFull name of classifier to boost.\n" + "\teg: weka.classifiers.bayes.NaiveBayes", "W", 1, "-W <class name>")); Enumeration enu = ((OptionHandler)m_Classifier).listOptions(); while (enu.hasMoreElements()) { result.addElement(enu.nextElement()); } return result.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -B &lt;num&gt; * The number of bins in discretization * (default 0, no discretization)</pre> * * <pre> -R &lt;num&gt; * Maximum number of boost iterations. * (default 10)</pre> * * <pre> -W &lt;class name&gt; * Full name of classifier to boost. 
* eg: weka.classifiers.bayes.NaiveBayes</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setDebug(Utils.getFlag('D', options)); String bin = Utils.getOption('B', options); if (bin.length() != 0) { setDiscretizeBin(Integer.parseInt(bin)); } else { setDiscretizeBin(0); } String boostIterations = Utils.getOption('R', options); if (boostIterations.length() != 0) { setMaxIterations(Integer.parseInt(boostIterations)); } else { setMaxIterations(10); } super.setOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector result; String[] options; int i; result = new Vector(); result.add("-R"); result.add("" + getMaxIterations()); result.add("-B"); result.add("" + getDiscretizeBin()); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); return (String[]) result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String maxIterationsTipText() { return "The maximum number of boost iterations."; } /** * Set the maximum number of boost iterations * * @param maxIterations the maximum number of boost iterations */ public void setMaxIterations(int maxIterations) { m_MaxIterations = maxIterations; } /** * Get the maximum number of boost iterations * * @return the maximum number of boost iterations */ public int getMaxIterations() { return m_MaxIterations; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String 
discretizeBinTipText() { return "The number of bins in discretization."; } /** * Set the number of bins in discretization * * @param bin the number of bins in discretization */ public void setDiscretizeBin(int bin) { m_DiscretizeBin = bin; } /** * Get the number of bins in discretization * * @return the number of bins in discretization */ public int getDiscretizeBin() { return m_DiscretizeBin; } private class OptEng extends Optimization { private double[] weights, errs; public void setWeights(double[] w){ weights = w; } public void setErrs(double[] e){ errs = e; } /** * Evaluate objective function * @param x the current values of variables * @return the value of the objective function * @throws Exception if result is NaN */ protected double objectiveFunction(double[] x) throws Exception{ double obj=0; for(int i=0; i<weights.length; i++){ obj += weights[i]*Math.exp(x[0]*(2.0*errs[i]-1.0)); if(Double.isNaN(obj)) throw new Exception("Objective function value is NaN!"); } return obj; } /** * Evaluate Jacobian vector * @param x the current values of variables * @return the gradient vector * @throws Exception if gradient is NaN */ protected double[] evaluateGradient(double[] x) throws Exception{ double[] grad = new double[1]; for(int i=0; i<weights.length; i++){ grad[0] += weights[i]*(2.0*errs[i]-1.0)*Math.exp(x[0]*(2.0*errs[i]-1.0)); if(Double.isNaN(grad[0])) throw new Exception("Gradient is NaN!"); } return grad; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.6 $"); } } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.RELATIONAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.disableAllClasses(); result.disableAllClassDependencies(); if (super.getCapabilities().handles(Capability.BINARY_CLASS)) result.enable(Capability.BINARY_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // other result.enable(Capability.ONLY_MULTIINSTANCE); return result; } /** * Returns the capabilities of this multi-instance classifier for the * relational data. * * @return the capabilities of this object * @see Capabilities */ public Capabilities getMultiInstanceCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.enable(Capability.NO_CLASS); return result; } /** * Builds the classifier * * @param exps the training data to be used for generating the * boosted classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances exps) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(exps); // remove instances with missing class Instances train = new Instances(exps); train.deleteWithMissingClass(); m_NumClasses = train.numClasses(); m_NumIterations = m_MaxIterations; if (m_Classifier == null) throw new Exception("A base classifier has not been specified!"); if(!(m_Classifier instanceof WeightedInstancesHandler)) throw new Exception("Base classifier cannot handle weighted instances!"); m_Models = AbstractClassifier.makeCopies(m_Classifier, getMaxIterations()); if(m_Debug) System.err.println("Base classifier: "+m_Classifier.getClass().getName()); m_Beta = new double[m_NumIterations]; /* modified by Lin Dong. 
(use MIToSingleInstance filter to convert the MI datasets) */ //Initialize the bags' weights double N = (double)train.numInstances(), sumNi=0; for(int i=0; i<N; i++) sumNi += train.instance(i).relationalValue(1).numInstances(); for(int i=0; i<N; i++){ train.instance(i).setWeight(sumNi/N); } //convert the training dataset into single-instance dataset m_ConvertToSI.setInputFormat(train); Instances data = Filter.useFilter( train, m_ConvertToSI); data.deleteAttributeAt(0); //remove the bagIndex attribute; // Assume the order of the instances are preserved in the Discretize filter if(m_DiscretizeBin > 0){ m_Filter = new Discretize(); m_Filter.setInputFormat(new Instances(data, 0)); m_Filter.setBins(m_DiscretizeBin); data = Filter.useFilter(data, m_Filter); } // Main algorithm int dataIdx; iterations: for(int m=0; m < m_MaxIterations; m++){ if(m_Debug) System.err.println("\nIteration "+m); // Build a model m_Models[m].buildClassifier(data); // Prediction of each bag double[] err=new double[(int)N], weights=new double[(int)N]; boolean perfect = true, tooWrong=true; dataIdx = 0; for(int n=0; n<N; n++){ Instance exn = train.instance(n); // Prediction of each instance and the predicted class distribution // of the bag double nn = (double)exn.relationalValue(1).numInstances(); for(int p=0; p<nn; p++){ Instance testIns = data.instance(dataIdx++); if((int)m_Models[m].classifyInstance(testIns) != (int)exn.classValue()) // Weighted instance-wise 0-1 errors err[n] ++; } weights[n] = exn.weight(); err[n] /= nn; if(err[n] > 0.5) perfect = false; if(err[n] < 0.5) tooWrong = false; } if(perfect || tooWrong){ // No or 100% classification error, cannot find beta if (m == 0) m_Beta[m] = 1.0; else m_Beta[m] = 0; m_NumIterations = m+1; if(m_Debug) System.err.println("No errors"); break iterations; } double[] x = new double[1]; x[0] = 0; double[][] b = new double[2][x.length]; b[0][0] = Double.NaN; b[1][0] = Double.NaN; OptEng opt = new OptEng(); opt.setWeights(weights); opt.setErrs(err); 
//opt.setDebug(m_Debug); if (m_Debug) System.out.println("Start searching for c... "); x = opt.findArgmin(x, b); while(x==null){ x = opt.getVarbValues(); if (m_Debug) System.out.println("200 iterations finished, not enough!"); x = opt.findArgmin(x, b); } if (m_Debug) System.out.println("Finished."); m_Beta[m] = x[0]; if(m_Debug) System.err.println("c = "+m_Beta[m]); // Stop if error too small or error too big and ignore this model if (Double.isInfinite(m_Beta[m]) || Utils.smOrEq(m_Beta[m], 0) ) { if (m == 0) m_Beta[m] = 1.0; else m_Beta[m] = 0; m_NumIterations = m+1; if(m_Debug) System.err.println("Errors out of range!"); break iterations; } // Update weights of data and class label of wfData dataIdx=0; double totWeights=0; for(int r=0; r<N; r++){ Instance exr = train.instance(r); exr.setWeight(weights[r]*Math.exp(m_Beta[m]*(2.0*err[r]-1.0))); totWeights += exr.weight(); } if(m_Debug) System.err.println("Total weights = "+totWeights); for(int r=0; r<N; r++){ Instance exr = train.instance(r); double num = (double)exr.relationalValue(1).numInstances(); exr.setWeight(sumNi*exr.weight()/totWeights); //if(m_Debug) // System.err.print("\nExemplar "+r+"="+exr.weight()+": \t"); for(int s=0; s<num; s++){ Instance inss = data.instance(dataIdx); inss.setWeight(exr.weight()/num); // if(m_Debug) // System.err.print("instance "+s+"="+inss.weight()+ // "|ew*iw*sumNi="+data.instance(dataIdx).weight()+"\t"); if(Double.isNaN(inss.weight())) throw new Exception("instance "+s+" in bag "+r+" has weight NaN!"); dataIdx++; } //if(m_Debug) // System.err.println(); } } } /** * Computes the distribution for a given exemplar * * @param exmp the exemplar for which distribution is computed * @return the classification * @throws Exception if the distribution can't be computed successfully */ public double[] distributionForInstance(Instance exmp) throws Exception { double[] rt = new double[m_NumClasses]; Instances insts = new Instances(exmp.dataset(), 0); insts.add(exmp); // convert the training 
dataset into single-instance dataset insts = Filter.useFilter( insts, m_ConvertToSI); insts.deleteAttributeAt(0); //remove the bagIndex attribute double n = insts.numInstances(); if(m_DiscretizeBin > 0) insts = Filter.useFilter(insts, m_Filter); for(int y=0; y<n; y++){ Instance ins = insts.instance(y); for(int x=0; x<m_NumIterations; x++){ rt[(int)m_Models[x].classifyInstance(ins)] += m_Beta[x]/n; } } for(int i=0; i<rt.length; i++) rt[i] = Math.exp(rt[i]); Utils.normalize(rt); return rt; } /** * Gets a string describing the classifier. * * @return a string describing the classifer built. */ public String toString() { if (m_Models == null) { return "No model built yet!"; } StringBuffer text = new StringBuffer(); text.append("MIBoost: number of bins in discretization = "+m_DiscretizeBin+"\n"); if (m_NumIterations == 0) { text.append("No model built yet.\n"); } else if (m_NumIterations == 1) { text.append("No boosting possible, one classifier used: Weight = " + Utils.roundDouble(m_Beta[0], 2)+"\n"); text.append("Base classifiers:\n"+m_Models[0].toString()); } else { text.append("Base classifiers and their weights: \n"); for (int i = 0; i < m_NumIterations ; i++) { text.append("\n\n"+i+": Weight = " + Utils.roundDouble(m_Beta[i], 2) +"\nBase classifier:\n"+m_Models[i].toString() ); } } text.append("\n\nNumber of performed Iterations: " + m_NumIterations + "\n"); return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.6 $"); } /** * Main method for testing this class. * * @param argv should contain the command line arguments to the * scheme (see Evaluation) */ public static void main(String[] argv) { runClassifier(new MIBoost(), argv); } }
20,902
28.565771
195
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/mi/MIDD.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * MIDD.java * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.mi; import weka.classifiers.Classifier; import weka.core.Capabilities; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Optimization; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import weka.filters.unsupervised.attribute.Standardize; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * Re-implement the Diverse Density algorithm, changes the testing procedure.<br/> * <br/> * Oded Maron (1998). Learning from ambiguity.<br/> * <br/> * O. Maron, T. Lozano-Perez (1998). A Framework for Multiple Instance Learning. 
Neural Information Processing Systems. 10. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;phdthesis{Maron1998, * author = {Oded Maron}, * school = {Massachusetts Institute of Technology}, * title = {Learning from ambiguity}, * year = {1998} * } * * &#64;article{Maron1998, * author = {O. Maron and T. Lozano-Perez}, * journal = {Neural Information Processing Systems}, * title = {A Framework for Multiple Instance Learning}, * volume = {10}, * year = {1998} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -N &lt;num&gt; * Whether to 0=normalize/1=standardize/2=neither. * (default 1=standardize)</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Xin Xu (xx5@cs.waikato.ac.nz) * @version $Revision: 5527 $ */ public class MIDD extends AbstractClassifier implements OptionHandler, MultiInstanceCapabilitiesHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 4263507733600536168L; /** The index of the class attribute */ protected int m_ClassIndex; protected double[] m_Par; /** The number of the class labels */ protected int m_NumClasses; /** Class labels for each bag */ protected int[] m_Classes; /** MI data */ protected double[][][] m_Data; /** All attribute names */ protected Instances m_Attributes; /** The filter used to standardize/normalize all values. 
*/ protected Filter m_Filter = null; /** Whether to normalize/standardize/neither, default:standardize */ protected int m_filterType = FILTER_STANDARDIZE; /** Normalize training data */ public static final int FILTER_NORMALIZE = 0; /** Standardize training data */ public static final int FILTER_STANDARDIZE = 1; /** No normalization/standardization */ public static final int FILTER_NONE = 2; /** The filter to apply to the training data */ public static final Tag [] TAGS_FILTER = { new Tag(FILTER_NORMALIZE, "Normalize training data"), new Tag(FILTER_STANDARDIZE, "Standardize training data"), new Tag(FILTER_NONE, "No normalization/standardization"), }; /** The filter used to get rid of missing values. */ protected ReplaceMissingValues m_Missing = new ReplaceMissingValues(); /** * Returns a string describing this filter * * @return a description of the filter suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Re-implement the Diverse Density algorithm, changes the testing " + "procedure.\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.PHDTHESIS); result.setValue(Field.AUTHOR, "Oded Maron"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "Learning from ambiguity"); result.setValue(Field.SCHOOL, "Massachusetts Institute of Technology"); additional = result.add(Type.ARTICLE); additional.setValue(Field.AUTHOR, "O. Maron and T. 
Lozano-Perez"); additional.setValue(Field.YEAR, "1998"); additional.setValue(Field.TITLE, "A Framework for Multiple Instance Learning"); additional.setValue(Field.JOURNAL, "Neural Information Processing Systems"); additional.setValue(Field.VOLUME, "10"); return result; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tTurn on debugging output.", "D", 0, "-D")); result.addElement(new Option( "\tWhether to 0=normalize/1=standardize/2=neither.\n" + "\t(default 1=standardize)", "N", 1, "-N <num>")); return result.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -N &lt;num&gt; * Whether to 0=normalize/1=standardize/2=neither. * (default 1=standardize)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setDebug(Utils.getFlag('D', options)); String nString = Utils.getOption('N', options); if (nString.length() != 0) { setFilterType(new SelectedTag(Integer.parseInt(nString), TAGS_FILTER)); } else { setFilterType(new SelectedTag(FILTER_STANDARDIZE, TAGS_FILTER)); } } /** * Gets the current settings of the classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector result; result = new Vector(); if (getDebug()) result.add("-D"); result.add("-N"); result.add("" + m_filterType); return (String[]) result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String filterTypeTipText() { return "The filter type for transforming the training data."; } /** * Gets how the training data will be transformed. Will be one of * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE. * * @return the filtering mode */ public SelectedTag getFilterType() { return new SelectedTag(m_filterType, TAGS_FILTER); } /** * Sets how the training data will be transformed. Should be one of * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE. * * @param newType the new filtering mode */ public void setFilterType(SelectedTag newType) { if (newType.getTags() == TAGS_FILTER) { m_filterType = newType.getSelectedTag().getID(); } } private class OptEng extends Optimization { /** * Evaluate objective function * @param x the current values of variables * @return the value of the objective function */ protected double objectiveFunction(double[] x){ double nll = 0; // -LogLikelihood for(int i=0; i<m_Classes.length; i++){ // ith bag int nI = m_Data[i][0].length; // numInstances in ith bag double bag = 0.0; // NLL of pos bag for(int j=0; j<nI; j++){ double ins=0.0; for(int k=0; k<m_Data[i].length; k++) ins += (m_Data[i][k][j]-x[k*2])*(m_Data[i][k][j]-x[k*2])* x[k*2+1]*x[k*2+1]; ins = Math.exp(-ins); ins = 1.0-ins; if(m_Classes[i] == 1) bag += Math.log(ins); else{ if(ins<=m_Zero) ins=m_Zero; nll -= Math.log(ins); } } if(m_Classes[i] == 1){ bag = 1.0 - Math.exp(bag); if(bag<=m_Zero) bag=m_Zero; nll -= Math.log(bag); } } return nll; } /** * Evaluate Jacobian vector * @param x the current values of variables * @return the gradient 
vector */ protected double[] evaluateGradient(double[] x){ double[] grad = new double[x.length]; for(int i=0; i<m_Classes.length; i++){ // ith bag int nI = m_Data[i][0].length; // numInstances in ith bag double denom=0.0; double[] numrt = new double[x.length]; for(int j=0; j<nI; j++){ double exp=0.0; for(int k=0; k<m_Data[i].length; k++) exp += (m_Data[i][k][j]-x[k*2])*(m_Data[i][k][j]-x[k*2]) *x[k*2+1]*x[k*2+1]; exp = Math.exp(-exp); exp = 1.0-exp; if(m_Classes[i]==1) denom += Math.log(exp); if(exp<=m_Zero) exp=m_Zero; // Instance-wise update for(int p=0; p<m_Data[i].length; p++){ // pth variable numrt[2*p] += (1.0-exp)*2.0*(x[2*p]-m_Data[i][p][j])*x[p*2+1]*x[p*2+1] /exp; numrt[2*p+1] += 2.0*(1.0-exp)*(x[2*p]-m_Data[i][p][j])*(x[2*p]-m_Data[i][p][j]) *x[p*2+1]/exp; } } // Bag-wise update denom = 1.0-Math.exp(denom); if(denom <= m_Zero) denom = m_Zero; for(int q=0; q<m_Data[i].length; q++){ if(m_Classes[i]==1){ grad[2*q] += numrt[2*q]*(1.0-denom)/denom; grad[2*q+1] += numrt[2*q+1]*(1.0-denom)/denom; }else{ grad[2*q] -= numrt[2*q]; grad[2*q+1] -= numrt[2*q+1]; } } } // one bag return grad; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5527 $"); } } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.RELATIONAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.BINARY_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // other result.enable(Capability.ONLY_MULTIINSTANCE); return result; } /** * Returns the capabilities of this multi-instance classifier for the * relational data. 
* * @return the capabilities of this object * @see Capabilities */ public Capabilities getMultiInstanceCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.disableAllClasses(); result.enable(Capability.NO_CLASS); return result; } /** * Builds the classifier * * @param train the training data to be used for generating the * boosted classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances train) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(train); // remove instances with missing class train = new Instances(train); train.deleteWithMissingClass(); m_ClassIndex = train.classIndex(); m_NumClasses = train.numClasses(); int nR = train.attribute(1).relation().numAttributes(); int nC = train.numInstances(); FastVector maxSzIdx=new FastVector(); int maxSz=0; int [] bagSize=new int [nC]; Instances datasets= new Instances(train.attribute(1).relation(),0); m_Data = new double [nC][nR][]; // Data values m_Classes = new int [nC]; // Class values m_Attributes = datasets.stringFreeStructure(); if (m_Debug) { System.out.println("Extracting data..."); } for(int h=0; h<nC; h++) {//h_th bag Instance current = train.instance(h); m_Classes[h] = (int)current.classValue(); // Class value starts from 0 Instances currInsts = current.relationalValue(1); for (int i=0; i<currInsts.numInstances();i++){ Instance inst=currInsts.instance(i); datasets.add(inst); } int nI = currInsts.numInstances(); bagSize[h]=nI; if(m_Classes[h]==1){ if(nI>maxSz){ maxSz=nI; maxSzIdx=new FastVector(1); maxSzIdx.addElement(new Integer(h)); } else if(nI == maxSz) maxSzIdx.addElement(new Integer(h)); } } /* filter the training data */ if (m_filterType == FILTER_STANDARDIZE) 
m_Filter = new Standardize(); else if (m_filterType == FILTER_NORMALIZE) m_Filter = new Normalize(); else m_Filter = null; if (m_Filter!=null) { m_Filter.setInputFormat(datasets); datasets = Filter.useFilter(datasets, m_Filter); } m_Missing.setInputFormat(datasets); datasets = Filter.useFilter(datasets, m_Missing); int instIndex=0; int start=0; for(int h=0; h<nC; h++) { for (int i = 0; i < datasets.numAttributes(); i++) { // initialize m_data[][][] m_Data[h][i] = new double[bagSize[h]]; instIndex=start; for (int k=0; k<bagSize[h]; k++){ m_Data[h][i][k]=datasets.instance(instIndex).value(i); instIndex ++; } } start=instIndex; } if (m_Debug) { System.out.println("\nIteration History..." ); } double[] x = new double[nR*2], tmp = new double[x.length]; double[][] b = new double[2][x.length]; OptEng opt; double nll, bestnll = Double.MAX_VALUE; for (int t=0; t<x.length; t++){ b[0][t] = Double.NaN; b[1][t] = Double.NaN; } // Largest Positive exemplar for(int s=0; s<maxSzIdx.size(); s++){ int exIdx = ((Integer)maxSzIdx.elementAt(s)).intValue(); for(int p=0; p<m_Data[exIdx][0].length; p++){ for (int q=0; q < nR;q++){ x[2*q] = m_Data[exIdx][q][p]; // pick one instance x[2*q+1] = 1.0; } opt = new OptEng(); //opt.setDebug(m_Debug); tmp = opt.findArgmin(x, b); while(tmp==null){ tmp = opt.getVarbValues(); if (m_Debug) System.out.println("200 iterations finished, not enough!"); tmp = opt.findArgmin(tmp, b); } nll = opt.getMinFunction(); if(nll < bestnll){ bestnll = nll; m_Par = tmp; tmp = new double[x.length]; // Save memory if (m_Debug) System.out.println("!!!!!!!!!!!!!!!!Smaller NLL found: "+nll); } if (m_Debug) System.out.println(exIdx+": -------------<Converged>--------------"); } } } /** * Computes the distribution for a given exemplar * * @param exmp the exemplar for which distribution is computed * @return the distribution * @throws Exception if the distribution can't be computed successfully */ public double[] distributionForInstance(Instance exmp) throws Exception { // 
Extract the data Instances ins = exmp.relationalValue(1); if(m_Filter!=null) ins = Filter.useFilter(ins, m_Filter); ins = Filter.useFilter(ins, m_Missing); int nI = ins.numInstances(), nA = ins.numAttributes(); double[][] dat = new double [nI][nA]; for(int j=0; j<nI; j++){ for(int k=0; k<nA; k++){ dat[j][k] = ins.instance(j).value(k); } } // Compute the probability of the bag double [] distribution = new double[2]; distribution[0]=0.0; // log-Prob. for class 0 for(int i=0; i<nI; i++){ double exp = 0.0; for(int r=0; r<nA; r++) exp += (m_Par[r*2]-dat[i][r])*(m_Par[r*2]-dat[i][r])* m_Par[r*2+1]*m_Par[r*2+1]; exp = Math.exp(-exp); // Prob. updated for one instance distribution[0] += Math.log(1.0-exp); } distribution[0] = Math.exp(distribution[0]); distribution[1] = 1.0-distribution[0]; return distribution; } /** * Gets a string describing the classifier. * * @return a string describing the classifer built. */ public String toString() { //double CSq = m_LLn - m_LL; //int df = m_NumPredictors; String result = "Diverse Density"; if (m_Par == null) { return result + ": No model built yet."; } result += "\nCoefficients...\n" + "Variable Point Scale\n"; for (int j = 0, idx=0; j < m_Par.length/2; j++, idx++) { result += m_Attributes.attribute(idx).name(); result += " "+Utils.doubleToString(m_Par[j*2], 12, 4); result += " "+Utils.doubleToString(m_Par[j*2+1], 12, 4)+"\n"; } return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5527 $"); } /** * Main method for testing this class. * * @param argv should contain the command line arguments to the * scheme (see Evaluation) */ public static void main(String[] argv) { runClassifier(new MIDD(), argv); } }
19,190
28.121396
123
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/mi/MIEMDD.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * MIEMDD.java * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.mi; import weka.classifiers.RandomizableClassifier; import weka.core.Capabilities; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Optimization; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import weka.filters.unsupervised.attribute.Standardize; import java.util.Enumeration; import java.util.Random; import java.util.Vector; /** <!-- globalinfo-start --> * EMDD model builds heavily upon Dietterich's Diverse Density (DD) algorithm.<br/> * It is a general framework for MI learning of converting the MI problem to a single-instance setting using EM. 
In this implementation, we use most-likely cause DD model and only use 3 random selected postive bags as initial starting points of EM.<br/> * <br/> * For more information see:<br/> * <br/> * Qi Zhang, Sally A. Goldman: EM-DD: An Improved Multiple-Instance Learning Technique. In: Advances in Neural Information Processing Systems 14, 1073-108, 2001. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Zhang2001, * author = {Qi Zhang and Sally A. Goldman}, * booktitle = {Advances in Neural Information Processing Systems 14}, * pages = {1073-108}, * publisher = {MIT Press}, * title = {EM-DD: An Improved Multiple-Instance Learning Technique}, * year = {2001} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N &lt;num&gt; * Whether to 0=normalize/1=standardize/2=neither. * (default 1=standardize)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Lin Dong (ld21@cs.waikato.ac.nz) * @version $Revision: 5546 $ */ public class MIEMDD extends RandomizableClassifier implements OptionHandler, MultiInstanceCapabilitiesHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 3899547154866223734L; /** The index of the class attribute */ protected int m_ClassIndex; protected double[] m_Par; /** The number of the class labels */ protected int m_NumClasses; /** Class labels for each bag */ protected int[] m_Classes; /** MI data */ protected double[][][] m_Data; /** All attribute names */ protected Instances m_Attributes; /** MI data */ protected double[][] m_emData; /** The filter used to standardize/normalize all values. 
*/ protected Filter m_Filter = null; /** Whether to normalize/standardize/neither, default:standardize */ protected int m_filterType = FILTER_STANDARDIZE; /** Normalize training data */ public static final int FILTER_NORMALIZE = 0; /** Standardize training data */ public static final int FILTER_STANDARDIZE = 1; /** No normalization/standardization */ public static final int FILTER_NONE = 2; /** The filter to apply to the training data */ public static final Tag[] TAGS_FILTER = { new Tag(FILTER_NORMALIZE, "Normalize training data"), new Tag(FILTER_STANDARDIZE, "Standardize training data"), new Tag(FILTER_NONE, "No normalization/standardization"), }; /** The filter used to get rid of missing values. */ protected ReplaceMissingValues m_Missing = new ReplaceMissingValues(); /** * Returns a string describing this filter * * @return a description of the filter suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "EMDD model builds heavily upon Dietterich's Diverse Density (DD) " + "algorithm.\nIt is a general framework for MI learning of converting " + "the MI problem to a single-instance setting using EM. In this " + "implementation, we use most-likely cause DD model and only use 3 " + "random selected postive bags as initial starting points of EM.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Qi Zhang and Sally A. 
Goldman"); result.setValue(Field.TITLE, "EM-DD: An Improved Multiple-Instance Learning Technique"); result.setValue(Field.BOOKTITLE, "Advances in Neural Information Processing Systems 14"); result.setValue(Field.YEAR, "2001"); result.setValue(Field.PAGES, "1073-108"); result.setValue(Field.PUBLISHER, "MIT Press"); return result; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tWhether to 0=normalize/1=standardize/2=neither.\n" + "\t(default 1=standardize)", "N", 1, "-N <num>")); Enumeration enm = super.listOptions(); while (enm.hasMoreElements()) result.addElement(enm.nextElement()); return result.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N &lt;num&gt; * Whether to 0=normalize/1=standardize/2=neither. * (default 1=standardize)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('N', options); if (tmpStr.length() != 0) { setFilterType(new SelectedTag(Integer.parseInt(tmpStr), TAGS_FILTER)); } else { setFilterType(new SelectedTag(FILTER_STANDARDIZE, TAGS_FILTER)); } super.setOptions(options); } /** * Gets the current settings of the classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector result; String[] options; int i; result = new Vector(); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); result.add("-N"); result.add("" + m_filterType); return (String[]) result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String filterTypeTipText() { return "The filter type for transforming the training data."; } /** * Gets how the training data will be transformed. Will be one of * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE. * * @return the filtering mode */ public SelectedTag getFilterType() { return new SelectedTag(m_filterType, TAGS_FILTER); } /** * Sets how the training data will be transformed. Should be one of * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE. * * @param newType the new filtering mode */ public void setFilterType(SelectedTag newType) { if (newType.getTags() == TAGS_FILTER) { m_filterType = newType.getSelectedTag().getID(); } } private class OptEng extends Optimization { /** * Evaluate objective function * @param x the current values of variables * @return the value of the objective function */ protected double objectiveFunction(double[] x){ double nll = 0; // -LogLikelihood for (int i=0; i<m_Classes.length; i++){ // ith bag double ins=0.0; for (int k=0; k<m_emData[i].length; k++) //attribute index ins += (m_emData[i][k]-x[k*2])*(m_emData[i][k]-x[k*2])* x[k*2+1]*x[k*2+1]; ins = Math.exp(-ins); // Pr. of being positive if (m_Classes[i]==1){ if (ins <= m_Zero) ins = m_Zero; nll -= Math.log(ins); //bag level -LogLikelihood } else{ ins = 1.0 - ins; //Pr. 
of being negative if(ins<=m_Zero) ins=m_Zero; nll -= Math.log(ins); } } return nll; } /** * Evaluate Jacobian vector * @param x the current values of variables * @return the gradient vector */ protected double[] evaluateGradient(double[] x){ double[] grad = new double[x.length]; for (int i=0; i<m_Classes.length; i++){ // ith bag double[] numrt = new double[x.length]; double exp=0.0; for (int k=0; k<m_emData[i].length; k++) //attr index exp += (m_emData[i][k]-x[k*2])*(m_emData[i][k]-x[k*2]) *x[k*2+1]*x[k*2+1]; exp = Math.exp(-exp); //Pr. of being positive //Instance-wise update for (int p=0; p<m_emData[i].length; p++){ // pth variable numrt[2*p] = 2.0*(x[2*p]-m_emData[i][p])*x[p*2+1]*x[p*2+1]; numrt[2*p+1] = 2.0*(x[2*p]-m_emData[i][p])*(x[2*p]-m_emData[i][p]) *x[p*2+1]; } //Bag-wise update for (int q=0; q<m_emData[i].length; q++){ if (m_Classes[i] == 1) {//derivation of (-LogLikeliHood) for positive bags grad[2*q] += numrt[2*q]; grad[2*q+1] += numrt[2*q+1]; } else{ //derivation of (-LogLikeliHood) for negative bags grad[2*q] -= numrt[2*q]*exp/(1.0-exp); grad[2*q+1] -= numrt[2*q+1]*exp/(1.0-exp); } } } // one bag return grad; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5546 $"); } } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.RELATIONAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.BINARY_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // other result.enable(Capability.ONLY_MULTIINSTANCE); return result; } /** * Returns the capabilities of this multi-instance classifier for the * relational data. 
* * @return the capabilities of this object * @see Capabilities */ public Capabilities getMultiInstanceCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.disableAllClasses(); result.enable(Capability.NO_CLASS); return result; } /** * Builds the classifier * * @param train the training data to be used for generating the * boosted classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances train) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(train); // remove instances with missing class train = new Instances(train); train.deleteWithMissingClass(); m_ClassIndex = train.classIndex(); m_NumClasses = train.numClasses(); int nR = train.attribute(1).relation().numAttributes(); int nC = train.numInstances(); int[] bagSize = new int[nC]; Instances datasets = new Instances(train.attribute(1).relation(), 0); m_Data = new double [nC][nR][]; // Data values m_Classes = new int [nC]; // Class values m_Attributes = datasets.stringFreeStructure(); if (m_Debug) { System.out.println("\n\nExtracting data..."); } for (int h = 0; h < nC; h++) {//h_th bag Instance current = train.instance(h); m_Classes[h] = (int)current.classValue(); // Class value starts from 0 Instances currInsts = current.relationalValue(1); for (int i = 0; i < currInsts.numInstances(); i++){ Instance inst = currInsts.instance(i); datasets.add(inst); } int nI = currInsts.numInstances(); bagSize[h] = nI; } /* filter the training data */ if (m_filterType == FILTER_STANDARDIZE) m_Filter = new Standardize(); else if (m_filterType == FILTER_NORMALIZE) m_Filter = new Normalize(); else m_Filter = null; if (m_Filter != null) { m_Filter.setInputFormat(datasets); datasets = 
Filter.useFilter(datasets, m_Filter); } m_Missing.setInputFormat(datasets); datasets = Filter.useFilter(datasets, m_Missing); int instIndex = 0; int start = 0; for (int h = 0; h < nC; h++) { for (int i = 0; i < datasets.numAttributes(); i++) { // initialize m_data[][][] m_Data[h][i] = new double[bagSize[h]]; instIndex=start; for (int k = 0; k < bagSize[h]; k++){ m_Data[h][i][k] = datasets.instance(instIndex).value(i); instIndex++; } } start=instIndex; } if (m_Debug) { System.out.println("\n\nIteration History..." ); } m_emData =new double[nC][nR]; m_Par= new double[2*nR]; double[] x = new double[nR*2]; double[] tmp = new double[x.length]; double[] pre_x = new double[x.length]; double[] best_hypothesis = new double[x.length]; double[][] b = new double[2][x.length]; OptEng opt; double bestnll = Double.MAX_VALUE; double min_error = Double.MAX_VALUE; double nll, pre_nll; int iterationCount; for (int t = 0; t < x.length; t++) { b[0][t] = Double.NaN; b[1][t] = Double.NaN; } //random pick 3 positive bags Random r = new Random(getSeed()); FastVector index = new FastVector(); int n1, n2, n3; do { n1 = r.nextInt(nC-1); } while (m_Classes[n1] == 0); index.addElement(new Integer(n1)); do { n2 = r.nextInt(nC-1); } while (n2 == n1|| m_Classes[n2] == 0); index.addElement(new Integer(n2)); do { n3 = r.nextInt(nC-1); } while (n3 == n1 || n3 == n2 || m_Classes[n3] == 0); index.addElement(new Integer(n3)); for (int s = 0; s < index.size(); s++){ int exIdx = ((Integer)index.elementAt(s)).intValue(); if (m_Debug) System.out.println("\nH0 at "+exIdx); for (int p = 0; p < m_Data[exIdx][0].length; p++) { //initialize a hypothesis for (int q = 0; q < nR; q++) { x[2 * q] = m_Data[exIdx][q][p]; x[2 * q + 1] = 1.0; } pre_nll = Double.MAX_VALUE; nll = Double.MAX_VALUE/10.0; iterationCount = 0; //while (Math.abs(nll-pre_nll)>0.01*pre_nll && iterationCount<10) { //stop condition while (nll < pre_nll && iterationCount < 10) { iterationCount++; pre_nll = nll; if (m_Debug) 
System.out.println("\niteration: "+iterationCount); //E-step (find one instance from each bag with max likelihood ) for (int i = 0; i < m_Data.length; i++) { //for each bag int insIndex = findInstance(i, x); for (int att = 0; att < m_Data[0].length; att++) //for each attribute m_emData[i][att] = m_Data[i][att][insIndex]; } if (m_Debug) System.out.println("E-step for new H' finished"); //M-step opt = new OptEng(); tmp = opt.findArgmin(x, b); while (tmp == null) { tmp = opt.getVarbValues(); if (m_Debug) System.out.println("200 iterations finished, not enough!"); tmp = opt.findArgmin(tmp, b); } nll = opt.getMinFunction(); pre_x = x; x = tmp; // update hypothesis //keep the track of the best target point which has the minimum nll /* if (nll < bestnll) { bestnll = nll; m_Par = tmp; if (m_Debug) System.out.println("!!!!!!!!!!!!!!!!Smaller NLL found: " + nll); }*/ //if (m_Debug) //System.out.println(exIdx+" "+p+": "+nll+" "+pre_nll+" " +bestnll); } //converged for one instance //evaluate the hypothesis on the training data and //keep the track of the hypothesis with minimum error on training data double distribution[] = new double[2]; int error = 0; if (nll > pre_nll) m_Par = pre_x; else m_Par = x; for (int i = 0; i<train.numInstances(); i++) { distribution = distributionForInstance (train.instance(i)); if (distribution[1] >= 0.5 && m_Classes[i] == 0) error++; else if (distribution[1]<0.5 && m_Classes[i] == 1) error++; } if (error < min_error) { best_hypothesis = m_Par; min_error = error; if (nll > pre_nll) bestnll = pre_nll; else bestnll = nll; if (m_Debug) System.out.println("error= "+ error +" nll= " + bestnll); } } if (m_Debug) { System.out.println(exIdx+ ": -------------<Converged>--------------"); System.out.println("current minimum error= "+min_error+" nll= "+bestnll); } } m_Par = best_hypothesis; } /** * given x, find the instance in ith bag with the most likelihood * probability, which is most likely to responsible for the label of the * bag For a positive bag, 
find the instance with the maximal probability * of being positive For a negative bag, find the instance with the minimal * probability of being negative * * @param i the bag index * @param x the current values of variables * @return index of the instance in the bag */ protected int findInstance(int i, double[] x){ double min=Double.MAX_VALUE; int insIndex=0; int nI = m_Data[i][0].length; // numInstances in ith bag for (int j=0; j<nI; j++){ double ins=0.0; for (int k=0; k<m_Data[i].length; k++) // for each attribute ins += (m_Data[i][k][j]-x[k*2])*(m_Data[i][k][j]-x[k*2])* x[k*2+1]*x[k*2+1]; //the probability can be calculated as Math.exp(-ins) //to find the maximum Math.exp(-ins) is equivalent to find the minimum of (ins) if (ins<min) { min=ins; insIndex=j; } } return insIndex; } /** * Computes the distribution for a given exemplar * * @param exmp the exemplar for which distribution is computed * @return the distribution * @throws Exception if the distribution can't be computed successfully */ public double[] distributionForInstance(Instance exmp) throws Exception { // Extract the data Instances ins = exmp.relationalValue(1); if (m_Filter != null) ins = Filter.useFilter(ins, m_Filter); ins = Filter.useFilter(ins, m_Missing); int nI = ins.numInstances(), nA = ins.numAttributes(); double[][] dat = new double [nI][nA]; for (int j = 0; j < nI; j++){ for (int k=0; k<nA; k++){ dat[j][k] = ins.instance(j).value(k); } } //find the concept instance in the exemplar double min = Double.MAX_VALUE; double maxProb = -1.0; for (int j = 0; j < nI; j++){ double exp = 0.0; for (int k = 0; k<nA; k++) // for each attribute exp += (dat[j][k]-m_Par[k*2])*(dat[j][k]-m_Par[k*2])*m_Par[k*2+1]*m_Par[k*2+1]; //the probability can be calculated as Math.exp(-exp) //to find the maximum Math.exp(-exp) is equivalent to find the minimum of (exp) if (exp < min) { min = exp; maxProb = Math.exp(-exp); //maximum probability of being positive } } // Compute the probability of the bag double[] 
distribution = new double[2]; distribution[1] = maxProb; distribution[0] = 1.0 - distribution[1]; //mininum prob. of being negative return distribution; } /** * Gets a string describing the classifier. * * @return a string describing the classifer built. */ public String toString() { String result = "MIEMDD"; if (m_Par == null) { return result + ": No model built yet."; } result += "\nCoefficients...\n" + "Variable Point Scale\n"; for (int j = 0, idx=0; j < m_Par.length/2; j++, idx++) { result += m_Attributes.attribute(idx).name(); result += " "+Utils.doubleToString(m_Par[j*2], 12, 4); result += " "+Utils.doubleToString(m_Par[j*2+1], 12, 4)+"\n"; } return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5546 $"); } /** * Main method for testing this class. * * @param argv should contain the command line arguments to the * scheme (see Evaluation) */ public static void main(String[] argv) { runClassifier(new MIEMDD(), argv); } }
23,367
29.546405
253
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/mi/MILR.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * MILR.java * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.mi; import weka.classifiers.Classifier; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Optimization; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; import weka.core.Capabilities.Capability; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * Uses either standard or collective multi-instance assumption, but within linear regression. For the collective assumption, it offers arithmetic or geometric mean for the posteriors. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -R &lt;ridge&gt; * Set the ridge in the log-likelihood.</pre> * * <pre> -A [0|1|2] * Defines the type of algorithm: * 0. standard MI assumption * 1. collective MI assumption, arithmetic mean for posteriors * 2. 
collective MI assumption, geometric mean for posteriors</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Xin Xu (xx5@cs.waikato.ac.nz) * @version $Revision: 5527 $ */ public class MILR extends AbstractClassifier implements OptionHandler, MultiInstanceCapabilitiesHandler { /** for serialization */ static final long serialVersionUID = 1996101190172373826L; protected double[] m_Par; /** The number of the class labels */ protected int m_NumClasses; /** The ridge parameter. */ protected double m_Ridge = 1e-6; /** Class labels for each bag */ protected int[] m_Classes; /** MI data */ protected double[][][] m_Data; /** All attribute names */ protected Instances m_Attributes; protected double[] xMean = null, xSD = null; /** the type of processing */ protected int m_AlgorithmType = ALGORITHMTYPE_DEFAULT; /** standard MI assumption */ public static final int ALGORITHMTYPE_DEFAULT = 0; /** collective MI assumption, arithmetic mean for posteriors */ public static final int ALGORITHMTYPE_ARITHMETIC = 1; /** collective MI assumption, geometric mean for posteriors */ public static final int ALGORITHMTYPE_GEOMETRIC = 2; /** the types of algorithms */ public static final Tag [] TAGS_ALGORITHMTYPE = { new Tag(ALGORITHMTYPE_DEFAULT, "standard MI assumption"), new Tag(ALGORITHMTYPE_ARITHMETIC, "collective MI assumption, arithmetic mean for posteriors"), new Tag(ALGORITHMTYPE_GEOMETRIC, "collective MI assumption, geometric mean for posteriors"), }; /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Uses either standard or collective multi-instance assumption, but " + "within linear regression. 
For the collective assumption, it offers " + "arithmetic or geometric mean for the posteriors."; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tTurn on debugging output.", "D", 0, "-D")); result.addElement(new Option( "\tSet the ridge in the log-likelihood.", "R", 1, "-R <ridge>")); result.addElement(new Option( "\tDefines the type of algorithm:\n" + "\t 0. standard MI assumption\n" + "\t 1. collective MI assumption, arithmetic mean for posteriors\n" + "\t 2. collective MI assumption, geometric mean for posteriors", "A", 1, "-A [0|1|2]")); return result.elements(); } /** * Parses a given list of options. * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; setDebug(Utils.getFlag('D', options)); tmpStr = Utils.getOption('R', options); if (tmpStr.length() != 0) setRidge(Double.parseDouble(tmpStr)); else setRidge(1.0e-6); tmpStr = Utils.getOption('A', options); if (tmpStr.length() != 0) { setAlgorithmType(new SelectedTag(Integer.parseInt(tmpStr), TAGS_ALGORITHMTYPE)); } else { setAlgorithmType(new SelectedTag(ALGORITHMTYPE_DEFAULT, TAGS_ALGORITHMTYPE)); } } /** * Gets the current settings of the classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector result; result = new Vector(); if (getDebug()) result.add("-D"); result.add("-R"); result.add("" + getRidge()); result.add("-A"); result.add("" + m_AlgorithmType); return (String[]) result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String ridgeTipText() { return "The ridge in the log-likelihood."; } /** * Sets the ridge in the log-likelihood. * * @param ridge the ridge */ public void setRidge(double ridge) { m_Ridge = ridge; } /** * Gets the ridge in the log-likelihood. * * @return the ridge */ public double getRidge() { return m_Ridge; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String algorithmTypeTipText() { return "The mean type for the posteriors."; } /** * Gets the type of algorithm. * * @return the algorithm type */ public SelectedTag getAlgorithmType() { return new SelectedTag(m_AlgorithmType, TAGS_ALGORITHMTYPE); } /** * Sets the algorithm type. 
* * @param newType the new algorithm type */ public void setAlgorithmType(SelectedTag newType) { if (newType.getTags() == TAGS_ALGORITHMTYPE) { m_AlgorithmType = newType.getSelectedTag().getID(); } } private class OptEng extends Optimization { /** the type to use * @see MILR#TAGS_ALGORITHMTYPE */ private int m_Type; /** * initializes the object * * @param type the type top use * @see MILR#TAGS_ALGORITHMTYPE */ public OptEng(int type) { super(); m_Type = type; } /** * Evaluate objective function * @param x the current values of variables * @return the value of the objective function */ protected double objectiveFunction(double[] x){ double nll = 0; // -LogLikelihood switch (m_Type) { case ALGORITHMTYPE_DEFAULT: for(int i=0; i<m_Classes.length; i++){ // ith bag int nI = m_Data[i][0].length; // numInstances in ith bag double bag = 0.0, // NLL of each bag prod = 0.0; // Log-prob. for(int j=0; j<nI; j++){ double exp=0.0; for(int k=m_Data[i].length-1; k>=0; k--) exp += m_Data[i][k][j]*x[k+1]; exp += x[0]; exp = Math.exp(exp); if(m_Classes[i]==1) prod -= Math.log(1.0+exp); else bag += Math.log(1.0+exp); } if(m_Classes[i]==1) bag = -Math.log(1.0-Math.exp(prod)); nll += bag; } break; case ALGORITHMTYPE_ARITHMETIC: for(int i=0; i<m_Classes.length; i++){ // ith bag int nI = m_Data[i][0].length; // numInstances in ith bag double bag = 0; // NLL of each bag for(int j=0; j<nI; j++){ double exp=0.0; for(int k=m_Data[i].length-1; k>=0; k--) exp += m_Data[i][k][j]*x[k+1]; exp += x[0]; exp = Math.exp(exp); if(m_Classes[i] == 1) bag += 1.0-1.0/(1.0+exp); // To avoid exp infinite else bag += 1.0/(1.0+exp); } bag /= (double)nI; nll -= Math.log(bag); } break; case ALGORITHMTYPE_GEOMETRIC: for(int i=0; i<m_Classes.length; i++){ // ith bag int nI = m_Data[i][0].length; // numInstances in ith bag double bag = 0; // Log-prob. 
for(int j=0; j<nI; j++){ double exp=0.0; for(int k=m_Data[i].length-1; k>=0; k--) exp += m_Data[i][k][j]*x[k+1]; exp += x[0]; if(m_Classes[i]==1) bag -= exp/(double)nI; else bag += exp/(double)nI; } nll += Math.log(1.0+Math.exp(bag)); } break; } // ridge: note that intercepts NOT included for(int r=1; r<x.length; r++) nll += m_Ridge*x[r]*x[r]; return nll; } /** * Evaluate Jacobian vector * @param x the current values of variables * @return the gradient vector */ protected double[] evaluateGradient(double[] x){ double[] grad = new double[x.length]; switch (m_Type) { case ALGORITHMTYPE_DEFAULT: for(int i=0; i<m_Classes.length; i++){ // ith bag int nI = m_Data[i][0].length; // numInstances in ith bag double denom = 0.0; // denominator, in log-scale double[] bag = new double[grad.length]; //gradient update with ith bag for(int j=0; j<nI; j++){ // Compute exp(b0+b1*Xi1j+...)/[1+exp(b0+b1*Xi1j+...)] double exp=0.0; for(int k=m_Data[i].length-1; k>=0; k--) exp += m_Data[i][k][j]*x[k+1]; exp += x[0]; exp = Math.exp(exp)/(1.0+Math.exp(exp)); if(m_Classes[i]==1) // Bug fix: it used to be denom += Math.log(1.0+exp); // Fixed 21 Jan 2005 (Eibe) denom -= Math.log(1.0-exp); // Instance-wise update of dNLL/dBk for(int p=0; p<x.length; p++){ // pth variable double m = 1.0; if(p>0) m=m_Data[i][p-1][j]; bag[p] += m*exp; } } denom = Math.exp(denom); // Bag-wise update of dNLL/dBk for(int q=0; q<grad.length; q++){ if(m_Classes[i]==1) grad[q] -= bag[q]/(denom-1.0); else grad[q] += bag[q]; } } break; case ALGORITHMTYPE_ARITHMETIC: for(int i=0; i<m_Classes.length; i++){ // ith bag int nI = m_Data[i][0].length; // numInstances in ith bag double denom=0.0; double[] numrt = new double[x.length]; for(int j=0; j<nI; j++){ // Compute exp(b0+b1*Xi1j+...)/[1+exp(b0+b1*Xi1j+...)] double exp=0.0; for(int k=m_Data[i].length-1; k>=0; k--) exp += m_Data[i][k][j]*x[k+1]; exp += x[0]; exp = Math.exp(exp); if(m_Classes[i]==1) denom += exp/(1.0+exp); else denom += 1.0/(1.0+exp); // Instance-wise update 
of dNLL/dBk for(int p=0; p<x.length; p++){ // pth variable double m = 1.0; if(p>0) m=m_Data[i][p-1][j]; numrt[p] += m*exp/((1.0+exp)*(1.0+exp)); } } // Bag-wise update of dNLL/dBk for(int q=0; q<grad.length; q++){ if(m_Classes[i]==1) grad[q] -= numrt[q]/denom; else grad[q] += numrt[q]/denom; } } break; case ALGORITHMTYPE_GEOMETRIC: for(int i=0; i<m_Classes.length; i++){ // ith bag int nI = m_Data[i][0].length; // numInstances in ith bag double bag = 0; double[] sumX = new double[x.length]; for(int j=0; j<nI; j++){ // Compute exp(b0+b1*Xi1j+...)/[1+exp(b0+b1*Xi1j+...)] double exp=0.0; for(int k=m_Data[i].length-1; k>=0; k--) exp += m_Data[i][k][j]*x[k+1]; exp += x[0]; if(m_Classes[i]==1){ bag -= exp/(double)nI; for(int q=0; q<grad.length; q++){ double m = 1.0; if(q>0) m=m_Data[i][q-1][j]; sumX[q] -= m/(double)nI; } } else{ bag += exp/(double)nI; for(int q=0; q<grad.length; q++){ double m = 1.0; if(q>0) m=m_Data[i][q-1][j]; sumX[q] += m/(double)nI; } } } for(int p=0; p<x.length; p++) grad[p] += Math.exp(bag)*sumX[p]/(1.0+Math.exp(bag)); } break; } // ridge: note that intercepts NOT included for(int r=1; r<x.length; r++){ grad[r] += 2.0*m_Ridge*x[r]; } return grad; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5527 $"); } } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.RELATIONAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.BINARY_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // other result.enable(Capability.ONLY_MULTIINSTANCE); return result; } /** * Returns the capabilities of this multi-instance classifier for the * relational data. 
* * @return the capabilities of this object * @see Capabilities */ public Capabilities getMultiInstanceCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.disableAllClasses(); result.enable(Capability.NO_CLASS); return result; } /** * Builds the classifier * * @param train the training data to be used for generating the * boosted classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances train) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(train); // remove instances with missing class train = new Instances(train); train.deleteWithMissingClass(); m_NumClasses = train.numClasses(); int nR = train.attribute(1).relation().numAttributes(); int nC = train.numInstances(); m_Data = new double [nC][nR][]; // Data values m_Classes = new int [nC]; // Class values m_Attributes = train.attribute(1).relation(); xMean = new double [nR]; // Mean of mean xSD = new double [nR]; // Mode of stddev double sY1=0, sY0=0, totIns=0; // Number of classes int[] missingbags = new int[nR]; if (m_Debug) { System.out.println("Extracting data..."); } for(int h=0; h<m_Data.length; h++){ Instance current = train.instance(h); m_Classes[h] = (int)current.classValue(); // Class value starts from 0 Instances currInsts = current.relationalValue(1); int nI = currInsts.numInstances(); totIns += (double)nI; for (int i = 0; i < nR; i++) { // initialize m_data[][][] m_Data[h][i] = new double[nI]; double avg=0, std=0, num=0; for (int k=0; k<nI; k++){ if(!currInsts.instance(k).isMissing(i)){ m_Data[h][i][k] = currInsts.instance(k).value(i); avg += m_Data[h][i][k]; std += m_Data[h][i][k]*m_Data[h][i][k]; num++; } else m_Data[h][i][k] = Double.NaN; } if(num > 0){ xMean[i] += 
avg/num; xSD[i] += std/num; } else missingbags[i]++; } // Class count if (m_Classes[h] == 1) sY1++; else sY0++; } for (int j = 0; j < nR; j++) { xMean[j] = xMean[j]/(double)(nC-missingbags[j]); xSD[j] = Math.sqrt(Math.abs(xSD[j]/((double)(nC-missingbags[j])-1.0) -xMean[j]*xMean[j]*(double)(nC-missingbags[j])/ ((double)(nC-missingbags[j])-1.0))); } if (m_Debug) { // Output stats about input data System.out.println("Descriptives..."); System.out.println(sY0 + " bags have class 0 and " + sY1 + " bags have class 1"); System.out.println("\n Variable Avg SD "); for (int j = 0; j < nR; j++) System.out.println(Utils.doubleToString(j,8,4) + Utils.doubleToString(xMean[j], 10, 4) + Utils.doubleToString(xSD[j], 10,4)); } // Normalise input data and remove ignored attributes for (int i = 0; i < nC; i++) { for (int j = 0; j < nR; j++) { for(int k=0; k < m_Data[i][j].length; k++){ if(xSD[j] != 0){ if(!Double.isNaN(m_Data[i][j][k])) m_Data[i][j][k] = (m_Data[i][j][k] - xMean[j]) / xSD[j]; else m_Data[i][j][k] = 0; } } } } if (m_Debug) { System.out.println("\nIteration History..." 
); } double x[] = new double[nR + 1]; x[0] = Math.log((sY1+1.0) / (sY0+1.0)); double[][] b = new double[2][x.length]; b[0][0] = Double.NaN; b[1][0] = Double.NaN; for (int q=1; q < x.length;q++){ x[q] = 0.0; b[0][q] = Double.NaN; b[1][q] = Double.NaN; } OptEng opt = new OptEng(m_AlgorithmType); opt.setDebug(m_Debug); m_Par = opt.findArgmin(x, b); while(m_Par==null){ m_Par = opt.getVarbValues(); if (m_Debug) System.out.println("200 iterations finished, not enough!"); m_Par = opt.findArgmin(m_Par, b); } if (m_Debug) System.out.println(" -------------<Converged>--------------"); // feature selection use if (m_AlgorithmType == ALGORITHMTYPE_ARITHMETIC) { double[] fs = new double[nR]; for(int k=1; k<nR+1; k++) fs[k-1] = Math.abs(m_Par[k]); int[] idx = Utils.sort(fs); double max = fs[idx[idx.length-1]]; for(int k=idx.length-1; k>=0; k--) System.out.println(m_Attributes.attribute(idx[k]).name()+"\t"+(fs[idx[k]]*100/max)); } // Convert coefficients back to non-normalized attribute units for(int j = 1; j < nR+1; j++) { if (xSD[j-1] != 0) { m_Par[j] /= xSD[j-1]; m_Par[0] -= m_Par[j] * xMean[j-1]; } } } /** * Computes the distribution for a given exemplar * * @param exmp the exemplar for which distribution is computed * @return the distribution * @throws Exception if the distribution can't be computed successfully */ public double[] distributionForInstance(Instance exmp) throws Exception { // Extract the data Instances ins = exmp.relationalValue(1); int nI = ins.numInstances(), nA = ins.numAttributes(); double[][] dat = new double [nI][nA+1]; for(int j=0; j<nI; j++){ dat[j][0]=1.0; int idx=1; for(int k=0; k<nA; k++){ if(!ins.instance(j).isMissing(k)) dat[j][idx] = ins.instance(j).value(k); else dat[j][idx] = xMean[idx-1]; idx++; } } // Compute the probability of the bag double [] distribution = new double[2]; switch (m_AlgorithmType) { case ALGORITHMTYPE_DEFAULT: distribution[0]=0.0; // Log-Prob. 
for class 0 for(int i=0; i<nI; i++){ double exp = 0.0; for(int r=0; r<m_Par.length; r++) exp += m_Par[r]*dat[i][r]; exp = Math.exp(exp); // Prob. updated for one instance distribution[0] -= Math.log(1.0+exp); } // Prob. for class 0 distribution[0] = Math.exp(distribution[0]); // Prob. for class 1 distribution[1] = 1.0 - distribution[0]; break; case ALGORITHMTYPE_ARITHMETIC: distribution[0]=0.0; // Prob. for class 0 for(int i=0; i<nI; i++){ double exp = 0.0; for(int r=0; r<m_Par.length; r++) exp += m_Par[r]*dat[i][r]; exp = Math.exp(exp); // Prob. updated for one instance distribution[0] += 1.0/(1.0+exp); } // Prob. for class 0 distribution[0] /= (double)nI; // Prob. for class 1 distribution[1] = 1.0 - distribution[0]; break; case ALGORITHMTYPE_GEOMETRIC: for(int i=0; i<nI; i++){ double exp = 0.0; for(int r=0; r<m_Par.length; r++) exp += m_Par[r]*dat[i][r]; distribution[1] += exp/(double)nI; } // Prob. for class 1 distribution[1] = 1.0/(1.0+Math.exp(-distribution[1])); // Prob. for class 0 distribution[0] = 1-distribution[1]; break; } return distribution; } /** * Gets a string describing the classifier. * * @return a string describing the classifer built. */ public String toString() { String result = "Modified Logistic Regression"; if (m_Par == null) { return result + ": No model built yet."; } result += "\nMean type: " + getAlgorithmType().getSelectedTag().getReadable() + "\n"; result += "\nCoefficients...\n" + "Variable Coeff.\n"; for (int j = 1, idx=0; j < m_Par.length; j++, idx++) { result += m_Attributes.attribute(idx).name(); result += " "+Utils.doubleToString(m_Par[j], 12, 4); result += "\n"; } result += "Intercept:"; result += " "+Utils.doubleToString(m_Par[0], 10, 4); result += "\n"; result += "\nOdds Ratios...\n" + "Variable O.R.\n"; for (int j = 1, idx=0; j < m_Par.length; j++, idx++) { result += " " + m_Attributes.attribute(idx).name(); double ORc = Math.exp(m_Par[j]); result += " " + ((ORc > 1e10) ? 
"" + ORc : Utils.doubleToString(ORc, 12, 4)); } result += "\n"; return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5527 $"); } /** * Main method for testing this class. * * @param argv should contain the command line arguments to the * scheme (see Evaluation) */ public static void main(String[] argv) { runClassifier(new MILR(), argv); } }
24,598
28.249703
184
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/mi/MINND.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * MINND.java * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.mi; import weka.classifiers.Classifier; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.core.DenseInstance; /** <!-- globalinfo-start --> * Multiple-Instance Nearest Neighbour with Distribution learner.<br/> * <br/> * It uses gradient descent to find the weight for each dimension of each exeamplar from the starting point of 1.0. In order to avoid overfitting, it uses mean-square function (i.e. the Euclidean distance) to search for the weights.<br/> * It then uses the weights to cleanse the training data. 
After that it searches for the weights again from the starting points of the weights searched before.<br/> * Finally it uses the most updated weights to cleanse the test exemplar and then finds the nearest neighbour of the test exemplar using partly-weighted Kullback distance. But the variances in the Kullback distance are the ones before cleansing.<br/> * <br/> * For more information see:<br/> * <br/> * Xin Xu (2001). A nearest distribution approach to multiple-instance learning. Hamilton, NZ. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;misc{Xu2001, * address = {Hamilton, NZ}, * author = {Xin Xu}, * note = {0657.591B}, * school = {University of Waikato}, * title = {A nearest distribution approach to multiple-instance learning}, * year = {2001} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -K &lt;number of neighbours&gt; * Set number of nearest neighbour for prediction * (default 1)</pre> * * <pre> -S &lt;number of neighbours&gt; * Set number of nearest neighbour for cleansing the training data * (default 1)</pre> * * <pre> -E &lt;number of neighbours&gt; * Set number of nearest neighbour for cleansing the testing data * (default 1)</pre> * <!-- options-end --> * * @author Xin Xu (xx5@cs.waikato.ac.nz) * @version $Revision: 5527 $ */ public class MINND extends AbstractClassifier implements OptionHandler, MultiInstanceCapabilitiesHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -4512599203273864994L; /** The number of nearest neighbour for prediction */ protected int m_Neighbour = 1; /** The mean for each attribute of each exemplar */ protected double[][] m_Mean = null; /** The variance for each attribute of each exemplar */ protected double[][] m_Variance = null; /** The dimension of each exemplar, i.e. 
(numAttributes-2) */ protected int m_Dimension = 0; /** header info of the data */ protected Instances m_Attributes;; /** The class label of each exemplar */ protected double[] m_Class = null; /** The number of class labels in the data */ protected int m_NumClasses = 0; /** The weight of each exemplar */ protected double[] m_Weights = null; /** The very small number representing zero */ static private double m_ZERO = 1.0e-45; /** The learning rate in the gradient descent */ protected double m_Rate = -1; /** The minimum values for numeric attributes. */ private double [] m_MinArray=null; /** The maximum values for numeric attributes. */ private double [] m_MaxArray=null; /** The stopping criteria of gradient descent*/ private double m_STOP = 1.0e-45; /** The weights that alter the dimnesion of each exemplar */ private double[][] m_Change=null; /** The noise data of each exemplar */ private double[][] m_NoiseM = null, m_NoiseV = null, m_ValidM = null, m_ValidV = null; /** The number of nearest neighbour instances in the selection of noises in the training data*/ private int m_Select = 1; /** The number of nearest neighbour exemplars in the selection of noises in the test data */ private int m_Choose = 1; /** The decay rate of learning rate */ private double m_Decay = 0.5; /** * Returns a string describing this filter * * @return a description of the filter suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Multiple-Instance Nearest Neighbour with Distribution learner.\n\n" + "It uses gradient descent to find the weight for each dimension of " + "each exeamplar from the starting point of 1.0. In order to avoid " + "overfitting, it uses mean-square function (i.e. the Euclidean " + "distance) to search for the weights.\n " + "It then uses the weights to cleanse the training data. 
After that " + "it searches for the weights again from the starting points of the " + "weights searched before.\n " + "Finally it uses the most updated weights to cleanse the test exemplar " + "and then finds the nearest neighbour of the test exemplar using " + "partly-weighted Kullback distance. But the variances in the Kullback " + "distance are the ones before cleansing.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.MISC); result.setValue(Field.AUTHOR, "Xin Xu"); result.setValue(Field.YEAR, "2001"); result.setValue(Field.TITLE, "A nearest distribution approach to multiple-instance learning"); result.setValue(Field.SCHOOL, "University of Waikato"); result.setValue(Field.ADDRESS, "Hamilton, NZ"); result.setValue(Field.NOTE, "0657.591B"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.RELATIONAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // other result.enable(Capability.ONLY_MULTIINSTANCE); return result; } /** * Returns the capabilities of this multi-instance classifier for the * relational data. 
* * @return the capabilities of this object * @see Capabilities */ public Capabilities getMultiInstanceCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.disableAllClasses(); result.enable(Capability.NO_CLASS); return result; } /** * As normal Nearest Neighbour algorithm does, it's lazy and simply * records the exemplar information (i.e. mean and variance for each * dimension of each exemplar and their classes) when building the model. * There is actually no need to store the exemplars themselves. * * @param exs the training exemplars * @throws Exception if the model cannot be built properly */ public void buildClassifier(Instances exs)throws Exception{ // can classifier handle the data? getCapabilities().testWithFail(exs); // remove instances with missing class Instances newData = new Instances(exs); newData.deleteWithMissingClass(); int numegs = newData.numInstances(); m_Dimension = newData.attribute(1).relation().numAttributes(); m_Attributes = newData.stringFreeStructure(); m_Change = new double[numegs][m_Dimension]; m_NumClasses = exs.numClasses(); m_Mean = new double[numegs][m_Dimension]; m_Variance = new double[numegs][m_Dimension]; m_Class = new double[numegs]; m_Weights = new double[numegs]; m_NoiseM = new double[numegs][m_Dimension]; m_NoiseV = new double[numegs][m_Dimension]; m_ValidM = new double[numegs][m_Dimension]; m_ValidV = new double[numegs][m_Dimension]; m_MinArray = new double[m_Dimension]; m_MaxArray = new double[m_Dimension]; for(int v=0; v < m_Dimension; v++) m_MinArray[v] = m_MaxArray[v] = Double.NaN; for(int w=0; w < numegs; w++){ updateMinMax(newData.instance(w)); } // Scale exemplars Instances data = m_Attributes; for(int x=0; x < numegs; x++){ Instance example = newData.instance(x); example = scale(example); for (int i=0; i<m_Dimension; i++) { 
m_Mean[x][i] = example.relationalValue(1).meanOrMode(i); m_Variance[x][i] = example.relationalValue(1).variance(i); if(Utils.eq(m_Variance[x][i],0.0)) m_Variance[x][i] = m_ZERO; m_Change[x][i] = 1.0; } /* for(int y=0; y < m_Variance[x].length; y++){ if(Utils.eq(m_Variance[x][y],0.0)) m_Variance[x][y] = m_ZERO; m_Change[x][y] = 1.0; } */ data.add(example); m_Class[x] = example.classValue(); m_Weights[x] = example.weight(); } for(int z=0; z < numegs; z++) findWeights(z, m_Mean); // Pre-process and record "true estimated" parameters for distributions for(int x=0; x < numegs; x++){ Instance example = preprocess(data, x); if (getDebug()) System.out.println("???Exemplar "+x+" has been pre-processed:"+ data.instance(x).relationalValue(1).sumOfWeights()+ "|"+example.relationalValue(1).sumOfWeights()+ "; class:"+m_Class[x]); if(Utils.gr(example.relationalValue(1).sumOfWeights(), 0)){ for (int i=0; i<m_Dimension; i++) { m_ValidM[x][i] = example.relationalValue(1).meanOrMode(i); m_ValidV[x][i] = example.relationalValue(1).variance(i); if(Utils.eq(m_ValidV[x][i],0.0)) m_ValidV[x][i] = m_ZERO; } /* for(int y=0; y < m_ValidV[x].length; y++){ if(Utils.eq(m_ValidV[x][y],0.0)) m_ValidV[x][y] = m_ZERO; }*/ } else{ m_ValidM[x] = null; m_ValidV[x] = null; } } for(int z=0; z < numegs; z++) if(m_ValidM[z] != null) findWeights(z, m_ValidM); } /** * Pre-process the given exemplar according to the other exemplars * in the given exemplars. It also updates noise data statistics. 
* * @param data the whole exemplars * @param pos the position of given exemplar in data * @return the processed exemplar * @throws Exception if the returned exemplar is wrong */ public Instance preprocess(Instances data, int pos) throws Exception{ Instance before = data.instance(pos); if((int)before.classValue() == 0){ m_NoiseM[pos] = null; m_NoiseV[pos] = null; return before; } Instances after_relationInsts =before.attribute(1).relation().stringFreeStructure(); Instances noises_relationInsts =before.attribute(1).relation().stringFreeStructure(); Instances newData = m_Attributes; Instance after = new DenseInstance(before.numAttributes()); Instance noises = new DenseInstance(before.numAttributes()); after.setDataset(newData); noises.setDataset(newData); for(int g=0; g < before.relationalValue(1).numInstances(); g++){ Instance datum = before.relationalValue(1).instance(g); double[] dists = new double[data.numInstances()]; for(int i=0; i < data.numInstances(); i++){ if(i != pos) dists[i] = distance(datum, m_Mean[i], m_Variance[i], i); else dists[i] = Double.POSITIVE_INFINITY; } int[] pred = new int[m_NumClasses]; for(int n=0; n < pred.length; n++) pred[n] = 0; for(int o=0; o<m_Select; o++){ int index = Utils.minIndex(dists); pred[(int)m_Class[index]]++; dists[index] = Double.POSITIVE_INFINITY; } int clas = Utils.maxIndex(pred); if((int)before.classValue() != clas) noises_relationInsts.add(datum); else after_relationInsts.add(datum); } int relationValue; relationValue = noises.attribute(1).addRelation( noises_relationInsts); noises.setValue(0,before.value(0)); noises.setValue(1, relationValue); noises.setValue(2, before.classValue()); relationValue = after.attribute(1).addRelation( after_relationInsts); after.setValue(0,before.value(0)); after.setValue(1, relationValue); after.setValue(2, before.classValue()); if(Utils.gr(noises.relationalValue(1).sumOfWeights(), 0)){ for (int i=0; i<m_Dimension; i++) { m_NoiseM[pos][i] = noises.relationalValue(1).meanOrMode(i); 
m_NoiseV[pos][i] = noises.relationalValue(1).variance(i); if(Utils.eq(m_NoiseV[pos][i],0.0)) m_NoiseV[pos][i] = m_ZERO; } /* for(int y=0; y < m_NoiseV[pos].length; y++){ if(Utils.eq(m_NoiseV[pos][y],0.0)) m_NoiseV[pos][y] = m_ZERO; } */ } else{ m_NoiseM[pos] = null; m_NoiseV[pos] = null; } return after; } /** * Calculates the distance between two instances * * @param first the first instance * @param second the second instance * @return the distance between the two given instances */ private double distance(Instance first, double[] mean, double[] var, int pos) { double diff, distance = 0; for(int i = 0; i < m_Dimension; i++) { // If attribute is numeric if(first.attribute(i).isNumeric()){ if (!first.isMissing(i)){ diff = first.value(i) - mean[i]; if(Utils.gr(var[i], m_ZERO)) distance += m_Change[pos][i] * var[i] * diff * diff; else distance += m_Change[pos][i] * diff * diff; } else{ if(Utils.gr(var[i], m_ZERO)) distance += m_Change[pos][i] * var[i]; else distance += m_Change[pos][i] * 1.0; } } } return distance; } /** * Updates the minimum and maximum values for all the attributes * based on a new exemplar. 
* * @param ex the new exemplar */ private void updateMinMax(Instance ex) { Instances insts = ex.relationalValue(1); for (int j = 0;j < m_Dimension; j++) { if (insts.attribute(j).isNumeric()){ for(int k=0; k < insts.numInstances(); k++){ Instance ins = insts.instance(k); if(!ins.isMissing(j)){ if (Double.isNaN(m_MinArray[j])) { m_MinArray[j] = ins.value(j); m_MaxArray[j] = ins.value(j); } else { if (ins.value(j) < m_MinArray[j]) m_MinArray[j] = ins.value(j); else if (ins.value(j) > m_MaxArray[j]) m_MaxArray[j] = ins.value(j); } } } } } } /** * Scale the given exemplar so that the returned exemplar * has the value of 0 to 1 for each dimension * * @param before the given exemplar * @return the resultant exemplar after scaling * @throws Exception if given exampler cannot be scaled properly */ private Instance scale(Instance before) throws Exception{ Instances afterInsts = before.relationalValue(1).stringFreeStructure(); Instance after = new DenseInstance(before.numAttributes()); after.setDataset(m_Attributes); for(int i=0; i < before.relationalValue(1).numInstances(); i++){ Instance datum = before.relationalValue(1).instance(i); Instance inst = (Instance)datum.copy(); for(int j=0; j < m_Dimension; j++){ if(before.relationalValue(1).attribute(j).isNumeric()) inst.setValue(j, (datum.value(j) - m_MinArray[j])/(m_MaxArray[j] - m_MinArray[j])); } afterInsts.add(inst); } int attValue = after.attribute(1).addRelation(afterInsts); after.setValue(0, before.value( 0)); after.setValue(1, attValue); after.setValue(2, before.value( 2)); return after; } /** * Use gradient descent to distort the MU parameter for * the exemplar. The exemplar can be in the specified row in the * given matrix, which has numExemplar rows and numDimension columns; * or not in the matrix. 
* * @param row the given row index * @param mean */ public void findWeights(int row, double[][] mean){ double[] neww = new double[m_Dimension]; double[] oldw = new double[m_Dimension]; System.arraycopy(m_Change[row], 0, neww, 0, m_Dimension); //for(int z=0; z<m_Dimension; z++) //System.out.println("mu("+row+"): "+origin[z]+" | "+newmu[z]); double newresult = target(neww, mean, row, m_Class); double result = Double.POSITIVE_INFINITY; double rate= 0.05; if(m_Rate != -1) rate = m_Rate; //System.out.println("???Start searching ..."); search: while(Utils.gr((result-newresult), m_STOP)){ // Full step oldw = neww; neww= new double[m_Dimension]; double[] delta = delta(oldw, mean, row, m_Class); for(int i=0; i < m_Dimension; i++) if(Utils.gr(m_Variance[row][i], 0.0)) neww[i] = oldw[i] + rate * delta[i]; result = newresult; newresult = target(neww, mean, row, m_Class); //System.out.println("???old: "+result+"|new: "+newresult); while(Utils.gr(newresult, result)){ // Search back //System.out.println("search back"); if(m_Rate == -1){ rate *= m_Decay; // Decay for(int i=0; i < m_Dimension; i++) if(Utils.gr(m_Variance[row][i], 0.0)) neww[i] = oldw[i] + rate * delta[i]; newresult = target(neww, mean, row, m_Class); } else{ for(int i=0; i < m_Dimension; i++) neww[i] = oldw[i]; break search; } } } //System.out.println("???Stop"); m_Change[row] = neww; } /** * Delta of x in one step of gradient descent: * delta(Wij) = 1/2 * sum[k=1..N, k!=i](sqrt(P)*(Yi-Yk)/D - 1) * (MUij - * MUkj)^2 where D = sqrt(sum[j=1..P]Kkj(MUij - MUkj)^2) * N is number of exemplars and P is number of dimensions * * @param x the weights of the exemplar in question * @param rowpos row index of x in X * @param Y the observed class label * @return the delta for all dimensions */ private double[] delta(double[] x, double[][] X, int rowpos, double[] Y){ double y = Y[rowpos]; double[] delta=new double[m_Dimension]; for(int h=0; h < m_Dimension; h++) delta[h] = 0.0; for(int i=0; i < X.length; i++){ if((i != rowpos) 
&& (X[i] != null)){ double var = (y==Y[i]) ? 0.0 : Math.sqrt((double)m_Dimension - 1); double distance=0; for(int j=0; j < m_Dimension; j++) if(Utils.gr(m_Variance[rowpos][j], 0.0)) distance += x[j]*(X[rowpos][j]-X[i][j]) * (X[rowpos][j]-X[i][j]); distance = Math.sqrt(distance); if(distance != 0) for(int k=0; k < m_Dimension; k++) if(m_Variance[rowpos][k] > 0.0) delta[k] += (var/distance - 1.0) * 0.5 * (X[rowpos][k]-X[i][k]) * (X[rowpos][k]-X[i][k]); } } //System.out.println("???delta: "+delta); return delta; } /** * Compute the target function to minimize in gradient descent * The formula is:<br/> * 1/2*sum[i=1..p](f(X, Xi)-var(Y, Yi))^2 <p/> * where p is the number of exemplars and Y is the class label. * In the case of X=MU, f() is the Euclidean distance between two * exemplars together with the related weights and var() is * sqrt(numDimension)*(Y-Yi) where Y-Yi is either 0 (when Y==Yi) * or 1 (Y!=Yi) * * @param x the weights of the exemplar in question * @param rowpos row index of x in X * @param Y the observed class label * @return the result of the target function */ public double target(double[] x, double[][] X, int rowpos, double[] Y){ double y = Y[rowpos], result=0; for(int i=0; i < X.length; i++){ if((i != rowpos) && (X[i] != null)){ double var = (y==Y[i]) ? 0.0 : Math.sqrt((double)m_Dimension - 1); double f=0; for(int j=0; j < m_Dimension; j++) if(Utils.gr(m_Variance[rowpos][j], 0.0)){ f += x[j]*(X[rowpos][j]-X[i][j]) * (X[rowpos][j]-X[i][j]); //System.out.println("i:"+i+" j: "+j+" row: "+rowpos); } f = Math.sqrt(f); //System.out.println("???distance between "+rowpos+" and "+i+": "+f+"|y:"+y+" vs "+Y[i]); if(Double.isInfinite(f)) System.exit(1); result += 0.5 * (f - var) * (f - var); } } //System.out.println("???target: "+result); return result; } /** * Use Kullback Leibler distance to find the nearest neighbours of * the given exemplar. 
* It also uses K-Nearest Neighbour algorithm to classify the * test exemplar * * @param ex the given test exemplar * @return the classification * @throws Exception if the exemplar could not be classified * successfully */ public double classifyInstance(Instance ex)throws Exception{ ex = scale(ex); double[] var = new double [m_Dimension]; for (int i=0; i<m_Dimension; i++) var[i]= ex.relationalValue(1).variance(i); // The Kullback distance to all exemplars double[] kullback = new double[m_Class.length]; // The first K nearest neighbours' predictions */ double[] predict = new double[m_NumClasses]; for(int h=0; h < predict.length; h++) predict[h] = 0; ex = cleanse(ex); if(ex.relationalValue(1).numInstances() == 0){ if (getDebug()) System.out.println("???Whole exemplar falls into ambiguous area!"); return 1.0; // Bias towards positive class } double[] mean = new double[m_Dimension]; for (int i=0; i<m_Dimension; i++) mean [i]=ex.relationalValue(1).meanOrMode(i); // Avoid zero sigma for(int h=0; h < var.length; h++){ if(Utils.eq(var[h],0.0)) var[h] = m_ZERO; } for(int i=0; i < m_Class.length; i++){ if(m_ValidM[i] != null) kullback[i] = kullback(mean, m_ValidM[i], var, m_Variance[i], i); else kullback[i] = Double.POSITIVE_INFINITY; } for(int j=0; j < m_Neighbour; j++){ int pos = Utils.minIndex(kullback); predict[(int)m_Class[pos]] += m_Weights[pos]; kullback[pos] = Double.POSITIVE_INFINITY; } if (getDebug()) System.out.println("???There are still some unambiguous instances in this exemplar! 
Predicted as: "+Utils.maxIndex(predict)); return (double)Utils.maxIndex(predict); } /** * Cleanse the given exemplar according to the valid and noise data * statistics * * @param before the given exemplar * @return the processed exemplar * @throws Exception if the returned exemplar is wrong */ public Instance cleanse(Instance before) throws Exception{ Instances insts = before.relationalValue(1).stringFreeStructure(); Instance after = new DenseInstance (before.numAttributes()); after.setDataset(m_Attributes); for(int g=0; g < before.relationalValue(1).numInstances(); g++){ Instance datum = before.relationalValue(1).instance(g); double[] minNoiDists = new double[m_Choose]; double[] minValDists = new double[m_Choose]; int noiseCount = 0, validCount = 0; double[] nDist = new double[m_Mean.length]; double[] vDist = new double[m_Mean.length]; for(int h=0; h < m_Mean.length; h++){ if(m_ValidM[h] == null) vDist[h] = Double.POSITIVE_INFINITY; else vDist[h] = distance(datum, m_ValidM[h], m_ValidV[h], h); if(m_NoiseM[h] == null) nDist[h] = Double.POSITIVE_INFINITY; else nDist[h] = distance(datum, m_NoiseM[h], m_NoiseV[h], h); } for(int k=0; k < m_Choose; k++){ int pos = Utils.minIndex(vDist); minValDists[k] = vDist[pos]; vDist[pos] = Double.POSITIVE_INFINITY; pos = Utils.minIndex(nDist); minNoiDists[k] = nDist[pos]; nDist[pos] = Double.POSITIVE_INFINITY; } int x = 0,y = 0; while((x+y) < m_Choose){ if(minValDists[x] <= minNoiDists[y]){ validCount++; x++; } else{ noiseCount++; y++; } } if(x >= y) insts.add (datum); } after.setValue(0, before.value( 0)); after.setValue(1, after.attribute(1).addRelation(insts)); after.setValue(2, before.value( 2)); return after; } /** * This function calculates the Kullback Leibler distance between * two normal distributions. This distance is always positive. * Kullback Leibler distance = integral{f(X)ln(f(X)/g(X))} * Note that X is a vector. 
Since we assume dimensions are independent * f(X)(g(X) the same) is actually the product of normal density * functions of each dimensions. Also note that it should be log2 * instead of (ln) in the formula, but we use (ln) simply for computational * convenience. * * The result is as follows, suppose there are P dimensions, and f(X) * is the first distribution and g(X) is the second: * Kullback = sum[1..P](ln(SIGMA2/SIGMA1)) + * sum[1..P](SIGMA1^2 / (2*(SIGMA2^2))) + * sum[1..P]((MU1-MU2)^2 / (2*(SIGMA2^2))) - * P/2 * * @param mu1 mu of the first normal distribution * @param mu2 mu of the second normal distribution * @param var1 variance(SIGMA^2) of the first normal distribution * @param var2 variance(SIGMA^2) of the second normal distribution * @return the Kullback distance of two distributions */ public double kullback(double[] mu1, double[] mu2, double[] var1, double[] var2, int pos){ int p = mu1.length; double result = 0; for(int y=0; y < p; y++){ if((Utils.gr(var1[y], 0)) && (Utils.gr(var2[y], 0))){ result += ((Math.log(Math.sqrt(var2[y]/var1[y]))) + (var1[y] / (2.0*var2[y])) + (m_Change[pos][y] * (mu1[y]-mu2[y])*(mu1[y]-mu2[y]) / (2.0*var2[y])) - 0.5); } } return result; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tSet number of nearest neighbour for prediction\n" + "\t(default 1)", "K", 1, "-K <number of neighbours>")); result.addElement(new Option( "\tSet number of nearest neighbour for cleansing the training data\n" + "\t(default 1)", "S", 1, "-S <number of neighbours>")); result.addElement(new Option( "\tSet number of nearest neighbour for cleansing the testing data\n" + "\t(default 1)", "E", 1, "-E <number of neighbours>")); return result.elements(); } /** * Parses a given list of options. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -K &lt;number of neighbours&gt; * Set number of nearest neighbour for prediction * (default 1)</pre> * * <pre> -S &lt;number of neighbours&gt; * Set number of nearest neighbour for cleansing the training data * (default 1)</pre> * * <pre> -E &lt;number of neighbours&gt; * Set number of nearest neighbour for cleansing the testing data * (default 1)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception{ setDebug(Utils.getFlag('D', options)); String numNeighbourString = Utils.getOption('K', options); if (numNeighbourString.length() != 0) setNumNeighbours(Integer.parseInt(numNeighbourString)); else setNumNeighbours(1); numNeighbourString = Utils.getOption('S', options); if (numNeighbourString.length() != 0) setNumTrainingNoises(Integer.parseInt(numNeighbourString)); else setNumTrainingNoises(1); numNeighbourString = Utils.getOption('E', options); if (numNeighbourString.length() != 0) setNumTestingNoises(Integer.parseInt(numNeighbourString)); else setNumTestingNoises(1); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector result; result = new Vector(); if (getDebug()) result.add("-D"); result.add("-K"); result.add("" + getNumNeighbours()); result.add("-S"); result.add("" + getNumTrainingNoises()); result.add("-E"); result.add("" + getNumTestingNoises()); return (String[]) result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numNeighboursTipText() { return "The number of nearest neighbours to the estimate the class prediction of test bags."; } /** * Sets the number of nearest neighbours to estimate * the class prediction of tests bags * @param numNeighbour the number of citers */ public void setNumNeighbours(int numNeighbour){ m_Neighbour = numNeighbour; } /** * Returns the number of nearest neighbours to estimate * the class prediction of tests bags * @return the number of neighbours */ public int getNumNeighbours(){ return m_Neighbour; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numTrainingNoisesTipText() { return "The number of nearest neighbour instances in the selection of noises in the training data."; } /** * Sets the number of nearest neighbour instances in the * selection of noises in the training data * * @param numTraining the number of noises in training data */ public void setNumTrainingNoises (int numTraining){ m_Select = numTraining; } /** * Returns the number of nearest neighbour instances in the * selection of noises in the training data * * @return the number of noises in training data */ public int getNumTrainingNoises(){ return m_Select; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public 
String numTestingNoisesTipText() { return "The number of nearest neighbour instances in the selection of noises in the test data."; } /** * Returns The number of nearest neighbour instances in the * selection of noises in the test data * @return the number of noises in test data */ public int getNumTestingNoises(){ return m_Choose; } /** * Sets The number of nearest neighbour exemplars in the * selection of noises in the test data * @param numTesting the number of noises in test data */ public void setNumTestingNoises (int numTesting){ m_Choose = numTesting; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5527 $"); } /** * Main method for testing. * * @param args the options for the classifier */ public static void main(String[] args) { runClassifier(new MINND(), args); } }
33,433
31.334623
251
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/mi/MIOptimalBall.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * MIOptimalBall.java * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.mi; import weka.classifiers.Classifier; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.matrix.DoubleVector; import weka.filters.Filter; import weka.filters.unsupervised.attribute.MultiInstanceToPropositional; import weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.PropositionalToMultiInstance; import weka.filters.unsupervised.attribute.Standardize; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * This classifier tries to find a suitable ball in the multiple-instance space, with a certain data point in the instance space as a ball 
center. The possible ball center is a certain instance in a positive bag. The possible radiuses are those which can achieve the highest classification accuracy. The model selects the maximum radius as the radius of the optimal ball.<br/> * <br/> * For more information about this algorithm, see:<br/> * <br/> * Peter Auer, Ronald Ortner: A Boosting Approach to Multiple Instance Learning. In: 15th European Conference on Machine Learning, 63-74, 2004. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Auer2004, * author = {Peter Auer and Ronald Ortner}, * booktitle = {15th European Conference on Machine Learning}, * note = {LNAI 3201}, * pages = {63-74}, * publisher = {Springer}, * title = {A Boosting Approach to Multiple Instance Learning}, * year = {2004} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N &lt;num&gt; * Whether to 0=normalize/1=standardize/2=neither. * (default 0=normalize)</pre> * <!-- options-end --> * * @author Lin Dong (ld21@cs.waikato.ac.nz) * @version $Revision: 5527 $ */ public class MIOptimalBall extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, MultiInstanceCapabilitiesHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -6465750129576777254L; /** center of the optimal ball */ protected double[] m_Center; /** radius of the optimal ball */ protected double m_Radius; /** the distances from each instance in a positive bag to each bag*/ protected double [][][]m_Distance; /** The filter used to standardize/normalize all values. 
*/ protected Filter m_Filter = null; /** Whether to normalize/standardize/neither */ protected int m_filterType = FILTER_NORMALIZE; /** Normalize training data */ public static final int FILTER_NORMALIZE = 0; /** Standardize training data */ public static final int FILTER_STANDARDIZE = 1; /** No normalization/standardization */ public static final int FILTER_NONE = 2; /** The filter to apply to the training data */ public static final Tag [] TAGS_FILTER = { new Tag(FILTER_NORMALIZE, "Normalize training data"), new Tag(FILTER_STANDARDIZE, "Standardize training data"), new Tag(FILTER_NONE, "No normalization/standardization"), }; /** filter used to convert the MI dataset into single-instance dataset */ protected MultiInstanceToPropositional m_ConvertToSI = new MultiInstanceToPropositional(); /** filter used to convert the single-instance dataset into MI dataset */ protected PropositionalToMultiInstance m_ConvertToMI = new PropositionalToMultiInstance(); /** * Returns a string describing this filter * * @return a description of the filter suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "This classifier tries to find a suitable ball in the " + "multiple-instance space, with a certain data point in the instance " + "space as a ball center. The possible ball center is a certain " + "instance in a positive bag. The possible radiuses are those which can " + "achieve the highest classification accuracy. The model selects the " + "maximum radius as the radius of the optimal ball.\n\n" + "For more information about this algorithm, see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Peter Auer and Ronald Ortner"); result.setValue(Field.TITLE, "A Boosting Approach to Multiple Instance Learning"); result.setValue(Field.BOOKTITLE, "15th European Conference on Machine Learning"); result.setValue(Field.YEAR, "2004"); result.setValue(Field.PAGES, "63-74"); result.setValue(Field.PUBLISHER, "Springer"); result.setValue(Field.NOTE, "LNAI 3201"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.RELATIONAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.BINARY_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // other result.enable(Capability.ONLY_MULTIINSTANCE); return result; } /** * Returns the capabilities of this multi-instance classifier for the * relational data. * * @return the capabilities of this object * @see Capabilities */ public Capabilities getMultiInstanceCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.disableAllClasses(); result.enable(Capability.NO_CLASS); return result; } /** * Builds the classifier * * @param data the training data to be used for generating the * boosted classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(data); // remove instances with missing class Instances train = new Instances(data); train.deleteWithMissingClass(); int numAttributes = train.attribute(1).relation().numAttributes(); m_Center = new double[numAttributes]; if (getDebug()) System.out.println("Start training ..."); // convert the training dataset into single-instance dataset m_ConvertToSI.setInputFormat(train); train = Filter.useFilter( train, m_ConvertToSI); if (m_filterType == FILTER_STANDARDIZE) m_Filter = new Standardize(); else if (m_filterType == FILTER_NORMALIZE) m_Filter = new Normalize(); else m_Filter = null; if (m_Filter!=null) { // normalize/standardize the converted training dataset m_Filter.setInputFormat(train); train = Filter.useFilter(train, m_Filter); } // convert the single-instance dataset into multi-instance dataset m_ConvertToMI.setInputFormat(train); train = Filter.useFilter(train, m_ConvertToMI); /*calculate all the distances (and store them in m_Distance[][][]), which are from each instance in all positive bags to all bags */ calculateDistance(train); /*find the suitable ball center (m_Center) and the corresponding radius (m_Radius)*/ findRadius(train); if (getDebug()) System.out.println("Finish building optimal ball model"); } /** * calculate the distances from each instance in a positive bag to each bag. 
* All result distances are stored in m_Distance[i][j][k], where * m_Distance[i][j][k] refers the distances from the jth instance in ith bag * to the kth bag * * @param train the multi-instance dataset (with relational attribute) */ public void calculateDistance (Instances train) { int numBags =train.numInstances(); int numInstances; Instance tempCenter; m_Distance = new double [numBags][][]; for (int i=0; i<numBags; i++) { if (train.instance(i).classValue() == 1.0) { //positive bag numInstances = train.instance(i).relationalValue(1).numInstances(); m_Distance[i]= new double[numInstances][]; for (int j=0; j<numInstances; j++) { tempCenter = train.instance(i).relationalValue(1).instance(j); m_Distance[i][j]=new double [numBags]; //store the distance from one center to all the bags for (int k=0; k<numBags; k++){ if (i==k) m_Distance[i][j][k]= 0; else m_Distance[i][j][k]= minBagDistance (tempCenter, train.instance(k)); } } } } } /** * Calculate the distance from one data point to a bag * * @param center the data point in instance space * @param bag the bag * @return the double value as the distance. */ public double minBagDistance (Instance center, Instance bag){ double distance; double minDistance = Double.MAX_VALUE; Instances temp = bag.relationalValue(1); //calculate the distance from the data point to each instance in the bag and return the minimum distance for (int i=0; i<temp.numInstances(); i++){ distance =0; for (int j=0; j<center.numAttributes(); j++) distance += (center.value(j)-temp.instance(i).value(j))*(center.value(j)-temp.instance(i).value(j)); if (minDistance>distance) minDistance = distance; } return Math.sqrt(minDistance); } /** * Find the maximum radius for the optimal ball. 
* * @param train the multi-instance data */ public void findRadius(Instances train) { int numBags, numInstances; double radius, bagDistance; int highestCount=0; numBags = train.numInstances(); //try each instance in all positive bag as a ball center (tempCenter), for (int i=0; i<numBags; i++) { if (train.instance(i).classValue()== 1.0) {//positive bag numInstances = train.instance(i).relationalValue(1).numInstances(); for (int j=0; j<numInstances; j++) { Instance tempCenter = train.instance(i).relationalValue(1).instance(j); //set the possible set of ball radius corresponding to each tempCenter, double sortedDistance[] = sortArray(m_Distance[i][j]); //sort the distance value for (int k=1; k<sortedDistance.length; k++){ radius = sortedDistance[k]-(sortedDistance[k]-sortedDistance[k-1])/2.0 ; //evaluate the performance on the training data according to //the curren selected tempCenter and the set of radius int correctCount =0; for (int n=0; n<numBags; n++){ bagDistance=m_Distance[i][j][n]; if ((bagDistance <= radius && train.instance(n).classValue()==1.0) ||(bagDistance > radius && train.instance(n).classValue ()==0.0)) correctCount += train.instance(n).weight(); } //and keep the track of the ball center and the maximum radius which can achieve the highest accuracy. if (correctCount > highestCount || (correctCount==highestCount && radius > m_Radius)){ highestCount = correctCount; m_Radius = radius; for (int p=0; p<tempCenter.numAttributes(); p++) m_Center[p]= tempCenter.value(p); } } } } } } /** * Sort the array. 
* * @param distance the array need to be sorted * @return sorted array */ public double [] sortArray(double [] distance) { double [] sorted = new double [distance.length]; //make a copy of the array double []disCopy = new double[distance.length]; for (int i=0;i<distance.length; i++) disCopy[i]= distance[i]; DoubleVector sortVector = new DoubleVector(disCopy); sortVector.sort(); sorted = sortVector.getArrayCopy(); return sorted; } /** * Computes the distribution for a given multiple instance * * @param newBag the instance for which distribution is computed * @return the distribution * @throws Exception if the distribution can't be computed successfully */ public double[] distributionForInstance(Instance newBag) throws Exception { double [] distribution = new double[2]; double distance; distribution[0]=0; distribution[1]=0; Instances insts = new Instances(newBag.dataset(),0); insts.add(newBag); // Filter instances insts= Filter.useFilter( insts, m_ConvertToSI); if (m_Filter!=null) insts = Filter.useFilter(insts, m_Filter); //calculate the distance from each single instance to the ball center int numInsts = insts.numInstances(); insts.deleteAttributeAt(0); //remove the bagIndex attribute, no use for the distance calculation for (int i=0; i<numInsts; i++){ distance =0; for (int j=0; j<insts.numAttributes()-1; j++) distance += (insts.instance(i).value(j) - m_Center[j])*(insts.instance(i).value(j)-m_Center[j]); if (distance <=m_Radius*m_Radius){ // check whether this single instance is inside the ball distribution[1]=1.0; //predicted as a positive bag break; } } distribution[0]= 1-distribution[1]; return distribution; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tWhether to 0=normalize/1=standardize/2=neither. 
\n" + "\t(default 0=normalize)", "N", 1, "-N <num>")); return result.elements(); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector result; result = new Vector(); if (getDebug()) result.add("-D"); result.add("-N"); result.add("" + m_filterType); return (String[]) result.toArray(new String[result.size()]); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N &lt;num&gt; * Whether to 0=normalize/1=standardize/2=neither. * (default 0=normalize)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setDebug(Utils.getFlag('D', options)); String nString = Utils.getOption('N', options); if (nString.length() != 0) { setFilterType(new SelectedTag(Integer.parseInt(nString), TAGS_FILTER)); } else { setFilterType(new SelectedTag(FILTER_NORMALIZE, TAGS_FILTER)); } } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String filterTypeTipText() { return "The filter type for transforming the training data."; } /** * Sets how the training data will be transformed. Should be one of * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE. * * @param newType the new filtering mode */ public void setFilterType(SelectedTag newType) { if (newType.getTags() == TAGS_FILTER) { m_filterType = newType.getSelectedTag().getID(); } } /** * Gets how the training data will be transformed. Will be one of * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE. * * @return the filtering mode */ public SelectedTag getFilterType() { return new SelectedTag(m_filterType, TAGS_FILTER); } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5527 $"); } /** * Main method for testing this class. * * @param argv should contain the command line arguments to the * scheme (see Evaluation) */ public static void main(String[] argv) { runClassifier(new MIOptimalBall(), argv); } }
18,366
32.034173
376
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/mi/MISMO.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * MISMO.java * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.mi; import weka.classifiers.Classifier; import weka.classifiers.functions.Logistic; import weka.classifiers.functions.supportVector.Kernel; import weka.classifiers.functions.supportVector.SMOset; import weka.classifiers.mi.supportVector.MIPolyKernel; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.SerializedObject; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.unsupervised.attribute.MultiInstanceToPropositional; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.Normalize; import 
weka.filters.unsupervised.attribute.PropositionalToMultiInstance; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import weka.filters.unsupervised.attribute.Standardize; import java.io.Serializable; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.core.DenseInstance; /** <!-- globalinfo-start --> * Implements John Platt's sequential minimal optimization algorithm for training a support vector classifier.<br/> * <br/> * This implementation globally replaces all missing values and transforms nominal attributes into binary ones. It also normalizes all attributes by default. (In that case the coefficients in the output are based on the normalized data, not the original data --- this is important for interpreting the classifier.)<br/> * <br/> * Multi-class problems are solved using pairwise classification.<br/> * <br/> * To obtain proper probability estimates, use the option that fits logistic regression models to the outputs of the support vector machine. In the multi-class case the predicted probabilities are coupled using Hastie and Tibshirani's pairwise coupling method.<br/> * <br/> * Note: for improved speed normalization should be turned off when operating on SparseInstances.<br/> * <br/> * For more information on the SMO algorithm, see<br/> * <br/> * J. Platt: Machines using Sequential Minimal Optimization. In B. Schoelkopf and C. Burges and A. Smola, editors, Advances in Kernel Methods - Support Vector Learning, 1998.<br/> * <br/> * S.S. Keerthi, S.K. Shevade, C. Bhattacharyya, K.R.K. Murthy (2001). Improvements to Platt's SMO Algorithm for SVM Classifier Design. Neural Computation. 13(3):637-649. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;incollection{Platt1998, * author = {J. Platt}, * booktitle = {Advances in Kernel Methods - Support Vector Learning}, * editor = {B. Schoelkopf and C. Burges and A. 
Smola}, * publisher = {MIT Press}, * title = {Machines using Sequential Minimal Optimization}, * year = {1998} * } * * &#64;article{Keerthi2001, * author = {S.S. Keerthi and S.K. Shevade and C. Bhattacharyya and K.R.K. Murthy}, * journal = {Neural Computation}, * number = {3}, * pages = {637-649}, * title = {Improvements to Platt's SMO Algorithm for SVM Classifier Design}, * volume = {13}, * year = {2001} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -no-checks * Turns off all checks - use with caution! * Turning them off assumes that data is purely numeric, doesn't * contain any missing values, and has a nominal class. Turning them * off also means that no header information will be stored if the * machine is linear. Finally, it also assumes that no instance has * a weight equal to 0. * (default: checks on)</pre> * * <pre> -C &lt;double&gt; * The complexity constant C. (default 1)</pre> * * <pre> -N * Whether to 0=normalize/1=standardize/2=neither. * (default 0=normalize)</pre> * * <pre> -I * Use MIminimax feature space. </pre> * * <pre> -L &lt;double&gt; * The tolerance parameter. (default 1.0e-3)</pre> * * <pre> -P &lt;double&gt; * The epsilon for round-off error. (default 1.0e-12)</pre> * * <pre> -M * Fit logistic models to SVM outputs. </pre> * * <pre> -V &lt;double&gt; * The number of folds for the internal cross-validation. * (default -1, use training data)</pre> * * <pre> -W &lt;double&gt; * The random number seed. (default 1)</pre> * * <pre> -K &lt;classname and parameters&gt; * The Kernel to use. * (default: weka.classifiers.functions.supportVector.PolyKernel)</pre> * * <pre> * Options specific to kernel weka.classifiers.mi.supportVector.MIPolyKernel: * </pre> * * <pre> -D * Enables debugging output (if available) to be printed. 
 *  (default: off)</pre>
 *
 * <pre> -no-checks
 *  Turns off all checks - use with caution!
 *  (default: checks on)</pre>
 *
 * <pre> -C &lt;num&gt;
 *  The size of the cache (a prime number), 0 for full cache and
 *  -1 to turn it off.
 *  (default: 250007)</pre>
 *
 * <pre> -E &lt;num&gt;
 *  The Exponent to use.
 *  (default: 1.0)</pre>
 *
 * <pre> -L
 *  Use lower-order terms.
 *  (default: no)</pre>
 *
 <!-- options-end -->
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @author Shane Legg (shane@intelligenesis.net) (sparse vector code)
 * @author Stuart Inglis (stuart@reeltwo.com) (sparse vector code)
 * @author Lin Dong (ld21@cs.waikato.ac.nz) (code for adapting to MI data)
 * @version $Revision: 1.6 $
 */
public class MISMO
  extends AbstractClassifier
  implements WeightedInstancesHandler, MultiInstanceCapabilitiesHandler,
             TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = -5834036950143719712L;

  /**
   * Returns a string describing classifier
   * @return a description suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return  "Implements John Platt's sequential minimal optimization "
      + "algorithm for training a support vector classifier.\n\n"
      + "This implementation globally replaces all missing values and "
      + "transforms nominal attributes into binary ones. It also "
      + "normalizes all attributes by default. (In that case the coefficients "
      + "in the output are based on the normalized data, not the "
      + "original data --- this is important for interpreting the classifier.)\n\n"
      + "Multi-class problems are solved using pairwise classification.\n\n"
      + "To obtain proper probability estimates, use the option that fits "
      + "logistic regression models to the outputs of the support vector "
      + "machine. In the multi-class case the predicted probabilities "
      + "are coupled using Hastie and Tibshirani's pairwise coupling "
      + "method.\n\n"
      + "Note: for improved speed normalization should be turned off when "
      + "operating on SparseInstances.\n\n"
      + "For more information on the SMO algorithm, see\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation 	result;
    TechnicalInformation 	additional;

    // primary reference: Platt's original SMO chapter
    result = new TechnicalInformation(Type.INCOLLECTION);
    result.setValue(Field.AUTHOR, "J. Platt");
    result.setValue(Field.YEAR, "1998");
    result.setValue(Field.TITLE, "Machines using Sequential Minimal Optimization");
    result.setValue(Field.BOOKTITLE, "Advances in Kernel Methods - Support Vector Learning");
    result.setValue(Field.EDITOR, "B. Schoelkopf and C. Burges and A. Smola");
    result.setValue(Field.PUBLISHER, "MIT Press");

    // secondary reference: the Keerthi et al. improvements this code implements
    additional = result.add(Type.ARTICLE);
    additional.setValue(Field.AUTHOR, "S.S. Keerthi and S.K. Shevade and C. Bhattacharyya and K.R.K. Murthy");
    additional.setValue(Field.YEAR, "2001");
    additional.setValue(Field.TITLE, "Improvements to Platt's SMO Algorithm for SVM Classifier Design");
    additional.setValue(Field.JOURNAL, "Neural Computation");
    additional.setValue(Field.VOLUME, "13");
    additional.setValue(Field.NUMBER, "3");
    additional.setValue(Field.PAGES, "637-649");

    return result;
  }

  /**
   * Class for building a binary support vector machine.
   */
  protected class BinaryMISMO
    implements Serializable, RevisionHandler {

    /** for serialization */
    static final long serialVersionUID = -7107082483475433531L;

    /** The Lagrange multipliers. */
    protected double[] m_alpha;

    /** The thresholds.
     */
    protected double m_b, m_bLow, m_bUp;

    /** The indices for m_bLow and m_bUp */
    protected int m_iLow, m_iUp;

    /** The training data. */
    protected Instances m_data;

    /** Weight vector for linear machine. */
    protected double[] m_weights;

    /** Variables to hold weight vector in sparse form.
	(To reduce storage requirements.) */
    protected double[] m_sparseWeights;
    protected int[] m_sparseIndices;

    /** Kernel to use **/
    protected Kernel m_kernel;

    /** The transformed class values. */
    protected double[] m_class;

    /** The current set of errors for all non-bound examples. */
    protected double[] m_errors;

    /* The five different sets used by the algorithm (Keerthi et al.). */
    /** {i: 0 < m_alpha[i] < C} */
    protected SMOset m_I0;
    /** {i: m_class[i] = 1, m_alpha[i] = 0} */
    protected SMOset m_I1;
    /** {i: m_class[i] = -1, m_alpha[i] = C} */
    protected SMOset m_I2;
    /** {i: m_class[i] = 1, m_alpha[i] = C} */
    protected SMOset m_I3;
    /** {i: m_class[i] = -1, m_alpha[i] = 0} */
    protected SMOset m_I4;

    /** The set of support vectors {i: 0 < m_alpha[i]} */
    protected SMOset m_supportVectors;

    /** Stores logistic regression model for probability estimate */
    protected Logistic m_logistic = null;

    /** Stores the weight of the training instances */
    protected double m_sumOfWeights = 0;

    /**
     * Fits logistic regression model to SVM outputs analogue
     * to John Platt's method.
     *
     * @param insts the set of training instances
     * @param cl1 the first class' index
     * @param cl2 the second class' index
     * @param numFolds the number of folds for cross-validation;
     *        &lt;= 0 means fit on the training data directly
     * @param random the random number generator for cross-validation
     * @throws Exception if the sigmoid can't be fit successfully
     */
    protected void fitLogistic(Instances insts, int cl1, int cl2,
			       int numFolds, Random random)
      throws Exception {

      // Create header of instances object: one numeric predictor (the raw
      // SVM output) and the binary class
      FastVector atts = new FastVector(2);
      atts.addElement(new Attribute("pred"));
      FastVector attVals = new FastVector(2);
      attVals.addElement(insts.classAttribute().value(cl1));
      attVals.addElement(insts.classAttribute().value(cl2));
      atts.addElement(new Attribute("class", attVals));
      Instances data = new Instances("data", atts, insts.numInstances());
      data.setClassIndex(1);

      // Collect data for fitting the logistic model
      if (numFolds <= 0) {

	// Use training data
	for (int j = 0; j < insts.numInstances(); j++) {
	  Instance inst = insts.instance(j);
	  double[] vals = new double[2];
	  vals[0] = SVMOutput(-1, inst);
	  if (inst.classValue() == cl2) {
	    vals[1] = 1;
	  }
	  data.add(new DenseInstance(inst.weight(), vals));
	}
      } else {

	// Check whether number of folds too large
	if (numFolds > insts.numInstances()) {
	  numFolds = insts.numInstances();
	}

	// Make copy of instances because we will shuffle them around
	insts = new Instances(insts);

	// Perform three-fold cross-validation to collect
	// unbiased predictions
	insts.randomize(random);
	insts.stratify(numFolds);
	for (int i = 0; i < numFolds; i++) {
	  Instances train = insts.trainCV(numFolds, i, random);
	  // clone this SMO via serialization so the fold model is independent
	  SerializedObject so = new SerializedObject(this);
	  BinaryMISMO smo = (BinaryMISMO)so.getObject();
	  smo.buildClassifier(train, cl1, cl2, false, -1, -1);
	  Instances test = insts.testCV(numFolds, i);
	  for (int j = 0; j < test.numInstances(); j++) {
	    double[] vals = new double[2];
	    vals[0] = smo.SVMOutput(-1, test.instance(j));
	    if (test.instance(j).classValue() == cl2) {
	      vals[1] = 1;
	    }
	    data.add(new DenseInstance(test.instance(j).weight(), vals));
	  }
	}
      }

      // Build logistic regression model
      m_logistic = new Logistic();
      m_logistic.buildClassifier(data);
    }

    /**
     * sets the kernel to use
     *
     * @param value	the kernel to use
     */
    public void setKernel(Kernel value) {
      m_kernel = value;
    }

    /**
     * Returns the kernel to use
     *
     * @return		the current kernel
     */
    public Kernel getKernel() {
      return m_kernel;
    }

    /**
     * Method for building the binary classifier. Implements Keerthi et al.'s
     * Modification 1 of Platt's SMO (dual thresholds m_bLow/m_bUp).
     *
     * @param insts the set of training instances
     * @param cl1 the first class' index
     * @param cl2 the second class' index
     * @param fitLogistic true if logistic model is to be fit
     * @param numFolds number of folds for internal cross-validation
     * @param randomSeed seed value for random number generator for cross-validation
     * @throws Exception if the classifier can't be built successfully
     */
    protected void buildClassifier(Instances insts, int cl1, int cl2,
				   boolean fitLogistic, int numFolds,
				   int randomSeed) throws Exception {

      // Initialize some variables
      m_bUp = -1; m_bLow = 1; m_b = 0;
      m_alpha = null; m_data = null; m_weights = null; m_errors = null;
      m_logistic = null; m_I0 = null; m_I1 = null; m_I2 = null;
      m_I3 = null; m_I4 = null; m_sparseWeights = null; m_sparseIndices = null;

      // Store the sum of weights
      m_sumOfWeights = insts.sumOfWeights();

      // Set class values: cl1 -> -1, cl2 -> +1
      m_class = new double[insts.numInstances()];
      m_iUp = -1; m_iLow = -1;
      for (int i = 0; i < m_class.length; i++) {
	if ((int) insts.instance(i).classValue() == cl1) {
	  m_class[i] = -1; m_iLow = i;
	} else if ((int) insts.instance(i).classValue() == cl2) {
	  m_class[i] = 1; m_iUp = i;
	} else {
	  throw new Exception ("This should never happen!");
	}
      }

      // Check whether one or both classes are missing
      if ((m_iUp == -1) || (m_iLow == -1)) {
	if (m_iUp != -1) {
	  m_b = -1;
	} else if (m_iLow != -1) {
	  m_b = 1;
	} else {
	  m_class = null;
	  return;
	}
	m_supportVectors = new SMOset(0);
	m_alpha = new double[0];
	m_class = new double[0];

	// Fit sigmoid if requested
	if (fitLogistic) {
	  fitLogistic(insts, cl1, cl2, numFolds, new Random(randomSeed));
	}
	return;
      }

      // Set the reference to the data
      m_data = insts;
      m_weights = null;

      // Initialize alpha array to zero
      m_alpha = new double[m_data.numInstances()];

      // Initialize sets
      m_supportVectors = new SMOset(m_data.numInstances());
      m_I0 = new SMOset(m_data.numInstances());
      m_I1 = new SMOset(m_data.numInstances());
      m_I2 = new SMOset(m_data.numInstances());
      m_I3 = new SMOset(m_data.numInstances());
      m_I4 = new SMOset(m_data.numInstances());

      // Clean out some instance variables
      m_sparseWeights = null;
      m_sparseIndices = null;

      // Initialize error cache
      m_errors = new double[m_data.numInstances()];
      m_errors[m_iLow] = 1; m_errors[m_iUp] = -1;

      // Initialize kernel
      m_kernel.buildKernel(m_data);

      // Build up I1 and I4 (all alphas start at 0)
      for (int i = 0; i < m_class.length; i++ ) {
	if (m_class[i] == 1) {
	  m_I1.insert(i);
	} else {
	  m_I4.insert(i);
	}
      }

      // Loop to find all the support vectors
      int numChanged = 0;
      boolean examineAll = true;
      while ((numChanged > 0) || examineAll) {
	numChanged = 0;
	if (examineAll) {
	  for (int i = 0; i < m_alpha.length; i++) {
	    if (examineExample(i)) {
	      numChanged++;
	    }
	  }
	} else {

	  // This code implements Modification 1 from Keerthi et al.'s paper
	  for (int i = 0; i < m_alpha.length; i++) {
	    if ((m_alpha[i] > 0) &&
		(m_alpha[i] < m_C * m_data.instance(i).weight())) {
	      if (examineExample(i)) {
		numChanged++;
	      }

	      // Is optimality on unbound vectors obtained?
	      if (m_bUp > m_bLow - 2 * m_tol) {
		numChanged = 0;
		break;
	      }
	    }
	  }

	  //This is the code for Modification 2 from Keerthi et al.'s paper
	  /*boolean innerLoopSuccess = true;
	    numChanged = 0;
	    while ((m_bUp < m_bLow - 2 * m_tol) && (innerLoopSuccess == true)) {
	    innerLoopSuccess = takeStep(m_iUp, m_iLow, m_errors[m_iLow]);
	    }*/
	}

	if (examineAll) {
	  examineAll = false;
	} else if (numChanged == 0) {
	  examineAll = true;
	}
      }

      // Set threshold
      m_b = (m_bLow + m_bUp) / 2.0;

      // Save memory
      m_kernel.clean();
      m_errors = null;
      m_I0 = m_I1 = m_I2 = m_I3 = m_I4 = null;

      // Fit sigmoid if requested
      if (fitLogistic) {
	fitLogistic(insts, cl1, cl2, numFolds, new Random(randomSeed));
      }
    }

    /**
     * Computes SVM output for given instance.
     *
     * @param index the instance for which output is to be computed
     *        (-1 if the instance is not part of the training data)
     * @param inst the instance
     * @return the output of the SVM for the given instance
     * @throws Exception if something goes wrong
     */
    protected double SVMOutput(int index, Instance inst) throws Exception {

      double result = 0;

      for (int i = m_supportVectors.getNext(-1); i != -1;
	   i = m_supportVectors.getNext(i)) {
	result += m_class[i] * m_alpha[i] * m_kernel.eval(index, i, inst);
      }
      result -= m_b;

      return result;
    }

    /**
     * Prints out the classifier.
* * @return a description of the classifier as a string */ public String toString() { StringBuffer text = new StringBuffer(); int printed = 0; if ((m_alpha == null) && (m_sparseWeights == null)) { return "BinaryMISMO: No model built yet.\n"; } try { text.append("BinaryMISMO\n\n"); for (int i = 0; i < m_alpha.length; i++) { if (m_supportVectors.contains(i)) { double val = m_alpha[i]; if (m_class[i] == 1) { if (printed > 0) { text.append(" + "); } } else { text.append(" - "); } text.append(Utils.doubleToString(val, 12, 4) + " * <"); for (int j = 0; j < m_data.numAttributes(); j++) { if (j != m_data.classIndex()) { text.append(m_data.instance(i).toString(j)); } if (j != m_data.numAttributes() - 1) { text.append(" "); } } text.append("> * X]\n"); printed++; } } if (m_b > 0) { text.append(" - " + Utils.doubleToString(m_b, 12, 4)); } else { text.append(" + " + Utils.doubleToString(-m_b, 12, 4)); } text.append("\n\nNumber of support vectors: " + m_supportVectors.numElements()); int numEval = 0; int numCacheHits = -1; if(m_kernel != null) { numEval = m_kernel.numEvals(); numCacheHits = m_kernel.numCacheHits(); } text.append("\n\nNumber of kernel evaluations: " + numEval); if (numCacheHits >= 0 && numEval > 0) { double hitRatio = 1 - numEval*1.0/(numCacheHits+numEval); text.append(" (" + Utils.doubleToString(hitRatio*100, 7, 3).trim() + "% cached)"); } } catch (Exception e) { e.printStackTrace(); return "Can't print BinaryMISMO classifier."; } return text.toString(); } /** * Examines instance. 
* * @param i2 index of instance to examine * @return true if examination was successfull * @throws Exception if something goes wrong */ protected boolean examineExample(int i2) throws Exception { double y2, F2; int i1 = -1; y2 = m_class[i2]; if (m_I0.contains(i2)) { F2 = m_errors[i2]; } else { F2 = SVMOutput(i2, m_data.instance(i2)) + m_b - y2; m_errors[i2] = F2; // Update thresholds if ((m_I1.contains(i2) || m_I2.contains(i2)) && (F2 < m_bUp)) { m_bUp = F2; m_iUp = i2; } else if ((m_I3.contains(i2) || m_I4.contains(i2)) && (F2 > m_bLow)) { m_bLow = F2; m_iLow = i2; } } // Check optimality using current bLow and bUp and, if // violated, find an index i1 to do joint optimization // with i2... boolean optimal = true; if (m_I0.contains(i2) || m_I1.contains(i2) || m_I2.contains(i2)) { if (m_bLow - F2 > 2 * m_tol) { optimal = false; i1 = m_iLow; } } if (m_I0.contains(i2) || m_I3.contains(i2) || m_I4.contains(i2)) { if (F2 - m_bUp > 2 * m_tol) { optimal = false; i1 = m_iUp; } } if (optimal) { return false; } // For i2 unbound choose the better i1... if (m_I0.contains(i2)) { if (m_bLow - F2 > F2 - m_bUp) { i1 = m_iLow; } else { i1 = m_iUp; } } if (i1 == -1) { throw new Exception("This should never happen!"); } return takeStep(i1, i2, F2); } /** * Method solving for the Lagrange multipliers for * two instances. 
* @param i1 index of the first instance
 * @param i2 index of the second instance
 * @param F2 error of the second instance (output + b - y2)
 * @return true if multipliers could be found (progress was made)
 * @throws Exception if something goes wrong
 */
protected boolean takeStep(int i1, int i2, double F2) throws Exception {

  double alph1, alph2, y1, y2, F1, s, L, H, k11, k12, k22, eta,
         a1, a2, f1, f2, v1, v2, Lobj, Hobj;
  // Per-instance box constraints: C scaled by instance weight.
  double C1 = m_C * m_data.instance(i1).weight();
  double C2 = m_C * m_data.instance(i2).weight();

  // Don't do anything if the two instances are the same
  if (i1 == i2) {
    return false;
  }

  // Initialize variables
  alph1 = m_alpha[i1];
  alph2 = m_alpha[i2];
  y1 = m_class[i1];
  y2 = m_class[i2];
  F1 = m_errors[i1];
  s = y1 * y2;

  // Find the constraints on a2
  if (y1 != y2) {
    L = Math.max(0, alph2 - alph1);
    H = Math.min(C2, C1 + alph2 - alph1);
  } else {
    L = Math.max(0, alph1 + alph2 - C1);
    H = Math.min(C2, alph1 + alph2);
  }
  if (L >= H) {
    return false;
  }

  // Compute second derivative of objective function
  k11 = m_kernel.eval(i1, i1, m_data.instance(i1));
  k12 = m_kernel.eval(i1, i2, m_data.instance(i1));
  k22 = m_kernel.eval(i2, i2, m_data.instance(i2));
  eta = 2 * k12 - k11 - k22;

  // Check if second derivative is negative
  if (eta < 0) {

    // Compute unconstrained maximum
    a2 = alph2 - y2 * (F1 - F2) / eta;

    // Compute constrained maximum (clip into [L, H])
    if (a2 < L) {
      a2 = L;
    } else if (a2 > H) {
      a2 = H;
    }
  } else {

    // Degenerate case: objective is flat/convex along the constraint line,
    // so evaluate it at both endpoints of the diagonal and pick the better.
    f1 = SVMOutput(i1, m_data.instance(i1));
    f2 = SVMOutput(i2, m_data.instance(i2));
    v1 = f1 + m_b - y1 * alph1 * k11 - y2 * alph2 * k12;
    v2 = f2 + m_b - y1 * alph1 * k12 - y2 * alph2 * k22;
    double gamma = alph1 + s * alph2;
    Lobj = (gamma - s * L) + L -
        0.5 * k11 * (gamma - s * L) * (gamma - s * L) -
        0.5 * k22 * L * L -
        s * k12 * (gamma - s * L) * L -
        y1 * (gamma - s * L) * v1 -
        y2 * L * v2;
    Hobj = (gamma - s * H) + H -
        0.5 * k11 * (gamma - s * H) * (gamma - s * H) -
        0.5 * k22 * H * H -
        s * k12 * (gamma - s * H) * H -
        y1 * (gamma - s * H) * v1 -
        y2 * H * v2;
    if (Lobj > Hobj + m_eps) {
      a2 = L;
    } else if (Lobj < Hobj - m_eps) {
      a2 = H;
    } else {
      a2 = alph2;
    }
  }
  // No meaningful change in a2: no progress.
  if (Math.abs(a2 - alph2) < m_eps * (a2 + alph2 + m_eps)) {
    return false;
  }

  // To prevent precision problems
  if (a2 > C2 - m_Del * C2) {
    a2 = C2;
  } else if (a2 <= m_Del * C2) {
    a2 = 0;
  }

  // Recompute a1
  a1 = alph1 + s * (alph2 - a2);

  // To prevent precision problems
  if (a1 > C1 - m_Del * C1) {
    a1 = C1;
  } else if (a1 <= m_Del * C1) {
    a1 = 0;
  }

  // Update sets: support vectors, I0 (unbound), I1..I4 (bound categories
  // by class sign and whether alpha sits at 0 or C).
  if (a1 > 0) {
    m_supportVectors.insert(i1);
  } else {
    m_supportVectors.delete(i1);
  }
  if ((a1 > 0) && (a1 < C1)) {
    m_I0.insert(i1);
  } else {
    m_I0.delete(i1);
  }
  if ((y1 == 1) && (a1 == 0)) {
    m_I1.insert(i1);
  } else {
    m_I1.delete(i1);
  }
  if ((y1 == -1) && (a1 == C1)) {
    m_I2.insert(i1);
  } else {
    m_I2.delete(i1);
  }
  if ((y1 == 1) && (a1 == C1)) {
    m_I3.insert(i1);
  } else {
    m_I3.delete(i1);
  }
  if ((y1 == -1) && (a1 == 0)) {
    m_I4.insert(i1);
  } else {
    m_I4.delete(i1);
  }
  if (a2 > 0) {
    m_supportVectors.insert(i2);
  } else {
    m_supportVectors.delete(i2);
  }
  if ((a2 > 0) && (a2 < C2)) {
    m_I0.insert(i2);
  } else {
    m_I0.delete(i2);
  }
  if ((y2 == 1) && (a2 == 0)) {
    m_I1.insert(i2);
  } else {
    m_I1.delete(i2);
  }
  if ((y2 == -1) && (a2 == C2)) {
    m_I2.insert(i2);
  } else {
    m_I2.delete(i2);
  }
  if ((y2 == 1) && (a2 == C2)) {
    m_I3.insert(i2);
  } else {
    m_I3.delete(i2);
  }
  if ((y2 == -1) && (a2 == 0)) {
    m_I4.insert(i2);
  } else {
    m_I4.delete(i2);
  }

  // Update error cache using new Lagrange multipliers
  for (int j = m_I0.getNext(-1); j != -1; j = m_I0.getNext(j)) {
    if ((j != i1) && (j != i2)) {
      m_errors[j] +=
          y1 * (a1 - alph1) * m_kernel.eval(i1, j, m_data.instance(i1)) +
          y2 * (a2 - alph2) * m_kernel.eval(i2, j, m_data.instance(i2));
    }
  }

  // Update error cache for i1 and i2
  m_errors[i1] += y1 * (a1 - alph1) * k11 + y2 * (a2 - alph2) * k12;
  m_errors[i2] += y1 * (a1 - alph1) * k12 + y2 * (a2 - alph2) * k22;

  // Update array with Lagrange multipliers
  m_alpha[i1] = a1;
  m_alpha[i2] = a2;

  // Update thresholds: scan the unbound set for the extreme errors...
  m_bLow = -Double.MAX_VALUE;
  m_bUp = Double.MAX_VALUE;
  m_iLow = -1;
  m_iUp = -1;
  for (int j = m_I0.getNext(-1); j != -1; j = m_I0.getNext(j)) {
    if (m_errors[j] < m_bUp) {
      m_bUp = m_errors[j];
      m_iUp = j;
    }
    if (m_errors[j] > m_bLow) {
      m_bLow = m_errors[j];
      m_iLow = j;
    }
  }
  // ...and also consider i1/i2 themselves if they left the unbound set.
  if (!m_I0.contains(i1)) {
    if (m_I3.contains(i1) || m_I4.contains(i1)) {
      if (m_errors[i1] > m_bLow) {
        m_bLow = m_errors[i1];
        m_iLow = i1;
      }
    } else {
      if (m_errors[i1] < m_bUp) {
        m_bUp = m_errors[i1];
        m_iUp = i1;
      }
    }
  }
  if (!m_I0.contains(i2)) {
    if (m_I3.contains(i2) || m_I4.contains(i2)) {
      if (m_errors[i2] > m_bLow) {
        m_bLow = m_errors[i2];
        m_iLow = i2;
      }
    } else {
      if (m_errors[i2] < m_bUp) {
        m_bUp = m_errors[i2];
        m_iUp = i2;
      }
    }
  }
  if ((m_iLow == -1) || (m_iUp == -1)) {
    throw new Exception("This should never happen!");
  }

  // Made some progress.
  return true;
}

/**
 * Quick and dirty check whether the quadratic programming problem is solved.
 * Prints any KKT violations to stderr; used for debugging only.
 *
 * @throws Exception if something goes wrong
 */
protected void checkClassifier() throws Exception {

  // Sum of y(i)*alpha(i) should be zero at the optimum.
  double sum = 0;
  for (int i = 0; i < m_alpha.length; i++) {
    if (m_alpha[i] > 0) {
      sum += m_class[i] * m_alpha[i];
    }
  }
  System.err.println("Sum of y(i) * alpha(i): " + sum);

  for (int i = 0; i < m_alpha.length; i++) {
    double output = SVMOutput(i, m_data.instance(i));
    if (Utils.eq(m_alpha[i], 0)) {
      if (Utils.sm(m_class[i] * output, 1)) {
        System.err.println("KKT condition 1 violated: " + m_class[i] * output);
      }
    }
    if (Utils.gr(m_alpha[i], 0) &&
        Utils.sm(m_alpha[i], m_C * m_data.instance(i).weight())) {
      if (!Utils.eq(m_class[i] * output, 1)) {
        System.err.println("KKT condition 2 violated: " + m_class[i] * output);
      }
    }
    if (Utils.eq(m_alpha[i], m_C * m_data.instance(i).weight())) {
      if (Utils.gr(m_class[i] * output, 1)) {
        System.err.println("KKT condition 3 violated: " + m_class[i] * output);
      }
    }
  }
}

/**
 * Returns the revision string.
* @return the revision
 */
public String getRevision() {
  return RevisionUtils.extract("$Revision: 1.6 $");
}
} // end of inner class BinaryMISMO

/** Normalize training data */
public static final int FILTER_NORMALIZE = 0;

/** Standardize training data */
public static final int FILTER_STANDARDIZE = 1;

/** No normalization/standardization */
public static final int FILTER_NONE = 2;

/** The filter to apply to the training data */
public static final Tag [] TAGS_FILTER = {
  new Tag(FILTER_NORMALIZE, "Normalize training data"),
  new Tag(FILTER_STANDARDIZE, "Standardize training data"),
  new Tag(FILTER_NONE, "No normalization/standardization"),
};

/** The binary classifier(s): one per unordered class pair [i][j], i &lt; j */
protected BinaryMISMO[][] m_classifiers = null;

/** The complexity parameter. */
protected double m_C = 1.0;

/** Epsilon for rounding. */
protected double m_eps = 1.0e-12;

/** Tolerance for accuracy of result. */
protected double m_tol = 1.0e-3;

/** Whether to normalize/standardize/neither */
protected int m_filterType = FILTER_NORMALIZE;

/** Use MIMinimax feature space? */
protected boolean m_minimax = false;

/** The filter used to make attributes numeric. */
protected NominalToBinary m_NominalToBinary;

/** The filter used to standardize/normalize all values. */
protected Filter m_Filter = null;

/** The filter used to get rid of missing values. */
protected ReplaceMissingValues m_Missing;

/** The class index from the training data */
protected int m_classIndex = -1;

/** The class attribute */
protected Attribute m_classAttribute;

/** Kernel to use **/
protected Kernel m_kernel = new MIPolyKernel();

/** Turn off all checks and conversions? Turning them off assumes
    that data is purely numeric, doesn't contain any missing values,
    and has a nominal class. Turning them off also means that
    no header information will be stored if the machine is linear.
    Finally, it also assumes that no instance has a weight equal to 0.*/
protected boolean m_checksTurnedOff;

/** Precision constant for updating sets */
protected static double m_Del = 1000 * Double.MIN_VALUE;

/** Whether logistic models are to be fit */
protected boolean m_fitLogisticModels = false;

/** The number of folds for the internal cross-validation */
protected int m_numFolds = -1;

/** The random number seed */
protected int m_randomSeed = 1;

/**
 * Turns off checks for missing values, etc. Use with caution.
 */
public void turnChecksOff() {
  m_checksTurnedOff = true;
}

/**
 * Turns on checks for missing values, etc.
 */
public void turnChecksOn() {
  m_checksTurnedOff = false;
}

/**
 * Returns default capabilities of the classifier.
 *
 * @return the capabilities of this classifier
 */
public Capabilities getCapabilities() {
  // Start from the kernel's capabilities and add what this wrapper handles.
  Capabilities result = getKernel().getCapabilities();
  result.setOwner(this);

  // attributes
  result.enable(Capability.NOMINAL_ATTRIBUTES);
  result.enable(Capability.RELATIONAL_ATTRIBUTES);
  result.enable(Capability.MISSING_VALUES);

  // class
  result.disableAllClasses();
  result.disableAllClassDependencies();
  result.enable(Capability.NOMINAL_CLASS);
  result.enable(Capability.MISSING_CLASS_VALUES);

  // other
  result.enable(Capability.ONLY_MULTIINSTANCE);

  return result;
}

/**
 * Returns the capabilities of this multi-instance classifier for the
 * relational data.
* @return the capabilities of this object
 * @see Capabilities
 */
public Capabilities getMultiInstanceCapabilities() {
  Capabilities result =
      ((MultiInstanceCapabilitiesHandler) getKernel()).getMultiInstanceCapabilities();
  result.setOwner(this);

  // attribute
  result.enableAllAttributeDependencies();

  // with NominalToBinary we can also handle nominal attributes, but only
  // if the kernel can handle numeric attributes
  if (result.handles(Capability.NUMERIC_ATTRIBUTES))
    result.enable(Capability.NOMINAL_ATTRIBUTES);
  result.enable(Capability.MISSING_VALUES);

  return result;
}

/**
 * Method for building the classifier. Implements a one-against-one
 * wrapper for multi-class problems.
 *
 * @param insts the set of training instances
 * @throws Exception if the classifier can't be built successfully
 */
public void buildClassifier(Instances insts) throws Exception {
  if (!m_checksTurnedOff) {
    // can classifier handle the data?
    getCapabilities().testWithFail(insts);

    // remove instances with missing class
    insts = new Instances(insts);
    insts.deleteWithMissingClass();

    /* Removes all the instances with weight equal to 0.
       MUST be done since condition (8) of Keerthi's paper
       is made with the assertion Ci > 0 (See equation (3a). */
    Instances data = new Instances(insts, insts.numInstances());
    for (int i = 0; i < insts.numInstances(); i++) {
      if (insts.instance(i).weight() > 0)
        data.add(insts.instance(i));
    }
    if (data.numInstances() == 0) {
      throw new Exception("No training instances left after removing " +
          "instance with either a weight null or a missing class!");
    }
    insts = data;
  }

  // filter data
  if (!m_checksTurnedOff)
    m_Missing = new ReplaceMissingValues();
  else
    m_Missing = null;

  // Decide whether NominalToBinary is needed: only when the kernel handles
  // numeric attributes and the data actually contains non-numeric ones.
  if (getCapabilities().handles(Capability.NUMERIC_ATTRIBUTES)) {
    boolean onlyNumeric = true;
    if (!m_checksTurnedOff) {
      for (int i = 0; i < insts.numAttributes(); i++) {
        if (i != insts.classIndex()) {
          if (!insts.attribute(i).isNumeric()) {
            onlyNumeric = false;
            break;
          }
        }
      }
    }
    if (!onlyNumeric) {
      m_NominalToBinary = new NominalToBinary();
      // exclude the bag attribute
      m_NominalToBinary.setAttributeIndices("2-last");
    } else {
      m_NominalToBinary = null;
    }
  } else {
    m_NominalToBinary = null;
  }

  if (m_filterType == FILTER_STANDARDIZE)
    m_Filter = new Standardize();
  else if (m_filterType == FILTER_NORMALIZE)
    m_Filter = new Normalize();
  else
    m_Filter = null;

  Instances transformedInsts;
  Filter convertToProp = new MultiInstanceToPropositional();
  Filter convertToMI = new PropositionalToMultiInstance();

  // transform the data into single-instance format
  if (m_minimax) {
    /* using SimpleMI class minimax transform method.
       this method transforms the multi-instance dataset into minmax
       feature space (single-instance) */
    SimpleMI transMinimax = new SimpleMI();
    transMinimax.setTransformMethod(
        new SelectedTag(
            SimpleMI.TRANSFORMMETHOD_MINIMAX, SimpleMI.TAGS_TRANSFORMMETHOD));
    transformedInsts = transMinimax.transform(insts);
  } else {
    convertToProp.setInputFormat(insts);
    transformedInsts = Filter.useFilter(insts, convertToProp);
  }

  // NOTE: filter order matters — missing-value replacement, then
  // nominal-to-binary, then normalization/standardization.
  if (m_Missing != null) {
    m_Missing.setInputFormat(transformedInsts);
    transformedInsts = Filter.useFilter(transformedInsts, m_Missing);
  }

  if (m_NominalToBinary != null) {
    m_NominalToBinary.setInputFormat(transformedInsts);
    transformedInsts = Filter.useFilter(transformedInsts, m_NominalToBinary);
  }

  if (m_Filter != null) {
    m_Filter.setInputFormat(transformedInsts);
    transformedInsts = Filter.useFilter(transformedInsts, m_Filter);
  }

  // convert the single-instance format to multi-instance format
  convertToMI.setInputFormat(transformedInsts);
  insts = Filter.useFilter(transformedInsts, convertToMI);

  m_classIndex = insts.classIndex();
  m_classAttribute = insts.classAttribute();

  // Generate subsets representing each class
  Instances[] subsets = new Instances[insts.numClasses()];
  for (int i = 0; i < insts.numClasses(); i++) {
    subsets[i] = new Instances(insts, insts.numInstances());
  }
  for (int j = 0; j < insts.numInstances(); j++) {
    Instance inst = insts.instance(j);
    subsets[(int) inst.classValue()].add(inst);
  }
  for (int i = 0; i < insts.numClasses(); i++) {
    subsets[i].compactify();
  }

  // Build the binary classifiers: one BinaryMISMO per class pair (i, j).
  Random rand = new Random(m_randomSeed);
  m_classifiers = new BinaryMISMO[insts.numClasses()][insts.numClasses()];
  for (int i = 0; i < insts.numClasses(); i++) {
    for (int j = i + 1; j < insts.numClasses(); j++) {
      m_classifiers[i][j] = new BinaryMISMO();
      m_classifiers[i][j].setKernel(Kernel.makeCopy(getKernel()));
      Instances data = new Instances(insts, insts.numInstances());
      for (int k = 0; k < subsets[i].numInstances(); k++) {
        data.add(subsets[i].instance(k));
      }
      for (int k = 0; k < subsets[j].numInstances(); k++) {
        data.add(subsets[j].instance(k));
      }
      data.compactify();
      data.randomize(rand);
      m_classifiers[i][j].buildClassifier(data, i, j,
          m_fitLogisticModels, m_numFolds, m_randomSeed);
    }
  }
}

/**
 * Estimates class probabilities for given instance.
 *
 * @param inst the instance to compute the distribution for
 * @return the class probabilities
 * @throws Exception if computation fails
 */
public double[] distributionForInstance(Instance inst) throws Exception {
  // convert instance into instances
  Instances insts = new Instances(inst.dataset(), 0);
  insts.add(inst);

  // transform the data into single-instance format,
  // mirroring the pipeline used during training
  Filter convertToProp = new MultiInstanceToPropositional();
  Filter convertToMI = new PropositionalToMultiInstance();

  if (m_minimax) { // using minimax feature space
    SimpleMI transMinimax = new SimpleMI();
    transMinimax.setTransformMethod(
        new SelectedTag(
            SimpleMI.TRANSFORMMETHOD_MINIMAX, SimpleMI.TAGS_TRANSFORMMETHOD));
    insts = transMinimax.transform (insts);
  } else {
    convertToProp.setInputFormat(insts);
    insts = Filter.useFilter( insts, convertToProp);
  }

  // Filter instances
  if (m_Missing != null)
    insts = Filter.useFilter(insts, m_Missing);
  if (m_Filter != null)
    insts = Filter.useFilter(insts, m_Filter);

  // convert the single-instance format to multi-instance format
  convertToMI.setInputFormat(insts);
  insts = Filter.useFilter( insts, convertToMI);

  inst = insts.instance(0);

  if (!m_fitLogisticModels) {
    // Simple voting: each pairwise machine votes for one of its two classes.
    double[] result = new double[inst.numClasses()];
    for (int i = 0; i < inst.numClasses(); i++) {
      for (int j = i + 1; j < inst.numClasses(); j++) {
        if ((m_classifiers[i][j].m_alpha != null) ||
            (m_classifiers[i][j].m_sparseWeights != null)) {
          double output = m_classifiers[i][j].SVMOutput(-1, inst);
          if (output > 0) {
            result[j] += 1;
          } else {
            result[i] += 1;
          }
        }
      }
    }
    Utils.normalize(result);
    return result;
  } else {

    // We only need to do pairwise coupling if there are more
    // then two classes.
    if (inst.numClasses() == 2) {
      double[] newInst = new double[2];
      newInst[0] = m_classifiers[0][1].SVMOutput(-1, inst);
      newInst[1] = Utils.missingValue();
      return m_classifiers[0][1].m_logistic.
          distributionForInstance(new DenseInstance(1, newInst));
    }
    // r[i][j]: probability estimate for class i from machine (i, j);
    // n[i][j]: training weight of machine (i, j).
    double[][] r = new double[inst.numClasses()][inst.numClasses()];
    double[][] n = new double[inst.numClasses()][inst.numClasses()];
    for (int i = 0; i < inst.numClasses(); i++) {
      for (int j = i + 1; j < inst.numClasses(); j++) {
        if ((m_classifiers[i][j].m_alpha != null) ||
            (m_classifiers[i][j].m_sparseWeights != null)) {
          double[] newInst = new double[2];
          newInst[0] = m_classifiers[i][j].SVMOutput(-1, inst);
          newInst[1] = Utils.missingValue();
          r[i][j] = m_classifiers[i][j].m_logistic.
              distributionForInstance(new DenseInstance(1, newInst))[0];
          n[i][j] = m_classifiers[i][j].m_sumOfWeights;
        }
      }
    }
    return pairwiseCoupling(n, r);
  }
}

/**
 * Implements pairwise coupling.
 *
 * @param n the sum of weights used to train each model
 * @param r the probability estimate from each model
 * @return the coupled estimates
 */
public double[] pairwiseCoupling(double[][] n, double[][] r) {

  // Initialize p and u array
  double[] p = new double[r.length];
  for (int i = 0; i < p.length; i++) {
    p[i] = 1.0 / (double) p.length;
  }
  double[][] u = new double[r.length][r.length];
  for (int i = 0; i < r.length; i++) {
    for (int j = i + 1; j < r.length; j++) {
      u[i][j] = 0.5;
    }
  }

  // firstSum doesn't change
  double[] firstSum = new double[p.length];
  for (int i = 0; i < p.length; i++) {
    for (int j = i + 1; j < p.length; j++) {
      firstSum[i] += n[i][j] * r[i][j];
      firstSum[j] += n[i][j] * (1 - r[i][j]);
    }
  }

  // Iterate until convergence
  boolean changed;
  do {
    changed = false;
    double[] secondSum = new double[p.length];
    for (int i = 0; i < p.length; i++) {
      for (int j = i + 1; j < p.length; j++) {
        secondSum[i] += n[i][j] * u[i][j];
        secondSum[j] += n[i][j] * (1 - u[i][j]);
      }
    }
    for (int i = 0; i < p.length; i++) {
      if ((firstSum[i] == 0) || (secondSum[i] == 0)) {
        if (p[i] > 0) {
          changed = true;
        }
        p[i] = 0;
      } else {
        double factor = firstSum[i] / secondSum[i];
        double pOld = p[i];
        p[i] *= factor;
        // Convergence check: any component moving by more than 1e-3
        // forces another iteration.
        if (Math.abs(pOld - p[i]) > 1.0e-3) {
          changed = true;
        }
      }
    }
    Utils.normalize(p);
    for (int i = 0; i < r.length; i++) {
      for (int j = i + 1; j < r.length; j++) {
        u[i][j] = p[i] / (p[i] + p[j]);
      }
    }
  } while (changed);
  return p;
}

/**
 * Returns the weights in sparse format.
 *
 * @return the weights in sparse format
 */
public double [][][] sparseWeights() {
  int numValues = m_classAttribute.numValues();

  double [][][] sparseWeights = new double[numValues][numValues][];
  for (int i = 0; i < numValues; i++) {
    for (int j = i + 1; j < numValues; j++) {
      sparseWeights[i][j] = m_classifiers[i][j].m_sparseWeights;
    }
  }
  return sparseWeights;
}

/**
 * Returns the indices in sparse format.
 *
 * @return the indices in sparse format
 */
public int [][][] sparseIndices() {
  int numValues = m_classAttribute.numValues();

  int [][][] sparseIndices = new int[numValues][numValues][];
  for (int i = 0; i < numValues; i++) {
    for (int j = i + 1; j < numValues; j++) {
      sparseIndices[i][j] = m_classifiers[i][j].m_sparseIndices;
    }
  }
  return sparseIndices;
}

/**
 * Returns the bias of each binary SMO.
 *
 * @return the bias of each binary SMO
 */
public double [][] bias() {
  int numValues = m_classAttribute.numValues();

  double [][] bias = new double[numValues][numValues];
  for (int i = 0; i < numValues; i++) {
    for (int j = i + 1; j < numValues; j++) {
      bias[i][j] = m_classifiers[i][j].m_b;
    }
  }
  return bias;
}

/**
 * Returns the number of values of the class attribute.
 *
 * @return the number values of the class attribute
 */
public int numClassAttributeValues() {
  return m_classAttribute.numValues();
}

/**
 * Returns the names of the class attributes.
* @return the names of the class attributes
 */
public String[] classAttributeNames() {
  int numValues = m_classAttribute.numValues();

  String[] classAttributeNames = new String[numValues];
  for (int i = 0; i < numValues; i++) {
    classAttributeNames[i] = m_classAttribute.value(i);
  }
  return classAttributeNames;
}

/**
 * Returns the attribute names.
 *
 * @return the attribute names
 */
public String[][][] attributeNames() {
  int numValues = m_classAttribute.numValues();

  String[][][] attributeNames = new String[numValues][numValues][];
  for (int i = 0; i < numValues; i++) {
    for (int j = i + 1; j < numValues; j++) {
      int numAttributes = m_classifiers[i][j].m_data.numAttributes();
      String[] attrNames = new String[numAttributes];
      for (int k = 0; k < numAttributes; k++) {
        attrNames[k] = m_classifiers[i][j].m_data.attribute(k).name();
      }
      attributeNames[i][j] = attrNames;
    }
  }
  return attributeNames;
}

/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
public Enumeration listOptions() {
  Vector result = new Vector();

  // options of the superclass first
  Enumeration enm = super.listOptions();
  while (enm.hasMoreElements())
    result.addElement(enm.nextElement());

  result.addElement(new Option(
      "\tTurns off all checks - use with caution!\n"
      + "\tTurning them off assumes that data is purely numeric, doesn't\n"
      + "\tcontain any missing values, and has a nominal class. Turning them\n"
      + "\toff also means that no header information will be stored if the\n"
      + "\tmachine is linear. Finally, it also assumes that no instance has\n"
      + "\ta weight equal to 0.\n"
      + "\t(default: checks on)",
      "no-checks", 0, "-no-checks"));

  result.addElement(new Option(
      "\tThe complexity constant C. (default 1)",
      "C", 1, "-C <double>"));

  result.addElement(new Option(
      "\tWhether to 0=normalize/1=standardize/2=neither.\n"
      + "\t(default 0=normalize)",
      "N", 1, "-N"));

  result.addElement(new Option(
      "\tUse MIminimax feature space. ",
      "I", 0, "-I"));

  result.addElement(new Option(
      "\tThe tolerance parameter. (default 1.0e-3)",
      "L", 1, "-L <double>"));

  result.addElement(new Option(
      "\tThe epsilon for round-off error. (default 1.0e-12)",
      "P", 1, "-P <double>"));

  result.addElement(new Option(
      "\tFit logistic models to SVM outputs. ",
      "M", 0, "-M"));

  result.addElement(new Option(
      "\tThe number of folds for the internal cross-validation. \n"
      + "\t(default -1, use training data)",
      "V", 1, "-V <double>"));

  result.addElement(new Option(
      "\tThe random number seed. (default 1)",
      "W", 1, "-W <double>"));

  result.addElement(new Option(
      "\tThe Kernel to use.\n"
      + "\t(default: weka.classifiers.functions.supportVector.PolyKernel)",
      "K", 1, "-K <classname and parameters>"));

  // finally, the options of the current kernel
  result.addElement(new Option(
      "",
      "", 0, "\nOptions specific to kernel "
      + getKernel().getClass().getName() + ":"));

  enm = ((OptionHandler) getKernel()).listOptions();
  while (enm.hasMoreElements())
    result.addElement(enm.nextElement());

  return result.elements();
}

/**
 * Parses a given list of options. <p/>
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 * <pre> -no-checks
 *  Turns off all checks - use with caution!
 *  Turning them off assumes that data is purely numeric, doesn't
 *  contain any missing values, and has a nominal class. Turning them
 *  off also means that no header information will be stored if the
 *  machine is linear. Finally, it also assumes that no instance has
 *  a weight equal to 0.
 *  (default: checks on)</pre>
 *
 * <pre> -C &lt;double&gt;
 *  The complexity constant C. (default 1)</pre>
 *
 * <pre> -N
 *  Whether to 0=normalize/1=standardize/2=neither.
 *  (default 0=normalize)</pre>
 *
 * <pre> -I
 *  Use MIminimax feature space. </pre>
 *
 * <pre> -L &lt;double&gt;
 *  The tolerance parameter. (default 1.0e-3)</pre>
 *
 * <pre> -P &lt;double&gt;
 *  The epsilon for round-off error.
(default 1.0e-12)</pre> * * <pre> -M * Fit logistic models to SVM outputs. </pre> * * <pre> -V &lt;double&gt; * The number of folds for the internal cross-validation. * (default -1, use training data)</pre> * * <pre> -W &lt;double&gt; * The random number seed. (default 1)</pre> * * <pre> -K &lt;classname and parameters&gt; * The Kernel to use. * (default: weka.classifiers.functions.supportVector.PolyKernel)</pre> * * <pre> * Options specific to kernel weka.classifiers.mi.supportVector.MIPolyKernel: * </pre> * * <pre> -D * Enables debugging output (if available) to be printed. * (default: off)</pre> * * <pre> -no-checks * Turns off all checks - use with caution! * (default: checks on)</pre> * * <pre> -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007)</pre> * * <pre> -E &lt;num&gt; * The Exponent to use. * (default: 1.0)</pre> * * <pre> -L * Use lower-order terms. * (default: no)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; String[] tmpOptions; setChecksTurnedOff(Utils.getFlag("no-checks", options)); tmpStr = Utils.getOption('C', options); if (tmpStr.length() != 0) setC(Double.parseDouble(tmpStr)); else setC(1.0); tmpStr = Utils.getOption('L', options); if (tmpStr.length() != 0) setToleranceParameter(Double.parseDouble(tmpStr)); else setToleranceParameter(1.0e-3); tmpStr = Utils.getOption('P', options); if (tmpStr.length() != 0) setEpsilon(new Double(tmpStr)); else setEpsilon(1.0e-12); setMinimax(Utils.getFlag('I', options)); tmpStr = Utils.getOption('N', options); if (tmpStr.length() != 0) setFilterType(new SelectedTag(Integer.parseInt(tmpStr), TAGS_FILTER)); else setFilterType(new SelectedTag(FILTER_NORMALIZE, TAGS_FILTER)); setBuildLogisticModels(Utils.getFlag('M', options)); tmpStr = Utils.getOption('V', options); if 
(tmpStr.length() != 0) m_numFolds = Integer.parseInt(tmpStr); else m_numFolds = -1; tmpStr = Utils.getOption('W', options); if (tmpStr.length() != 0) setRandomSeed(Integer.parseInt(tmpStr)); else setRandomSeed(1); tmpStr = Utils.getOption('K', options); tmpOptions = Utils.splitOptions(tmpStr); if (tmpOptions.length != 0) { tmpStr = tmpOptions[0]; tmpOptions[0] = ""; setKernel(Kernel.forName(tmpStr, tmpOptions)); } super.setOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { int i; Vector result; String[] options; result = new Vector(); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); if (getChecksTurnedOff()) result.add("-no-checks"); result.add("-C"); result.add("" + getC()); result.add("-L"); result.add("" + getToleranceParameter()); result.add("-P"); result.add("" + getEpsilon()); result.add("-N"); result.add("" + m_filterType); if (getMinimax()) result.add("-I"); if (getBuildLogisticModels()) result.add("-M"); result.add("-V"); result.add("" + getNumFolds()); result.add("-W"); result.add("" + getRandomSeed()); result.add("-K"); result.add("" + getKernel().getClass().getName() + " " + Utils.joinOptions(getKernel().getOptions())); return (String[]) result.toArray(new String[result.size()]); } /** * Disables or enables the checks (which could be time-consuming). Use with * caution! * * @param value if true turns off all checks */ public void setChecksTurnedOff(boolean value) { if (value) turnChecksOff(); else turnChecksOn(); } /** * Returns whether the checks are turned off or not. 
* @return true if the checks are turned off
 */
public boolean getChecksTurnedOff() {
  return m_checksTurnedOff;
}

/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String checksTurnedOffTipText() {
  return "Turns time-consuming checks off - use with caution.";
}

/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String kernelTipText() {
  return "The kernel to use.";
}

/**
 * Gets the kernel to use.
 *
 * @return the kernel
 */
public Kernel getKernel() {
  return m_kernel;
}

/**
 * Sets the kernel to use. Only kernels that can handle multi-instance
 * data (i.e. that implement MultiInstanceCapabilitiesHandler) are accepted.
 *
 * @param value the kernel
 */
public void setKernel(Kernel value) {
  if (!(value instanceof MultiInstanceCapabilitiesHandler))
    throw new IllegalArgumentException(
        "Kernel must be able to handle multi-instance data!\n"
        + "(This one does not implement "
        + MultiInstanceCapabilitiesHandler.class.getName() + ")");

  m_kernel = value;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String cTipText() {
  return "The complexity parameter C.";
}

/**
 * Get the value of C.
 *
 * @return Value of C.
 */
public double getC() {
  return m_C;
}

/**
 * Set the value of C.
 *
 * @param v Value to assign to C.
 */
public void setC(double v) {
  m_C = v;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String toleranceParameterTipText() {
  return "The tolerance parameter (shouldn't be changed).";
}

/**
 * Get the value of tolerance parameter.
 * @return Value of tolerance parameter.
 */
public double getToleranceParameter() {
  return m_tol;
}

/**
 * Set the value of tolerance parameter.
 * @param v Value to assign to tolerance parameter.
 */
public void setToleranceParameter(double v) {
  m_tol = v;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String epsilonTipText() {
  return "The epsilon for round-off error (shouldn't be changed).";
}

/**
 * Get the value of epsilon.
 * @return Value of epsilon.
 */
public double getEpsilon() {
  return m_eps;
}

/**
 * Set the value of epsilon.
 * @param v Value to assign to epsilon.
 */
public void setEpsilon(double v) {
  m_eps = v;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String filterTypeTipText() {
  return "Determines how/if the data will be transformed.";
}

/**
 * Gets how the training data will be transformed. Will be one of
 * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE.
 *
 * @return the filtering mode
 */
public SelectedTag getFilterType() {
  return new SelectedTag(m_filterType, TAGS_FILTER);
}

/**
 * Sets how the training data will be transformed. Should be one of
 * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE.
 *
 * @param newType the new filtering mode
 */
public void setFilterType(SelectedTag newType) {
  // silently ignores tags from a different tag set
  if (newType.getTags() == TAGS_FILTER) {
    m_filterType = newType.getSelectedTag().getID();
  }
}

/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String minimaxTipText() {
  return "Whether the MIMinimax feature space is to be used.";
}

/**
 * Check if the MIMinimax feature space is to be used.
 * @return true if minimax
 */
public boolean getMinimax() {
  return m_minimax;
}

/**
 * Set if the MIMinimax feature space is to be used.
 * @param v true if the minimax feature space should be used
 */
public void setMinimax(boolean v) {
  m_minimax = v;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String buildLogisticModelsTipText() {
  return "Whether to fit logistic models to the outputs (for proper "
    + "probability estimates).";
}

/**
 * Get the value of buildLogisticModels.
 *
 * @return Value of buildLogisticModels.
 */
public boolean getBuildLogisticModels() {
  return m_fitLogisticModels;
}

/**
 * Set the value of buildLogisticModels.
 *
 * @param newbuildLogisticModels Value to assign to buildLogisticModels.
 */
public void setBuildLogisticModels(boolean newbuildLogisticModels) {
  m_fitLogisticModels = newbuildLogisticModels;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String numFoldsTipText() {
  return "The number of folds for cross-validation used to generate "
    + "training data for logistic models (-1 means use training data).";
}

/**
 * Get the value of numFolds.
 *
 * @return Value of numFolds.
 */
public int getNumFolds() {
  return m_numFolds;
}

/**
 * Set the value of numFolds.
 *
 * @param newnumFolds Value to assign to numFolds.
 */
public void setNumFolds(int newnumFolds) {
  m_numFolds = newnumFolds;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String randomSeedTipText() {
  return "Random number seed for the cross-validation.";
}

/**
 * Get the value of randomSeed.
 *
 * @return Value of randomSeed.
 */
public int getRandomSeed() {
  return m_randomSeed;
}

/**
 * Set the value of randomSeed.
 *
 * @param newrandomSeed Value to assign to randomSeed.
 */
public void setRandomSeed(int newrandomSeed) {
  m_randomSeed = newrandomSeed;
}

/**
 * Prints out the classifier.
* * @return a description of the classifier as a string */ public String toString() { StringBuffer text = new StringBuffer(); if ((m_classAttribute == null)) { return "SMO: No model built yet."; } try { text.append("SMO\n\n"); for (int i = 0; i < m_classAttribute.numValues(); i++) { for (int j = i + 1; j < m_classAttribute.numValues(); j++) { text.append("Classifier for classes: " + m_classAttribute.value(i) + ", " + m_classAttribute.value(j) + "\n\n"); text.append(m_classifiers[i][j]); if (m_fitLogisticModels) { text.append("\n\n"); if ( m_classifiers[i][j].m_logistic == null) { text.append("No logistic model has been fit.\n"); } else { text.append(m_classifiers[i][j].m_logistic); } } text.append("\n\n"); } } } catch (Exception e) { return "Can't print SMO classifier."; } return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.6 $"); } /** * Main method for testing this class. * * @param argv the commandline parameters */ public static void main(String[] argv) { runClassifier(new MISMO(), argv); } }
63,754
28.903846
319
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/mi/MISVM.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * MISVM.java
 * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.mi;

import weka.classifiers.Classifier;
import weka.classifiers.functions.SMO;
import weka.classifiers.functions.supportVector.Kernel;
import weka.classifiers.functions.supportVector.PolyKernel;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.MultiInstanceCapabilitiesHandler;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Tag;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.MultiInstanceToPropositional;
import weka.filters.unsupervised.attribute.Normalize;
import weka.filters.unsupervised.attribute.Standardize;
import weka.filters.unsupervised.instance.SparseToNonSparse;

import java.util.Enumeration;
import java.util.Vector;
import weka.classifiers.AbstractClassifier;

/**
 * Implements Stuart Andrews' mi_SVM (Maximum pattern Margin Formulation of
 * MIL), applying weka.classifiers.functions.SMO to the multi-instance problem.
 * <p>
 * The algorithm first assigns each bag's label to every instance it contains,
 * then iterates: train an SMO model on all instances, re-label each instance
 * in every positive bag according to the SVM output, and repeat until the
 * per-instance labels stop changing (or the iteration cap is reached).
 * <p>
 * Reference: Stuart Andrews, Ioannis Tsochantaridis, Thomas Hofmann: Support
 * Vector Machines for Multiple-Instance Learning. In: Advances in Neural
 * Information Processing Systems 15, 561-568, 2003.
 * <p>
 * Options: -C &lt;double&gt; complexity constant (default 1); -N 0|1|2
 * normalize/standardize/neither (default 0); -I &lt;num&gt; max iterations
 * (default 500); -K &lt;classname and parameters&gt; the kernel to use
 * (default PolyKernel); plus the kernel's own options.
 *
 * @author Lin Dong (ld21@cs.waikato.ac.nz)
 * @version $Revision: 5527 $
 * @see weka.classifiers.functions.SMO
 */
public class MISVM
  extends AbstractClassifier
  implements OptionHandler, MultiInstanceCapabilitiesHandler,
             TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 7622231064035278145L;

  /** The filter used to transform the sparse datasets to nonsparse */
  protected Filter m_SparseFilter = new SparseToNonSparse();

  /** The SMO classifier used to compute the SVM solution w,b for the dataset */
  protected SVM m_SVM;

  /** the kernel to use */
  protected Kernel m_kernel = new PolyKernel();

  /** The complexity parameter C. */
  protected double m_C = 1.0;

  /** The filter used to standardize/normalize all values (null when -N 2). */
  protected Filter m_Filter = null;

  /** Whether to normalize/standardize/neither (one of the FILTER_* constants) */
  protected int m_filterType = FILTER_NORMALIZE;

  /** Normalize training data */
  public static final int FILTER_NORMALIZE = 0;
  /** Standardize training data */
  public static final int FILTER_STANDARDIZE = 1;
  /** No normalization/standardization */
  public static final int FILTER_NONE = 2;
  /** The filter choices that can be applied to the training data */
  public static final Tag[] TAGS_FILTER = {
    new Tag(FILTER_NORMALIZE, "Normalize training data"),
    new Tag(FILTER_STANDARDIZE, "Standardize training data"),
    new Tag(FILTER_NONE, "No normalization/standardization"),
  };

  /** the maximum number of label-reassignment iterations to perform */
  protected int m_MaxIterations = 500;

  /** filter used to convert the MI dataset into a single-instance dataset */
  protected MultiInstanceToPropositional m_ConvertToProp =
    new MultiInstanceToPropositional();

  /**
   * Returns a string describing this classifier.
   *
   * @return a description suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "Implements Stuart Andrews' mi_SVM (Maximum pattern Margin "
      + "Formulation of MIL). Applying weka.classifiers.functions.SMO "
      + "to solve multiple instances problem.\n"
      + "The algorithm first assign the bag label to each instance in the "
      + "bag as its initial class label. After that applying SMO to compute "
      + "SVM solution for all instances in positive bags And then reassign "
      + "the class label of each instance in the positive bag according to "
      + "the SVM result Keep on iteration until labels do not change "
      + "anymore.\n\n"
      + "For more information see:\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "Stuart Andrews and Ioannis Tsochantaridis and Thomas Hofmann");
    result.setValue(Field.YEAR, "2003");
    result.setValue(Field.TITLE, "Support Vector Machines for Multiple-Instance Learning");
    result.setValue(Field.BOOKTITLE, "Advances in Neural Information Processing Systems 15");
    result.setValue(Field.PUBLISHER, "MIT Press");
    result.setValue(Field.PAGES, "561-568");

    return result;
  }

  /**
   * Returns an enumeration describing the available options: the superclass
   * options, this classifier's -C/-N/-I/-K options, and the options of the
   * currently selected kernel.
   *
   * @return an enumeration of all the available options
   */
  public Enumeration listOptions() {
    Vector result = new Vector();

    // superclass options first
    Enumeration enm = super.listOptions();
    while (enm.hasMoreElements())
      result.addElement(enm.nextElement());

    result.addElement(new Option(
        "\tThe complexity constant C. (default 1)",
        "C", 1, "-C <double>"));

    result.addElement(new Option(
        "\tWhether to 0=normalize/1=standardize/2=neither.\n"
        + "\t(default: 0=normalize)",
        "N", 1, "-N <default 0>"));

    result.addElement(new Option(
        "\tThe maximum number of iterations to perform.\n"
        + "\t(default: 500)",
        "I", 1, "-I <num>"));

    result.addElement(new Option(
        "\tThe Kernel to use.\n"
        + "\t(default: weka.classifiers.functions.supportVector.PolyKernel)",
        "K", 1, "-K <classname and parameters>"));

    // append the options of the configured kernel
    result.addElement(new Option(
        "", "", 0,
        "\nOptions specific to kernel "
        + getKernel().getClass().getName() + ":"));

    enm = ((OptionHandler) getKernel()).listOptions();
    while (enm.hasMoreElements())
      result.addElement(enm.nextElement());

    return result.elements();
  }

  /**
   * Parses a given list of options.
   * <p>
   * -C &lt;double&gt; complexity constant (default 1);
   * -N 0|1|2 normalize/standardize/neither (default 0=normalize);
   * -I &lt;num&gt; maximum number of iterations (default 500);
   * -K &lt;classname and parameters&gt; the kernel to use (default
   * weka.classifiers.functions.supportVector.PolyKernel); remaining options
   * are passed on to the superclass and the kernel.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String	tmpStr;
    String[]	tmpOptions;

    tmpStr = Utils.getOption('C', options);
    if (tmpStr.length() != 0)
      setC(Double.parseDouble(tmpStr));
    else
      setC(1.0);

    tmpStr = Utils.getOption('N', options);
    if (tmpStr.length() != 0)
      setFilterType(new SelectedTag(Integer.parseInt(tmpStr), TAGS_FILTER));
    else
      setFilterType(new SelectedTag(FILTER_NORMALIZE, TAGS_FILTER));

    tmpStr = Utils.getOption('I', options);
    if (tmpStr.length() != 0)
      setMaxIterations(Integer.parseInt(tmpStr));
    else
      setMaxIterations(500);

    // -K value is "<classname> <kernel options...>"; the first token is the
    // class name, the rest are handed to the kernel itself
    tmpStr = Utils.getOption('K', options);
    tmpOptions = Utils.splitOptions(tmpStr);
    if (tmpOptions.length != 0) {
      tmpStr = tmpOptions[0];
      tmpOptions[0] = "";
      setKernel(Kernel.forName(tmpStr, tmpOptions));
    }

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector result;

    result = new Vector();

    if (getDebug())
      result.add("-D");

    result.add("-C");
    result.add("" + getC());

    result.add("-N");
    result.add("" + m_filterType);

    result.add("-K");
    result.add("" + getKernel().getClass().getName() + " "
        + Utils.joinOptions(getKernel().getOptions()));

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String kernelTipText() {
    return "The kernel to use.";
  }

  /**
   * Gets the kernel to use.
   *
   * @return the kernel
   */
  public Kernel getKernel() {
    return m_kernel;
  }

  /**
   * Sets the kernel to use.
   *
   * @param value the kernel
   */
  public void setKernel(Kernel value) {
    m_kernel = value;
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String filterTypeTipText() {
    return "The filter type for transforming the training data.";
  }

  /**
   * Sets how the training data will be transformed. Should be one of
   * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE; other tags are
   * silently ignored.
   *
   * @param newType the new filtering mode
   */
  public void setFilterType(SelectedTag newType) {
    if (newType.getTags() == TAGS_FILTER) {
      m_filterType = newType.getSelectedTag().getID();
    }
  }

  /**
   * Gets how the training data will be transformed. Will be one of
   * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE.
   *
   * @return the filtering mode
   */
  public SelectedTag getFilterType() {
    return new SelectedTag(m_filterType, TAGS_FILTER);
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String cTipText() {
    return "The value for C.";
  }

  /**
   * Gets the value of the complexity constant C.
   *
   * @return Value of C.
   */
  public double getC() {
    return m_C;
  }

  /**
   * Sets the value of the complexity constant C.
   *
   * @param v Value to assign to C.
   */
  public void setC(double v) {
    m_C = v;
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String maxIterationsTipText() {
    return "The maximum number of iterations to perform.";
  }

  /**
   * Gets the maximum number of iterations.
   *
   * @return the maximum number of iterations.
   */
  public int getMaxIterations() {
    return m_MaxIterations;
  }

  /**
   * Sets the maximum number of iterations.
   * NOTE(review): values below 1 are only reported on stdout and ignored
   * (the previous value is kept) rather than raising an exception.
   *
   * @param value the maximum number of iterations.
   */
  public void setMaxIterations(int value) {
    if (value < 1)
      System.out.println(
          "At least 1 iteration is necessary (provided: " + value + ")!");
    else
      m_MaxIterations = value;
  }

  /**
   * Adapted version of SMO that exposes the raw SVM output of its single
   * binary (class 0 vs class 1) model.
   */
  private class SVM
    extends SMO {

    /** for serialization */
    static final long serialVersionUID = -8325638229658828931L;

    /**
     * Constructor
     */
    protected SVM (){
      super();
    }

    /**
     * Computes the SVM output for a given instance, using the binary model
     * built for the class pair (0, 1).
     *
     * @param index the training-set index of the instance, or -1 when the
     *              instance is not a cached training instance — TODO confirm
     *              against SMO.BinarySMO.SVMOutput
     * @param inst the instance
     * @return the output of the SVM for the given instance
     * @throws Exception in case of an error
     */
    protected double output(int index, Instance inst) throws Exception {
      double output = 0;
      output = m_classifiers[0][1].SVMOutput(index, inst);
      return output;
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 5527 $");
    }
  }

  /**
   * Returns default capabilities of the classifier: nominal and relational
   * attributes (with missing values), binary class only, multi-instance data
   * only.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.RELATIONAL_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class
    result.disableAllClasses();
    result.disableAllClassDependencies();
    result.enable(Capability.BINARY_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    // other
    result.enable(Capability.ONLY_MULTIINSTANCE);

    return result;
  }

  /**
   * Returns the capabilities of this multi-instance classifier for the
   * relational data, derived from the inner SVM's capabilities.
   * NOTE(review): if constructing/copying the kernel throws, the exception is
   * only printed and {@code result} stays null, so the calls below would
   * raise a NullPointerException.
   *
   * @return the capabilities of this object
   * @see Capabilities
   */
  public Capabilities getMultiInstanceCapabilities() {
    SVM		classifier;
    Capabilities	result;

    classifier = null;
    result     = null;

    try {
      classifier = new SVM();
      classifier.setKernel(Kernel.makeCopy(getKernel()));
      result = classifier.getCapabilities();
      result.setOwner(this);
    }
    catch (Exception e) {
      e.printStackTrace();
    }

    // class
    result.disableAllClasses();
    result.enable(Capability.NO_CLASS);

    return result;
  }

  /**
   * Builds the classifier.
   * <p>
   * Each instance initially inherits its bag's class label. The bags are
   * flattened to a propositional dataset, optionally normalized or
   * standardized, and then SMO models are trained repeatedly: after each
   * round, every instance in a positive bag is re-labelled by the sign of
   * the SVM output; if that empties a positive bag of positive instances,
   * the instance(s) with the maximum output are forced back to class 1.
   * The loop stops when the labels no longer change or after
   * {@code m_MaxIterations} rounds.
   *
   * @param train the training data to be used for generating the
   *              boosted classifier.
   * @throws Exception if the classifier could not be built successfully
   */
  public void buildClassifier(Instances train) throws Exception {
    // can classifier handle the data?
    getCapabilities().testWithFail(train);

    // remove instances with missing class
    train = new Instances(train);
    train.deleteWithMissingClass();

    int numBags = train.numInstances(); //number of bags
    int [] bagSize = new int [numBags];
    int classes [] = new int [numBags];

    // class label currently assigned to each single instance (flat order)
    Vector instLabels = new Vector();
    // labels from the previous iteration, for the convergence test
    Vector pre_instLabels = new Vector();

    for (int h = 0; h < numBags; h++)  {//h_th bag
      classes[h] = (int) train.instance(h).classValue();
      bagSize[h] = train.instance(h).relationalValue(1).numInstances();
      // every instance starts out with its bag's label
      for (int i = 0; i < bagSize[h]; i++)
        instLabels.addElement(new Double(classes[h]));
    }

    // convert the training dataset into a single-instance dataset
    m_ConvertToProp.setWeightMethod(
        new SelectedTag(
          MultiInstanceToPropositional.WEIGHTMETHOD_1,
          MultiInstanceToPropositional.TAGS_WEIGHTMETHOD));
    m_ConvertToProp.setInputFormat(train);
    train = Filter.useFilter(train, m_ConvertToProp);
    train.deleteAttributeAt(0); //remove the bagIndex attribute

    if (m_filterType == FILTER_STANDARDIZE)
      m_Filter = new Standardize();
    else if (m_filterType == FILTER_NORMALIZE)
      m_Filter = new Normalize();
    else
      m_Filter = null;

    if (m_Filter != null) {
      m_Filter.setInputFormat(train);
      train = Filter.useFilter(train, m_Filter);
    }

    if (m_Debug) {
      System.out.println("\nIteration History..." );
    }

    if (getDebug())
      System.out.println("\nstart building model ...");

    int index;
    double sum, max_output;
    Vector max_index = new Vector();
    Instance inst = null;
    int loopNum = 0;
    do {
      loopNum++;
      // index walks the flattened instance list in bag order each round
      index = -1;
      if (m_Debug)
        System.out.println("=====================loop: " + loopNum);

      //store the previous label information
      pre_instLabels = (Vector) instLabels.clone();

      // set the proper SMO options in order to build a SVM model
      m_SVM = new SVM();
      m_SVM.setC(getC());
      m_SVM.setKernel(Kernel.makeCopy(getKernel()));
      // SVM model does not normalize/standardize the input dataset as the
      // dataset has already been processed above
      m_SVM.setFilterType(new SelectedTag(FILTER_NONE, TAGS_FILTER));

      m_SVM.buildClassifier(train);

      for (int h = 0; h < numBags; h++) {//h_th bag
        if (classes[h] == 1) { //positive bag
          if (m_Debug)
            System.out.println("--------------- " + h + " ----------------");
          sum = 0;

          //compute outputs f=(w,x)+b for all instances in positive bags
          for (int i = 0; i < bagSize[h]; i++) {
            index ++;

            inst = train.instance(index);
            double output = m_SVM.output(-1, inst);

            // flip the instance label to match the sign of the SVM output
            if (output <= 0) {
              if (inst.classValue() == 1.0) {
                train.instance(index).setClassValue(0.0);
                instLabels.set(index, new Double(0.0));

                if (m_Debug)
                  System.out.println( index + "- changed to 0");
              }
            }
            else {
              if (inst.classValue() == 0.0) {
                train.instance(index).setClassValue(1.0);
                instLabels.set(index, new Double(1.0));

                if (m_Debug)
                  System.out.println(index + "+ changed to 1");
              }
            }

            sum += train.instance(index).classValue();
          }

          /* if the class value of all instances in a positive bag were
             changed to 0.0, find the instance(s) with the maximum SVM output
             and assign the class value 1.0 back, so the bag stays positive */
          if (sum == 0) {
            //find the instance with max SVMOutput value
            max_output = -Double.MAX_VALUE;
            max_index.clear();
            // j ranges over exactly this bag's slice of the flat index
            for (int j = index - bagSize[h] + 1; j < index + 1; j++) {
              inst = train.instance(j);
              double output = m_SVM.output(-1, inst);
              if (max_output < output) {
                max_output = output;
                max_index.clear();
                max_index.add(new Integer(j));
              }
              else if (max_output == output) // collect exact ties as well
                max_index.add(new Integer(j));
            }

            //assign the class value 1.0 to the instances with max SVMOutput
            for (int vecIndex = 0; vecIndex < max_index.size(); vecIndex ++) {
              Integer i = (Integer) max_index.get(vecIndex);
              train.instance(i.intValue()).setClassValue(1.0);
              instLabels.set(i.intValue(), new Double(1.0));

              if (m_Debug)
                System.out.println("##change to 1 ###outpput: " + max_output
                    + " max_index: " + i + " bag: " + h);
            }
          }
        }
        else //negative bags: labels are never changed, just skip past them
          index += bagSize[h];
      }
    } while (!instLabels.equals(pre_instLabels) && loopNum < m_MaxIterations);

    if (getDebug())
      System.out.println("finish building model.");
  }

  /**
   * Computes the distribution for a given exemplar: the bag is classified
   * positive iff at least one of its instances gets a positive SVM output
   * (the result is always a hard 0/1 distribution).
   *
   * @param exmp the exemplar for which distribution is computed
   * @return the distribution
   * @throws Exception if the distribution can't be computed successfully
   */
  public double[] distributionForInstance(Instance exmp) throws Exception {
    double sum = 0;
    double classValue;
    double[] distribution = new double[2];

    Instances testData = new Instances(exmp.dataset(), 0);
    testData.add(exmp);

    // convert the bag into a single-instance dataset, mirroring training
    testData = Filter.useFilter(testData, m_ConvertToProp);
    testData.deleteAttributeAt(0); //remove the bagIndex attribute

    if (m_Filter != null)
      testData = Filter.useFilter(testData, m_Filter);

    for (int j = 0; j < testData.numInstances(); j++) {
      Instance inst = testData.instance(j);
      double output = m_SVM.output(-1, inst);
      if (output <= 0)
        classValue = 0.0;
      else
        classValue = 1.0;
      sum += classValue;
    }

    // sum counts instances with positive output; any positive instance
    // makes the whole bag positive
    if (sum == 0)
      distribution[0] = 1.0;
    else
      distribution[0] = 0.0;
    distribution [1] = 1.0 - distribution[0];

    return distribution;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 5527 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param argv should contain the command line arguments to the
   *             scheme (see Evaluation)
   */
  public static void main(String[] argv) {
    runClassifier(new MISVM(), argv);
  }
}
24,192
28.684663
341
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/mi/MIWrapper.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * MIWrapper.java * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.mi; import weka.classifiers.SingleClassifierEnhancer; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.unsupervised.attribute.MultiInstanceToPropositional; import java.util.Enumeration; import java.util.Vector; /** <!-- globalinfo-start --> * A simple Wrapper method for applying standard propositional learners to multi-instance data.<br/> * <br/> * For more information see:<br/> * <br/> * E. T. Frank, X. Xu (2003). Applying propositional learning algorithms to multi-instance data. Department of Computer Science, University of Waikato, Hamilton, NZ. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;techreport{Frank2003, * address = {Department of Computer Science, University of Waikato, Hamilton, NZ}, * author = {E. T. Frank and X. Xu}, * institution = {University of Waikato}, * month = {06}, * title = {Applying propositional learning algorithms to multi-instance data}, * year = {2003} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P [1|2|3] * The method used in testing: * 1.arithmetic average * 2.geometric average * 3.max probability of positive bag. * (default: 1)</pre> * * <pre> -A [0|1|2|3] * The type of weight setting for each single-instance: * 0.keep the weight to be the same as the original value; * 1.weight = 1.0 * 2.weight = 1.0/Total number of single-instance in the * corresponding bag * 3. weight = Total number of single-instance / (Total * number of bags * Total number of single-instance * in the corresponding bag). * (default: 3)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Xin Xu (xx5@cs.waikato.ac.nz) * @version $Revision: 1.5 $ */ public class MIWrapper extends SingleClassifierEnhancer implements MultiInstanceCapabilitiesHandler, OptionHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -7707766152904315910L; /** The number of the class labels */ protected int m_NumClasses; /** arithmetic average */ public static final int TESTMETHOD_ARITHMETIC = 1; /** geometric average */ public static final int TESTMETHOD_GEOMETRIC = 2; /** max probability of positive bag */ public static final int TESTMETHOD_MAXPROB = 3; /** the test methods */ public static final Tag[] TAGS_TESTMETHOD = { new Tag(TESTMETHOD_ARITHMETIC, "arithmetic average"), new Tag(TESTMETHOD_GEOMETRIC, "geometric average"), new Tag(TESTMETHOD_MAXPROB, "max probability of positive bag") }; /** the test method */ protected int m_Method = TESTMETHOD_GEOMETRIC; /** Filter used to convert MI dataset into single-instance dataset */ protected MultiInstanceToPropositional m_ConvertToProp = new MultiInstanceToPropositional(); /** the single-instance weight setting method */ protected int m_WeightMethod = MultiInstanceToPropositional.WEIGHTMETHOD_INVERSE2; /** * Returns a string describing this filter * * @return a description of the filter suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "A simple Wrapper method for applying standard propositional learners " + "to multi-instance data.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the 
technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.TECHREPORT); result.setValue(Field.AUTHOR, "E. T. Frank and X. Xu"); result.setValue(Field.TITLE, "Applying propositional learning algorithms to multi-instance data"); result.setValue(Field.YEAR, "2003"); result.setValue(Field.MONTH, "06"); result.setValue(Field.INSTITUTION, "University of Waikato"); result.setValue(Field.ADDRESS, "Department of Computer Science, University of Waikato, Hamilton, NZ"); return result; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tThe method used in testing:\n" + "\t1.arithmetic average\n" + "\t2.geometric average\n" + "\t3.max probability of positive bag.\n" + "\t(default: 1)", "P", 1, "-P [1|2|3]")); result.addElement(new Option( "\tThe type of weight setting for each single-instance:\n" + "\t0.keep the weight to be the same as the original value;\n" + "\t1.weight = 1.0\n" + "\t2.weight = 1.0/Total number of single-instance in the\n" + "\t\tcorresponding bag\n" + "\t3. weight = Total number of single-instance / (Total\n" + "\t\tnumber of bags * Total number of single-instance \n" + "\t\tin the corresponding bag).\n" + "\t(default: 3)", "A", 1, "-A [0|1|2|3]")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { result.addElement(enu.nextElement()); } return result.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P [1|2|3] * The method used in testing: * 1.arithmetic average * 2.geometric average * 3.max probability of positive bag. 
* (default: 1)</pre> * * <pre> -A [0|1|2|3] * The type of weight setting for each single-instance: * 0.keep the weight to be the same as the original value; * 1.weight = 1.0 * 2.weight = 1.0/Total number of single-instance in the * corresponding bag * 3. weight = Total number of single-instance / (Total * number of bags * Total number of single-instance * in the corresponding bag). * (default: 3)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setDebug(Utils.getFlag('D', options)); String methodString = Utils.getOption('P', options); if (methodString.length() != 0) { setMethod( new SelectedTag(Integer.parseInt(methodString), TAGS_TESTMETHOD)); } else { setMethod( new SelectedTag(TESTMETHOD_ARITHMETIC, TAGS_TESTMETHOD)); } String weightString = Utils.getOption('A', options); if (weightString.length() != 0) { setWeightMethod( new SelectedTag( Integer.parseInt(weightString), MultiInstanceToPropositional.TAGS_WEIGHTMETHOD)); } else { setWeightMethod( new SelectedTag( MultiInstanceToPropositional.WEIGHTMETHOD_INVERSE2, MultiInstanceToPropositional.TAGS_WEIGHTMETHOD)); } super.setOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector result; String[] options; int i; result = new Vector(); result.add("-P"); result.add("" + m_Method); result.add("-A"); result.add("" + m_WeightMethod); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); return (String[]) result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String weightMethodTipText() { return "The method used for weighting the instances."; } /** * The new method for weighting the instances. * * @param method the new method */ public void setWeightMethod(SelectedTag method){ if (method.getTags() == MultiInstanceToPropositional.TAGS_WEIGHTMETHOD) m_WeightMethod = method.getSelectedTag().getID(); } /** * Returns the current weighting method for instances. * * @return the current weighting method */ public SelectedTag getWeightMethod(){ return new SelectedTag( m_WeightMethod, MultiInstanceToPropositional.TAGS_WEIGHTMETHOD); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String methodTipText() { return "The method used for testing."; } /** * Set the method used in testing. * * @param method the index of method to use. */ public void setMethod(SelectedTag method) { if (method.getTags() == TAGS_TESTMETHOD) m_Method = method.getSelectedTag().getID(); } /** * Get the method used in testing. * * @return the index of method used in testing. */ public SelectedTag getMethod() { return new SelectedTag(m_Method, TAGS_TESTMETHOD); } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.disableAllClassDependencies(); if (super.getCapabilities().handles(Capability.NOMINAL_CLASS)) result.enable(Capability.NOMINAL_CLASS); if (super.getCapabilities().handles(Capability.BINARY_CLASS)) result.enable(Capability.BINARY_CLASS); result.enable(Capability.RELATIONAL_ATTRIBUTES); result.enable(Capability.MISSING_CLASS_VALUES); // other result.enable(Capability.ONLY_MULTIINSTANCE); return result; } /** * Returns the capabilities of this multi-instance classifier for the * relational data. * * @return the capabilities of this object * @see Capabilities */ public Capabilities getMultiInstanceCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.enable(Capability.NO_CLASS); return result; } /** * Builds the classifier * * @param data the training data to be used for generating the * boosted classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(data); // remove instances with missing class Instances train = new Instances(data); train.deleteWithMissingClass(); if (m_Classifier == null) { throw new Exception("A base classifier has not been specified!"); } if (getDebug()) System.out.println("Start training ..."); m_NumClasses = train.numClasses(); //convert the training dataset into single-instance dataset m_ConvertToProp.setWeightMethod(getWeightMethod()); m_ConvertToProp.setInputFormat(train); train = Filter.useFilter(train, m_ConvertToProp); train.deleteAttributeAt(0); // remove the bag index attribute m_Classifier.buildClassifier(train); } /** * Computes the distribution for a given exemplar * * @param exmp the exemplar for which distribution is computed * @return the distribution * @throws Exception if the distribution can't be computed successfully */ public double[] distributionForInstance(Instance exmp) throws Exception { Instances testData = new Instances (exmp.dataset(),0); testData.add(exmp); // convert the training dataset into single-instance dataset m_ConvertToProp.setWeightMethod( new SelectedTag( MultiInstanceToPropositional.WEIGHTMETHOD_ORIGINAL, MultiInstanceToPropositional.TAGS_WEIGHTMETHOD)); testData = Filter.useFilter(testData, m_ConvertToProp); testData.deleteAttributeAt(0); //remove the bag index attribute // Compute the log-probability of the bag double [] distribution = new double[m_NumClasses]; double nI = (double)testData.numInstances(); double [] maxPr = new double [m_NumClasses]; for(int i=0; i<nI; i++){ double[] dist = m_Classifier.distributionForInstance(testData.instance(i)); for(int j=0; j<m_NumClasses; j++){ switch(m_Method){ case TESTMETHOD_ARITHMETIC: distribution[j] += dist[j]/nI; break; case TESTMETHOD_GEOMETRIC: // Avoid 0/1 probability if(dist[j]<0.001) dist[j] = 0.001; else if(dist[j]>0.999) dist[j] = 0.999; distribution[j] += Math.log(dist[j])/nI; break; case TESTMETHOD_MAXPROB: if (dist[j]>maxPr[j]) maxPr[j] = dist[j]; break; } } } 
if(m_Method == TESTMETHOD_GEOMETRIC) for(int j=0; j<m_NumClasses; j++) distribution[j] = Math.exp(distribution[j]); if(m_Method == TESTMETHOD_MAXPROB){ // for positive bag distribution[1] = maxPr[1]; distribution[0] = 1 - distribution[1]; } if (Utils.eq(Utils.sum(distribution), 0)) { for (int i = 0; i < distribution.length; i++) distribution[i] = 1.0 / (double) distribution.length; } else { Utils.normalize(distribution); } return distribution; } /** * Gets a string describing the classifier. * * @return a string describing the classifer built. */ public String toString() { return "MIWrapper with base classifier: \n"+m_Classifier.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.5 $"); } /** * Main method for testing this class. * * @param argv should contain the command line arguments to the * scheme (see Evaluation) */ public static void main(String[] argv) { runClassifier(new MIWrapper(), argv); } }
16,721
29.348457
165
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/mi/SimpleMI.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * SimpleMI.java * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.mi; import weka.classifiers.SingleClassifierEnhancer; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; import weka.core.Capabilities.Capability; import java.util.Enumeration; import java.util.Vector; import weka.core.DenseInstance; /** <!-- globalinfo-start --> * Reduces MI data into mono-instance data. 
* <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M [1|2|3] * The method used in transformation: * 1.arithmatic average; 2.geometric centor; * 3.using minimax combined features of a bag (default: 1) * * Method 3: * Define s to be the vector of the coordinate-wise maxima * and minima of X, ie., * s(X)=(minx1, ..., minxm, maxx1, ...,maxxm), transform * the exemplars into mono-instance which contains attributes * s(X)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Xin Xu (xx5@cs.waikato.ac.nz) * @author Lin Dong (ld21@cs.waikato.ac.nz) * @version $Revision: 1.6 $ */ public class SimpleMI extends SingleClassifierEnhancer implements OptionHandler, MultiInstanceCapabilitiesHandler { /** for serialization */ static final long serialVersionUID = 9137795893666592662L; /** arithmetic average */ public static final int TRANSFORMMETHOD_ARITHMETIC = 1; /** geometric average */ public static final int TRANSFORMMETHOD_GEOMETRIC = 2; /** using minimax combined features of a bag */ public static final int TRANSFORMMETHOD_MINIMAX = 3; /** the transformation methods */ public static final Tag[] TAGS_TRANSFORMMETHOD = { new Tag(TRANSFORMMETHOD_ARITHMETIC, "arithmetic average"), new Tag(TRANSFORMMETHOD_GEOMETRIC, "geometric average"), new Tag(TRANSFORMMETHOD_MINIMAX, "using minimax combined features of a bag") }; /** the method used in transformation */ protected int m_TransformMethod = TRANSFORMMETHOD_ARITHMETIC; /** * Returns a string describing this filter * * @return a description of the filter suitable for * 
displaying in the explorer/experimenter gui */ public String globalInfo() { return "Reduces MI data into mono-instance data."; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tThe method used in transformation:\n" + "\t1.arithmatic average; 2.geometric centor;\n" + "\t3.using minimax combined features of a bag (default: 1)\n\n" + "\tMethod 3:\n" + "\tDefine s to be the vector of the coordinate-wise maxima\n" + "\tand minima of X, ie., \n" + "\ts(X)=(minx1, ..., minxm, maxx1, ...,maxxm), transform\n" + "\tthe exemplars into mono-instance which contains attributes\n" + "\ts(X)", "M", 1, "-M [1|2|3]")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { result.addElement(enu.nextElement()); } return result.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M [1|2|3] * The method used in transformation: * 1.arithmatic average; 2.geometric centor; * 3.using minimax combined features of a bag (default: 1) * * Method 3: * Define s to be the vector of the coordinate-wise maxima * and minima of X, ie., * s(X)=(minx1, ..., minxm, maxx1, ...,maxxm), transform * the exemplars into mono-instance which contains attributes * s(X)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. 
* (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setDebug(Utils.getFlag('D', options)); String methodString = Utils.getOption('M', options); if (methodString.length() != 0) { setTransformMethod( new SelectedTag( Integer.parseInt(methodString), TAGS_TRANSFORMMETHOD)); } else { setTransformMethod( new SelectedTag( TRANSFORMMETHOD_ARITHMETIC, TAGS_TRANSFORMMETHOD)); } super.setOptions(options); } /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector result; String[] options; int i; result = new Vector(); result.add("-M"); result.add("" + m_TransformMethod); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); return (String[]) result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String transformMethodTipText() { return "The method used in transformation."; } /** * Set the method used in transformation. * * @param newMethod the index of method to use. */ public void setTransformMethod(SelectedTag newMethod) { if (newMethod.getTags() == TAGS_TRANSFORMMETHOD) m_TransformMethod = newMethod.getSelectedTag().getID(); } /** * Get the method used in transformation. * * @return the index of method used. 
*/ public SelectedTag getTransformMethod() { return new SelectedTag(m_TransformMethod, TAGS_TRANSFORMMETHOD); } /** * Implements MITransform (3 type of transformation) 1.arithmatic average; * 2.geometric centor; 3.merge minima and maxima attribute value together * * @param train the multi-instance dataset (with relational attribute) * @return the transformed dataset with each bag contain mono-instance * (without relational attribute) so that any classifier not for MI dataset * can be applied on it. * @throws Exception if the transformation fails */ public Instances transform(Instances train) throws Exception{ Attribute classAttribute = (Attribute) train.classAttribute().copy(); Attribute bagLabel = (Attribute) train.attribute(0); double labelValue; Instances newData = train.attribute(1).relation().stringFreeStructure(); //insert a bag label attribute at the begining newData.insertAttributeAt(bagLabel, 0); //insert a class attribute at the end newData.insertAttributeAt(classAttribute, newData.numAttributes()); newData.setClassIndex(newData.numAttributes()-1); Instances mini_data = newData.stringFreeStructure(); Instances max_data = newData.stringFreeStructure(); Instance newInst = new DenseInstance (newData.numAttributes()); Instance mini_Inst = new DenseInstance (mini_data.numAttributes()); Instance max_Inst = new DenseInstance (max_data.numAttributes()); newInst.setDataset(newData); mini_Inst.setDataset(mini_data); max_Inst.setDataset(max_data); double N= train.numInstances( );//number of bags for(int i=0; i<N; i++){ int attIdx =1; Instance bag = train.instance(i); //retrieve the bag instance labelValue= bag.value(0); if (m_TransformMethod != TRANSFORMMETHOD_MINIMAX) newInst.setValue(0, labelValue); else { mini_Inst.setValue(0, labelValue); max_Inst.setValue(0, labelValue); } Instances data = bag.relationalValue(1); // retrieve relational value for each bag for(int j=0; j<data.numAttributes( ); j++){ double value; if(m_TransformMethod == 
TRANSFORMMETHOD_ARITHMETIC){ value = data.meanOrMode(j); newInst.setValue(attIdx++, value); } else if (m_TransformMethod == TRANSFORMMETHOD_GEOMETRIC){ double[] minimax = minimax(data, j); value = (minimax[0]+minimax[1])/2.0; newInst.setValue(attIdx++, value); } else { //m_TransformMethod == TRANSFORMMETHOD_MINIMAX double[] minimax = minimax(data, j); mini_Inst.setValue(attIdx, minimax[0]);//minima value max_Inst.setValue(attIdx, minimax[1]);//maxima value attIdx++; } } if (m_TransformMethod == TRANSFORMMETHOD_MINIMAX) { if (!bag.classIsMissing()) max_Inst.setClassValue(bag.classValue()); //set class value mini_data.add(mini_Inst); max_data.add(max_Inst); } else{ if (!bag.classIsMissing()) newInst.setClassValue(bag.classValue()); //set class value newData.add(newInst); } } if (m_TransformMethod == TRANSFORMMETHOD_MINIMAX) { mini_data.setClassIndex(-1); mini_data.deleteAttributeAt(mini_data.numAttributes()-1); //delete class attribute for the minima data max_data.deleteAttributeAt(0); // delete the bag label attribute for the maxima data newData = Instances.mergeInstances(mini_data, max_data); //merge minima and maxima data newData.setClassIndex(newData.numAttributes()-1); } return newData; } /** * Get the minimal and maximal value of a certain attribute in a certain data * * @param data the data * @param attIndex the index of the attribute * @return the double array containing in entry 0 for min and 1 for max. */ public static double[] minimax(Instances data, int attIndex){ double[] rt = {Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY}; for(int i=0; i<data.numInstances(); i++){ double val = data.instance(i).value(attIndex); if(val > rt[1]) rt[1] = val; if(val < rt[0]) rt[0] = val; } for(int j=0; j<2; j++) if(Double.isInfinite(rt[j])) rt[j] = Double.NaN; return rt; } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.RELATIONAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.disableAllClasses(); result.disableAllClassDependencies(); if (super.getCapabilities().handles(Capability.NOMINAL_CLASS)) result.enable(Capability.NOMINAL_CLASS); if (super.getCapabilities().handles(Capability.BINARY_CLASS)) result.enable(Capability.BINARY_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // other result.enable(Capability.ONLY_MULTIINSTANCE); return result; } /** * Returns the capabilities of this multi-instance classifier for the * relational data. * * @return the capabilities of this object * @see Capabilities */ public Capabilities getMultiInstanceCapabilities() { Capabilities result = super.getCapabilities(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.disableAllClasses(); result.enable(Capability.NO_CLASS); return result; } /** * Builds the classifier * * @param train the training data to be used for generating the * boosted classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances train) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(train); // remove instances with missing class train = new Instances(train); train.deleteWithMissingClass(); if (m_Classifier == null) { throw new Exception("A base classifier has not been specified!"); } if (getDebug()) System.out.println("Start training ..."); Instances data = transform(train); data.deleteAttributeAt(0); // delete the bagID attribute m_Classifier.buildClassifier(data); if (getDebug()) System.out.println("Finish building model"); } /** * Computes the distribution for a given exemplar * * @param newBag the exemplar for which distribution is computed * @return the distribution * @throws Exception if the distribution can't be computed successfully */ public double[] distributionForInstance(Instance newBag) throws Exception { double [] distribution = new double[2]; Instances test = new Instances (newBag.dataset(), 0); test.add(newBag); test = transform(test); test.deleteAttributeAt(0); Instance newInst=test.firstInstance(); distribution = m_Classifier.distributionForInstance(newInst); return distribution; } /** * Gets a string describing the classifier. * * @return a string describing the classifer built. */ public String toString() { return "SimpleMI with base classifier: \n"+m_Classifier.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.6 $"); } /** * Main method for testing this class. * * @param argv should contain the command line arguments to the * scheme (see Evaluation) */ public static void main(String[] argv) { runClassifier(new SimpleMI(), argv); } }
15,504
29.642292
108
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/mi/supportVector/MIPolyKernel.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * MIPolyKernel.java * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.mi.supportVector; import weka.classifiers.functions.supportVector.PolyKernel; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.RevisionUtils; import weka.core.Capabilities.Capability; /** <!-- globalinfo-start --> * The polynomial kernel : K(x, y) = &lt;x, y&gt;^p or K(x, y) = (&lt;x, y&gt;+1)^p * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Enables debugging output (if available) to be printed. * (default: off)</pre> * * <pre> -no-checks * Turns off all checks - use with caution! * (default: checks on)</pre> * * <pre> -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007)</pre> * * <pre> -E &lt;num&gt; * The Exponent to use. * (default: 1.0)</pre> * * <pre> -L * Use lower-order terms. 
* (default: no)</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Shane Legg (shane@intelligenesis.net) (sparse vector code) * @author Stuart Inglis (stuart@reeltwo.com) (sparse vector code) * @author Lin Dong (ld21@cs.waikato.ac.nz) (MIkernel) * @version $Revision: 1.4 $ */ public class MIPolyKernel extends PolyKernel implements MultiInstanceCapabilitiesHandler { /** for serialiation */ private static final long serialVersionUID = 7926421479341051777L; /** * default constructor - does nothing. */ public MIPolyKernel() { super(); } /** * Creates a new <code>MIPolyKernel</code> instance. * * @param data the training dataset used. * @param cacheSize the size of the cache (a prime number) * @param exponent the exponent to use * @param lowerOrder whether to use lower-order terms * @throws Exception if something goes wrong */ public MIPolyKernel(Instances data, int cacheSize, double exponent, boolean lowerOrder) throws Exception { super(data, cacheSize, exponent, lowerOrder); } /** * * @param id1 the index of instance 1 * @param id2 the index of instance 2 * @param inst1 the instance 1 object * @return the dot product * @throws Exception if something goes wrong */ protected double evaluate(int id1, int id2, Instance inst1) throws Exception { double result, res; Instances data1= new Instances(inst1.relationalValue(1)); Instances data2; if(id1==id2) data2= new Instances(data1); else data2 = new Instances (m_data.instance(id2).relationalValue(1)); res=0; for(int i=0; i<data1.numInstances();i++){ for (int j=0; j<data2.numInstances(); j++){ result = dotProd(data1.instance(i), data2.instance(j)); // Use lower order terms? if (getUseLowerOrder()) { result += 1.0; } if (getExponent() != 1.0) { result = Math.pow(result, getExponent()); } res += result; } } return res; } /** * Returns the Capabilities of this kernel. 
* * @return the capabilities of this object * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.RELATIONAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enableAllClasses(); // other result.enable(Capability.ONLY_MULTIINSTANCE); return result; } /** * Returns the capabilities of this multi-instance kernel for the * relational data. * * @return the capabilities of this object * @see Capabilities */ public Capabilities getMultiInstanceCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.enable(Capability.NO_CLASS); return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.4 $"); } }
5,067
25.814815
83
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/mi/supportVector/MIRBFKernel.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * MIRBFKernel.java * Copyright (C) 2005 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.mi.supportVector; import weka.classifiers.functions.supportVector.RBFKernel; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.RevisionUtils; import weka.core.Capabilities.Capability; /** <!-- globalinfo-start --> * The RBF kernel. K(x, y) = e^-(gamma * &lt;x-y, x-y&gt;^2) * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Enables debugging output (if available) to be printed. * (default: off)</pre> * * <pre> -no-checks * Turns off all checks - use with caution! * (default: checks on)</pre> * * <pre> -C &lt;num&gt; * The size of the cache (a prime number), 0 for full cache and * -1 to turn it off. * (default: 250007)</pre> * * <pre> -G &lt;num&gt; * The Gamma parameter. * (default: 0.01)</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Shane Legg (shane@intelligenesis.net) (sparse vector code) * @author Stuart Inglis (stuart@reeltwo.com) (sparse vector code) * @author J. 
Lindgren (jtlindgr{at}cs.helsinki.fi) (RBF kernel) * @author Lin Dong (ld21@cs.waikato.ac.nz) (MIkernel) * @version $Revision: 1.4 $ */ public class MIRBFKernel extends RBFKernel implements MultiInstanceCapabilitiesHandler { /** for serialiation */ private static final long serialVersionUID = -8711882393708956962L; /** The precalculated dotproducts of &lt;inst_i,inst_i&gt; */ protected double m_kernelPrecalc[][]; /** * default constructor - does nothing. */ public MIRBFKernel() { super(); } /** * Constructor. * * @param data the data to use * @param cacheSize the size of the cache * @param gamma the bandwidth * @throws Exception if something goes wrong */ public MIRBFKernel(Instances data, int cacheSize, double gamma) throws Exception { super(data, cacheSize, gamma); } /** * * @param id1 the index of instance 1 * @param id2 the index of instance 2 * @param inst1 the instance 1 object * @return the dot product * @throws Exception if something goes wrong */ protected double evaluate(int id1, int id2, Instance inst1) throws Exception { double result = 0; Instances insts1, insts2; if (id1 == -1) insts1 = new Instances(inst1.relationalValue(1)); else insts1 = new Instances(m_data.instance(id1).relationalValue(1)); insts2 = new Instances (m_data.instance(id2).relationalValue(1)); double precalc1=0; for(int i = 0; i < insts1.numInstances(); i++){ for (int j = 0; j < insts2.numInstances(); j++){ if (id1 == -1) precalc1 = dotProd(insts1.instance(i), insts1.instance(i)); else precalc1 = m_kernelPrecalc[id1][i]; double res = Math.exp(m_gamma*(2. * dotProd(insts1.instance(i), insts2.instance(j)) -precalc1 - m_kernelPrecalc[id2][j] ) ); result += res; } } return result; } /** * initializes variables etc. * * @param data the data to use */ protected void initVars(Instances data) { super.initVars(data); m_kernelPrecalc = new double[data.numInstances()][]; } /** * Returns the Capabilities of this kernel. 
* * @return the capabilities of this object * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.RELATIONAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enableAllClasses(); // other result.enable(Capability.ONLY_MULTIINSTANCE); return result; } /** * Returns the capabilities of this multi-instance kernel for the * relational data. * * @return the capabilities of this object * @see Capabilities */ public Capabilities getMultiInstanceCapabilities() { Capabilities result = super.getCapabilities(); // class result.disableAllClasses(); result.enable(Capability.NO_CLASS); return result; } /** * builds the kernel with the given data. Initializes the kernel cache. * The actual size of the cache in bytes is (64 * cacheSize). * * @param data the data to base the kernel on * @throws Exception if something goes wrong */ public void buildKernel(Instances data) throws Exception { // does kernel handle the data? if (!getChecksTurnedOff()) getCapabilities().testWithFail(data); initVars(data); for (int i = 0; i < data.numInstances(); i++){ Instances insts = new Instances(data.instance(i).relationalValue(1)); m_kernelPrecalc[i] = new double [insts.numInstances()]; for (int j = 0; j < insts.numInstances(); j++) m_kernelPrecalc[i][j] = dotProd(insts.instance(j), insts.instance(j)); } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.4 $"); } }
6,069
26.844037
133
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/misc/HyperPipes.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * HyperPipes.java * Copyright (C) 2002 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.misc; import weka.classifiers.Classifier; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.UnsupportedAttributeTypeException; import weka.core.Utils; import weka.core.Capabilities.Capability; import java.io.Serializable; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * Class implementing a HyperPipe classifier. For each category a HyperPipe is constructed that contains all points of that category (essentially records the attribute bounds observed for each category). Test instances are classified according to the category that "most contains the instance".<br/> * Does not handle numeric class, or missing values in test cases. Extremely simple algorithm, but has the advantage of being extremely fast, and works quite well when you have "smegloads" of attributes. 
* <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Lucio de Souza Coelho (lucio@intelligenesis.net) * @author Len Trigg (len@reeltwo.com) * @version $Revision: 5528 $ */ public class HyperPipes extends AbstractClassifier { /** for serialization */ static final long serialVersionUID = -7527596632268975274L; /** The index of the class attribute */ protected int m_ClassIndex; /** The structure of the training data */ protected Instances m_Instances; /** Stores the HyperPipe for each class */ protected HyperPipe [] m_HyperPipes; /** a ZeroR model in case no model can be built from the data */ protected Classifier m_ZeroR; /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class implementing a HyperPipe classifier. For each category a " + "HyperPipe is constructed that contains all points of that category " + "(essentially records the attribute bounds observed for each category). " + "Test instances are classified according to the category that \"most " + "contains the instance\".\n" + "Does not handle numeric class, or missing values in test cases. Extremely " + "simple algorithm, but has the advantage of being extremely fast, and " + "works quite well when you have \"smegloads\" of attributes."; } /** * Represents an n-dimensional structure that bounds all instances * passed to it (generally all of a given class value). 
*/ class HyperPipe implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = 3972254260367902025L; /** Contains the numeric bounds of all instances in the HyperPipe */ protected double [][] m_NumericBounds; /** Contains the nominal bounds of all instances in the HyperPipe */ protected boolean [][] m_NominalBounds; /** * Creates the HyperPipe as the n-dimensional parallel-piped * with minimum volume containing all the points in * pointSet. * * @param instances all instances belonging to the same class * @throws Exception if missing values are found */ public HyperPipe(Instances instances) throws Exception { m_NumericBounds = new double [instances.numAttributes()][]; m_NominalBounds = new boolean [instances.numAttributes()][]; for (int i = 0; i < instances.numAttributes(); i++) { switch (instances.attribute(i).type()) { case Attribute.NUMERIC: m_NumericBounds[i] = new double [2]; m_NumericBounds[i][0] = Double.POSITIVE_INFINITY; m_NumericBounds[i][1] = Double.NEGATIVE_INFINITY; break; case Attribute.NOMINAL: m_NominalBounds[i] = new boolean [instances.attribute(i).numValues()]; break; default: throw new UnsupportedAttributeTypeException("Cannot process string attributes!"); } } for (int i = 0; i < instances.numInstances(); i++) { addInstance(instances.instance(i)); } } /** * Updates the bounds arrays with a single instance. Missing values * are ignored (i.e. they don't change the bounds for that attribute) * * @param instance the instance * @throws Exception if any missing values are encountered */ public void addInstance(Instance instance) throws Exception { for (int j = 0; j < instance.numAttributes(); j++) { if ((j != m_ClassIndex) && (!instance.isMissing(j))) { double current = instance.value(j); if (m_NumericBounds[j] != null) { // i.e. a numeric attribute if (current < m_NumericBounds[j][0]) m_NumericBounds[j][0] = current; if (current > m_NumericBounds[j][1]) m_NumericBounds[j][1] = current; } else { // i.e. 
a nominal attribute m_NominalBounds[j][(int) current] = true; } } } } /** * Returns the fraction of the dimensions of a given instance with * values lying within the corresponding bounds of the HyperPipe. * * @param instance the instance * @return the fraction of dimensions * @throws Exception if any missing values are encountered */ public double partialContains(Instance instance) throws Exception { int count = 0; for (int i = 0; i < instance.numAttributes(); i++) { if (i == m_ClassIndex) { continue; } if (instance.isMissing(i)) { continue; } double current = instance.value(i); if (m_NumericBounds[i] != null) { // i.e. a numeric attribute if ((current >= m_NumericBounds[i][0]) && (current <= m_NumericBounds[i][1])) { count++; } } else { // i.e. a nominal attribute if (m_NominalBounds[i][(int) current]) { count++; } } } return ((double)count) / (instance.numAttributes() - 1); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5528 $"); } } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @throws Exception if the classifier has not been generated successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); // only class? -> build ZeroR model if (instances.numAttributes() == 1) { System.err.println( "Cannot build model (only class attribute present in data!), " + "using ZeroR model instead!"); m_ZeroR = new weka.classifiers.rules.ZeroR(); m_ZeroR.buildClassifier(instances); return; } else { m_ZeroR = null; } m_ClassIndex = instances.classIndex(); m_Instances = new Instances(instances, 0); // Copy the structure for ref // Create the HyperPipe for each class m_HyperPipes = new HyperPipe [instances.numClasses()]; for (int i = 0; i < m_HyperPipes.length; i++) { m_HyperPipes[i] = new HyperPipe(new Instances(instances, 0)); } // Add the instances for (int i = 0; i < instances.numInstances(); i++) { updateClassifier(instances.instance(i)); } } /** * Updates the classifier. * * @param instance the instance to be put into the classifier * @throws Exception if the instance could not be included successfully */ public void updateClassifier(Instance instance) throws Exception { if (instance.classIsMissing()) { return; } m_HyperPipes[(int) instance.classValue()].addInstance(instance); } /** * Classifies the given test instance. * * @param instance the instance to be classified * @return the predicted class for the instance * @throws Exception if the instance can't be classified */ public double [] distributionForInstance(Instance instance) throws Exception { // default model? 
if (m_ZeroR != null) { return m_ZeroR.distributionForInstance(instance); } double [] dist = new double[m_HyperPipes.length]; for (int j = 0; j < m_HyperPipes.length; j++) { dist[j] = m_HyperPipes[j].partialContains(instance); } double sum = Utils.sum(dist); if (sum <= 0) { for (int j = 0; j < dist.length; j++) { dist[j] = 1.0 / (double)dist.length; } return dist; } else { Utils.normalize(dist, sum); return dist; } } /** * Returns a description of this classifier. * * @return a description of this classifier as a string. */ public String toString() { // only ZeroR model? if (m_ZeroR != null) { StringBuffer buf = new StringBuffer(); buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n"); buf.append(m_ZeroR.toString()); return buf.toString(); } if (m_HyperPipes == null) { return ("HyperPipes classifier"); } StringBuffer text = new StringBuffer("HyperPipes classifier\n"); /* Perhaps print out the bounds for each HyperPipe. for (int i = 0; i < m_HyperPipes.length; i++) { text.append("HyperPipe for class: " + m_Instances.attribute(m_ClassIndex).value(i) + "\n"); text.append(m_HyperPipes[i] + "\n\n"); } */ return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5528 $"); } /** * Main method for testing this class. * * @param argv should contain command line arguments for evaluation * (see Evaluation). */ public static void main(String [] argv) { runClassifier(new HyperPipes(), argv); } }
11,735
29.092308
299
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/misc/InputMappedClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * InputMappedClassifier.java * Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.misc; import java.io.Serializable; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.Classifier; import weka.classifiers.SingleClassifierEnhancer; import weka.core.AdditionalMeasureProducer; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.DenseInstance; import weka.core.Drawable; import weka.core.Environment; import weka.core.EnvironmentHandler; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SerializationHelper; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** <!-- globalinfo-start --> * Wrapper classifier that addresses incompatible training and test data by building a mapping between the training data that a classifier has been built with and the incoming test instances' structure. Model attributes that are not found in the incoming instances receive missing values, so do incoming nominal attribute values that the classifier has not seen before. A new classifier can be trained or an existing one loaded from a file. 
* <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -I * Ignore case when matching attribute names and nominal values.</pre> * * <pre> -M * Suppress the output of the mapping report.</pre> * * <pre> -trim * Trim white space from either end of names before matching.</pre> * * <pre> -L &lt;path to model to load&gt; * Path to a model to load. If set, this model * will be used for prediction and any base classifier * specification will be ignored. Environment variables * may be used in the path (e.g. ${HOME}/myModel.model)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: 8034 $ * */ public class InputMappedClassifier extends SingleClassifierEnhancer implements Serializable, OptionHandler, Drawable, WeightedInstancesHandler, AdditionalMeasureProducer, EnvironmentHandler { /** For serialization */ private static final long serialVersionUID = 4901630631723287761L; /** The path to the serialized model to use (if any) */ protected String m_modelPath = ""; /** The header of the last known set of incoming test instances */ protected transient Instances m_inputHeader; /** The instances structure used to train the classifier with */ protected Instances m_modelHeader; /** Handle any environment variables used in the model path */ protected transient Environment m_env; /** Map from model attributes to incoming attributes */ protected transient int[] m_attributeMap; protected transient int[] m_attributeStatus; /** For each model attribute, map from incoming nominal values to model * nominal values */ 
protected transient int[][] m_nominalValueMap; /** Trim white space from both ends of attribute names and nominal values? */ protected boolean m_trim = true; /** Ignore case when matching attribute names and nominal values? */ protected boolean m_ignoreCase = true; /** Dont output mapping report if set to true */ protected boolean m_suppressMappingReport = false; /** * If true, then a call to buildClassifier() will not overwrite * any test structure that has been recorded with the current training * structure. This is useful for getting a correct mapping report * output in toString() after buildClassifier has been called and * before any test instance has been seen. Test structure and mapping * will get reset if a test instance is received whose structure does * not match the recorded test structure. */ protected boolean m_initialTestStructureKnown = false; /** Holds values for instances constructed for prediction */ protected double[] m_vals; /** * Returns a string describing this classifier * * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Wrapper classifier that addresses incompatible training and test " + "data by building a mapping between the training data that " + "a classifier has been built with and the incoming test instances' " + "structure. Model attributes that are not found in the incoming " + "instances receive missing values, so do incoming nominal attribute " + "values that the classifier has not seen before. 
A new classifier " + "can be trained or an existing one loaded from a file."; } /** * Set the environment variables to use * * @param env the environment variables to use */ public void setEnvironment(Environment env) { m_env = env; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String ignoreCaseForNamesTipText() { return "Ignore case when matching attribute names and nomina values."; } /** * Set whether to ignore case when matching attribute names and * nominal values. * * @param ignore true if case is to be ignored */ public void setIgnoreCaseForNames(boolean ignore) { m_ignoreCase = ignore; } /** * Get whether to ignore case when matching attribute names * and nominal values. * * @return true if case is to be ignored. */ public boolean getIgnoreCaseForNames() { return m_ignoreCase; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String trimTipText() { return "Trim white space from each end of attribute names and " + "nominal values before matching."; } /** * Set whether to trim white space from each end of names * before matching. * * @param trim true to trim white space. */ public void setTrim(boolean trim) { m_trim = trim; } /** * Get whether to trim white space from each end of names * before matching. * * @return true if white space is to be trimmed. */ public boolean getTrim() { return m_trim; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String suppressMappingReportTipText() { return "Don't output a report of model-to-input mappings."; } /** * Set whether to suppress output the report of model to input mappings. * * @param suppress true to suppress this output. 
*/ public void setSuppressMappingReport(boolean suppress) { m_suppressMappingReport = suppress; } /** * Get whether to suppress output the report of model to input mappings. * * @return true if this output is to be suppressed. */ public boolean getSuppressMappingReport() { return m_suppressMappingReport; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String modelPathTipText() { return "Set the path from which to load a model. " + "Loading occurs when the first test instance " + "is received. Environment variables can be used in the " + "supplied path."; } /** * Set the path from which to load a model. Loading occurs when the * first test instance is received or getModelHeader() is called * programatically. Environment variables can be used in the * supplied path - e.g. ${HOME}/myModel.model. * * @param modelPath the path to the model to load. * @throws Exception if a problem occurs during loading. */ public void setModelPath(String modelPath) throws Exception { if (m_env == null) { m_env = Environment.getSystemWide(); } m_modelPath = modelPath; //loadModel(modelPath); } /** * Get the path used for loading a model. * * @return the path used for loading a model. */ public String getModelPath() { return m_modelPath; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disable(Capability.RELATIONAL_ATTRIBUTES); return result; } /** * Returns an enumeration describing the available options. 
* <!-- options-start --> * Valid options are: <p/> * * <pre> -I * Ignore case when matching attribute names and nominal values.</pre> * * <pre> -M * Suppress the output of the mapping report.</pre> * * <pre> -trim * Trim white space from either end of names before matching.</pre> * * <pre> -L &lt;path to model to load&gt; * Path to a model to load. If set, this model * will be used for prediction and any base classifier * specification will be ignored. Environment variables * may be used in the path (e.g. ${HOME}/myModel.model)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @return an enumeration of all the available options. */ public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector(4); newVector.addElement(new Option("\tIgnore case when matching attribute " + "names and nominal values.", "I", 0, "-I")); newVector.addElement(new Option("\tSuppress the output of the mapping report.", "M", 0, "-M")); newVector.addElement(new Option("\tTrim white space from either end of names " + "before matching.", "trim", 0, "-trim")); newVector.addElement(new Option("\tPath to a model to load. If set, this model" + "\n\twill be used for prediction and any base classifier" + "\n\tspecification will be ignored. Environment variables" + "\n\tmay be used in the path (e.g. ${HOME}/myModel.model)", "L", 1, "-L <path to model to load>")); Enumeration<Option> enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -I * Ignore case when matching attribute names and nominal values.</pre> * * <pre> -M * Suppress the output of the mapping report.</pre> * * <pre> -trim * Trim white space from either end of names before matching.</pre> * * <pre> -L &lt;path to model to load&gt; * Path to a model to load. If set, this model * will be used for prediction and any base classifier * specification will be ignored. Environment variables * may be used in the path (e.g. ${HOME}/myModel.model)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> -W * Full name of base classifier. * (default: weka.classifiers.rules.ZeroR)</pre> * * <pre> * Options specific to classifier weka.classifiers.rules.ZeroR: * </pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * Options after -- are passed to the designated classifier.<p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setIgnoreCaseForNames(Utils.getFlag('I', options)); setSuppressMappingReport(Utils.getFlag('M', options)); setTrim(Utils.getFlag("trim", options)); String modelPath = Utils.getOption('L', options); if (modelPath.length() > 0) { setModelPath(modelPath); } super.setOptions(options); } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { String[] superOptions = super.getOptions(); String[] options = new String[superOptions.length + 5]; int current = 0; if (getIgnoreCaseForNames()) { options[current++] = "-I"; } if (getSuppressMappingReport()) { options[current++] = "-M"; } if (getTrim()) { options[current++] = "-trim"; } if (getModelPath() != null && getModelPath().length() > 0) { options[current++] = "-L"; options[current++] = getModelPath(); } System.arraycopy(superOptions, 0, options, current, superOptions.length); current += superOptions.length; while (current < options.length) { options[current++] = ""; } return options; } /** * Set the test structure (if known in advance) that we are likely * to see. If set, then a call to buildClassifier() will not overwrite * any test structure that has been recorded with the current training * structure. This is useful for getting a correct mapping report * output in toString() after buildClassifier has been called and * before any test instance has been seen. Test structure and mapping * will get reset if a test instance is received whose structure does * not match the recorded test structure. * * @param testStructure the structure of the test instances that * we are likely to see (if known in advance) */ public void setTestStructure(Instances testStructure) { m_inputHeader = testStructure; m_initialTestStructureKnown = true; } /** * Set the structure of the data used to create the model. 
This method * is useful for clients who have an existing in-memory model that they'd * like to wrap in the InputMappedClassifier * * @param modelHeader the structure of the data used to build the wrapped * model */ public void setModelHeader(Instances modelHeader) { m_modelHeader = modelHeader; } private void loadModel(String modelPath) throws Exception { if (modelPath != null && modelPath.length() > 0) { try { if (m_env == null) { m_env = Environment.getSystemWide(); } modelPath = m_env.substitute(modelPath); } catch (Exception ex) { // ignore any problems } try { Object[] modelAndHeader = SerializationHelper.readAll(modelPath); if (modelAndHeader.length != 2) { throw new Exception("[InputMappedClassifier] serialized model file " + "does not seem to contain both a model and " + "the instances header used in training it!"); } else { setClassifier((Classifier)modelAndHeader[0]); m_modelHeader = (Instances)modelAndHeader[1]; } } catch (Exception ex) { ex.printStackTrace(); } } } /** * Build the classifier * * @param data the training data to be used for generating the * bagged classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { if (!m_initialTestStructureKnown) { m_inputHeader = new Instances(data, 0); } m_attributeMap = null; if (m_modelPath != null && m_modelPath.length() > 0) { return; // Don't build a classifier if a path has been specified } // can classifier handle the data? 
getCapabilities().testWithFail(data); m_Classifier.buildClassifier(data); //m_loadedClassifier = m_Classifier; m_modelHeader = new Instances(data, 0); } private boolean stringMatch(String one, String two) { if (m_trim) { one = one.trim(); two = two.trim(); } if (m_ignoreCase) { return one.equalsIgnoreCase(two); } else { return one.equals(two); } } /** * Helper method to pad/truncate strings * * @param s String to modify * @param pad character to pad with * @param len length of final string * @return final String */ private String getFixedLengthString(String s, char pad, int len) { String padded = null; if (len <= 0) { return s; } // truncate? if (s.length() >= len) { return s.substring(0, len); } else { char [] buf = new char[len - s.length()]; for (int j = 0; j < len - s.length(); j++) { buf[j] = pad; } padded = s + new String(buf); } return padded; } private StringBuffer createMappingReport() { StringBuffer result = new StringBuffer(); result.append("Attribute mappings:\n\n"); int maxLength = 0; for (int i = 0; i < m_modelHeader.numAttributes(); i++) { if (m_modelHeader.attribute(i).name().length() > maxLength) { maxLength = m_modelHeader.attribute(i).name().length(); } } maxLength += 12; int minLength = 16; String headerS = "Model attributes"; String sep = "----------------"; if (maxLength < minLength) { maxLength = minLength; } headerS = getFixedLengthString(headerS, ' ', maxLength); sep = getFixedLengthString(sep, '-', maxLength); sep += "\t ----------------\n"; headerS += "\t Incoming attributes\n"; result.append(headerS); result.append(sep); for (int i = 0; i < m_modelHeader.numAttributes(); i++) { Attribute temp = m_modelHeader.attribute(i); String attName = "(" + ((temp.isNumeric()) ? 
"numeric)" : "nominal)") + " " + temp.name(); attName = getFixedLengthString(attName, ' ', maxLength); attName += "\t--> "; result.append(attName); String inAttNum = ""; if (m_attributeStatus[i] == NO_MATCH) { inAttNum += "- "; result.append(inAttNum + "missing (no match)\n"); } else if (m_attributeStatus[i] == TYPE_MISMATCH) { inAttNum += (m_attributeMap[i] + 1) + " "; result.append(inAttNum + "missing (type mis-match)\n"); } else { Attribute inAtt = m_inputHeader.attribute(m_attributeMap[i]); String inName = "" + (m_attributeMap[i] + 1) + " (" + ((inAtt.isNumeric()) ? "numeric)" : "nominal)") + " " + inAtt.name(); result.append(inName + "\n"); } } return result; } protected static final int NO_MATCH = -1; protected static final int TYPE_MISMATCH = -2; protected static final int OK = -3; private boolean regenerateMapping() throws Exception { loadModel(m_modelPath); // load a model (if specified) if (m_modelHeader == null) { return false; } m_attributeMap = new int[m_modelHeader.numAttributes()]; m_attributeStatus = new int[m_modelHeader.numAttributes()]; m_nominalValueMap = new int[m_modelHeader.numAttributes()][]; for (int i = 0; i < m_modelHeader.numAttributes(); i++) { String modelAttName = m_modelHeader.attribute(i).name(); m_attributeStatus[i] = NO_MATCH; for (int j = 0; j < m_inputHeader.numAttributes(); j++) { String incomingAttName = m_inputHeader.attribute(j).name(); if (stringMatch(modelAttName, incomingAttName)) { m_attributeMap[i] = j; m_attributeStatus[i] = OK; Attribute modelAtt = m_modelHeader.attribute(i); Attribute incomingAtt = m_inputHeader.attribute(j); // check types if (modelAtt.type() != incomingAtt.type()) { m_attributeStatus[i] = TYPE_MISMATCH; break; } // now check nominal values (number, names...) 
if (modelAtt.numValues() != incomingAtt.numValues()) { System.out.println("[InputMappedClassifier] Warning: incoming nominal " + "attribute " + incomingAttName + " does not have the same " + "number of values as model attribute " + modelAttName); } if (modelAtt.isNominal() && incomingAtt.isNominal()) { int[] valuesMap = new int[incomingAtt.numValues()]; for (int k = 0; k < incomingAtt.numValues(); k++) { String incomingNomValue = incomingAtt.value(k); int indexInModel = modelAtt.indexOfValue(incomingNomValue); if (indexInModel < 0) { valuesMap[k] = NO_MATCH; } else { valuesMap[k] = indexInModel; } } m_nominalValueMap[i] = valuesMap; } } } } return true; } /** * Return the instance structure that the encapsulated model was built with. * If the classifier will be built from scratch by InputMappedClassifier then * this method just returns the default structure that is passed in as argument. * * @param defaultH the default instances structure * @return the instances structure used to create the encapsulated model * @throws Exception if a problem occurs */ public Instances getModelHeader(Instances defaultH) throws Exception { loadModel(m_modelPath); // If the model header is null, then we must be going to build from // scratch in buildClassifier. Therefore, just return the supplied default, // since this has to match what we will build with Instances toReturn = (m_modelHeader == null) ? defaultH : m_modelHeader; return new Instances(toReturn, 0); } // get the mapped class index (i.e. 
the index in the incoming data of // the attribute that the model uses as the class public int getMappedClassIndex() throws Exception { if (m_modelHeader == null) { throw new Exception("[InputMappedClassifier] No model available!"); } if (m_attributeMap[m_modelHeader.classIndex()] == NO_MATCH) { return -1; } return m_attributeMap[m_modelHeader.classIndex()]; } public Instance constructMappedInstance(Instance incoming) throws Exception { boolean regenerateMapping = false; if (m_inputHeader == null) { m_inputHeader = incoming.dataset(); regenerateMapping = true; m_initialTestStructureKnown = false; } else if (!m_inputHeader.equalHeaders(incoming.dataset())) { /*System.out.println("[InputMappedClassifier] incoming data does not match " + "last known input format - regenerating mapping..."); System.out.println("Incoming\n" + new Instances(incoming.dataset(), 0)); System.out.println("Stored input header\n" + new Instances(m_inputHeader, 0)); System.out.println("Model header\n" + new Instances(m_modelHeader, 0)); */ m_inputHeader = incoming.dataset(); regenerateMapping = true; m_initialTestStructureKnown = false; } else if (m_attributeMap == null) { regenerateMapping = true; m_initialTestStructureKnown = false; } if (regenerateMapping) { regenerateMapping(); m_vals = null; if (!m_suppressMappingReport) { StringBuffer result = createMappingReport(); System.out.println(result.toString()); } } m_vals = new double[m_modelHeader.numAttributes()]; for (int i = 0; i < m_modelHeader.numAttributes(); i++) { if (m_attributeStatus[i] == OK) { Attribute modelAtt = m_modelHeader.attribute(i); Attribute incomingAtt = m_inputHeader.attribute(m_attributeMap[i]); if (Utils.isMissingValue(incoming.value(m_attributeMap[i]))) { m_vals[i] = Utils.missingValue(); continue; } if (modelAtt.isNumeric()) { m_vals[i] = incoming.value(m_attributeMap[i]); } else if (modelAtt.isNominal()) { int mapVal = m_nominalValueMap[i][(int)incoming.value(m_attributeMap[i])]; if (mapVal == NO_MATCH) { m_vals[i] 
= Utils.missingValue(); } else { m_vals[i] = mapVal; } } } else { m_vals[i] = Utils.missingValue(); } } Instance newInst = new DenseInstance(incoming.weight(), m_vals); newInst.setDataset(m_modelHeader); return newInst; } public double classifyInstance(Instance inst) throws Exception { Instance converted = constructMappedInstance(inst); return m_Classifier.classifyInstance(converted); } public double[] distributionForInstance(Instance inst) throws Exception { Instance converted = constructMappedInstance(inst); return m_Classifier.distributionForInstance(converted); } public String toString() { StringBuffer buff = new StringBuffer(); buff.append("InputMappedClassifier:\n\n"); try { loadModel(m_modelPath); } catch (Exception ex) { return "[InputMappedClassifier] Problem loading model."; } if (m_modelPath != null && m_modelPath.length() > 0) { buff.append("Model sourced from: " + m_modelPath + "\n\n"); } /*if (m_loadedClassifier != null) { buff.append(m_loadedClassifier); } else { */ buff.append(m_Classifier); //} if (!m_suppressMappingReport && m_inputHeader != null) { try { regenerateMapping(); } catch (Exception ex) { ex.printStackTrace(); return "[InputMappedClassifier] Problem loading model."; } if (m_attributeMap != null) { buff.append("\n" + createMappingReport().toString()); } } return buff.toString(); } /** * Returns the type of graph this classifier * represents. * * @return the type of graph */ public int graphType() { if (m_Classifier instanceof Drawable) return ((Drawable)m_Classifier).graphType(); else return Drawable.NOT_DRAWABLE; } /** * Returns an enumeration of the additional measure names * @return an enumeration of the measure names */ public Enumeration enumerateMeasures() { Vector newVector = new Vector(); if (m_Classifier instanceof AdditionalMeasureProducer) { Enumeration en = ((AdditionalMeasureProducer)m_Classifier). 
enumerateMeasures(); while (en.hasMoreElements()) { String mname = (String)en.nextElement(); newVector.addElement(mname); } } return newVector.elements(); } /** * Returns the value of the named measure * @param additionalMeasureName the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException if the named measure is not supported */ public double getMeasure(String additionalMeasureName) { if (m_Classifier instanceof AdditionalMeasureProducer) { return ((AdditionalMeasureProducer)m_Classifier). getMeasure(additionalMeasureName); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (InputMappedClassifier)"); } } /** * Returns graph describing the classifier (if possible). * * @return the graph of the classifier in dotty format * @throws Exception if the classifier cannot be graphed */ public String graph() throws Exception { if (m_Classifier != null && m_Classifier instanceof Drawable) return ((Drawable)m_Classifier).graph(); else throw new Exception("Classifier: " + getClassifierSpec() + " cannot be graphed"); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method for testing this class. * * @param argv should contain the following arguments: * -t training file [-T test file] [-c class index] */ public static void main(String [] argv) { runClassifier(new InputMappedClassifier(), argv); } }
29,994
30.640295
440
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/misc/SerializedClassifier.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * SerializedClassifier.java
 * Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers.misc;

import java.io.File;
import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.SerializationHelper;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * A wrapper around a serialized classifier model. This classifier loads a serialized models and uses it to make predictions.<br/>
 * <br/>
 * Warning: since the serialized model doesn't get changed, cross-validation cannot be used with this classifier.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 * <pre> -model &lt;filename&gt;
 *  The file containing the serialized model.
 *  (required)</pre>
 *
 <!-- options-end -->
 *
 * @author  fracpete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 */
public class SerializedClassifier
  extends AbstractClassifier {

  /** for serialization */
  private static final long serialVersionUID = 4599593909947628642L;

  /** the serialized classifier model used for making predictions
   *  (transient: it is re-read from m_ModelFile on demand rather than
   *  serialized along with this wrapper) */
  protected transient Classifier m_Model = null;

  /** the file where the serialized model is stored */
  protected File m_ModelFile = new File(System.getProperty("user.dir"));

  /**
   * Returns a string describing classifier
   *
   * @return a description suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "A wrapper around a serialized classifier model. This classifier loads "
      + "a serialized models and uses it to make predictions.\n\n"
      + "Warning: since the serialized model doesn't get changed, cross-validation "
      + "cannot be used with this classifier.";
  }

  /**
   * Gets an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions(){
    Vector        result;
    Enumeration   enm;

    result = new Vector();

    // inherit the debug flag etc. from the superclass
    enm = super.listOptions();
    while (enm.hasMoreElements())
      result.addElement(enm.nextElement());

    result.addElement(new Option(
        "\tThe file containing the serialized model.\n"
        + "\t(required)",
        "model", 1, "-model <filename>"));

    return result.elements();
  }

  /**
   * returns the options of the current setup
   *
   * @return the current options
   */
  public String[] getOptions(){
    int       i;
    Vector    result;
    String[]  options;

    result = new Vector();

    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);

    result.add("-model");
    result.add("" + getModelFile());

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Parses the options for this object. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -D
   *  If set, classifier is run in debug mode and
   *  may output additional info to the console</pre>
   *
   * <pre> -model &lt;filename&gt;
   *  The file containing the serialized model.
   *  (required)</pre>
   *
   <!-- options-end -->
   *
   * @param options	the options to use
   * @throws Exception	if setting of options fails
   */
  public void setOptions(String[] options) throws Exception {
    String      tmpStr;

    super.setOptions(options);

    tmpStr = Utils.getOption("model", options);
    if (tmpStr.length() != 0)
      setModelFile(new File(tmpStr));
    else
      // no -model given: fall back to the current working directory,
      // which setModelFile treats as "no model loaded yet"
      setModelFile(new File(System.getProperty("user.dir")));
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String modelFileTipText() {
    return "The serialized classifier model to use for predictions.";
  }

  /**
   * Gets the file containing the serialized model.
   *
   * @return the file.
   */
  public File getModelFile() {
    return m_ModelFile;
  }

  /**
   * Sets the file containing the serialized model. If the file exists,
   * the model is eagerly deserialized so configuration errors surface
   * immediately.
   *
   * @param value the file.
   * @throws IllegalArgumentException if the model cannot be deserialized
   */
  public void setModelFile(File value) {
    m_ModelFile = value;

    if (value.exists() && value.isFile()) {
      try {
        initModel();
      }
      catch (Exception e) {
        // FIX: pass the original exception as the cause instead of only
        // flattening it into the message, so the full stack trace survives.
        throw new IllegalArgumentException(
            "Cannot load model from file '" + value + "': " + e, e);
      }
    }
  }

  /**
   * Sets the fully built model to use, if one doesn't want to load a model
   * from a file or already deserialized a model from somewhere else.
   *
   * @param value the built model
   * @see #getCurrentModel()
   */
  public void setModel(Classifier value) {
    m_Model = value;
  }

  /**
   * Gets the currently loaded model (can be null). Call buildClassifier method
   * to load model from file.
   *
   * @return the current model
   * @see #setModel(Classifier)
   */
  public Classifier getCurrentModel() {
    return m_Model;
  }

  /**
   * loads the serialized model if necessary, throws an Exception if the
   * derserialization fails.
   *
   * @throws Exception if deserialization fails
   */
  protected void initModel() throws Exception {
    // m_Model is transient, so this may run again after this wrapper was
    // itself deserialized
    if (m_Model == null)
      m_Model = (Classifier) SerializationHelper.read(m_ModelFile.getAbsolutePath());
  }

  /**
   * Returns default capabilities of the base classifier.
   *
   * @return the capabilities of the base classifier
   */
  public Capabilities getCapabilities() {
    Capabilities    result;

    // init model if necessary
    if (m_ModelFile != null && m_ModelFile.exists() && m_ModelFile.isFile()) {
      try {
        initModel();
      }
      catch (Exception e) {
        // best-effort: capabilities queries must not throw, so just report
        System.err.println(e);
      }
    }

    if (m_Model != null) {
      result = m_Model.getCapabilities();
    }
    else {
      // no model available yet: advertise no capabilities at all
      result = new Capabilities(this);
      result.disableAll();
    }

    // set dependencies
    for (Capability cap: Capability.values())
      result.enableDependency(cap);

    result.setOwner(this);

    return result;
  }

  /**
   * Calculates the class membership probabilities for the given test
   * instance.
   *
   * @param instance	the instance to be classified
   * @return 		preedicted class probability distribution
   * @throws Exception	if distribution can't be computed successfully
   */
  public double[] distributionForInstance(Instance instance) throws Exception {
    double[]    result;

    // init model if necessary
    initModel();

    result = m_Model.distributionForInstance(instance);

    return result;
  }

  /**
   * loads only the serialized classifier
   *
   * @param data        the training instances
   * @throws Exception  if something goes wrong
   */
  public void buildClassifier(Instances data) throws Exception {
    // init model if necessary
    initModel();

    // can classifier handle the data?
    getCapabilities().testWithFail(data);
  }

  /**
   * Returns a string representation of the classifier
   *
   * @return the string representation of the classifier
   */
  public String toString() {
    StringBuffer    result;

    if (m_Model == null) {
      result = new StringBuffer("No model loaded yet.");
    }
    else {
      result = new StringBuffer();
      result.append("SerializedClassifier\n");
      result.append("====================\n\n");
      result.append("File: " + getModelFile() + "\n\n");
      result.append(m_Model.toString());
    }

    return result.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Runs the classifier with the given options
   *
   * @param args	the commandline options
   */
  public static void main(String[] args) {
    runClassifier(new SerializedClassifier(), args);
  }
}
8,938
25.214076
130
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/misc/VFI.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * VFI.java
 * Copyright (C) 2000 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.misc;

import weka.classifiers.Classifier;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;

import java.util.Enumeration;
import java.util.Vector;
import weka.classifiers.AbstractClassifier;

/**
 <!-- globalinfo-start -->
 * Classification by voting feature intervals. Intervals are constucted around each class for each attribute (basically discretization). Class counts are recorded for each interval on each attribute. Classification is by voting. For more info see:<br/>
 * <br/>
 * G. Demiroz, A. Guvenir: Classification by voting feature intervals. In: 9th European Conference on Machine Learning, 85-92, 1997.<br/>
 * <br/>
 * Have added a simple attribute weighting scheme. Higher weight is assigned to more confident intervals, where confidence is a function of entropy:<br/>
 * weight (att_i) = (entropy of class distrib att_i / max uncertainty)^-bias
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;inproceedings{Demiroz1997,
 *    author = {G. Demiroz and A. Guvenir},
 *    booktitle = {9th European Conference on Machine Learning},
 *    pages = {85-92},
 *    publisher = {Springer},
 *    title = {Classification by voting feature intervals},
 *    year = {1997}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 * Faster than NaiveBayes but slower than HyperPipes. <p><p>
 *
 * <pre>
 *  Confidence: 0.01 (two tailed)
 *
 * Dataset                   (1) VFI '-B  | (2) Hyper (3) Naive
 * ------------------------------------
 * anneal.ORIG          (10)   74.56 |   97.88 v   74.77
 * anneal               (10)   71.83 |   97.88 v   86.51 v
 * audiology            (10)   51.69 |   66.26 v   72.25 v
 * autos                (10)   57.63 |   62.79 v   57.76
 * balance-scale        (10)   68.72 |   46.08 *   90.5  v
 * breast-cancer        (10)   67.25 |   69.84 v   73.12 v
 * wisconsin-breast-cancer (10) 95.72 |  88.31 *   96.05 v
 * horse-colic.ORIG     (10)   66.13 |   70.41 v   66.12
 * horse-colic          (10)   78.36 |   62.07 *   78.28
 * credit-rating        (10)   85.17 |   44.58 *   77.84 *
 * german_credit        (10)   70.81 |   69.89 *   74.98 v
 * pima_diabetes        (10)   62.13 |   65.47 v   75.73 v
 * Glass                (10)   56.82 |   50.19 *   47.43 *
 * cleveland-14-heart-diseas (10) 80.01 | 55.18 *  83.83 v
 * hungarian-14-heart-diseas (10) 82.8  | 65.55 *  84.37 v
 * heart-statlog        (10)   79.37 |   55.56 *   84.37 v
 * hepatitis            (10)   83.78 |   63.73 *   83.87
 * hypothyroid          (10)   92.64 |   93.33 v   95.29 v
 * ionosphere           (10)   94.16 |   35.9  *   82.6  *
 * iris                 (10)   96.2  |   91.47 *   95.27 *
 * kr-vs-kp             (10)   88.22 |   54.1  *   87.84 *
 * labor                (10)   86.73 |   87.67     93.93 v
 * lymphography         (10)   78.48 |   58.18 *   83.24 v
 * mushroom             (10)   99.85 |   99.77 *   95.77 *
 * primary-tumor        (10)   29    |   24.78 *   49.35 v
 * segment              (10)   77.42 |   75.15 *   80.1  v
 * sick                 (10)   65.92 |   93.85 v   92.71 v
 * sonar                (10)   58.02 |   57.17     67.97 v
 * soybean              (10)   86.81 |   86.12 *   92.9  v
 * splice               (10)   88.61 |   41.97 *   95.41 v
 * vehicle              (10)   52.94 |   32.77 *   44.8  *
 * vote                 (10)   91.5  |   61.38 *   90.19 *
 * vowel                (10)   57.56 |   36.34 *   62.81 v
 * waveform             (10)   56.33 |   46.11 *   80.02 v
 * zoo                  (10)   94.05 |   94.26     95.04 v
 * ------------------------------------
 * (v| |*) | (9|3|23) (22|5|8)
 * </pre>
 * <p>
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -C
 *  Don't weight voting intervals by confidence</pre>
 *
 * <pre> -B &lt;bias&gt;
 *  Set exponential bias towards confident intervals
 *  (default = 1.0)</pre>
 *
 <!-- options-end -->
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 6308 $
 */
public class VFI
  extends AbstractClassifier
  implements OptionHandler, WeightedInstancesHandler,
             TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 8081692166331321866L;

  /** The index of the class attribute */
  protected int m_ClassIndex;

  /** The number of classes */
  protected int m_NumClasses;

  /** The training data */
  protected Instances m_Instances = null;

  /** The class counts for each interval of each attribute:
   *  m_counts[attribute][interval or nominal value][class] */
  protected double [][][] m_counts;

  /** The global class counts */
  protected double [] m_globalCounts;

  /** The lower bounds for each attribute. During training this holds
   *  [-inf, perClassMin/Max pairs..., +inf] per attribute and is later
   *  replaced by the sorted, de-duplicated interval boundaries. */
  protected double [][] m_intervalBounds;

  /** The maximum entropy for the class */
  protected double m_maxEntrop;

  /** Exponentially bias more confident intervals */
  protected boolean m_weightByConfidence = true;

  /** Bias towards more confident intervals. Stored NEGATED: setBias(b)
   *  stores -b so it can be used directly as the exponent in
   *  Math.pow(entropy, m_bias). */
  protected double m_bias = -0.6;

  /** Laplace-style smoothing constant; zeroed when confidence weighting
   *  is switched off (see buildClassifier). */
  private double TINY = 0.1e-10;

  /**
   * Returns a string describing this search method
   * @return a description of the search method suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "Classification by voting feature intervals. Intervals are "
      +"constucted around each class for each attribute ("
      +"basically discretization). Class counts are "
      +"recorded for each interval on each attribute. Classification is by "
      +"voting. For more info see:\n\n"
      + getTechnicalInformation().toString() + "\n\n"
      +"Have added a simple attribute weighting scheme. Higher weight is "
      +"assigned to more confident intervals, where confidence is a function "
      +"of entropy:\nweight (att_i) = (entropy of class distrib att_i / "
      +"max uncertainty)^-bias";
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation 	result;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "G. Demiroz and A. Guvenir");
    result.setValue(Field.TITLE, "Classification by voting feature intervals");
    result.setValue(Field.BOOKTITLE, "9th European Conference on Machine Learning");
    result.setValue(Field.YEAR, "1997");
    result.setValue(Field.PAGES, "85-92");
    result.setValue(Field.PUBLISHER, "Springer");

    return result;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(2);

    newVector.addElement(
    new Option("\tDon't weight voting intervals by confidence",
	       "C", 0,"-C"));

    newVector.addElement(
    new Option("\tSet exponential bias towards confident intervals\n"
	       +"\t(default = 1.0)",
	       "B", 1,"-B <bias>"));

    return newVector.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -C
   *  Don't weight voting intervals by confidence</pre>
   *
   * <pre> -B &lt;bias&gt;
   *  Set exponential bias towards confident intervals
   *  (default = 1.0)</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String optionString;

    // -C is a "don't weight" flag, hence the negation
    setWeightByConfidence(!Utils.getFlag('C', options));

    optionString = Utils.getOption('B', options);
    if (optionString.length() != 0) {
      Double temp = new Double(optionString);
      setBias(temp.doubleValue());
    }

    Utils.checkForRemainingOptions(options);
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String weightByConfidenceTipText() {
    return "Weight feature intervals by confidence";
  }

  /**
   * Set weighting by confidence
   * @param c true if feature intervals are to be weighted by confidence
   */
  public void setWeightByConfidence(boolean c) {
    m_weightByConfidence = c;
  }

  /**
   * Get whether feature intervals are being weighted by confidence
   * @return true if weighting by confidence is selected
   */
  public boolean getWeightByConfidence() {
    return m_weightByConfidence;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String biasTipText() {
    return "Strength of bias towards more confident features";
  }

  /**
   * Set the value of the exponential bias towards more confident intervals
   * @param b the value of the bias parameter
   */
  public void setBias(double b) {
    // stored negated so it can be used directly as a pow() exponent
    m_bias = -b;
  }

  /**
   * Get the value of the bias parameter
   * @return the bias parameter
   */
  public double getBias() {
    return -m_bias;
  }

  /**
   * Gets the current settings of VFI
   *
   * @return an array of strings suitable for passing to setOptions()
   */
  public String[] getOptions () {
    String[] options = new String[3];
    int current = 0;

    if (!getWeightByConfidence()) {
      options[current++] = "-C";
    }

    options[current++] = "-B"; options[current++] = ""+getBias();

    // pad any unused slots with empty strings
    while (current < options.length) {
      options[current++] = "";
    }

    return options;
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return      the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    // instances
    result.setMinimumNumberInstances(0);

    return result;
  }

  /**
   * Generates the classifier.
   *
   * @param instances set of instances serving as training data
   * @throws Exception if the classifier has not been generated successfully
   */
  public void buildClassifier(Instances instances) throws Exception {
    if (!m_weightByConfidence) {
      // no confidence weighting: drop the smoothing constant entirely
      TINY = 0.0;
    }

    // can classifier handle the data?
    getCapabilities().testWithFail(instances);

    // remove instances with missing class
    instances = new Instances(instances);
    instances.deleteWithMissingClass();

    m_ClassIndex = instances.classIndex();
    m_NumClasses = instances.numClasses();
    m_globalCounts = new double [m_NumClasses];
    // log2 of the number of classes = maximum possible class entropy
    m_maxEntrop = Math.log(m_NumClasses) / Math.log(2);

    m_Instances = new Instances(instances, 0); // Copy the structure for ref
    m_intervalBounds =
      new double[instances.numAttributes()][2+(2*m_NumClasses)];

    // Initialise the working bounds array. Layout per attribute:
    // slot 0 = -inf sentinel, slot (2*numClasses+1) = +inf sentinel,
    // and in between alternating per-class (min, max) seed values
    // (min seeded with +inf, max seeded with -inf, via the 'alt' flip).
    for (int j = 0; j < instances.numAttributes(); j++) {
      boolean alt = false;
      for (int i = 0; i < m_NumClasses*2+2; i++) {
        if (i == 0) {
          m_intervalBounds[j][i] = Double.NEGATIVE_INFINITY;
        } else if (i == m_NumClasses*2+1) {
          m_intervalBounds[j][i] = Double.POSITIVE_INFINITY;
        } else {
          if (alt) {
            m_intervalBounds[j][i] = Double.NEGATIVE_INFINITY;
            alt = false;
          } else {
            m_intervalBounds[j][i] = Double.POSITIVE_INFINITY;
            alt = true;
          }
        }
      }
    }

    // find upper and lower bounds for numeric attributes
    for (int j = 0; j < instances.numAttributes(); j++) {
      if (j != m_ClassIndex && instances.attribute(j).isNumeric()) {
        for (int i = 0; i < instances.numInstances(); i++) {
          Instance inst = instances.instance(i);
          if (!inst.isMissing(j)) {
            // slots (2c+1)/(2c+2) hold the running min/max for class c
            if (inst.value(j) < m_intervalBounds[j][((int)inst.classValue()*2+1)]) {
              m_intervalBounds[j][((int)inst.classValue()*2+1)] = inst.value(j);
            }
            if (inst.value(j) > m_intervalBounds[j][((int)inst.classValue()*2+2)]) {
              m_intervalBounds[j][((int)inst.classValue()*2+2)] = inst.value(j);
            }
          }
        }
      }
    }

    m_counts = new double [instances.numAttributes()][][];

    // sort intervals
    for (int i = 0 ; i < instances.numAttributes(); i++) {
      if (instances.attribute(i).isNumeric()) {
        int [] sortedIntervals = Utils.sort(m_intervalBounds[i]);
        // remove any duplicate bounds
        int count = 1;
        for (int j = 1; j < sortedIntervals.length; j++) {
          if (m_intervalBounds[i][sortedIntervals[j]] !=
              m_intervalBounds[i][sortedIntervals[j-1]]) {
            count++;
          }
        }
        double [] reordered = new double [count];
        count = 1;
        reordered[0] = m_intervalBounds[i][sortedIntervals[0]];
        for (int j = 1; j < sortedIntervals.length; j++) {
          if (m_intervalBounds[i][sortedIntervals[j]] !=
              m_intervalBounds[i][sortedIntervals[j-1]]) {
            reordered[count] = m_intervalBounds[i][sortedIntervals[j]];
            count++;
          }
        }
        // replace the seeded bounds by the sorted unique boundaries
        m_intervalBounds[i] = reordered;
        m_counts[i] = new double [count][m_NumClasses];
      } else if (i != m_ClassIndex) { // nominal attribute
        m_counts[i] =
          new double [instances.attribute(i).numValues()][m_NumClasses];
      }
    }

    // collect class counts
    for (int i = 0; i < instances.numInstances(); i++) {
      Instance inst = instances.instance(i);
      m_globalCounts[(int)instances.instance(i).classValue()] += inst.weight();
      for (int j = 0; j < instances.numAttributes(); j++) {
        if (!inst.isMissing(j) && j != m_ClassIndex) {
          if (instances.attribute(j).isNumeric()) {
            double val = inst.value(j);

            // scan boundaries from the top; interval k spans
            // (bound[k], bound[k+1]]
            int k;
            for (k = m_intervalBounds[j].length-1; k >= 0; k--) {
              if (val > m_intervalBounds[j][k]) {
                m_counts[j][k][(int)inst.classValue()] += inst.weight();
                break;
              } else if (val == m_intervalBounds[j][k]) {
                // value sits exactly on a boundary: split its weight
                // evenly between the two adjacent intervals
                m_counts[j][k][(int)inst.classValue()] +=
                  (inst.weight() / 2.0);
                m_counts[j][k-1][(int)inst.classValue()] +=
                  (inst.weight() / 2.0);;
                break;
              }
            }
          } else {
            // nominal attribute
            m_counts[j][(int)inst.value(j)][(int)inst.classValue()] +=
              inst.weight();;
          }
        }
      }
    }
  }

  /**
   * Returns a description of this classifier.
   *
   * @return a description of this classifier as a string.
   */
  public String toString() {
    // NOTE(review): "FVI" below looks like a typo for "VFI"; left unchanged
    // because it is a runtime string.
    if (m_Instances == null) {
      return "FVI: Classifier not built yet!";
    }
    StringBuffer sb = new StringBuffer("Voting feature intervals classifier\n");

    /* Output the intervals and class counts for each attribute */
    /*    for (int i = 0; i < m_Instances.numAttributes(); i++) {
      if (i != m_ClassIndex) {
	sb.append("\n"+m_Instances.attribute(i).name()+" :\n");
	if (m_Instances.attribute(i).isNumeric()) {
	  for (int j = 0; j < m_intervalBounds[i].length; j++) {
	    sb.append(m_intervalBounds[i][j]).append("\n");
	    if (j != m_intervalBounds[i].length-1) {
	      for (int k = 0; k < m_NumClasses; k++) {
		sb.append(m_counts[i][j][k]+" ");
	      }
	    }
	    sb.append("\n");
	  }
	} else {
	  for (int j = 0; j < m_Instances.attribute(i).numValues(); j++) {
	    sb.append(m_Instances.attribute(i).value(j)).append("\n");
	    for (int k = 0; k < m_NumClasses; k++) {
	      sb.append(m_counts[i][j][k]+" ");
	    }
	    sb.append("\n");
	  }
	}
      }
      } */

    return sb.toString();
  }

  /**
   * Classifies the given test instance.
   *
   * @param instance the instance to be classified
   * @return the predicted class for the instance
   * @throws Exception if the instance can't be classified
   */
  public double [] distributionForInstance(Instance instance)
    throws Exception {
    double [] dist = new double[m_NumClasses];
    double [] temp = new double[m_NumClasses];
    double weight = 1.0;

    // each non-missing, non-class attribute casts a (possibly weighted)
    // vote; votes are accumulated into dist and normalised at the end
    for (int i = 0; i < instance.numAttributes(); i++) {
      if (i != m_ClassIndex && !instance.isMissing(i)) {
        double val = instance.value(i);
        boolean ok = false;
        if (instance.attribute(i).isNumeric()) {
          int k;
          for (k = m_intervalBounds[i].length-1; k >= 0; k--) {
            if (val > m_intervalBounds[i][k]) {
              // strictly inside interval k: smoothed per-class frequency
              for (int j = 0; j < m_NumClasses; j++) {
                if (m_globalCounts[j] > 0) {
                  temp[j] = ((m_counts[i][k][j]+TINY) /
                             (m_globalCounts[j]+TINY));
                }
              }
              ok = true;
              break;
            } else if (val == m_intervalBounds[i][k]) {
              // exactly on a boundary: average the two adjacent intervals
              for (int j = 0; j < m_NumClasses; j++) {
                if (m_globalCounts[j] > 0) {
                  temp[j] = ((m_counts[i][k][j] + m_counts[i][k-1][j]) / 2.0) +
                    TINY;
                  temp[j] /= (m_globalCounts[j]+TINY);
                }
              }
              ok = true;
              break;
            }
          }
          if (!ok) {
            // bounds run from -inf to +inf, so a match should always occur
            throw new Exception("This shouldn't happen");
          }
        } else { // nominal attribute
          ok = true;
          for (int j = 0; j < m_NumClasses; j++) {
            if (m_globalCounts[j] > 0) {
              temp[j] = ((m_counts[i][(int)val][j]+TINY) /
                         (m_globalCounts[j]+TINY));
            }
          }
        }

        double sum = Utils.sum(temp);
        if (sum <= 0) {
          // no evidence at all: fall back to a uniform vote
          for (int j = 0; j < temp.length; j++) {
            temp[j] = 1.0 / (double)temp.length;
          }
        } else {
          Utils.normalize(temp, sum);
        }

        if (m_weightByConfidence) {
          // m_bias is stored negated, so low-entropy (confident) votes
          // get weight > 1; clamp at 1 so no vote is ever down-weighted
          weight = weka.core.ContingencyTables.entropy(temp);
          weight = Math.pow(weight, m_bias);
          if (weight < 1.0) {
            weight = 1.0;
          }
        }

        for (int j = 0; j < m_NumClasses; j++) {
          dist[j] += (temp[j] * weight);
        }
      }
    }
    double sum = Utils.sum(dist);
    if (sum <= 0) {
      for (int j = 0; j < dist.length; j++) {
        dist[j] = 1.0 / (double)dist.length;
      }
      return dist;
    } else {
      Utils.normalize(dist, sum);
      return dist;
    }
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 6308 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param args should contain command line arguments for evaluation
   * (see Evaluation).
   */
  public static void main(String [] args) {
    runClassifier(new VFI(), args);
  }
}
19,859
30.079812
252
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/rules/BinaryFile.java
package weka.classifiers.rules; import java.io.*; public class BinaryFile { /** * Use this constant to specify big-endian integers. */ public static final short BIG_ENDIAN = 1; /** * Use this constant to specify litte-endian constants. */ public static final short LITTLE_ENDIAN = 2; /** * The underlying file. */ protected RandomAccessFile _file; /** * Are we in LITTLE_ENDIAN or BIG_ENDIAN mode. */ protected short _endian; /** * Are we reading signed or unsigned numbers. */ protected boolean _signed; /** * The constructor. Use to specify the underlying file. * * @param f The file to read/write from/to. */ public BinaryFile(RandomAccessFile f) { _file = f; _endian = LITTLE_ENDIAN; _signed = false; } /** * Set the endian mode for reading integers. * * @param i Specify either LITTLE_ENDIAN or BIG_ENDIAN. * @exception java.lang.Exception Will be thrown if this method is not passed either BinaryFile.LITTLE_ENDIAN or BinaryFile.BIG_ENDIAN. */ public void setEndian(short i) throws Exception { if ((i == BIG_ENDIAN) || (i == LITTLE_ENDIAN)) _endian = i; else throw (new Exception( "Must be BinaryFile.LITTLE_ENDIAN or BinaryFile.BIG_ENDIAN")); } /** * Returns the endian mode. Will be either BIG_ENDIAN or LITTLE_ENDIAN. * * @return BIG_ENDIAN or LITTLE_ENDIAN to specify the current endian mode. */ public int getEndian() { return _endian; } /** * Sets the signed or unsigned mode for integers. true for signed, false for unsigned. * * @param b True if numbers are to be read/written as signed, false if unsigned. */ public void setSigned(boolean b) { _signed = b; } /** * Returns the signed mode. * * @return Returns true for signed, false for unsigned. */ public boolean getSigned() { return _signed; } /** * Reads a fixed length ASCII string. * * @param length How long of a string to read. * @return The number of bytes read. * @exception java.io.IOException If an IO exception occurs. 
*/ public String readFixedString(int length) throws java.io.IOException { String rtn = ""; for (int i = 0; i < length; i++) rtn += (char) _file.readByte(); return rtn; } /** * Writes a fixed length ASCII string. Will truncate the string if it does not fit in the specified buffer. * * @param str The string to be written. * @param length The length of the area to write to. Should be larger than the length of the string being written. * @exception java.io.IOException If an IO exception occurs. */ public void writeFixedString(String str, int length) throws java.io.IOException { int i; // trim the string back some if needed if (str.length() > length) str = str.substring(0, length); // write the string for (i = 0; i < str.length(); i++) _file.write(str.charAt(i)); // buffer extra space if needed i = length - str.length(); while ((i--) > 0) _file.write(0); } /** * Reads a string that stores one length byte before the string. This string can be up to 255 characters long. Pascal stores strings this way. * * @return The string that was read. * @exception java.io.IOException If an IO exception occurs. */ public String readLengthPrefixString() throws java.io.IOException { short len = readUnsignedByte(); return readFixedString(len); } /** * Writes a string that is prefixed by a single byte that specifies the length of the string. This is how Pascal usually stores strings. * * @param str The string to be written. * @exception java.io.IOException If an IO exception occurs. */ public void writeLengthPrefixString(String str) throws java.io.IOException { writeByte((byte) str.length()); for (int i = 0; i < str.length(); i++) _file.write(str.charAt(i)); } /** * Reads a fixed length string that is zero(NULL) terminated. This is a type of string used by C/C++. For example char str[80]. * * @param length The length of the string. * @return The string that was read. * @exception java.io.IOException If an IO exception occurs. 
*/ public String readFixedZeroString(int length) throws java.io.IOException { String rtn = readFixedString(length); int i = rtn.indexOf(0); if (i != -1) rtn = rtn.substring(0, i); return rtn; } /** * Writes a fixed length string that is zero terminated. This is the format generally used by C/C++ for string storage. * * @param str The string to be written. * @param length The length of the buffer to receive the string. * @exception java.io.IOException If an IO exception occurs. */ public void writeFixedZeroString(String str, int length) throws java.io.IOException { writeFixedString(str, length); } /** * Reads an unlimited length zero(null) terminated string. * * @return The string that was read. * @exception java.io.IOException If an IO exception occurs. */ public String readZeroString() throws java.io.IOException { String rtn = ""; char ch; do { ch = (char) _file.read(); if (ch != 0) rtn += ch; } while (ch != 0); return rtn; } /** * Writes an unlimited zero(NULL) terminated string to the file. * * @param str The string to be written. * @exception java.io.IOException If an IO exception occurs. */ public void writeZeroString(String str) throws java.io.IOException { for (int i = 0; i < str.length(); i++) _file.write(str.charAt(i)); writeByte((byte) 0); } /** * Internal function used to read an unsigned byte. External classes should use the readByte function. * * @return The byte, unsigned, as a short. * @exception java.io.IOException If an IO exception occurs. */ protected short readUnsignedByte() throws java.io.IOException { return (short) (_file.readByte() & 0xff); } /** * Reads an 8-bit byte. Can be signed or unsigned depending on the signed property. * * @return A byte stored in a short. * @exception java.io.IOException If an IO exception occurs. */ public short readByte() throws java.io.IOException { if (_signed) return (short) _file.readByte(); else return (short) _file.readUnsignedByte(); } /** * Writes a single byte to the file. 
* * @param b The byte to be written. * @exception java.io.IOException If an IO exception occurs. */ public void writeByte(short b) throws java.io.IOException { _file.write(b & 0xff); } /** * Reads a 16-bit word. Can be signed or unsigned depending on the signed property. Can be little or big endian depending on the endian property. * * @return A word stored in an int. * @exception java.io.IOException If an IO exception occurs. */ public int readWord() throws java.io.IOException { short a, b; int result; a = readUnsignedByte(); b = readUnsignedByte(); if (_endian == BIG_ENDIAN) result = ((a << 8) | b); else result = (a | (b << 8)); if (_signed) if ((result & 0x8000) == 0x8000) result = -(0x10000 - result); return result; } /** * Write a word to the file. * * @param w The word to be written to the file. * @exception java.io.IOException If an IO exception occurs. */ public void writeWord(int w) throws java.io.IOException { if (_endian == BIG_ENDIAN) { _file.write((w & 0xff00) >> 8); _file.write(w & 0xff); } else { _file.write(w & 0xff); _file.write((w & 0xff00) >> 8); } } /** * Reads a 32-bit double word. Can be signed or unsigned depending on the signed property. Can be little or big endian depending on the endian property. * * @return A double world stored in a long. * @exception java.io.IOException If an IO exception occurs. */ public long readDWord() throws java.io.IOException { short a, b, c, d; long result; a = readUnsignedByte(); b = readUnsignedByte(); c = readUnsignedByte(); d = readUnsignedByte(); if (_endian == BIG_ENDIAN) result = ((a << 24) | (b << 16) | (c << 8) | d); else result = (a | (b << 8) | (c << 16) | (d << 24)); if (_signed) if ((result & 0x80000000L) == 0x80000000L) result = -(0x100000000L - result); return result; } /** * Writes a double word to the file. * * @param d The double word to be written to the file. * @exception java.io.IOException If an IO exception occurs. 
*/ public void writeDWord(long d) throws java.io.IOException { if (_endian == BIG_ENDIAN) { _file.write((int) (d & 0xff000000) >> 24); _file.write((int) (d & 0xff0000) >> 16); _file.write((int) (d & 0xff00) >> 8); _file.write((int) (d & 0xff)); } else { _file.write((int) (d & 0xff)); _file.write((int) (d & 0xff00) >> 8); _file.write((int) (d & 0xff0000) >> 16); _file.write((int) (d & 0xff000000) >> 24); } } /** * Allows the file to be aligned to a specified byte boundary. For example, if a 4(double word) is specified, the file pointer will be moved to the next double word boundary. * * @param a The byte-boundary to align to. * @exception java.io.IOException If an IO exception occurs. */ public void align(int a) throws java.io.IOException { if ((_file.getFilePointer() % a) > 0) { long pos = _file.getFilePointer() / a; _file.seek((pos + 1) * a); } } }
10,512
29.650146
179
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/rules/Classify_Test.java
package weka.classifiers.rules; import java.io.BufferedReader; import java.io.FileNotFoundException; import java.io.FileReader; import java.io.IOException; import java.io.RandomAccessFile; import java.io.Serializable; import java.util.Hashtable; import java.util.StringTokenizer; import weka.core.Instance; public class Classify_Test implements Serializable { private static final long serialVersionUID = 1L; public static void main(String[] args) { } // majority selection of the class . Returns -1 if classification fails, else returns position of the class label in the vector static int maggioranza(String[] rules,String[] class_labels, String class_path, String pathname, Instance instance) { //read class_labels and id_class_base int id_base_class = 0; int class_labels_freq[] = new int[class_labels.length]; for (int i = 0; i< class_labels.length; i++) class_labels_freq[i] = 0; // frequency counter initially set to zero FileReader file = null; StringTokenizer st; try { file = new FileReader(class_path); } catch (FileNotFoundException e) { e.printStackTrace(); } BufferedReader buff = new BufferedReader(file); boolean eof = false; boolean firstline = true; String line = null; try { // Legge da file le etichette di classe while (!eof) { line = buff.readLine(); if (line == null) eof = true; else { if (firstline) { id_base_class = Integer.parseInt(line); firstline = false; } } } // end while (!eof) file.close(); } catch (IOException e) { e.printStackTrace(); } // scan rules counting class_labels appearance int counter; for (int i = 0; i < rules.length; i++) { if (rules[i] != null && rules[i]!="") { StringTokenizer st2 = new StringTokenizer(rules[i], " "); counter = 0; while (st2.hasMoreTokens()) { String s = st2.nextToken(); counter++; if (counter == 3) { // string s cointains class label /*** non weighted majority selection ***/ //Integer.parseInt(s)-id_base_class is the position of the class in the class_labels vector class_labels_freq[Integer.parseInt(s)-id_base_class]++; 
} } // end while } // end if } // search for the maximum int maximum = -1; int result = -1; for (int i= 0; i<class_labels_freq.length; i++) { if (class_labels_freq[i]>maximum) { result = i; maximum = class_labels_freq[i]; } } instance.setClassValue(class_labels[result]); int my_result = (int) instance.value(instance.classIndex()); return (my_result); } static // majority selection of the class . Returns -1 if classification fails, else returns position of the class label in the vector int maggioranza_numeric(String[] rules,String[] class_labels, String class_path, String pathname) { //read class_labels and id_class_base int id_base_class = 0; int class_labels_freq[] = new int[class_labels.length]; for (int i = 0; i< class_labels.length; i++) class_labels_freq[i] = 0; // frequency counter initially set to zero FileReader file = null; StringTokenizer st; try { file = new FileReader(class_path); } catch (FileNotFoundException e) { e.printStackTrace(); } BufferedReader buff = new BufferedReader(file); boolean eof = false; boolean firstline = true; String line = null; try { while (!eof) { line = buff.readLine(); if (line == null) eof = true; else { if (firstline) { id_base_class = Integer.parseInt(line); firstline = false; } } } // end while (!eof) file.close(); } catch (IOException e) { e.printStackTrace(); } // scan rules counting class_labels appearance int counter; for (int i = 0; i < rules.length; i++) { if (rules[i] != null && rules[i]!="") { StringTokenizer st2 = new StringTokenizer(rules[i], " "); counter = 0; while (st2.hasMoreTokens()) { String s = st2.nextToken(); counter++; if (counter == 3) { // string s cointains class label /*** non weighted majority selection ***/ class_labels_freq[Integer.parseInt(s)-id_base_class]++; } } // end while } // end if } // search for the maximum int maximum = -1; int result = -1; for (int i= 0; i<class_labels_freq.length; i++) { if (class_labels_freq[i]>maximum) { result = i; maximum = class_labels_freq[i]; } } return 
(Integer.parseInt(class_labels[result])); } static int elimina(String[] rules, double soglia) { int i; double conf; double rule_conf = 0.0; StringTokenizer st; String s; int counter = 0; // read rhe rule confidence st = new StringTokenizer(rules[0], " "); while (st.hasMoreTokens()) { s = st.nextToken(); counter++; if (counter == 5) // confidence value rule_conf = Double.parseDouble(s); } conf = rule_conf - soglia; for (i = 1; rules[i] != null; i++) { // read confidence of i-th rule st = new StringTokenizer(rules[i], " "); while (st.hasMoreTokens()) { s = st.nextToken(); counter++; if (counter == 5) // confidence value rule_conf = Double.parseDouble(s); } // end while if ( rule_conf <= conf ) { /* Elimino la regola e le successive */ for (int j = i; j< rules.length; j++) rules[j] = null; return (i); } } // end for return (i); } double[] search_row_NEW(String current_bin_path, String dataset, Instance instance, String[] class_labels, int id_class_base) { // read the original arff dataset searching for the correpondent row in the binary file int row = -1; String my_String = ""; double[] instance_vector = new double[instance.numAttributes()-1]; for (int i= 0; i< instance.numAttributes()-1; i++) { //if (i != (instance.numAttributes()-1)) my_String += Integer.toString((int)instance.value(i)); if (i != (instance.numAttributes()-1)) my_String += ","; } FileReader file = null; try { file = new FileReader(dataset); } catch (FileNotFoundException e) { e.printStackTrace(); } BufferedReader buff = new BufferedReader(file); boolean start = false; boolean eof = false; String line = null; int row_counter = 0; try { while (!eof) { line = buff.readLine(); if (start == true) row_counter++; if (line == null) eof = true; else { if (start) { line = line.substring(0, line.lastIndexOf(",")+1); } if (line.equalsIgnoreCase(my_String) == true) { row = (row_counter-1); } if (line.equalsIgnoreCase("@data")==true) start = true; } } // end while (!eof) file.close(); } catch (IOException e) { 
e.printStackTrace(); } if (row == -1) { return null; } //read the corresponent row-th row in the binary file //update the instance and return it. RandomAccessFile filebin = null; BinaryFile binFile; // set the endian mode to LITTLE_ENDIAN final short endian = BinaryFile.LITTLE_ENDIAN; // set the signed mode to unsigned final boolean signed = false; long tid, cid, numItems, item; long i; try { filebin = new RandomAccessFile(current_bin_path, "r"); binFile = new BinaryFile(filebin); // set the endian mode to LITTLE_ENDIAN binFile.setEndian(BinaryFile.LITTLE_ENDIAN); // set the signed mode to unsigned binFile.setSigned(false); while (true) { // read tid, cid, and number of items tid=binFile.readDWord(); cid=binFile.readDWord(); numItems=binFile.readDWord(); for (i=0;i<numItems-1;i++) { item=binFile.readDWord(); if (tid == row) instance_vector[(int)i]= (double)item; } item=binFile.readDWord(); } } catch (Exception e) { System.out.println("**Error: " + e.getMessage()); } try { filebin.close(); } catch (IOException e) { e.printStackTrace(); } return instance_vector; } static // selection of the first num_rules rules that classify the transaction String[] selection_NEW(double[] transaction, String levelI_path, String levelII_path, int num_rules, int num_features) { String[] rules = new String[num_rules]; int index = 0; for (int i = 0; i < num_rules; i++) rules[i] = null; // scan levelI rules and verify if all the items in each rule are contained into the transaction FileReader file = null; StringTokenizer st; try { file = new FileReader(levelI_path); } catch (FileNotFoundException e) { e.printStackTrace(); } BufferedReader buff = new BufferedReader(file); boolean eof = false; boolean found = false; String line = null; try { while (!eof) { line = buff.readLine(); if (line == null) eof = true; else { st = new StringTokenizer(line, ","); while (st.hasMoreTokens()) { found = false; String s = st.nextToken(); if (s.indexOf("{")!=-1 && s.indexOf("}")==-1) s = 
s.substring(1,s.length()); if (s.indexOf("{")!=-1 && s.indexOf("}")!=-1) s = s.substring(1,s.length()-1); if (s.indexOf("{")==-1 && s.indexOf("}")!=-1) s = s.substring(0,s.indexOf("}")); //System.out.println("s: "+s+" "); for (int j = 0; (j< num_features) && (found == false); j++) { if (transaction[j] == Double.parseDouble(s)) found = true; } if (!found) break; // element not found.. pass to the next rule } if (found) { // if finishing inspecting a rule found remains true, it means that a matching rule rules[index]= new String(line); index++; if (index == (num_rules)) return rules; } // end if } // end else } // end while (!eof) file.close(); } catch (IOException e) { e.printStackTrace(); } /*** possible modification: if I find one level-I rule exit ***/ if (rules != null) return rules; /*** end modification ***/ // if program arrives here, it means that not enough rules could be extracted from levelI.. let's move to levelII file = null; st = null; try { file = new FileReader(levelII_path); } catch (FileNotFoundException e) { e.printStackTrace(); } buff = new BufferedReader(file); eof = false; found = false; line = null; try { while (!eof) { line = buff.readLine(); if (line == null) eof = true; else { st = new StringTokenizer(line, ","); while (st.hasMoreTokens()) { found = false; String s = st.nextToken(); if (s.indexOf("{")!=-1 && s.indexOf("}")==-1) s = s.substring(1,s.length()); if (s.indexOf("{")!=-1 && s.indexOf("}")!=-1) s = s.substring(1,s.length()-1); if (s.indexOf("{")==-1 && s.indexOf("}")!=-1) s = s.substring(0,s.indexOf("}")); for (int j = 0; (j< num_features) && (found == false); j++) { if (transaction[j] == Double.parseDouble(s)) found = true; } if (!found) break; // element not found.. 
pass to the next rule } if (found) { // if finishing inspecting a rule found remains true, it means that a matching rule rules[index]= new String(line); index++; if (index == (num_rules)) return rules; } // end if } // end else } // end while (!eof) file.close(); } catch (IOException e) { e.printStackTrace(); } // if program arrives here, it means that not enough rules could be extracted from levelI or II.. // return Null for remaining not extracted rules return rules; } static // selection of the first num_rules rules that classify the transaction String[] selection_HASH(String[] transaction, String levelI_path, String levelII_path, int num_rules, int num_features, Hashtable hash) { if (hash.isEmpty()== true) { //System.err.println("Error on hash table!\n"); return null; } String[] rules = new String[num_rules]; int index = 0; for (int i = 0; i < num_rules; i++) rules[i] = null; // scan levelI rules and verify if all the items in each rule are contained into the transaction FileReader file = null; StringTokenizer st; try { file = new FileReader(levelI_path); } catch (FileNotFoundException e) { e.printStackTrace(); } BufferedReader buff = new BufferedReader(file); boolean eof = false; boolean found = false; String line = null; try { while (!eof) { line = buff.readLine(); if (line == null) eof = true; else { st = new StringTokenizer(line, ","); while (st.hasMoreTokens()) { found = false; String s = st.nextToken(); if (s.indexOf("{")!=-1 && s.indexOf("}")==-1) s = s.substring(1,s.length()); else { if (s.indexOf("{")!=-1 && s.indexOf("}")!=-1) s = s.substring(1,s.indexOf("}")); else { if (s.indexOf("{")==-1 && s.indexOf("}")!=-1) s = s.substring(0,s.indexOf("}")); } } for (int j = 0; (j< (transaction.length)) && (found == false); j++) { String myStr = "Attr"+j+"Value"+transaction[j]; Integer n = (Integer)hash.get(myStr); if (n != null) { if (Integer.parseInt(s) == n.intValue()) found = true; } else { // if the element doesn't appear in the hashtable you cannot say 
anything about it //found = true; } } if (!found) break; // element not found.. pass to the next rule } if (found) { // if finishing inspecting a rule found remains true, it means that a matching rule rules[index]= new String(line); index++; if (index == (num_rules)) return rules; } // end if } // end else } // end while (!eof) file.close(); } catch (IOException e) { e.printStackTrace(); } /*** possible modification: if I find one level-I rule exit ***/ if (index > 0) return rules; /*** end modification ***/ // if program arrives here, it means that not enough rules could be extracted from levelI.. let's move to levelII //System.out.println("Pass to level II.."); file = null; st = null; try { file = new FileReader(levelII_path); } catch (FileNotFoundException e) { e.printStackTrace(); } buff = new BufferedReader(file); eof = false; found = false; line = null; try { while (!eof) { line = buff.readLine(); if (line == null) eof = true; else { st = new StringTokenizer(line, ","); while (st.hasMoreTokens()) { found = false; String s = st.nextToken(); if (s.indexOf("{")!=-1 && s.indexOf("}")==-1) s = s.substring(1,s.length()); else { if (s.indexOf("{")!=-1 && s.indexOf("}")!=-1) s = s.substring(1,s.indexOf("}")); else { if (s.indexOf("{")==-1 && s.indexOf("}")!=-1) s = s.substring(0,s.indexOf("}")); } } //System.out.println("s: "+s+" "); for (int j = 0; (j< transaction.length) && (found == false); j++) { String myStr = "Attr"+j+"Value"+transaction[j]; Integer n = (Integer)hash.get(myStr); if (n != null) { if (Integer.parseInt(s) == n.intValue()) found = true; } else { // if the element doesn't appear in the hashtable you cannot say anything about it //found = true; } } if (!found) break; // element not found.. 
pass to the next rule } if (found) { // if finishing inspecting a rule found remains true, it means that a matching rule rules[index]= new String(line); index++; if (index == (num_rules)) return rules; } // end if } // end else } // end while (!eof) file.close(); } catch (IOException e) { e.printStackTrace(); } // if program arrives here, it means that not enough rules could be extracted from levelI or II.. // return Null for remaining not extracted rules return rules; } static // selection of the first num_rules rules that classify the transaction String[] selection_HASH_numeric(double[] transaction, String levelI_path, String levelII_path, int num_rules, int num_features, Hashtable hash) { if (hash.isEmpty()== true) { System.err.println("Error on hash table!\n"); return null; } String[] rules = new String[num_rules]; int index = 0; //double[] transaction = new double[num_features+1]; // read the list of transactions //transaction = Test_instance.toDoubleArray(); for (int i = 0; i < num_rules; i++) rules[i] = null; // scan levelI rules and verify if all the items in each rule are contained into the transaction FileReader file = null; StringTokenizer st; try { file = new FileReader(levelI_path); } catch (FileNotFoundException e) { e.printStackTrace(); } BufferedReader buff = new BufferedReader(file); boolean eof = false; boolean found = false; String line = null; try { while (!eof) { line = buff.readLine(); if (line == null) eof = true; else { st = new StringTokenizer(line, ","); while (st.hasMoreTokens()) { found = false; String s = st.nextToken(); if (s.indexOf("{")!=-1 && s.indexOf("}")==-1) s = s.substring(1,s.length()); else { if (s.indexOf("{")!=-1 && s.indexOf("}")!=-1) s = s.substring(1,s.indexOf("}")); else { if (s.indexOf("{")==-1 && s.indexOf("}")!=-1) s = s.substring(0,s.indexOf("}")); } } //System.out.println("s: "+s+" "); for (int j = 0; (j< transaction.length) && (found == false); j++) { //if (transaction[j] == Double.parseDouble(s)) found = true; // 
found = true; String myStr = "Attr"+j+"Value"+(int)transaction[j]; Integer n = (Integer)hash.get(myStr); if (n != null) { if (Double.parseDouble(s) == n.intValue()) found = true; } else { // if the element doesn't appear in the hashtable you cannot say anything about it //found = true; } } if (!found) break; // element not found.. pass to the next rule } if (found) { // if finishing inspecting a rule found remains true, it means that a matching rule rules[index]= new String(line); index++; if (index == (num_rules)) return rules; } // end if } // end else } // end while (!eof) file.close(); } catch (IOException e) { e.printStackTrace(); } /*** possible modification: if I find one level-I rule exit ***/ if (index > 0) return rules; /*** end modification ***/ // if program arrives here, it means that not enough rules could be extracted from levelI.. let's move to levelII //System.out.println("pass to level II.."); file = null; st = null; try { file = new FileReader(levelII_path); } catch (FileNotFoundException e) { e.printStackTrace(); } buff = new BufferedReader(file); eof = false; found = false; line = null; try { while (!eof) { line = buff.readLine(); if (line == null) eof = true; else { st = new StringTokenizer(line, ","); while (st.hasMoreTokens()) { found = false; String s = st.nextToken(); if (s.indexOf("{")!=-1 && s.indexOf("}")==-1) s = s.substring(1,s.length()); else { if (s.indexOf("{")!=-1 && s.indexOf("}")!=-1) s = s.substring(1,s.indexOf("}")); else { if (s.indexOf("{")==-1 && s.indexOf("}")!=-1) s = s.substring(0,s.indexOf("}")); } } for (int j = 0; (j< num_features) && (found == false); j++) { //if (transaction[j] == Double.parseDouble(s)) found = true; // found = true; String myStr = "Attr"+j+"Value"+Double.parseDouble(s); Integer n = (Integer)hash.get(myStr); if (n != null) { if (transaction[j] == n.intValue()) found = true; } else { // if the element doesn't appear in the hashtable you cannot say anything about it } } if (!found) break; // element 
not found.. pass to the next rule } if (found) { // if finishing inspecting a rule found remains true, it means that a matching rule rules[index]= new String(line); index++; if (index == (num_rules)) return rules; } // end if } // end else } // end while (!eof) file.close(); } catch (IOException e) { e.printStackTrace(); } // if program arrives here, it means that not enough rules could be extracted from levelI or II.. // return Null for remaining not extracted rules return rules; } } // end class
27,760
39.586257
149
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/rules/ConjunctiveRule.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * ConjunctiveRule.java * Copyright (C) 2001 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.rules; import weka.classifiers.Classifier; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.ContingencyTables; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.Capabilities.Capability; import java.io.Serializable; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * This class implements a single conjunctive rule learner that can predict for numeric and nominal class labels.<br/> * <br/> * A rule consists of antecedents "AND"ed together and the consequent (class value) for the classification/regression. In this case, the consequent is the distribution of the available classes (or mean for a numeric value) in the dataset. 
If the test instance is not covered by this rule, then it's predicted using the default class distributions/value of the data not covered by the rule in the training data.This learner selects an antecedent by computing the Information Gain of each antecendent and prunes the generated rule using Reduced Error Prunning (REP) or simple pre-pruning based on the number of antecedents.<br/> * <br/> * For classification, the Information of one antecedent is the weighted average of the entropies of both the data covered and not covered by the rule.<br/> * For regression, the Information is the weighted average of the mean-squared errors of both the data covered and not covered by the rule.<br/> * <br/> * In pruning, weighted average of the accuracy rates on the pruning data is used for classification while the weighted average of the mean-squared errors on the pruning data is used for regression.<br/> * <br/> * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N &lt;number of folds&gt; * Set number of folds for REP * One fold is used as pruning set. * (default 3)</pre> * * <pre> -R * Set if NOT uses randomization * (default:use randomization)</pre> * * <pre> -E * Set whether consider the exclusive * expressions for nominal attributes * (default false)</pre> * * <pre> -M &lt;min. weights&gt; * Set the minimal weights of instances * within a split. 
* (default 2.0)</pre> * * <pre> -P &lt;number of antecedents&gt; * Set number of antecedents for pre-pruning * if -1, then REP is used * (default -1)</pre> * * <pre> -S &lt;seed&gt; * Set the seed of randomization * (default 1)</pre> * <!-- options-end --> * * @author Xin XU (xx5@cs.waikato.ac.nz) * @version $Revision: 5529 $ */ public class ConjunctiveRule extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler{ /** for serialization */ static final long serialVersionUID = -5938309903225087198L; /** The number of folds to split data into Grow and Prune for REP*/ private int m_Folds = 3; /** The class attribute of the data*/ private Attribute m_ClassAttribute; /** The vector of antecedents of this rule*/ protected FastVector m_Antds = null; /** The default rule distribution of the data not covered*/ protected double[] m_DefDstr = null; /** The consequent of this rule */ protected double[] m_Cnsqt = null; /** Number of classes in the training data */ private int m_NumClasses = 0; /** The seed to perform randomization */ private long m_Seed = 1; /** The Random object used for randomization */ private Random m_Random = null; /** The predicted classes recorded for each antecedent in the growing data */ private FastVector m_Targets; /** Whether to use exlusive expressions for nominal attributes */ private boolean m_IsExclude = false; /** The minimal number of instance weights within a split*/ private double m_MinNo = 2.0; /** The number of antecedents in pre-pruning */ private int m_NumAntds = -1; /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "This class implements a single conjunctive rule learner that can predict " + "for numeric and nominal class labels.\n\n" + "A rule consists of antecedents \"AND\"ed together and the consequent (class value) " + "for the classification/regression. 
In this case, the consequent is the " + "distribution of the available classes (or mean for a numeric value) in the dataset. " + "If the test instance is not covered by this rule, then it's predicted " + "using the default class distributions/value of the data not covered by the " + "rule in the training data." + "This learner selects an antecedent by computing the Information Gain of each " + "antecendent and prunes the generated rule using Reduced Error Prunning (REP) " + "or simple pre-pruning based on the number of antecedents.\n\n" + "For classification, the Information of one antecedent is the weighted average of " + "the entropies of both the data covered and not covered by the rule.\n" + "For regression, the Information is the weighted average of the mean-squared errors " + "of both the data covered and not covered by the rule.\n\n" + "In pruning, weighted average of the accuracy rates on the pruning data is used " + "for classification while the weighted average of the mean-squared errors " + "on the pruning data is used for regression.\n\n"; } /** * The single antecedent in the rule, which is composed of an attribute and * the corresponding value. There are two inherited classes, namely NumericAntd * and NominalAntd in which the attributes are numeric and nominal respectively. */ private abstract class Antd implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -8729076306737827571L; /** The attribute of the antecedent */ protected Attribute att; /** The attribute value of the antecedent. 
	For numeric attribute, value is either 0(1st bag) or 1(2nd bag) */
    protected double value;

    /** The maximum infoGain achieved by this antecedent test */
    protected double maxInfoGain;

    /** The information of this antecedent test on the growing data */
    protected double inform;

    /** The parameter related to the meanSquaredError of the data not covered
	by the previous antecedents when the class is numeric */
    protected double uncoverWtSq, uncoverWtVl, uncoverSum;

    /** The parameters related to the data not covered by the previous
	antecedents when the class is nominal */
    protected double[] uncover;

    /** Constructor for nominal class */
    public Antd(Attribute a, double[] unc){
      att=a;
      value=Double.NaN;
      maxInfoGain = 0;
      inform = Double.NaN;
      uncover = unc;
    }

    /**
     * Constructor for numeric class
     */
    public Antd(Attribute a, double uncoveredWtSq,
		double uncoveredWtVl, double uncoveredWts){
      att=a;
      value=Double.NaN;
      maxInfoGain = 0;
      inform = Double.NaN;
      uncoverWtSq = uncoveredWtSq;
      uncoverWtVl = uncoveredWtVl;
      uncoverSum = uncoveredWts;
    }

    /* The abstract members for inheritance */
    public abstract Instances[] splitData(Instances data, double defInfo);
    public abstract boolean isCover(Instance inst);
    public abstract String toString();

    /* Get functions of this antecedent */
    public Attribute getAttr(){ return att; }
    public double getAttrValue(){ return value; }
    public double getMaxInfoGain(){ return maxInfoGain; }
    // NOTE(review): `inform` is set by the subclass splitData() implementations;
    // NumericAntd stores the weighted information itself while NominalAntd
    // stores (gain - default info) — see the note in NominalAntd.splitData.
    public double getInfo(){ return inform;}

    /**
     * Function used to calculate the weighted mean squared error,
     * i.e., sum[x-avg(x)]^2 based on the given elements of the formula:
     * meanSquaredError = sum(Wi*Xi^2) - (sum(WiXi))^2/sum(Wi)
     *
     * @param weightedSq sum(Wi*Xi^2)
     * @param weightedValue sum(WiXi)
     * @param sum sum of weights
     * @return the weighted mean-squared error; 0 when the weight sum is
     *         (numerically) zero, guarding against division by zero
     */
    protected double wtMeanSqErr(double weightedSq, double weightedValue, double sum){
      if(Utils.smOrEq(sum, 1.0E-6))
	return 0;
      return (weightedSq - (weightedValue * weightedValue) / sum);
    }

    /**
     * Function used to calculate the entropy of given vector of values
     * entropy = (1/sum)*{-sigma[i=1..P](Xi*log2(Xi)) + sum*log2(sum)}
     * where P is the length of the vector
     *
     * @param value the given vector of values
     * @param sum the sum of the given values.  It's provided just for efficiency.
     * @return the entropy; 0 when the sum is (numerically) zero
     */
    protected double entropy(double[] value, double sum){
      if(Utils.smOrEq(sum, 1.0E-6))
	return 0;
      double entropy = 0;
      for(int i=0; i < value.length; i++){
	// skip zero counts: 0*log2(0) is taken as 0
	if(!Utils.eq(value[i],0))
	  entropy -= value[i] * Utils.log2(value[i]);
      }
      entropy += sum * Utils.log2(sum);
      entropy /= sum;
      return entropy;
    }

    /**
     * Returns the revision string.
     *
     * @return		the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 5529 $");
    }
  }

  /**
   * The antecedent with numeric attribute
   */
  private class NumericAntd 
    extends Antd {

    /** for serialization */
    static final long serialVersionUID = -7957266498918210436L;

    /** The split point for this numeric antecedent */
    private double splitPoint;

    /**
     * Constructor for nominal class
     */
    public NumericAntd(Attribute a, double[] unc){
      super(a, unc);
      splitPoint = Double.NaN;
    }

    /**
     * Constructor for numeric class
     */
    public NumericAntd(Attribute a, double sq, double vl, double wts){
      super(a, sq, vl, wts);
      splitPoint = Double.NaN;
    }

    /**
     * Get split point of this numeric antecedent
     *
     * @return the split point
     */
    public double getSplitPoint(){
      return splitPoint;
    }

    /**
     * Implements the splitData function.
     * This procedure is to split the data into two bags according
     * to the information gain of the numeric attribute value
     * the data with missing values are stored in the last split.
     * The maximum infoGain is also calculated.
     *
     * @param insts the data to be split
     * @param defInfo the default information for data
     * @return the array of data after split
     */
    public Instances[] splitData(Instances insts, double defInfo){
      Instances data = new Instances(insts);
      data.sort(att);
      int total=data.numInstances();// Total number of instances without
      // missing value for att
      maxInfoGain = 0;
      value = 0;

      // Compute minimum number of Instances required in each split
      double minSplit;
      if(m_ClassAttribute.isNominal()){
	minSplit =  0.1 * (data.sumOfWeights()) /
	  ((double)m_ClassAttribute.numValues());
	if (Utils.smOrEq(minSplit,m_MinNo))
	  minSplit = m_MinNo;
	else if (Utils.gr(minSplit,25))
	  minSplit = 25;
      }
      else
	minSplit = m_MinNo;

      double[] fst=null, snd=null, missing=null;
      if(m_ClassAttribute.isNominal()){
	fst = new double[m_NumClasses];
	snd = new double[m_NumClasses];
	missing = new double[m_NumClasses];

	for(int v=0; v < m_NumClasses; v++)
	  fst[v]=snd[v]=missing[v]=0.0;
      }
      double fstCover=0, sndCover=0, fstWtSq=0, sndWtSq=0, fstWtVl=0, sndWtVl=0;

      int split=1;                  // Current split position
      int prev=0;                   // Previous split position
      int finalSplit=split;         // Final split position

      // First pass: everything starts in the second ("> split") bag; since
      // data.sort(att) puts missing values last, the first missing marks
      // the end of the usable instances (total = x).
      for(int x=0; x<data.numInstances(); x++){
	Instance inst = data.instance(x);
	if(inst.isMissing(att)){
	  total = x;
	  break;
	}

	sndCover += inst.weight();
	if(m_ClassAttribute.isNominal()) // Nominal class
	  snd[(int)inst.classValue()] += inst.weight();
	else{                            // Numeric class
	  sndWtSq += inst.weight() * inst.classValue() * inst.classValue();
	  sndWtVl += inst.weight() * inst.classValue();
	}
      }

      // Enough Instances with known values?
      if (Utils.sm(sndCover,(2*minSplit)))
	return null;

      double msingWtSq=0, msingWtVl=0;
      Instances missingData = new Instances(data, 0);
      for(int y=total; y < data.numInstances(); y++){
	Instance inst = data.instance(y);
	missingData.add(inst);
	if(m_ClassAttribute.isNominal())
	  missing[(int)inst.classValue()] += inst.weight();
	else{
	  msingWtSq += inst.weight() * inst.classValue() * inst.classValue();
	  msingWtVl += inst.weight() * inst.classValue();
	}
      }

      if(total == 0) return null; // Data all missing for the attribute
      splitPoint = data.instance(total-1).value(att);

      for(; split < total; split++){
	if(!Utils.eq(data.instance(split).value(att), // Can't split
		     data.instance(prev).value(att))){// within same value

	  // Move the split point: shift instances [prev, split) from the
	  // second bag into the first bag, updating the running statistics.
	  for(int y=prev; y<split; y++){
	    Instance inst = data.instance(y);
	    fstCover += inst.weight(); sndCover -= inst.weight();
	    if(m_ClassAttribute.isNominal()){ // Nominal class
	      fst[(int)inst.classValue()] += inst.weight();
	      snd[(int)inst.classValue()] -= inst.weight();
	    }
	    else{                             // Numeric class
	      fstWtSq += inst.weight() * inst.classValue() * inst.classValue();
	      fstWtVl += inst.weight() * inst.classValue();
	      sndWtSq -= inst.weight() * inst.classValue() * inst.classValue();
	      sndWtVl -= inst.weight() * inst.classValue();
	    }
	  }

	  if(Utils.sm(fstCover, minSplit) || Utils.sm(sndCover, minSplit)){
	    prev=split;  // Cannot split because either
	    continue;    // split has not enough data
	  }

	  double fstEntp = 0, sndEntp = 0;
	  if(m_ClassAttribute.isNominal()){
	    fstEntp = entropy(fst, fstCover);
	    sndEntp = entropy(snd, sndCover);
	  }
	  else{
	    // fstCover/sndCover are >= minSplit > 0 here, so the divisions are safe
	    fstEntp = wtMeanSqErr(fstWtSq, fstWtVl, fstCover)/fstCover;
	    sndEntp = wtMeanSqErr(sndWtSq, sndWtVl, sndCover)/sndCover;
	  }

	  /* Which bag has higher information gain? */
	  boolean isFirst;
	  double fstInfoGain, sndInfoGain;
	  double info, infoGain, fstInfo, sndInfo;
	  if(m_ClassAttribute.isNominal()){
	    double sum = data.sumOfWeights();
	    double otherCover, whole = sum + Utils.sum(uncover), otherEntropy;
	    double[] other = null;

	    // InfoGain of first bag
	    other = new double[m_NumClasses];
	    for(int z=0; z < m_NumClasses; z++)
	      other[z] = uncover[z] + snd[z] + missing[z];
	    otherCover = whole - fstCover;
	    otherEntropy = entropy(other, otherCover);
	    // Weighted average
	    fstInfo = (fstEntp*fstCover + otherEntropy*otherCover)/whole;
	    fstInfoGain = defInfo - fstInfo;

	    // InfoGain of second bag
	    other = new double[m_NumClasses];
	    for(int z=0; z < m_NumClasses; z++)
	      other[z] = uncover[z] + fst[z] + missing[z];
	    otherCover = whole - sndCover;
	    otherEntropy = entropy(other, otherCover);
	    // Weighted average
	    sndInfo = (sndEntp*sndCover + otherEntropy*otherCover)/whole;
	    sndInfoGain = defInfo - sndInfo;
	  }
	  else{
	    double sum = data.sumOfWeights();
	    double otherWtSq = (sndWtSq + msingWtSq + uncoverWtSq),
	      otherWtVl = (sndWtVl + msingWtVl + uncoverWtVl),
	      otherCover = (sum - fstCover + uncoverSum);

	    fstInfo = Utils.eq(fstCover, 0) ? 0 : (fstEntp * fstCover);
	    fstInfo += wtMeanSqErr(otherWtSq, otherWtVl, otherCover);
	    fstInfoGain = defInfo - fstInfo;

	    otherWtSq = (fstWtSq + msingWtSq + uncoverWtSq);
	    otherWtVl = (fstWtVl + msingWtVl + uncoverWtVl);
	    otherCover = sum - sndCover + uncoverSum;
	    sndInfo = Utils.eq(sndCover, 0) ? 0 : (sndEntp * sndCover);
	    sndInfo += wtMeanSqErr(otherWtSq, otherWtVl, otherCover);
	    sndInfoGain = defInfo - sndInfo;
	  }

	  // Prefer the bag with higher gain; on a tie, the lower entropy
	  if(Utils.gr(fstInfoGain,sndInfoGain) ||
	     (Utils.eq(fstInfoGain,sndInfoGain)&&(Utils.sm(fstEntp,sndEntp)))){
	    isFirst = true;
	    infoGain = fstInfoGain;
	    info = fstInfo;
	  }
	  else{
	    isFirst = false;
	    infoGain = sndInfoGain;
	    info = sndInfo;
	  }

	  boolean isUpdate = Utils.gr(infoGain, maxInfoGain);

	  /* Check whether so far the max infoGain */
	  if(isUpdate){
	    splitPoint = ((data.instance(split).value(att)) +
			  (data.instance(prev).value(att)))/2.0;
	    // value == 0 means "att <= splitPoint" (first bag),
	    // value == 1 means "att > splitPoint" (second bag)
	    value = ((isFirst) ? 0 : 1);
	    inform = info;
	    maxInfoGain = infoGain;
	    finalSplit = split;
	  }
	  prev=split;
	}
      }

      /* Split the data */
      Instances[] splitData = new Instances[3];
      splitData[0] = new Instances(data, 0, finalSplit);
      splitData[1] = new Instances(data, finalSplit, total-finalSplit);
      splitData[2] = new Instances(missingData);

      return splitData;
    }

    /**
     * Whether the instance is covered by this antecedent
     *
     * @param inst the instance in question
     * @return the boolean value indicating whether the instance is covered
     *         by this antecedent
     */
    public boolean isCover(Instance inst){
      boolean isCover=false;
      if(!inst.isMissing(att)){
	if(Utils.eq(value, 0)){
	  if(Utils.smOrEq(inst.value(att), splitPoint))
	    isCover=true;
	}
	else if(Utils.gr(inst.value(att), splitPoint))
	  isCover=true;
      }
      return isCover;
    }

    /**
     * Prints this antecedent
     *
     * @return a textual description of this antecedent
     */
    public String toString() {
      String symbol = Utils.eq(value, 0.0) ? " <= " : " > ";
      return (att.name() + symbol + Utils.doubleToString(splitPoint, 6));
    }

    /**
     * Returns the revision string.
     *
     * @return		the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 5529 $");
    }
  }

  /**
   * The antecedent with nominal attribute
   */
  class NominalAntd 
    extends Antd {

    /** for serialization */
    static final long serialVersionUID = -5949864163376447424L;

    /* The parameters of infoGain calculated for each attribute value */
    private double[][] stats;
    private double[] coverage;
    // true -> expression is "att = value"; false -> "att != value"
    private boolean isIn;

    /**
     * Constructor for nominal class
     */
    public NominalAntd(Attribute a, double[] unc){
      super(a, unc);
      int bag = att.numValues();
      stats = new double[bag][m_NumClasses];
      coverage = new double[bag];
      isIn = true;
    }

    /**
     * Constructor for numeric class
     */
    public NominalAntd(Attribute a, double sq, double vl, double wts){
      super(a, sq, vl, wts);
      int bag = att.numValues();
      stats = null;
      coverage = new double[bag];
      isIn = true;
    }

    /**
     * Implements the splitData function.
     * This procedure is to split the data into bags according
     * to the nominal attribute value
     * the data with missing values are stored in the last bag.
     * The infoGain for each bag is also calculated.
     *
     * @param data the data to be split
     * @param defInfo the default information for data
     * @return the array of data after split
     */
    public Instances[] splitData(Instances data, double defInfo){
      int bag = att.numValues();
      Instances[] splitData = new Instances[bag+1];
      double[] wSq = new double[bag];
      double[] wVl = new double[bag];
      double totalWS=0, totalWV=0, msingWS=0, msingWV=0, sum=data.sumOfWeights();
      double[] all = new double[m_NumClasses];
      double[] missing = new double[m_NumClasses];

      for(int w=0; w < m_NumClasses; w++)
	all[w] = missing[w] = 0;

      for(int x=0; x<bag; x++){
	coverage[x] = wSq[x] = wVl[x] = 0;
	if(stats != null)
	  for(int y=0; y < m_NumClasses; y++)
	    stats[x][y] = 0;
	splitData[x] = new Instances(data, data.numInstances());
      }
      splitData[bag] = new Instances(data, data.numInstances());

      // Record the statistics of data
      for(int x=0; x<data.numInstances(); x++){
	Instance inst=data.instance(x);
	if(!inst.isMissing(att)){
	  int v = (int)inst.value(att);
	  splitData[v].add(inst);
	  coverage[v] += inst.weight();
	  if(m_ClassAttribute.isNominal()){ // Nominal class
	    stats[v][(int)inst.classValue()] += inst.weight();
	    all[(int)inst.classValue()] += inst.weight();
	  }
	  else{                             // Numeric class
	    wSq[v] += inst.weight() * inst.classValue() * inst.classValue();
	    wVl[v] += inst.weight() * inst.classValue();
	    totalWS += inst.weight() * inst.classValue() * inst.classValue();
	    totalWV += inst.weight() * inst.classValue();
	  }
	}
	else{
	  // Missing attribute value: instance goes into the extra last bag
	  splitData[bag].add(inst);
	  if(m_ClassAttribute.isNominal()){ // Nominal class
	    all[(int)inst.classValue()] += inst.weight();
	    missing[(int)inst.classValue()] += inst.weight();
	  }
	  else{                             // Numeric class
	    totalWS += inst.weight() * inst.classValue() * inst.classValue();
	    totalWV += inst.weight() * inst.classValue();
	    msingWS += inst.weight() * inst.classValue() * inst.classValue();
	    msingWV += inst.weight() * inst.classValue();
	  }
	}
      }

      // The total weights of the whole grow data
      double whole;
      if(m_ClassAttribute.isNominal())
	whole = sum + Utils.sum(uncover);
      else
	whole = sum + uncoverSum;

      // Find the split
      double minEntrp=Double.MAX_VALUE;
      maxInfoGain = 0;

      // Check if >=2 splits have more than the minimal data
      int count=0;
      for(int x=0; x<bag; x++)
	if(Utils.grOrEq(coverage[x], m_MinNo))
	  ++count;

      if(count < 2){ // Don't split
	maxInfoGain = 0;
	inform = defInfo;
	value = Double.NaN;
	return null;
      }

      for(int x=0; x<bag; x++){
	double t = coverage[x], entrp, infoGain;

	if(Utils.sm(t, m_MinNo))
	  continue;

	if(m_ClassAttribute.isNominal()){ // Nominal class
	  double[] other = new double[m_NumClasses];
	  for(int y=0; y < m_NumClasses; y++)
	    other[y] = all[y] - stats[x][y] + uncover[y];
	  double otherCover = whole - t;

	  // Entropies of data covered and uncovered
	  entrp = entropy(stats[x], t);
	  double uncEntp = entropy(other, otherCover);

	  // Weighted average
	  infoGain = defInfo - (entrp*t + uncEntp*otherCover)/whole;
	}
	else{                             // Numeric class
	  double weight = (whole - t);
	  entrp = wtMeanSqErr(wSq[x], wVl[x], t)/t;
	  infoGain = defInfo - (entrp * t) -
	    wtMeanSqErr((totalWS-wSq[x]+uncoverWtSq),
			(totalWV-wVl[x]+uncoverWtVl),
			weight);
	}

	// Test the exclusive expression ("att != value")
	boolean isWithin =true;
	if(m_IsExclude){
	  double infoGain2, entrp2;
	  if(m_ClassAttribute.isNominal()){ // Nominal class
	    double[] other2 = new double[m_NumClasses];
	    double[] notIn = new double[m_NumClasses];
	    for(int y=0; y < m_NumClasses; y++){
	      other2[y] = stats[x][y] + missing[y] + uncover[y];
	      notIn[y] = all[y] - stats[x][y] - missing[y];
	    }

	    double msSum = Utils.sum(missing);
	    double otherCover2 = t + msSum + Utils.sum(uncover);

	    entrp2 = entropy(notIn, (sum-t-msSum));
	    double uncEntp2 = entropy(other2, otherCover2);
	    infoGain2 = defInfo -
	      (entrp2*(sum-t-msSum) + uncEntp2*otherCover2)/whole;
	  }
	  else{                             // Numeric class
	    double msWts = splitData[bag].sumOfWeights();
	    double weight2 = t + uncoverSum + msWts;

	    entrp2 = wtMeanSqErr((totalWS-wSq[x]-msingWS),
				 (totalWV-wVl[x]-msingWV),(sum-t-msWts))
	      /(sum-t-msWts);
	    infoGain2 = defInfo - entrp2 * (sum-t-msWts) -
	      wtMeanSqErr((wSq[x]+uncoverWtSq+msingWS),
			  (wVl[x]+uncoverWtVl+msingWV),
			  weight2);
	  }

	  // Use the exclusive expression?
	  if (Utils.gr(infoGain2, infoGain) ||
	      (Utils.eq(infoGain2, infoGain) && Utils.sm(entrp2, entrp))){
	    infoGain = infoGain2;
	    entrp = entrp2;
	    isWithin =false;
	  }
	}

	// Test this split
	if (Utils.gr(infoGain, maxInfoGain) ||
	    (Utils.eq(infoGain, maxInfoGain) && Utils.sm(entrp, minEntrp))){
	  value = (double)x;
	  maxInfoGain = infoGain;
	  // NOTE(review): this stores (gain - default info), i.e. the NEGATIVE
	  // of the remaining information, whereas NumericAntd.splitData stores
	  // the weighted information itself in `inform`. Verify how callers of
	  // getInfo() consume this before changing either.
	  inform = maxInfoGain - defInfo;
	  minEntrp = entrp;
	  isIn = isWithin;
	}
      }

      return splitData;
    }

    /**
     * Whether the instance is covered by this antecedent
     *
     * @param inst the instance in question
     * @return the boolean value indicating whether the instance is covered
     *         by this antecedent
     */
    public boolean isCover(Instance inst){
      boolean isCover=false;
      if(!inst.isMissing(att)){
	if(isIn){
	  if(Utils.eq(inst.value(att), value))
	    isCover=true;
	}
	else if(!Utils.eq(inst.value(att), value))
	  isCover=true;
      }
      return isCover;
    }

    /**
     * Whether the expression is "att = value" or att != value"
     * for this nominal attribute.  True if in the former expression,
     * otherwise the latter
     *
     * @return the boolean value
     */
    public boolean isIn(){
      return isIn;
    }

    /**
     * Prints this antecedent
     *
     * @return a textual description of this antecedent
     */
    public String toString() {
      String symbol = isIn ? " = " : " != ";
      return (att.name() + symbol + att.value((int)value));
    }

    /**
     * Returns the revision string.
     *
     * @return		the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 5529 $");
    }
  }

  /**
   * Returns an enumeration describing the available options
   * Valid options are: <p>
   *
   * -N number <br>
   * Set number of folds for REP. One fold is
   * used as the pruning set. (Default: 3) <p>
   *
   * -R <br>
   * Set if NOT randomize the data before split to growing and
   * pruning data.
   If NOT set, the seed of randomization is
   * specified by the -S option. (Default: randomize) <p>
   *
   * -S <br>
   * Seed of randomization. (Default: 1)<p>
   *
   * -E <br>
   * Set whether consider the exclusive expressions for nominal
   * attribute split. (Default: false) <p>
   *
   * -M number <br>
   * Set the minimal weights of instances within a split.
   * (Default: 2) <p>
   *
   * -P number <br>
   * Set the number of antecedents allowed in the rule if pre-pruning
   * is used. If this value is other than -1, then pre-pruning will be
   * used, otherwise the rule uses REP. (Default: -1) <p>
   *
   * @return an enumeration of all the available options
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(6);

    newVector.addElement(new Option("\tSet number of folds for REP\n" +
				    "\tOne fold is used as pruning set.\n" +
				    "\t(default 3)","N", 1, "-N <number of folds>"));

    newVector.addElement(new Option("\tSet if NOT uses randomization\n" +
				    "\t(default:use randomization)","R", 0, "-R"));

    newVector.addElement(new Option("\tSet whether consider the exclusive\n" +
				    "\texpressions for nominal attributes\n"+
				    "\t(default false)","E", 0, "-E"));

    newVector.addElement(new Option("\tSet the minimal weights of instances\n" +
				    "\twithin a split.\n" +
				    "\t(default 2.0)","M", 1, "-M <min. weights>"));

    newVector.addElement(new Option("\tSet number of antecedents for pre-pruning\n" +
				    "\tif -1, then REP is used\n" +
				    "\t(default -1)","P", 1, "-P <number of antecedents>"));

    newVector.addElement(new Option("\tSet the seed of randomization\n" +
				    "\t(default 1)","S", 1, "-S <seed>"));

    return newVector.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -N &lt;number of folds&gt;
   * Set number of folds for REP
   * One fold is used as pruning set.
   * (default 3)</pre>
   *
   * <pre> -R
   * Set if NOT uses randomization
   * (default:use randomization)</pre>
   *
   * <pre> -E
   * Set whether consider the exclusive
   * expressions for nominal attributes
   * (default false)</pre>
   *
   * <pre> -M &lt;min. weights&gt;
   * Set the minimal weights of instances
   * within a split.
   * (default 2.0)</pre>
   *
   * <pre> -P &lt;number of antecedents&gt;
   * Set number of antecedents for pre-pruning
   * if -1, then REP is used
   * (default -1)</pre>
   *
   * <pre> -S &lt;seed&gt;
   * Set the seed of randomization
   * (default 1)</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String numFoldsString = Utils.getOption('N', options);
    if (numFoldsString.length() != 0)
      m_Folds = Integer.parseInt(numFoldsString);
    else
      m_Folds = 3;

    String minNoString = Utils.getOption('M', options);
    if (minNoString.length() != 0)
      m_MinNo = Double.parseDouble(minNoString);
    else
      m_MinNo = 2.0;

    String seedString = Utils.getOption('S', options);
    if (seedString.length() != 0)
      // NOTE(review): parsed with Integer.parseInt although m_Seed is a long,
      // so seeds outside the int range throw NumberFormatException here;
      // Long.parseLong would accept the field's full range.
      m_Seed = Integer.parseInt(seedString);
    else
      m_Seed = 1;

    String numAntdsString = Utils.getOption('P', options);
    if (numAntdsString.length() != 0)
      m_NumAntds = Integer.parseInt(numAntdsString);
    else
      m_NumAntds = -1;

    m_IsExclude = Utils.getFlag('E', options);
  }

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {

    // 9 slots = four flag/value pairs (-N -M -P -S) plus the optional -E;
    // unused trailing slots are filled with empty strings below.
    String [] options = new String [9];
    int current = 0;
    options[current++] = "-N"; options[current++] = "" + m_Folds;
    options[current++] = "-M"; options[current++] = "" + m_MinNo;
    options[current++] = "-P"; options[current++] = "" + m_NumAntds;
    options[current++] = "-S"; options[current++] = "" + m_Seed;

    if(m_IsExclude)
      options[current++] = "-E";

    while (current < options.length)
      options[current++] = "";
    return options;
  }

  /** The access functions for parameters */

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String foldsTipText() {
    return "Determines the amount of data used for pruning. One fold is used for "
      + "pruning, the rest for growing the rules.";
  }

  /**
   * the number of folds to use
   *
   * @param folds the number of folds to use
   */
  public void setFolds(int folds) {
    m_Folds = folds;
  }

  /**
   * returns the current number of folds
   *
   * @return the number of folds
   */
  public int getFolds() {
    return m_Folds;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String seedTipText() {
    return "The seed used for randomizing the data.";
  }

  /**
   * sets the seed for randomizing the data
   *
   * @param s the seed value
   */
  public void setSeed(long s) {
    m_Seed = s;
  }

  /**
   * returns the current seed value for randomizing the data
   *
   * @return the seed value
   */
  public long getSeed() {
    return m_Seed;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String exclusiveTipText() {
    return "Set whether to consider exclusive expressions for nominal "
      + "attribute splits.";
  }

  /**
   * Returns whether exclusive expressions for nominal attributes splits are
   * considered
   *
   * @return true if exclusive expressions for nominal attributes splits are
   *         considered
   */
  public boolean getExclusive() {
    return m_IsExclude;
  }

  /**
   * Sets whether exclusive expressions for nominal attributes splits are
   * considered
   *
   * @param e whether to consider exclusive expressions for nominal attribute
   * splits
   */
  public void setExclusive(boolean e) {
    m_IsExclude = e;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String minNoTipText() {
    return "The minimum total weight of the instances in a rule.";
  }

  /**
   * Sets the minimum total weight of the instances in a rule
   *
   * @param m the minimum total weight of the instances in a rule
   */
  public void setMinNo(double m) {
    m_MinNo = m;
  }

  /**
   * Gets the minimum total weight of the instances in a rule
   *
   * @return the minimum total weight of the instances in a rule
   */
  public double getMinNo(){
    return m_MinNo;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String numAntdsTipText() {
    return "Set the number of antecedents allowed in the rule if "
      + "pre-pruning is used.  If this value is other than -1, then "
      + "pre-pruning will be used, otherwise the rule uses reduced-error "
      + "pruning.";
  }

  /**
   * Sets the number of antecedants
   *
   * @param n the number of antecedants
   */
  public void setNumAntds(int n) {
    m_NumAntds = n;
  }

  /**
   * Gets the number of antecedants
   *
   * @return the number of antecedants
   */
  public int getNumAntds(){
    return m_NumAntds;
  }

  /**
   * Returns default capabilities of the classifier.
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Builds a single rule learner with REP dealing with nominal classes or * numeric classes. * For nominal classes, this rule learner predicts a distribution on * the classes. * For numeric classes, this learner predicts a single value. * * @param instances the training data * @throws Exception if classifier can't be built successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(instances); // remove instances with missing class Instances data = new Instances(instances); data.deleteWithMissingClass(); if(data.numInstances() < m_Folds) throw new Exception("Not enough data for REP."); m_ClassAttribute = data.classAttribute(); if(m_ClassAttribute.isNominal()) m_NumClasses = m_ClassAttribute.numValues(); else m_NumClasses = 1; m_Antds = new FastVector(); m_DefDstr = new double[m_NumClasses]; m_Cnsqt = new double[m_NumClasses]; m_Targets = new FastVector(); m_Random = new Random(m_Seed); if(m_NumAntds != -1){ grow(data); } else{ data.randomize(m_Random); // Split data into Grow and Prune data.stratify(m_Folds); Instances growData=data.trainCV(m_Folds, m_Folds-1, m_Random); Instances pruneData=data.testCV(m_Folds, m_Folds-1); grow(growData); // Build this rule prune(pruneData); // Prune this rule } if(m_ClassAttribute.isNominal()){ Utils.normalize(m_Cnsqt); if(Utils.gr(Utils.sum(m_DefDstr), 0)) Utils.normalize(m_DefDstr); } } /** * Computes 
class distribution for the given instance.
   *
   * @param instance the instance for which distribution is to be computed
   * @return the class distribution for the given instance
   * @throws Exception if given instance is null
   */
  public double[] distributionForInstance(Instance instance) throws Exception {
    if(instance == null)
      throw new Exception("Testing instance is NULL!");

    // covered instances get the rule consequent, others the default distribution
    if (isCover(instance))
      return m_Cnsqt;
    else
      return m_DefDstr;
  }

  /**
   * Whether the instance is covered by this rule (i.e. satisfies every
   * antecedent; a rule with no antecedents covers everything)
   *
   * @param datum the instance in question
   * @return the boolean value indicating whether the instance is covered by this rule
   */
  public boolean isCover(Instance datum){
    boolean isCover = true;

    for(int i=0; i<m_Antds.size(); i++){
      Antd antd = (Antd)m_Antds.elementAt(i);
      if(!antd.isCover(datum)){
        isCover = false;
        break;
      }
    }

    return isCover;
  }

  /**
   * Whether this rule has antecedents, i.e. whether it is a default rule
   *
   * @return the boolean value indicating whether the rule has antecedents
   */
  public boolean hasAntds(){
    if (m_Antds == null)
      return false;
    else
      return (m_Antds.size() > 0);
  }

  /**
   * Build one rule using the growing data.  Antecedents are added greedily
   * by information gain (entropy for nominal class, weighted squared error
   * for numeric class) until no antecedent improves, the grow data is
   * exhausted, or the antecedent limit is reached.
   *
   * @param data the growing data used to build the rule
   */
  private void grow(Instances data){
    Instances growData = new Instances(data);
    double defInfo;
    double whole = data.sumOfWeights();

    if(m_NumAntds != 0){
      /* Class distribution for data both covered ([0]) and not covered ([1])
         by the antecedents chosen so far */
      double[][] classDstr = new double[2][m_NumClasses];

      /* Compute the default information of the growing data */
      for(int j=0; j < m_NumClasses; j++){
        classDstr[0][j] = 0;
        classDstr[1][j] = 0;
      }
      if(m_ClassAttribute.isNominal()){
        for(int i=0; i < growData.numInstances(); i++){
          Instance datum = growData.instance(i);
          classDstr[0][(int)datum.classValue()] += datum.weight();
        }
        defInfo = ContingencyTables.entropy(classDstr[0]);
      }
      else{
        for(int i=0; i < growData.numInstances(); i++){
          Instance datum = growData.instance(i);
          classDstr[0][0] += datum.weight() * datum.classValue();
        }

        // No need to be divided by the denominator because
        // it's always the same
        double defMean = (classDstr[0][0] / whole);
        defInfo = meanSquaredError(growData, defMean) * growData.sumOfWeights();
      }

      // Store the default class distribution
      double[][] tmp = new double[2][m_NumClasses];
      for(int y=0; y < m_NumClasses; y++){
        if(m_ClassAttribute.isNominal()){
          tmp[0][y] = classDstr[0][y];
          tmp[1][y] = classDstr[1][y];
        }
        else{
          tmp[0][y] = classDstr[0][y]/whole;
          tmp[1][y] = classDstr[1][y];
        }
      }
      m_Targets.addElement(tmp);

      /* Keep the record of which attributes have already been used */
      boolean[] used = new boolean[growData.numAttributes()];
      for (int k=0; k<used.length; k++)
        used[k] = false;
      int numUnused = used.length;
      double maxInfoGain, uncoveredWtSq=0, uncoveredWtVl=0, uncoveredWts=0;
      boolean isContinue = true; // The stopping criterion of this rule

      while (isContinue){
        maxInfoGain = 0;       // We require that infoGain be positive

        /* Build a list of antecedents */
        Antd oneAntd = null;
        Instances coverData = null, uncoverData = null;
        Enumeration enumAttr = growData.enumerateAttributes();
        int index = -1;

        /* Build one condition based on all attributes not used yet */
        while (enumAttr.hasMoreElements()){
          Attribute att = (Attribute)(enumAttr.nextElement());
          index++;

          Antd antd = null;
          if(m_ClassAttribute.isNominal()){
            if(att.isNumeric())
              antd = new NumericAntd(att, classDstr[1]);
            else
              antd = new NominalAntd(att, classDstr[1]);
          }
          else
            // numeric class: antecedent tracks uncovered sum-of-squares/values/weights
            if(att.isNumeric())
              antd = new NumericAntd(att, uncoveredWtSq, uncoveredWtVl, uncoveredWts);
            else
              antd = new NominalAntd(att, uncoveredWtSq, uncoveredWtVl, uncoveredWts);

          if(!used[index]){
            /* Compute the best information gain for each attribute,
               it's stored in the antecedent formed by this attribute.
               This procedure returns the data covered by the antecedent */
            Instances[] coveredData = computeInfoGain(growData, defInfo, antd);
            if(coveredData != null){
              double infoGain = antd.getMaxInfoGain();
              boolean isUpdate = Utils.gr(infoGain, maxInfoGain);

              if(isUpdate){
                oneAntd = antd;
                coverData = coveredData[0];
                uncoverData = coveredData[1];
                maxInfoGain = infoGain;
              }
            }
          }
        }

        if(oneAntd == null) break;  // no antecedent with positive gain

        // Numeric attributes can be used more than once
        if(!oneAntd.getAttr().isNumeric()){
          used[oneAntd.getAttr().index()] = true;
          numUnused--;
        }

        m_Antds.addElement(oneAntd);
        growData = coverData; // Grow data size is shrinking

        // move newly-uncovered instances into the "not covered" statistics
        for(int x=0; x < uncoverData.numInstances(); x++){
          Instance datum = uncoverData.instance(x);
          if(m_ClassAttribute.isNumeric()){
            uncoveredWtSq += datum.weight() * datum.classValue() * datum.classValue();
            uncoveredWtVl += datum.weight() * datum.classValue();
            uncoveredWts += datum.weight();
            classDstr[0][0] -= datum.weight() * datum.classValue();
            classDstr[1][0] += datum.weight() * datum.classValue();
          }
          else{
            classDstr[0][(int)datum.classValue()] -= datum.weight();
            classDstr[1][(int)datum.classValue()] += datum.weight();
          }
        }

        // Store class distribution of growing data
        tmp = new double[2][m_NumClasses];
        for(int y=0; y < m_NumClasses; y++){
          if(m_ClassAttribute.isNominal()){
            tmp[0][y] = classDstr[0][y];
            tmp[1][y] = classDstr[1][y];
          }
          else{
            tmp[0][y] = classDstr[0][y]/(whole-uncoveredWts);
            tmp[1][y] = classDstr[1][y]/uncoveredWts;
          }
        }
        m_Targets.addElement(tmp);

        defInfo = oneAntd.getInfo();
        int numAntdsThreshold = (m_NumAntds == -1) ? Integer.MAX_VALUE : m_NumAntds;

        if(Utils.eq(growData.sumOfWeights(), 0.0) ||
           (numUnused == 0) ||
           (m_Antds.size() >= numAntdsThreshold))
          isContinue = false;
      }
    }

    m_Cnsqt = ((double[][])(m_Targets.lastElement()))[0];
    m_DefDstr = ((double[][])(m_Targets.lastElement()))[1];
  }

  /**
   * Compute the best information gain for the specified antecedent
   *
   * @param instances the data based on which the infoGain is computed
   * @param defInfo the default information of data
   * @param antd the specific antecedent
   * @return the data covered and not covered by the antecedent,
   *         or null if the split fails
   */
  private Instances[] computeInfoGain(Instances instances, double defInfo, Antd antd){
    Instances data = new Instances(instances);

    /* Split the data into bags.
       The information gain of each bag is also calculated in this procedure */
    Instances[] splitData = antd.splitData(data, defInfo);
    Instances[] coveredData = new Instances[2];

    /* Get the bag of data to be used for next antecedents */
    Instances tmp1 = new Instances(data, 0);
    Instances tmp2 = new Instances(data, 0);

    if(splitData == null)
      return null;

    // last bag holds instances with missing values for this attribute
    for(int x=0; x < (splitData.length-1); x++){
      if(x == ((int)antd.getAttrValue()))
        tmp1 = splitData[x];
      else{
        for(int y=0; y < splitData[x].numInstances(); y++)
          tmp2.add(splitData[x].instance(y));
      }
    }

    if(antd.getAttr().isNominal()){ // Nominal attributes
      if(((NominalAntd)antd).isIn()){ // Inclusive expression
        coveredData[0] = new Instances(tmp1);
        coveredData[1] = new Instances(tmp2);
      }
      else{ // Exclusive expression
        coveredData[0] = new Instances(tmp2);
        coveredData[1] = new Instances(tmp1);
      }
    }
    else{ // Numeric attributes
      coveredData[0] = new Instances(tmp1);
      coveredData[1] = new Instances(tmp2);
    }

    /* Add data with missing value to the uncovered bag */
    for(int z=0; z < splitData[splitData.length-1].numInstances(); z++)
      coveredData[1].add(splitData[splitData.length-1].instance(z));

    return coveredData;
  }

  /**
   * Prune the rule using the pruning data.
   * The weighted average of accuracy rate/mean-squared error is
   * used to prune the rule.
*
   * @param pruneData the pruning data used to prune the rule
   */
  private void prune(Instances pruneData){
    Instances data = new Instances(pruneData);
    Instances otherData = new Instances(data, 0);
    double total = data.sumOfWeights();

    /* The default accuracy number and the accuracy rate on pruning data */
    double defAccu;
    if(m_ClassAttribute.isNumeric())
      defAccu = meanSquaredError(pruneData,
                                 ((double[][])m_Targets.firstElement())[0][0]);
    else{
      int predict = Utils.maxIndex(((double[][])m_Targets.firstElement())[0]);
      defAccu = computeAccu(pruneData, predict)/total;
    }

    int size = m_Antds.size();
    if(size == 0){
      m_Cnsqt = ((double[][])m_Targets.lastElement())[0];
      m_DefDstr = ((double[][])m_Targets.lastElement())[1];
      return; // Default rule before pruning
    }

    double[] worthValue = new double[size];

    /* Calculate accuracy parameters for all the antecedents in this rule */
    for(int x=0; x<size; x++){
      Antd antd = (Antd)m_Antds.elementAt(x);
      Instances newData = new Instances(data);
      if(Utils.eq(newData.sumOfWeights(), 0.0))
        break;

      data = new Instances(newData, newData.numInstances()); // Make data empty

      for(int y=0; y<newData.numInstances(); y++){
        Instance ins = newData.instance(y);
        if(antd.isCover(ins))  // Covered by this antecedent
          data.add(ins);       // Add to data for further antecedents
        else
          otherData.add(ins);  // Not covered by this antecedent
      }

      double covered, other;
      double[][] classes =
        (double[][])m_Targets.elementAt(x+1); // m_Targets has one more element
      if(m_ClassAttribute.isNominal()){
        int coverClass = Utils.maxIndex(classes[0]),
          otherClass = Utils.maxIndex(classes[1]);

        covered = computeAccu(data, coverClass);
        other = computeAccu(otherData, otherClass);
      }
      else{
        double coverClass = classes[0][0], otherClass = classes[1][0];
        covered = (data.sumOfWeights())*meanSquaredError(data, coverClass);
        other = (otherData.sumOfWeights())*meanSquaredError(otherData, otherClass);
      }

      worthValue[x] = (covered + other)/total;
    }

    /* Prune the antecedents according to the accuracy parameters,
       from the last antecedent backwards */
    for(int z=(size-1); z > 0; z--){
      // Treatment to avoid precision problems: use a relative delta
      // when the worth value is below 1.0
      double valueDelta;
      if(m_ClassAttribute.isNominal()){
        if(Utils.sm(worthValue[z], 1.0))
          valueDelta = (worthValue[z] - worthValue[z-1]) / worthValue[z];
        else
          valueDelta = worthValue[z] - worthValue[z-1];
      }
      else{
        // numeric class: worth is an error, so the sign is flipped
        if(Utils.sm(worthValue[z], 1.0))
          valueDelta = (worthValue[z-1] - worthValue[z]) / worthValue[z];
        else
          valueDelta = (worthValue[z-1] - worthValue[z]);
      }

      if(Utils.smOrEq(valueDelta, 0.0)){
        m_Antds.removeElementAt(z);
        m_Targets.removeElementAt(z+1);
      }
      else break;
    }

    // Check whether this rule is a default rule
    if(m_Antds.size() == 1){
      double valueDelta;
      if(m_ClassAttribute.isNominal()){
        if(Utils.sm(worthValue[0], 1.0))
          valueDelta = (worthValue[0] - defAccu) / worthValue[0];
        else
          valueDelta = (worthValue[0] - defAccu);
      }
      else{
        if(Utils.sm(worthValue[0], 1.0))
          valueDelta = (defAccu - worthValue[0]) / worthValue[0];
        else
          valueDelta = (defAccu - worthValue[0]);
      }

      if(Utils.smOrEq(valueDelta, 0.0)){
        m_Antds.removeAllElements();
        m_Targets.removeElementAt(1);
      }
    }

    m_Cnsqt = ((double[][])(m_Targets.lastElement()))[0];
    m_DefDstr = ((double[][])(m_Targets.lastElement()))[1];
  }

  /**
   * Private function to compute number of accurate instances
   * based on the specified predicted class
   *
   * @param data the data in question
   * @param clas the predicted class
   * @return the total weight of instances whose class equals clas
   */
  private double computeAccu(Instances data, int clas){
    double accu = 0;
    for(int i=0; i<data.numInstances(); i++){
      Instance inst = data.instance(i);
      if((int)inst.classValue() == clas)
        accu += inst.weight();
    }
    return accu;
  }

  /**
   * Private function to compute the weighted mean-squared error of
   * the specified data around the specified mean
   *
   * @param data the data in question
   * @param mean the specified mean
   * @return the weighted mean-squared error (0 if data has no weight)
   */
  private double meanSquaredError(Instances data, double mean){
    if(Utils.eq(data.sumOfWeights(), 0.0))
      return 0;

    double mSqErr = 0, sum = data.sumOfWeights();
    for(int i=0; i < data.numInstances(); i++){
      Instance datum = data.instance(i);
      mSqErr += datum.weight()*
        (datum.classValue() - mean)*
        (datum.classValue() - mean);
    }

    return (mSqErr / sum);
  }

  /**
   * Prints this rule with the specified class label
   *
   * @param att the string standing for attribute in the consequent of this rule
   * @param cl the string standing for value in the consequent of this rule
   * @return a textual description of this rule with the specified class label
   */
  public String toString(String att, String cl) {
    StringBuffer text = new StringBuffer();
    if(m_Antds.size() > 0){
      for(int j=0; j< (m_Antds.size()-1); j++)
        text.append("(" + ((Antd)(m_Antds.elementAt(j))).toString()+ ") and ");
      text.append("("+((Antd)(m_Antds.lastElement())).toString() + ")");
    }
    text.append(" => " + att + " = " + cl);

    return text.toString();
  }

  /**
   * Prints this rule
   *
   * @return a textual description of this rule
   */
  public String toString() {
    String title =
      "\n\nSingle conjunctive rule learner:\n"+
      "--------------------------------\n", body = null;
    StringBuffer text = new StringBuffer();
    if(m_ClassAttribute != null){
      if(m_ClassAttribute.isNominal()){
        body = toString(m_ClassAttribute.name(), m_ClassAttribute.value(Utils.maxIndex(m_Cnsqt)));

        text.append("\n\nClass distributions:\nCovered by the rule:\n");
        for(int k=0; k < m_Cnsqt.length; k++)
          text.append(m_ClassAttribute.value(k)+ "\t");
        text.append('\n');
        for(int l=0; l < m_Cnsqt.length; l++)
          text.append(Utils.doubleToString(m_Cnsqt[l], 6)+"\t");

        text.append("\n\nNot covered by the rule:\n");
        for(int k=0; k < m_DefDstr.length; k++)
          text.append(m_ClassAttribute.value(k)+ "\t");
        text.append('\n');
        for(int l=0; l < m_DefDstr.length; l++)
          text.append(Utils.doubleToString(m_DefDstr[l], 6)+"\t");
      }
      else
        body = toString(m_ClassAttribute.name(), Utils.doubleToString(m_Cnsqt[0], 6));
    }
    return (title + body + text.toString());
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 5529 $");
  }

  /**
   * Main method.
   *
   * @param args the options for the classifier
   */
  public static void main(String[] args) {
    runClassifier(new ConjunctiveRule(), args);
  }
}
52,724
30.106195
627
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/rules/DTNB.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * DecisionTable.java * Copyright (C) 2008 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.rules; import weka.attributeSelection.ASEvaluation; import weka.attributeSelection.ASSearch; import weka.attributeSelection.SubsetEvaluator; import weka.classifiers.bayes.NaiveBayes; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.TechnicalInformation; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.util.BitSet; import java.util.Enumeration; import java.util.Vector; /** * <!-- globalinfo-start --> * Class for building and using a decision table/naive bayes hybrid classifier. At each point in the search, the algorithm evaluates the merit of dividing the attributes into two disjoint subsets: one for the decision table, the other for naive Bayes. A forward selection search is used, where at each step, selected attributes are modeled by naive Bayes and the remainder by the decision table, and all attributes are modelled by the decision table initially. 
At each step, the algorithm also considers dropping an attribute entirely from the model.<br/>
 * <br/>
 * For more information, see: <br/>
 * <br/>
 * Mark Hall, Eibe Frank: Combining Naive Bayes and Decision Tables. In: Proceedings of the 21st Florida Artificial Intelligence Society Conference (FLAIRS), 318-319, 2008.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;inproceedings{Hall2008,
 *    author = {Mark Hall and Eibe Frank},
 *    booktitle = {Proceedings of the 21st Florida Artificial Intelligence Society Conference (FLAIRS)},
 *    pages = {318-319},
 *    publisher = {AAAI press},
 *    title = {Combining Naive Bayes and Decision Tables},
 *    year = {2008}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -X &lt;number of folds&gt;
 *  Use cross validation to evaluate features.
 *  Use number of folds = 1 for leave one out CV.
 *  (Default = leave one out CV)</pre>
 *
 * <pre> -E &lt;acc | rmse | mae | auc&gt;
 *  Performance evaluation measure to use for selecting attributes.
 *  (Default = accuracy for discrete class and rmse for numeric class)</pre>
 *
 * <pre> -I
 *  Use nearest neighbour instead of global table majority.</pre>
 *
 * <pre> -R
 *  Display decision table rules.
* </pre>
 *
 <!-- options-end -->
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}org)
 * @author Eibe Frank (eibe{[at]}cs{[dot]}waikato{[dot]}ac{[dot]}nz)
 *
 * @version $Revision: 6269 $
 *
 */
public class DTNB extends DecisionTable {

  /**
   * The naive Bayes half of the hybrid
   */
  protected NaiveBayes m_NB;

  /**
   * The features used by naive Bayes
   */
  private int [] m_nbFeatures;

  /**
   * Percentage of the total number of features used by the decision table
   */
  private double m_percentUsedByDT;

  /**
   * Percentage of the features that were dropped entirely
   */
  private double m_percentDeleted;

  // for serialization
  static final long serialVersionUID = 2999557077765701326L;

  /**
   * Returns a string describing classifier
   * @return a description suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {

    return "Class for building and using a decision table/naive bayes hybrid classifier. At each point "
      + "in the search, the algorithm evaluates the merit of dividing the attributes into two disjoint "
      + "subsets: one for the decision table, the other for naive Bayes. A forward selection search is "
      + "used, where at each step, selected attributes are modeled by naive Bayes and the remainder "
      + "by the decision table, and all attributes are modelled by the decision table initially. At each "
      + "step, the algorithm also considers dropping an attribute entirely from the model.\n\n"
      + "For more information, see: \n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
*
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "Mark Hall and Eibe Frank");
    result.setValue(Field.TITLE, "Combining Naive Bayes and Decision Tables");
    result.setValue(Field.BOOKTITLE, "Proceedings of the 21st Florida Artificial Intelligence "
        + "Society Conference (FLAIRS)");
    result.setValue(Field.YEAR, "2008");
    result.setValue(Field.PAGES, "318-319");
    result.setValue(Field.PUBLISHER, "AAAI press");

    return result;
  }

  /**
   * Calculates the accuracy on a test fold for internal cross validation
   * of feature sets.  Fold instances are temporarily removed from the
   * table and the naive Bayes model, evaluated, then re-inserted.
   *
   * NOTE(review): the local accumulator acc is never updated and is
   * returned as 0.0 — the per-instance results are recorded in
   * m_evaluation instead; confirm callers read m_evaluation rather than
   * this return value.
   *
   * @param fold set of instances to be "left out" and classified
   * @param fs currently selected feature set
   * @return the accuracy for the fold
   * @throws Exception if something goes wrong
   */
  double evaluateFoldCV(Instances fold, int [] fs) throws Exception {

    int i;
    int ruleCount = 0;
    int numFold = fold.numInstances();
    int numCl = m_theInstances.classAttribute().numValues();
    double [][] class_distribs = new double [numFold][numCl];
    double [] instA = new double [fs.length];
    double [] normDist;
    DecisionTableHashKey thekey;
    double acc = 0.0;
    int classI = m_theInstances.classIndex();
    Instance inst;

    if (m_classIsNominal) {
      normDist = new double [numCl];
    } else {
      normDist = new double [2];  // [weighted class sum, total weight]
    }

    // first *remove* instances from the table and the NB model
    for (i=0;i<numFold;i++) {
      inst = fold.instance(i);
      for (int j=0;j<fs.length;j++) {
        if (fs[j] == classI) {
          instA[j] = Double.MAX_VALUE; // missing for the class
        } else if (inst.isMissing(fs[j])) {
          instA[j] = Double.MAX_VALUE;
        } else{
          instA[j] = inst.value(fs[j]);
        }
      }
      thekey = new DecisionTableHashKey(instA);
      if ((class_distribs[i] = (double [])m_entries.get(thekey)) == null) {
        throw new Error("This should never happen!");
      } else {
        if (m_classIsNominal) {
          class_distribs[i][(int)inst.classValue()] -= inst.weight();
          // negative weight "subtracts" the instance from naive Bayes
          inst.setWeight(-inst.weight());
          m_NB.updateClassifier(inst);
          inst.setWeight(-inst.weight());
        } else {
          class_distribs[i][0] -= (inst.classValue() * inst.weight());
          class_distribs[i][1] -= inst.weight();
        }
        ruleCount++;
      }
      m_classPriorCounts[(int)inst.classValue()] -= inst.weight();
    }
    double [] classPriors = m_classPriorCounts.clone();
    Utils.normalize(classPriors);

    // now classify instances
    for (i=0;i<numFold;i++) {
      inst = fold.instance(i);
      System.arraycopy(class_distribs[i],0,normDist,0,normDist.length);
      if (m_classIsNominal) {
        boolean ok = false;
        for (int j=0;j<normDist.length;j++) {
          if (Utils.gr(normDist[j],1.0)) {
            ok = true;
            break;
          }
        }

        if (!ok) { // majority class
          normDist = classPriors.clone();
        } else {
          Utils.normalize(normDist);
        }

        // combine DT and NB posteriors in log space, dividing out the prior once
        double [] nbDist = m_NB.distributionForInstance(inst);
        for (int l = 0; l < normDist.length; l++) {
          normDist[l] = (Math.log(normDist[l]) - Math.log(classPriors[l]));
          normDist[l] += Math.log(nbDist[l]);
        }
        normDist = Utils.logs2probs(normDist);
        // Utils.normalize(normDist);

        // System.out.println(normDist[0] + " " + normDist[1] + " " + inst.classValue());

        if (m_evaluationMeasure == EVAL_AUC) {
          m_evaluation.evaluateModelOnceAndRecordPrediction(normDist, inst);
        } else {
          m_evaluation.evaluateModelOnce(normDist, inst);
        }

        /* } else {
          normDist[(int)m_majority] = 1.0;
          if (m_evaluationMeasure == EVAL_AUC) {
            m_evaluation.evaluateModelOnceAndRecordPrediction(normDist, inst);
          } else {
            m_evaluation.evaluateModelOnce(normDist, inst);
          }
        } */
      } else {
        if (Utils.eq(normDist[1],0.0)) {
          double [] temp = new double[1];
          temp[0] = m_majority;
          m_evaluation.evaluateModelOnce(temp, inst);
        } else {
          double [] temp = new double[1];
          temp[0] = normDist[0] / normDist[1];
          m_evaluation.evaluateModelOnce(temp, inst);
        }
      }
    }

    // now re-insert instances
    for (i=0;i<numFold;i++) {
      inst = fold.instance(i);

      m_classPriorCounts[(int)inst.classValue()] += inst.weight();

      if (m_classIsNominal) {
        class_distribs[i][(int)inst.classValue()] += inst.weight();
        m_NB.updateClassifier(inst);
      } else {
        class_distribs[i][0] += (inst.classValue() * inst.weight());
        class_distribs[i][1] += inst.weight();
      }
    }
    return acc;
  }

  /**
   * Classifies an instance for internal leave one out cross validation
   * of feature sets
   *
   * @param instance instance to be "left out" and classified
   * @param instA feature values of the selected features for the instance
   * @return the classification of the instance
   * @throws Exception if something goes wrong
   */
  double evaluateInstanceLeaveOneOut(Instance instance, double [] instA)
    throws Exception {

    DecisionTableHashKey thekey;
    double [] tempDist;
    double [] normDist;

    thekey = new DecisionTableHashKey(instA);

    // if this one is not in the table
    if ((tempDist = (double [])m_entries.get(thekey)) == null) {
      throw new Error("This should never happen!");
    } else {
      normDist = new double [tempDist.length];
      System.arraycopy(tempDist,0,normDist,0,tempDist.length);
      normDist[(int)instance.classValue()] -= instance.weight();

      // update the table
      // first check to see if the class counts are all zero now
      boolean ok = false;
      for (int i=0;i<normDist.length;i++) {
        if (Utils.gr(normDist[i],1.0)) {
          ok = true;
          break;
        }
      }

      // downdate the class prior counts
      m_classPriorCounts[(int)instance.classValue()] -= instance.weight();
      double [] classPriors = m_classPriorCounts.clone();
      Utils.normalize(classPriors);
      if (!ok) { // majority class
        normDist = classPriors;
      } else {
        Utils.normalize(normDist);
      }

      m_classPriorCounts[(int)instance.classValue()] += instance.weight();

      if (m_NB != null){
        // downdate NaiveBayes via a negative-weight update, predict,
        // then restore the instance
        instance.setWeight(-instance.weight());
        m_NB.updateClassifier(instance);
        double [] nbDist = m_NB.distributionForInstance(instance);
        instance.setWeight(-instance.weight());
        m_NB.updateClassifier(instance);

        for (int i = 0; i < normDist.length; i++) {
          normDist[i] = (Math.log(normDist[i]) - Math.log(classPriors[i]));
          normDist[i] += Math.log(nbDist[i]);
        }
        normDist = Utils.logs2probs(normDist);
        // Utils.normalize(normDist);
      }

      if (m_evaluationMeasure == EVAL_AUC) {
m_evaluation.evaluateModelOnceAndRecordPrediction(normDist, instance);
      } else {
        m_evaluation.evaluateModelOnce(normDist, instance);
      }

      return Utils.maxIndex(normDist);
    }
  }

  /**
   * Sets up a dummy subset evaluator that basically just delegates
   * evaluation to the estimatePerformance method in DecisionTable
   */
  protected void setUpEvaluator() throws Exception {
    m_evaluator = new EvalWithDelete();
    m_evaluator.buildEvaluator(m_theInstances);
  }

  protected class EvalWithDelete extends ASEvaluation implements SubsetEvaluator {

    // holds the list of attributes that are no longer in the model at all
    private BitSet m_deletedFromDTNB;

    public void buildEvaluator(Instances data) throws Exception {
      m_NB = null;
      m_deletedFromDTNB = new BitSet(data.numAttributes());
      // System.err.println("Here");
    }

    /**
     * Counts the attributes selected for the decision table and zeroes
     * the weight of those attributes (plus the fully-deleted ones) so
     * they have no influence on naive Bayes.
     *
     * @param subset the decision-table attribute subset
     * @return the number of attributes in the subset
     * @throws Exception if naive Bayes can't be built
     */
    private int setUpForEval(BitSet subset) throws Exception {

      int fc = 0;
      for (int jj = 0;jj < m_numAttributes; jj++) {
        if (subset.get(jj)) {
          fc++;
        }
      }
      //int [] nbFs = new int [fc];
      //int count = 0;
      for (int j = 0; j < m_numAttributes; j++) {
        m_theInstances.attribute(j).setWeight(1.0); // reset weight
        if (j != m_theInstances.classIndex()) {
          if (subset.get(j)) {
            // nbFs[count++] = j;
            m_theInstances.attribute(j).setWeight(0.0); // no influence for NB
          }
        }
      }

      // process delete set
      for (int i = 0; i < m_numAttributes; i++) {
        if (m_deletedFromDTNB.get(i)) {
          m_theInstances.attribute(i).setWeight(0.0); // no influence for NB
        }
      }

      if (m_NB == null) {
        // construct naive bayes for the first time
        m_NB = new NaiveBayes();
        m_NB.buildClassifier(m_theInstances);
      }
      return fc;
    }

    public double evaluateSubset(BitSet subset) throws Exception {
      int fc = setUpForEval(subset);
      return estimatePerformance(subset, fc);
    }

    /**
     * Evaluates a subset with one attribute additionally removed from
     * the naive Bayes half (i.e. the attribute is dropped entirely).
     */
    public double evaluateSubsetDelete(BitSet subset, int potentialDelete)
      throws Exception {

      int fc = setUpForEval(subset);
      // clear potential delete for naive Bayes
      m_theInstances.attribute(potentialDelete).setWeight(0.0);
      //copy.clear(potentialDelete);
      //fc--;
      return estimatePerformance(subset, fc);
    }

    public BitSet getDeletedList() {
      return m_deletedFromDTNB;
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 6269 $");
    }
  }

  protected ASSearch m_backwardWithDelete;

  /**
   * Inner class implementing a special forwards search that looks for a good
   * split of attributes between naive Bayes and the decision table. It also
   * considers dropping attributes entirely from the model.
   */
  protected class BackwardsWithDelete extends ASSearch {

    public String globalInfo() {
      return "Specialized search that performs a forward selection (naive Bayes)/"
        + "backward elimination (decision table). Also considers dropping attributes "
        + "entirely from the combined model.";
    }

    public String toString() {
      return "";
    }

    public int [] search(ASEvaluation eval, Instances data) throws Exception {
      int i;
      double best_merit = -Double.MAX_VALUE;
      double temp_best = 0, temp_merit = 0, temp_merit_delete = 0;
      int temp_index = 0;
      BitSet temp_group;
      BitSet best_group = null;

      int numAttribs = data.numAttributes();

      if (best_group == null) {
        best_group = new BitSet(numAttribs);
      }

      int classIndex = data.classIndex();

      // start with every non-class attribute in the decision table
      for (i = 0; i < numAttribs; i++) {
        if (i != classIndex) {
          best_group.set(i);
        }
      }
      //System.err.println(best_group);

      // Evaluate the initial subset
      // best_merit = m_evaluator.evaluateSubset(best_group);
      best_merit = ((SubsetEvaluator)eval).evaluateSubset(best_group);
      //System.err.println(best_merit);

      // main search loop: greedily move one attribute at a time out of the
      // decision table (to naive Bayes, or delete it entirely)
      boolean done = false;
      boolean addone = false;
      boolean z;
      boolean deleted = false;
      while (!done) {
        temp_group = (BitSet)best_group.clone();
        temp_best = best_merit;

        done = true;
        addone = false;
        for (i = 0; i < numAttribs;i++) {
          z = ((i != classIndex) && (temp_group.get(i)));

          if (z) {
            // set/unset the bit
            temp_group.clear(i);

            // temp_merit = m_evaluator.evaluateSubset(temp_group);
            temp_merit = ((SubsetEvaluator)eval).evaluateSubset(temp_group);
            // temp_merit_delete = ((EvalWithDelete)m_evaluator).evaluateSubsetDelete(temp_group, i);
            temp_merit_delete = ((EvalWithDelete)eval).evaluateSubsetDelete(temp_group, i);
            boolean deleteBetter = false;
            //System.out.println("Merit: " + temp_merit + "\t" + "Delete merit: " + temp_merit_delete);
            if (temp_merit_delete >= temp_merit) {
              temp_merit = temp_merit_delete;
              deleteBetter = true;
            }

            z = (temp_merit >= temp_best);

            if (z) {
              temp_best = temp_merit;
              temp_index = i;
              addone = true;
              done = false;
              if (deleteBetter) {
                deleted = true;
              } else {
                deleted = false;
              }
            }

            // unset this addition/deletion
            temp_group.set(i);
          }
        }
        if (addone) {
          best_group.clear(temp_index);
          best_merit = temp_best;
          if (deleted) {
            // ((EvalWithDelete)m_evaluator).getDeletedList().set(temp_index);
            ((EvalWithDelete)eval).getDeletedList().set(temp_index);
          }
          //System.err.println("----------------------");
          //System.err.println("Best subset: (dec table)" + best_group);
          //System.err.println("Best subset: (deleted)" + ((EvalWithDelete)m_evaluator).getDeletedList());
          //System.err.println(best_merit);
        }
      }
      return attributeList(best_group);
    }

    /**
     * converts a BitSet into a list of attribute indexes
     * @param group the BitSet to convert
     * @return an array of attribute indexes
     **/
    protected int[] attributeList (BitSet group) {
      int count = 0;
      BitSet copy = (BitSet)group.clone();
      /* remove any that have been completely deleted from DTNB
      BitSet deleted = ((EvalWithDelete)m_evaluator).getDeletedList();
      for (int i = 0; i < m_numAttributes; i++) {
        if (deleted.get(i)) {
          copy.clear(i);
        }
      } */

      // count how many were selected
      for (int i = 0; i < m_numAttributes; i++) {
        if (copy.get(i)) {
          count++;
        }
      }

      int[] list = new int[count];
      count = 0;

      for (int i = 0; i < m_numAttributes; i++) {
        if (copy.get(i)) {
          list[count++] = i;
        }
      }
      return list;
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 6269 $");
    }
  }

  private void setUpSearch() {
    m_backwardWithDelete = new BackwardsWithDelete();
  }

  /**
   * Generates the classifier.
   *
   * @param data set of instances serving as training data
   * @throws Exception if the classifier has not been generated successfully
   */
  public void buildClassifier(Instances data) throws Exception {
    m_saveMemory = false;

    if (data.classAttribute().isNumeric()) {
      throw new Exception("Can only handle nominal class!");
    }

    if (m_backwardWithDelete == null) {
      setUpSearch();
      m_search = m_backwardWithDelete;
    }
    /* if (m_search != m_backwardWithDelete) {
      m_search = m_backwardWithDelete;
    } */

    super.buildClassifier(data);

    // new NB stuff
    // delete the features used by the decision table (not the class!!)
    for (int i = 0; i < m_theInstances.numAttributes(); i++) {
      m_theInstances.attribute(i).setWeight(1.0); // reset all weights
    }
    // m_nbFeatures = new int [m_decisionFeatures.length - 1];
    int count = 0;
    for (int i = 0; i < m_decisionFeatures.length; i++) {
      if (m_decisionFeatures[i] != m_theInstances.classIndex()) {
        count++;
        // m_nbFeatures[count++] = m_decisionFeatures[i];
        m_theInstances.attribute(m_decisionFeatures[i]).setWeight(0.0); // No influence for NB
      }
    }

    double numDeleted = 0;

    // remove any attributes that have been deleted completely from the DTNB
    BitSet deleted = ((EvalWithDelete)m_evaluator).getDeletedList();
    for (int i = 0; i < m_theInstances.numAttributes(); i++) {
      if (deleted.get(i)) {
        m_theInstances.attribute(i).setWeight(0.0);
        // count--;
        numDeleted++;
        // System.err.println("Attribute "+i+" was eliminated completely");
      }
    }

    m_percentUsedByDT = (double)count / (m_theInstances.numAttributes() - 1);
    m_percentDeleted = numDeleted / (m_theInstances.numAttributes() -1);

    // build the final naive Bayes half on the re-weighted data
    m_NB = new NaiveBayes();
    m_NB.buildClassifier(m_theInstances);

    // keep only headers to save memory
    m_dtInstances = new Instances(m_dtInstances, 0);
    m_theInstances = new Instances(m_theInstances, 0);
  }

  /**
   * Calculates
the class membership probabilities for the given * test instance. * * @param instance the instance to be classified * @return predicted class probability distribution * @exception Exception if distribution can't be computed */ public double [] distributionForInstance(Instance instance) throws Exception { DecisionTableHashKey thekey; double [] tempDist; double [] normDist; m_disTransform.input(instance); m_disTransform.batchFinished(); instance = m_disTransform.output(); m_delTransform.input(instance); m_delTransform.batchFinished(); Instance dtInstance = m_delTransform.output(); thekey = new DecisionTableHashKey(dtInstance, dtInstance.numAttributes(), false); // if this one is not in the table if ((tempDist = (double [])m_entries.get(thekey)) == null) { if (m_useIBk) { tempDist = m_ibk.distributionForInstance(dtInstance); } else { // tempDist = new double [m_theInstances.classAttribute().numValues()]; // tempDist[(int)m_majority] = 1.0; tempDist = m_classPriors.clone(); // return tempDist; ?????? 
} } else { // normalise distribution normDist = new double [tempDist.length]; System.arraycopy(tempDist,0,normDist,0,tempDist.length); Utils.normalize(normDist); tempDist = normDist; } double [] nbDist = m_NB.distributionForInstance(instance); for (int i = 0; i < nbDist.length; i++) { tempDist[i] = (Math.log(tempDist[i]) - Math.log(m_classPriors[i])); tempDist[i] += Math.log(nbDist[i]); /*tempDist[i] *= nbDist[i]; tempDist[i] /= m_classPriors[i];*/ } tempDist = Utils.logs2probs(tempDist); Utils.normalize(tempDist); return tempDist; } public String toString() { String sS = super.toString(); if (m_displayRules && m_NB != null) { sS += m_NB.toString(); } return sS; } /** * Returns the number of rules * @return the number of rules */ public double measurePercentAttsUsedByDT() { return m_percentUsedByDT; } /** * Returns an enumeration of the additional measure names * @return an enumeration of the measure names */ public Enumeration enumerateMeasures() { Vector newVector = new Vector(2); newVector.addElement("measureNumRules"); newVector.addElement("measurePercentAttsUsedByDT"); return newVector.elements(); } /** * Returns the value of the named measure * @param additionalMeasureName the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException if the named measure is not supported */ public double getMeasure(String additionalMeasureName) { if (additionalMeasureName.compareToIgnoreCase("measureNumRules") == 0) { return measureNumRules(); } else if (additionalMeasureName.compareToIgnoreCase("measurePercentAttsUsedByDT") == 0) { return measurePercentAttsUsedByDT(); } else { throw new IllegalArgumentException(additionalMeasureName + " not supported (DecisionTable)"); } } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disable(Capability.NUMERIC_CLASS); result.disable(Capability.DATE_CLASS); return result; } /** * Sets the search method to use * * @param search */ public void setSearch(ASSearch search) { // Search method cannot be changed. // Must be BackwardsWithDelete return; } /** * Gets the current search method * * @return the search method used */ public ASSearch getSearch() { if (m_backwardWithDelete == null) { setUpSearch(); // setSearch(m_backwardWithDelete); m_search = m_backwardWithDelete; } return m_search; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(7); newVector.addElement(new Option( "\tUse cross validation to evaluate features.\n" + "\tUse number of folds = 1 for leave one out CV.\n" + "\t(Default = leave one out CV)", "X", 1, "-X <number of folds>")); newVector.addElement(new Option( "\tPerformance evaluation measure to use for selecting attributes.\n" + "\t(Default = accuracy for discrete class and rmse for numeric class)", "E", 1, "-E <acc | rmse | mae | auc>")); newVector.addElement(new Option( "\tUse nearest neighbour instead of global table majority.", "I", 0, "-I")); newVector.addElement(new Option( "\tDisplay decision table rules.\n", "R", 0, "-R")); return newVector.elements(); } /** * Parses the options for this object. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -X &lt;number of folds&gt; * Use cross validation to evaluate features. * Use number of folds = 1 for leave one out CV. * (Default = leave one out CV)</pre> * * <pre> -E &lt;acc | rmse | mae | auc&gt; * Performance evaluation measure to use for selecting attributes. 
* (Default = accuracy for discrete class and rmse for numeric class)</pre> * * <pre> -I * Use nearest neighbour instead of global table majority.</pre> * * <pre> -R * Display decision table rules. * </pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String optionString; resetOptions(); optionString = Utils.getOption('X',options); if (optionString.length() != 0) { setCrossVal(Integer.parseInt(optionString)); } m_useIBk = Utils.getFlag('I',options); m_displayRules = Utils.getFlag('R',options); optionString = Utils.getOption('E', options); if (optionString.length() != 0) { if (optionString.equals("acc")) { setEvaluationMeasure(new SelectedTag(EVAL_ACCURACY, TAGS_EVALUATION)); } else if (optionString.equals("rmse")) { setEvaluationMeasure(new SelectedTag(EVAL_RMSE, TAGS_EVALUATION)); } else if (optionString.equals("mae")) { setEvaluationMeasure(new SelectedTag(EVAL_MAE, TAGS_EVALUATION)); } else if (optionString.equals("auc")) { setEvaluationMeasure(new SelectedTag(EVAL_AUC, TAGS_EVALUATION)); } else { throw new IllegalArgumentException("Invalid evaluation measure"); } } } /** * Gets the current settings of the classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] options = new String [9]; int current = 0; options[current++] = "-X"; options[current++] = "" + getCrossVal(); if (m_evaluationMeasure != EVAL_DEFAULT) { options[current++] = "-E"; switch (m_evaluationMeasure) { case EVAL_ACCURACY: options[current++] = "acc"; break; case EVAL_RMSE: options[current++] = "rmse"; break; case EVAL_MAE: options[current++] = "mae"; break; case EVAL_AUC: options[current++] = "auc"; break; } } if (m_useIBk) { options[current++] = "-I"; } if (m_displayRules) { options[current++] = "-R"; } while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 6269 $"); } /** * Main method for testing this class. * * @param argv the command-line options */ public static void main(String [] argv) { runClassifier(new DTNB(), argv); } }
28,699
28.285714
555
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/rules/DecisionTable.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * DecisionTable.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.rules; import java.util.Arrays; import java.util.BitSet; import java.util.Enumeration; import java.util.Hashtable; import java.util.Random; import java.util.Vector; import weka.attributeSelection.ASEvaluation; import weka.attributeSelection.ASSearch; import weka.attributeSelection.BestFirst; import weka.attributeSelection.SubsetEvaluator; import weka.classifiers.AbstractClassifier; import weka.classifiers.Evaluation; import weka.classifiers.lazy.IBk; import weka.core.AdditionalMeasureProducer; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Remove; /** <!-- globalinfo-start --> * Class for building and using a simple decision table majority classifier.<br/> * <br/> * For more information see: 
<br/> * <br/> * Ron Kohavi: The Power of Decision Tables. In: 8th European Conference on Machine Learning, 174-189, 1995. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Kohavi1995, * author = {Ron Kohavi}, * booktitle = {8th European Conference on Machine Learning}, * pages = {174-189}, * publisher = {Springer}, * title = {The Power of Decision Tables}, * year = {1995} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -S &lt;search method specification&gt; * Full class name of search method, followed * by its options. * eg: "weka.attributeSelection.BestFirst -D 1" * (default weka.attributeSelection.BestFirst)</pre> * * <pre> -X &lt;number of folds&gt; * Use cross validation to evaluate features. * Use number of folds = 1 for leave one out CV. * (Default = leave one out CV)</pre> * * <pre> -E &lt;acc | rmse | mae | auc&gt; * Performance evaluation measure to use for selecting attributes. * (Default = accuracy for discrete class and rmse for numeric class)</pre> * * <pre> -I * Use nearest neighbour instead of global table majority.</pre> * * <pre> -R * Display decision table rules. * </pre> * * <pre> * Options specific to search method weka.attributeSelection.BestFirst: * </pre> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7.</pre> * * <pre> -D &lt;0 = backward | 1 = forward | 2 = bi-directional&gt; * Direction of search. (default = 1).</pre> * * <pre> -N &lt;num&gt; * Number of non-improving nodes to * consider before terminating search.</pre> * * <pre> -S &lt;num&gt; * Size of lookup cache for evaluated subsets. * Expressed as a multiple of the number of * attributes in the data set. 
(default = 1)</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class DecisionTable extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, AdditionalMeasureProducer, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 2888557078165701326L; /** The hashtable used to hold training instances */ protected Hashtable m_entries; /** The class priors to use when there is no match in the table */ protected double [] m_classPriorCounts; protected double [] m_classPriors; /** Holds the final feature set */ protected int [] m_decisionFeatures; /** Discretization filter */ protected Filter m_disTransform; /** Filter used to remove columns discarded by feature selection */ protected Remove m_delTransform; /** IB1 used to classify non matching instances rather than majority class */ protected IBk m_ibk; /** Holds the original training instances */ protected Instances m_theInstances; /** Holds the final feature selected set of instances */ protected Instances m_dtInstances; /** The number of attributes in the dataset */ protected int m_numAttributes; /** The number of instances in the dataset */ private int m_numInstances; /** Class is nominal */ protected boolean m_classIsNominal; /** Use the IBk classifier rather than majority class */ protected boolean m_useIBk; /** Display Rules */ protected boolean m_displayRules; /** Number of folds for cross validating feature sets */ private int m_CVFolds; /** Random numbers for use in cross validation */ private Random m_rr; /** Holds the majority class */ protected double m_majority; /** The search method to use */ protected ASSearch m_search = new BestFirst(); /** Our own internal evaluator */ protected ASEvaluation m_evaluator; /** The evaluation object used to evaluate subsets */ protected Evaluation m_evaluation; /** default is accuracy for discrete class and RMSE for numeric class */ public static final 
int EVAL_DEFAULT = 1; public static final int EVAL_ACCURACY = 2; public static final int EVAL_RMSE = 3; public static final int EVAL_MAE = 4; public static final int EVAL_AUC = 5; public static final Tag [] TAGS_EVALUATION = { new Tag(EVAL_DEFAULT, "Default: accuracy (discrete class); RMSE (numeric class)"), new Tag(EVAL_ACCURACY, "Accuracy (discrete class only"), new Tag(EVAL_RMSE, "RMSE (of the class probabilities for discrete class)"), new Tag(EVAL_MAE, "MAE (of the class probabilities for discrete class)"), new Tag(EVAL_AUC, "AUC (area under the ROC curve - discrete class only)") }; protected int m_evaluationMeasure = EVAL_DEFAULT; /** * Returns a string describing classifier * @return a description suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building and using a simple decision table majority " + "classifier.\n\n" + "For more information see: \n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Ron Kohavi"); result.setValue(Field.TITLE, "The Power of Decision Tables"); result.setValue(Field.BOOKTITLE, "8th European Conference on Machine Learning"); result.setValue(Field.YEAR, "1995"); result.setValue(Field.PAGES, "174-189"); result.setValue(Field.PUBLISHER, "Springer"); return result; } /** * Inserts an instance into the hash table * * @param inst instance to be inserted * @param instA to create the hash key from * @throws Exception if the instance can't be inserted */ private void insertIntoTable(Instance inst, double [] instA) throws Exception { double [] tempClassDist2; double [] newDist; DecisionTableHashKey thekey; if (instA != null) { thekey = new DecisionTableHashKey(instA); } else { thekey = new DecisionTableHashKey(inst, inst.numAttributes(), false); } // see if this one is already in the table tempClassDist2 = (double []) m_entries.get(thekey); if (tempClassDist2 == null) { if (m_classIsNominal) { newDist = new double [m_theInstances.classAttribute().numValues()]; //Leplace estimation for (int i = 0; i < m_theInstances.classAttribute().numValues(); i++) { newDist[i] = 1.0; } newDist[(int)inst.classValue()] = inst.weight(); // add to the table m_entries.put(thekey, newDist); } else { newDist = new double [2]; newDist[0] = inst.classValue() * inst.weight(); newDist[1] = inst.weight(); // add to the table m_entries.put(thekey, newDist); } } else { // update the distribution for this instance if (m_classIsNominal) { tempClassDist2[(int)inst.classValue()]+=inst.weight(); // update the table m_entries.put(thekey, tempClassDist2); } else { tempClassDist2[0] += (inst.classValue() * inst.weight()); tempClassDist2[1] += inst.weight(); // update the table m_entries.put(thekey, tempClassDist2); } } } /** * Classifies an instance for 
internal leave one out cross validation * of feature sets * * @param instance instance to be "left out" and classified * @param instA feature values of the selected features for the instance * @return the classification of the instance * @throws Exception if something goes wrong */ double evaluateInstanceLeaveOneOut(Instance instance, double [] instA) throws Exception { DecisionTableHashKey thekey; double [] tempDist; double [] normDist; thekey = new DecisionTableHashKey(instA); if (m_classIsNominal) { // if this one is not in the table if ((tempDist = (double [])m_entries.get(thekey)) == null) { throw new Error("This should never happen!"); } else { normDist = new double [tempDist.length]; System.arraycopy(tempDist,0,normDist,0,tempDist.length); normDist[(int)instance.classValue()] -= instance.weight(); // update the table // first check to see if the class counts are all zero now boolean ok = false; for (int i=0;i<normDist.length;i++) { if (Utils.gr(normDist[i],1.0)) { ok = true; break; } } // downdate the class prior counts m_classPriorCounts[(int)instance.classValue()] -= instance.weight(); double [] classPriors = m_classPriorCounts.clone(); Utils.normalize(classPriors); if (!ok) { // majority class normDist = classPriors; } m_classPriorCounts[(int)instance.classValue()] += instance.weight(); //if (ok) { Utils.normalize(normDist); if (m_evaluationMeasure == EVAL_AUC) { m_evaluation.evaluateModelOnceAndRecordPrediction(normDist, instance); } else { m_evaluation.evaluateModelOnce(normDist, instance); } return Utils.maxIndex(normDist); /*} else { normDist = new double [normDist.length]; normDist[(int)m_majority] = 1.0; if (m_evaluationMeasure == EVAL_AUC) { m_evaluation.evaluateModelOnceAndRecordPrediction(normDist, instance); } else { m_evaluation.evaluateModelOnce(normDist, instance); } return m_majority; } */ } // return Utils.maxIndex(tempDist); } else { // see if this one is already in the table if ((tempDist = (double[])m_entries.get(thekey)) != null) { 
normDist = new double [tempDist.length]; System.arraycopy(tempDist,0,normDist,0,tempDist.length); normDist[0] -= (instance.classValue() * instance.weight()); normDist[1] -= instance.weight(); if (Utils.eq(normDist[1],0.0)) { double [] temp = new double[1]; temp[0] = m_majority; m_evaluation.evaluateModelOnce(temp, instance); return m_majority; } else { double [] temp = new double[1]; temp[0] = normDist[0] / normDist[1]; m_evaluation.evaluateModelOnce(temp, instance); return temp[0]; } } else { throw new Error("This should never happen!"); } } // shouldn't get here // return 0.0; } /** * Calculates the accuracy on a test fold for internal cross validation * of feature sets * * @param fold set of instances to be "left out" and classified * @param fs currently selected feature set * @return the accuracy for the fold * @throws Exception if something goes wrong */ double evaluateFoldCV(Instances fold, int [] fs) throws Exception { int i; int ruleCount = 0; int numFold = fold.numInstances(); int numCl = m_theInstances.classAttribute().numValues(); double [][] class_distribs = new double [numFold][numCl]; double [] instA = new double [fs.length]; double [] normDist; DecisionTableHashKey thekey; double acc = 0.0; int classI = m_theInstances.classIndex(); Instance inst; if (m_classIsNominal) { normDist = new double [numCl]; } else { normDist = new double [2]; } // first *remove* instances for (i=0;i<numFold;i++) { inst = fold.instance(i); for (int j=0;j<fs.length;j++) { if (fs[j] == classI) { instA[j] = Double.MAX_VALUE; // missing for the class } else if (inst.isMissing(fs[j])) { instA[j] = Double.MAX_VALUE; } else{ instA[j] = inst.value(fs[j]); } } thekey = new DecisionTableHashKey(instA); if ((class_distribs[i] = (double [])m_entries.get(thekey)) == null) { throw new Error("This should never happen!"); } else { if (m_classIsNominal) { class_distribs[i][(int)inst.classValue()] -= inst.weight(); } else { class_distribs[i][0] -= (inst.classValue() * inst.weight()); 
class_distribs[i][1] -= inst.weight(); } ruleCount++; } m_classPriorCounts[(int)inst.classValue()] -= inst.weight(); } double [] classPriors = m_classPriorCounts.clone(); Utils.normalize(classPriors); // now classify instances for (i=0;i<numFold;i++) { inst = fold.instance(i); System.arraycopy(class_distribs[i],0,normDist,0,normDist.length); if (m_classIsNominal) { boolean ok = false; for (int j=0;j<normDist.length;j++) { if (Utils.gr(normDist[j],1.0)) { ok = true; break; } } if (!ok) { // majority class normDist = classPriors.clone(); } // if (ok) { Utils.normalize(normDist); if (m_evaluationMeasure == EVAL_AUC) { m_evaluation.evaluateModelOnceAndRecordPrediction(normDist, inst); } else { m_evaluation.evaluateModelOnce(normDist, inst); } /* } else { normDist[(int)m_majority] = 1.0; if (m_evaluationMeasure == EVAL_AUC) { m_evaluation.evaluateModelOnceAndRecordPrediction(normDist, inst); } else { m_evaluation.evaluateModelOnce(normDist, inst); } } */ } else { if (Utils.eq(normDist[1],0.0)) { double [] temp = new double[1]; temp[0] = m_majority; m_evaluation.evaluateModelOnce(temp, inst); } else { double [] temp = new double[1]; temp[0] = normDist[0] / normDist[1]; m_evaluation.evaluateModelOnce(temp, inst); } } } // now re-insert instances for (i=0;i<numFold;i++) { inst = fold.instance(i); m_classPriorCounts[(int)inst.classValue()] += inst.weight(); if (m_classIsNominal) { class_distribs[i][(int)inst.classValue()] += inst.weight(); } else { class_distribs[i][0] += (inst.classValue() * inst.weight()); class_distribs[i][1] += inst.weight(); } } return acc; } /** * Evaluates a feature subset by cross validation * * @param feature_set the subset to be evaluated * @param num_atts the number of attributes in the subset * @return the estimated accuracy * @throws Exception if subset can't be evaluated */ protected double estimatePerformance(BitSet feature_set, int num_atts) throws Exception { m_evaluation = new Evaluation(m_theInstances); int i; int [] fs = new int 
[num_atts]; double [] instA = new double [num_atts]; int classI = m_theInstances.classIndex(); int index = 0; for (i=0;i<m_numAttributes;i++) { if (feature_set.get(i)) { fs[index++] = i; } } // create new hash table m_entries = new Hashtable((int)(m_theInstances.numInstances() * 1.5)); // insert instances into the hash table for (i=0;i<m_numInstances;i++) { Instance inst = m_theInstances.instance(i); for (int j=0;j<fs.length;j++) { if (fs[j] == classI) { instA[j] = Double.MAX_VALUE; // missing for the class } else if (inst.isMissing(fs[j])) { instA[j] = Double.MAX_VALUE; } else { instA[j] = inst.value(fs[j]); } } insertIntoTable(inst, instA); } if (m_CVFolds == 1) { // calculate leave one out error for (i=0;i<m_numInstances;i++) { Instance inst = m_theInstances.instance(i); for (int j=0;j<fs.length;j++) { if (fs[j] == classI) { instA[j] = Double.MAX_VALUE; // missing for the class } else if (inst.isMissing(fs[j])) { instA[j] = Double.MAX_VALUE; } else { instA[j] = inst.value(fs[j]); } } evaluateInstanceLeaveOneOut(inst, instA); } } else { m_theInstances.randomize(m_rr); m_theInstances.stratify(m_CVFolds); // calculate 10 fold cross validation error for (i=0;i<m_CVFolds;i++) { Instances insts = m_theInstances.testCV(m_CVFolds,i); evaluateFoldCV(insts, fs); } } switch (m_evaluationMeasure) { case EVAL_DEFAULT: if (m_classIsNominal) { return m_evaluation.pctCorrect(); } return -m_evaluation.rootMeanSquaredError(); case EVAL_ACCURACY: return m_evaluation.pctCorrect(); case EVAL_RMSE: return -m_evaluation.rootMeanSquaredError(); case EVAL_MAE: return -m_evaluation.meanAbsoluteError(); case EVAL_AUC: double [] classPriors = m_evaluation.getClassPriors(); Utils.normalize(classPriors); double weightedAUC = 0; for (i = 0; i < m_theInstances.classAttribute().numValues(); i++) { double tempAUC = m_evaluation.areaUnderROC(i); if (!Utils.isMissingValue(tempAUC)) { weightedAUC += (classPriors[i] * tempAUC); } else { System.err.println("Undefined AUC!!"); } } return weightedAUC; 
} // shouldn't get here return 0.0; } /** * Returns a String representation of a feature subset * * @param sub BitSet representation of a subset * @return String containing subset */ private String printSub(BitSet sub) { String s=""; for (int jj=0;jj<m_numAttributes;jj++) { if (sub.get(jj)) { s += " "+(jj+1); } } return s; } /** * Resets the options. */ protected void resetOptions() { m_entries = null; m_decisionFeatures = null; m_useIBk = false; m_CVFolds = 1; m_displayRules = false; m_evaluationMeasure = EVAL_DEFAULT; } /** * Constructor for a DecisionTable */ public DecisionTable() { resetOptions(); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(7); newVector.addElement(new Option( "\tFull class name of search method, followed\n" + "\tby its options.\n" + "\teg: \"weka.attributeSelection.BestFirst -D 1\"\n" + "\t(default weka.attributeSelection.BestFirst)", "S", 1, "-S <search method specification>")); newVector.addElement(new Option( "\tUse cross validation to evaluate features.\n" + "\tUse number of folds = 1 for leave one out CV.\n" + "\t(Default = leave one out CV)", "X", 1, "-X <number of folds>")); newVector.addElement(new Option( "\tPerformance evaluation measure to use for selecting attributes.\n" + "\t(Default = accuracy for discrete class and rmse for numeric class)", "E", 1, "-E <acc | rmse | mae | auc>")); newVector.addElement(new Option( "\tUse nearest neighbour instead of global table majority.", "I", 0, "-I")); newVector.addElement(new Option( "\tDisplay decision table rules.\n", "R", 0, "-R")); newVector.addElement(new Option( "", "", 0, "\nOptions specific to search method " + m_search.getClass().getName() + ":")); Enumeration enu = ((OptionHandler)m_search).listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Returns the tip text 
for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String crossValTipText() { return "Sets the number of folds for cross validation (1 = leave one out)."; } /** * Sets the number of folds for cross validation (1 = leave one out) * * @param folds the number of folds */ public void setCrossVal(int folds) { m_CVFolds = folds; } /** * Gets the number of folds for cross validation * * @return the number of cross validation folds */ public int getCrossVal() { return m_CVFolds; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String useIBkTipText() { return "Sets whether IBk should be used instead of the majority class."; } /** * Sets whether IBk should be used instead of the majority class * * @param ibk true if IBk is to be used */ public void setUseIBk(boolean ibk) { m_useIBk = ibk; } /** * Gets whether IBk is being used instead of the majority class * * @return true if IBk is being used */ public boolean getUseIBk() { return m_useIBk; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String displayRulesTipText() { return "Sets whether rules are to be printed."; } /** * Sets whether rules are to be printed * * @param rules true if rules are to be printed */ public void setDisplayRules(boolean rules) { m_displayRules = rules; } /** * Gets whether rules are being printed * * @return true if rules are being printed */ public boolean getDisplayRules() { return m_displayRules; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String searchTipText() { return "The search method used to find good attribute combinations for the " + "decision table."; } /** * Sets the search method to use * * @param 
search */ public void setSearch(ASSearch search) { m_search = search; } /** * Gets the current search method * * @return the search method used */ public ASSearch getSearch() { return m_search; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String evaluationMeasureTipText() { return "The measure used to evaluate the performance of attribute combinations " + "used in the decision table."; } /** * Gets the currently set performance evaluation measure used for selecting * attributes for the decision table * * @return the performance evaluation measure */ public SelectedTag getEvaluationMeasure() { return new SelectedTag(m_evaluationMeasure, TAGS_EVALUATION); } /** * Sets the performance evaluation measure to use for selecting attributes * for the decision table * * @param newMethod the new performance evaluation metric to use */ public void setEvaluationMeasure(SelectedTag newMethod) { if (newMethod.getTags() == TAGS_EVALUATION) { m_evaluationMeasure = newMethod.getSelectedTag().getID(); } } /** * Parses the options for this object. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -S &lt;search method specification&gt; * Full class name of search method, followed * by its options. * eg: "weka.attributeSelection.BestFirst -D 1" * (default weka.attributeSelection.BestFirst)</pre> * * <pre> -X &lt;number of folds&gt; * Use cross validation to evaluate features. * Use number of folds = 1 for leave one out CV. * (Default = leave one out CV)</pre> * * <pre> -E &lt;acc | rmse | mae | auc&gt; * Performance evaluation measure to use for selecting attributes. * (Default = accuracy for discrete class and rmse for numeric class)</pre> * * <pre> -I * Use nearest neighbour instead of global table majority.</pre> * * <pre> -R * Display decision table rules. 
* </pre> * * <pre> * Options specific to search method weka.attributeSelection.BestFirst: * </pre> * * <pre> -P &lt;start set&gt; * Specify a starting set of attributes. * Eg. 1,3,5-7.</pre> * * <pre> -D &lt;0 = backward | 1 = forward | 2 = bi-directional&gt; * Direction of search. (default = 1).</pre> * * <pre> -N &lt;num&gt; * Number of non-improving nodes to * consider before terminating search.</pre> * * <pre> -S &lt;num&gt; * Size of lookup cache for evaluated subsets. * Expressed as a multiple of the number of * attributes in the data set. (default = 1)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String optionString; resetOptions(); optionString = Utils.getOption('X',options); if (optionString.length() != 0) { m_CVFolds = Integer.parseInt(optionString); } m_useIBk = Utils.getFlag('I',options); m_displayRules = Utils.getFlag('R',options); optionString = Utils.getOption('E', options); if (optionString.length() != 0) { if (optionString.equals("acc")) { setEvaluationMeasure(new SelectedTag(EVAL_ACCURACY, TAGS_EVALUATION)); } else if (optionString.equals("rmse")) { setEvaluationMeasure(new SelectedTag(EVAL_RMSE, TAGS_EVALUATION)); } else if (optionString.equals("mae")) { setEvaluationMeasure(new SelectedTag(EVAL_MAE, TAGS_EVALUATION)); } else if (optionString.equals("auc")) { setEvaluationMeasure(new SelectedTag(EVAL_AUC, TAGS_EVALUATION)); } else { throw new IllegalArgumentException("Invalid evaluation measure"); } } String searchString = Utils.getOption('S', options); if (searchString.length() == 0) searchString = weka.attributeSelection.BestFirst.class.getName(); String [] searchSpec = Utils.splitOptions(searchString); if (searchSpec.length == 0) { throw new IllegalArgumentException("Invalid search specification string"); } String searchName = searchSpec[0]; searchSpec[0] = ""; 
setSearch(ASSearch.forName(searchName, searchSpec)); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] options = new String [9]; int current = 0; options[current++] = "-X"; options[current++] = "" + m_CVFolds; if (m_evaluationMeasure != EVAL_DEFAULT) { options[current++] = "-E"; switch (m_evaluationMeasure) { case EVAL_ACCURACY: options[current++] = "acc"; break; case EVAL_RMSE: options[current++] = "rmse"; break; case EVAL_MAE: options[current++] = "mae"; break; case EVAL_AUC: options[current++] = "auc"; break; } } if (m_useIBk) { options[current++] = "-I"; } if (m_displayRules) { options[current++] = "-R"; } options[current++] = "-S"; options[current++] = "" + getSearchSpec(); while (current < options.length) { options[current++] = ""; } return options; } /** * Gets the search specification string, which contains the class name of * the search method and any options to it * * @return the search string. */ protected String getSearchSpec() { ASSearch s = getSearch(); if (s instanceof OptionHandler) { return s.getClass().getName() + " " + Utils.joinOptions(((OptionHandler)s).getOptions()); } return s.getClass().getName(); } /** * Returns default capabilities of the classifier. 
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    // start from nothing enabled, then switch on exactly what is supported
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class: numeric/date classes are only usable when the evaluation
    // measure is not accuracy or AUC (those require a nominal class)
    result.enable(Capability.NOMINAL_CLASS);
    if (m_evaluationMeasure != EVAL_ACCURACY && m_evaluationMeasure != EVAL_AUC) {
      result.enable(Capability.NUMERIC_CLASS);
      result.enable(Capability.DATE_CLASS);
    }
    result.enable(Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * Subset evaluator that delegates straight to the enclosing table's
   * estimatePerformance method, letting the generic attribute-search
   * routines drive DecisionTable's own performance estimate.
   */
  private class DummySubsetEvaluator extends ASEvaluation implements SubsetEvaluator {
    /** for serialization */
    private static final long serialVersionUID = 3927442457704974150L;

    public void buildEvaluator(Instances data) throws Exception {
      // nothing to do: evaluation uses the enclosing DecisionTable's state
    }

    public double evaluateSubset(BitSet subset) throws Exception {
      // count the attributes in the candidate subset, then delegate
      int fc = 0;
      for (int jj = 0;jj < m_numAttributes; jj++) {
        if (subset.get(jj)) {
          fc++;
        }
      }
      return estimatePerformance(subset, fc);
    }
  }

  /**
   * Sets up a dummy subset evaluator that basically just delegates
   * evaluation to the estimatePerformance method in DecisionTable
   */
  protected void setUpEvaluator() throws Exception {
    m_evaluator = new DummySubsetEvaluator();
  }

  /** If true, the training data is discarded once the model is built. */
  protected boolean m_saveMemory = true;

  /**
   * Generates the classifier.
   *
   * @param data set of instances serving as training data
   * @throws Exception if the classifier has not been generated successfully
   */
  public void buildClassifier(Instances data) throws Exception {

    // can classifier handle the data?
    getCapabilities().testWithFail(data);

    // remove instances with missing class
    m_theInstances = new Instances(data);
    m_theInstances.deleteWithMissingClass();

    // fixed seed: training is deterministic across runs
    m_rr = new Random(1);

    if (m_theInstances.classAttribute().isNominal()) {// Set up class priors
      // counts start at 1.0, i.e. Laplace-style smoothing of the priors
      m_classPriorCounts = new double [data.classAttribute().numValues()];
      Arrays.fill(m_classPriorCounts, 1.0);
      for (int i = 0; i <data.numInstances(); i++) {
        Instance curr = data.instance(i);
        m_classPriorCounts[(int)curr.classValue()] += curr.weight();
      }
      m_classPriors = m_classPriorCounts.clone();
      Utils.normalize(m_classPriors);
    }

    setUpEvaluator();

    if (m_theInstances.classAttribute().isNumeric()) {
      m_disTransform = new weka.filters.unsupervised.attribute.Discretize();
      m_classIsNominal = false;

      // use binned discretisation if the class is numeric
      ((weka.filters.unsupervised.attribute.Discretize)m_disTransform).
        setBins(10);
      ((weka.filters.unsupervised.attribute.Discretize)m_disTransform).
        setInvertSelection(true);

      // Discretize all attributes EXCEPT the class (the inverted range
      // below selects everything but the class column)
      String rangeList = "";
      rangeList+=(m_theInstances.classIndex()+1);
      //System.out.println("The class col: "+m_theInstances.classIndex());

      ((weka.filters.unsupervised.attribute.Discretize)m_disTransform).
        setAttributeIndices(rangeList);
    } else {
      // nominal class: supervised (entropy-based) discretisation
      m_disTransform = new weka.filters.supervised.attribute.Discretize();
      ((weka.filters.supervised.attribute.Discretize)m_disTransform).setUseBetterEncoding(true);
      m_classIsNominal = true;
    }

    m_disTransform.setInputFormat(m_theInstances);
    m_theInstances = Filter.useFilter(m_theInstances, m_disTransform);

    m_numAttributes = m_theInstances.numAttributes();
    m_numInstances = m_theInstances.numInstances();
    m_majority = m_theInstances.meanOrMode(m_theInstances.classAttribute());

    // Perform the search
    int [] selected = m_search.search(m_evaluator, m_theInstances);

    // the table's key attributes are the selected features plus the class
    m_decisionFeatures = new int [selected.length+1];
    System.arraycopy(selected, 0, m_decisionFeatures, 0, selected.length);
    m_decisionFeatures[m_decisionFeatures.length-1] = m_theInstances.classIndex();

    // reduce instances to selected features
    m_delTransform = new Remove();
    m_delTransform.setInvertSelection(true);
    // set features to keep
    m_delTransform.setAttributeIndicesArray(m_decisionFeatures);
    m_delTransform.setInputFormat(m_theInstances);
    m_dtInstances = Filter.useFilter(m_theInstances, m_delTransform);

    // reset the number of attributes
    m_numAttributes = m_dtInstances.numAttributes();

    // create hash table (sized with slack to limit rehashing)
    m_entries = new Hashtable((int)(m_dtInstances.numInstances() * 1.5));

    // insert instances into the hash table
    for (int i = 0; i < m_numInstances; i++) {
      Instance inst = m_dtInstances.instance(i);
      insertIntoTable(inst, null);
    }

    // Replace the global table majority with nearest neighbour?
    // Note the IBk fallback is trained on the full (unreduced) feature set.
    if (m_useIBk) {
      m_ibk = new IBk();
      m_ibk.buildClassifier(m_theInstances);
    }

    // Save memory: keep only the (empty) headers for later filtering
    if (m_saveMemory) {
      m_theInstances = new Instances(m_theInstances, 0);
      m_dtInstances = new Instances(m_dtInstances, 0);
    }
    m_evaluation = null;
  }

  /**
   * Calculates the class membership probabilities for the given
   * test instance.
   *
   * @param instance the instance to be classified
   * @return predicted class probability distribution
   * @throws Exception if distribution can't be computed
   */
  public double [] distributionForInstance(Instance instance)
    throws Exception {

    DecisionTableHashKey thekey;
    double [] tempDist;
    double [] normDist;

    // push the instance through the same discretisation and attribute
    // selection filters that were applied to the training data
    m_disTransform.input(instance);
    m_disTransform.batchFinished();
    instance = m_disTransform.output();

    m_delTransform.input(instance);
    m_delTransform.batchFinished();
    instance = m_delTransform.output();

    thekey = new DecisionTableHashKey(instance, instance.numAttributes(), false);

    // if this one is not in the table
    if ((tempDist = (double [])m_entries.get(thekey)) == null) {
      if (m_useIBk) {
        // defer to the nearest-neighbour fallback
        tempDist = m_ibk.distributionForInstance(instance);
      } else {
        if (!m_classIsNominal) {
          // numeric class: predict the training-set mean
          tempDist = new double[1];
          tempDist[0] = m_majority;
        } else {
          // nominal class: fall back on the smoothed class priors
          tempDist = m_classPriors.clone();
          /*tempDist = new double [m_theInstances.classAttribute().numValues()];
            tempDist[(int)m_majority] = 1.0; */
        }
      }
    } else {
      if (!m_classIsNominal) {
        // table entry is a 2-element accumulator; the prediction is
        // element 0 over element 1 (presumably weighted class-value sum
        // over total weight — confirm against insertIntoTable)
        normDist = new double[1];
        normDist[0] = (tempDist[0] / tempDist[1]);
        tempDist = normDist;
      } else {
        // normalise distribution (copy first so the table is not mutated)
        normDist = new double [tempDist.length];
        System.arraycopy(tempDist,0,normDist,0,tempDist.length);
        Utils.normalize(normDist);
        tempDist = normDist;
      }
    }
    return tempDist;
  }

  /**
   * Returns a string description of the features selected
   *
   * @return a string of features
   */
  public String printFeatures() {

    int i;
    String s = "";

    // 1-based attribute indices, comma separated
    for (i=0;i<m_decisionFeatures.length;i++) {
      if (i==0) {
        s = ""+(m_decisionFeatures[i]+1);
      } else {
        s += ","+(m_decisionFeatures[i]+1);
      }
    }
    return s;
  }

  /**
   * Returns the number of rules
   * @return the number of rules
   */
  public double measureNumRules() {
    return m_entries.size();
  }

  /**
   * Returns an enumeration of the additional measure names
   * @return an enumeration of the measure names
   */
  public Enumeration enumerateMeasures() {
    Vector newVector = new Vector(1);
    newVector.addElement("measureNumRules");
    return newVector.elements();
  }

  /**
   * Returns the value of the named measure
   * @param additionalMeasureName the name of the measure to query for its value
   * @return the value of the named measure
   * @throws IllegalArgumentException if the named measure is not supported
   */
  public double getMeasure(String additionalMeasureName) {
    if (additionalMeasureName.compareToIgnoreCase("measureNumRules") == 0) {
      return measureNumRules();
    } else {
      throw new IllegalArgumentException(additionalMeasureName
          + " not supported (DecisionTable)");
    }
  }

  /**
   * Returns a description of the classifier.
   *
   * @return a description of the classifier as a string.
   */
  public String toString() {

    if (m_entries == null) {
      return "Decision Table: No model built yet.";
    } else {
      StringBuffer text = new StringBuffer();

      text.append("Decision Table:"+
          "\n\nNumber of training instances: "+m_numInstances+
          "\nNumber of Rules : "+m_entries.size()+"\n");

      if (m_useIBk) {
        text.append("Non matches covered by IB1.\n");
      } else {
        text.append("Non matches covered by Majority class.\n");
      }

      text.append(m_search.toString());
      /*text.append("Best first search for feature set,\nterminated after "+
          m_maxStale+" non improving subsets.\n"); */

      text.append("Evaluation (for feature selection): CV ");
      if (m_CVFolds > 1) {
        text.append("("+m_CVFolds+" fold) ");
      } else {
        text.append("(leave one out) ");
      }
      text.append("\nFeature set: "+printFeatures());

      if (m_displayRules) {

        // find out the max column width (attribute names and, for nominal
        // attributes, their values all share one padded column width)
        int maxColWidth = 0;
        for (int i=0;i<m_dtInstances.numAttributes();i++) {
          if (m_dtInstances.attribute(i).name().length() > maxColWidth) {
            maxColWidth = m_dtInstances.attribute(i).name().length();
          }

          if (m_classIsNominal || (i != m_dtInstances.classIndex())) {
            Enumeration e = m_dtInstances.attribute(i).enumerateValues();
            while (e.hasMoreElements()) {
              String ss = (String)e.nextElement();
              if (ss.length() > maxColWidth) {
                maxColWidth = ss.length();
              }
            }
          }
        }

        text.append("\n\nRules:\n");
        // header row: non-class attribute names padded to the column
        // width, with the class attribute last
        StringBuffer tm = new StringBuffer();
        for (int i=0;i<m_dtInstances.numAttributes();i++) {
          if (m_dtInstances.classIndex() != i) {
            int d = maxColWidth - m_dtInstances.attribute(i).name().length();
            tm.append(m_dtInstances.attribute(i).name());
            for (int j=0;j<d+1;j++) {
              tm.append(" ");
            }
          }
        }
        tm.append(m_dtInstances.attribute(m_dtInstances.classIndex()).name()+"  ");

        for (int i=0;i<tm.length()+10;i++) {
          text.append("=");
        }
        text.append("\n");
        text.append(tm);
        text.append("\n");
        for (int i=0;i<tm.length()+10;i++) {
          text.append("=");
        }
        text.append("\n");

        // one line per table entry: key values then the predicted class
        Enumeration e = m_entries.keys();
        while (e.hasMoreElements()) {
          DecisionTableHashKey tt = (DecisionTableHashKey)e.nextElement();
          text.append(tt.toString(m_dtInstances,maxColWidth));
          double [] ClassDist = (double []) m_entries.get(tt);

          if (m_classIsNominal) {
            int m = Utils.maxIndex(ClassDist);
            try {
              text.append(m_dtInstances.classAttribute().value(m)+"\n");
            } catch (Exception ee) {
              System.out.println(ee.getMessage());
            }
          } else {
            text.append((ClassDist[0] / ClassDist[1])+"\n");
          }
        }

        for (int i=0;i<tm.length()+10;i++) {
          text.append("=");
        }
        text.append("\n");
        text.append("\n");
      }
      return text.toString();
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param argv the command-line options
   */
  public static void main(String [] argv) {
    runClassifier(new DecisionTable(), argv);
  }
}
40,165
27.486525
108
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/rules/DecisionTableHashKey.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * DecisionTableHashKey.java * Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.rules; import java.io.Serializable; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * Class providing hash table keys for DecisionTable */ public class DecisionTableHashKey implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = 5674163500154964602L; /** Array of attribute values for an instance */ private double [] attributes; /** True for an index if the corresponding attribute value is missing. 
*/ private boolean [] missing; /** The key */ private int key; /** * Constructor for a hashKey * * @param t an instance from which to generate a key * @param numAtts the number of attributes * @param ignoreClass if true treat the class as a normal attribute * @throws Exception if something goes wrong */ public DecisionTableHashKey(Instance t, int numAtts, boolean ignoreClass) throws Exception { int i; int cindex = t.classIndex(); key = -999; attributes = new double [numAtts]; missing = new boolean [numAtts]; for (i=0;i<numAtts;i++) { if (i == cindex && !ignoreClass) { missing[i] = true; } else { if ((missing[i] = t.isMissing(i)) == false) { attributes[i] = t.value(i); } } } } /** * Convert a hash entry to a string * * @param t the set of instances * @param maxColWidth width to make the fields * @return string representation of the hash entry */ public String toString(Instances t, int maxColWidth) { int i; int cindex = t.classIndex(); StringBuffer text = new StringBuffer(); for (i=0;i<attributes.length;i++) { if (i != cindex) { if (missing[i]) { text.append("?"); for (int j=0;j<maxColWidth;j++) { text.append(" "); } } else { String ss = t.attribute(i).value((int)attributes[i]); StringBuffer sb = new StringBuffer(ss); for (int j=0;j < (maxColWidth-ss.length()+1); j++) { sb.append(" "); } text.append(sb); } } } return text.toString(); } /** * Constructor for a hashKey * * @param t an array of feature values */ public DecisionTableHashKey(double [] t) { int i; int l = t.length; key = -999; attributes = new double [l]; missing = new boolean [l]; for (i=0;i<l;i++) { if (t[i] == Double.MAX_VALUE) { missing[i] = true; } else { missing[i] = false; attributes[i] = t[i]; } } } /** * Calculates a hash code * * @return the hash code as an integer */ public int hashCode() { int hv = 0; if (key != -999) return key; for (int i=0;i<attributes.length;i++) { if (missing[i]) { hv += (i*13); } else { hv += (i * 5 * (attributes[i]+1)); } } if (key == -999) { key = hv; } return hv; } /** 
* Tests if two instances are equal * * @param b a key to compare with * @return true if both objects are equal */ public boolean equals(Object b) { if ((b == null) || !(b.getClass().equals(this.getClass()))) { return false; } boolean ok = true; boolean l; if (b instanceof DecisionTableHashKey) { DecisionTableHashKey n = (DecisionTableHashKey)b; for (int i=0;i<attributes.length;i++) { l = n.missing[i]; if (missing[i] || l) { if ((missing[i] && !l) || (!missing[i] && l)) { ok = false; break; } } else { if (attributes[i] != n.attributes[i]) { ok = false; break; } } } } else { return false; } return ok; } /** * Prints the hash code */ public void print_hash_code() { System.out.println("Hash val: "+hashCode()); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
4,974
23.033816
94
java