repo
stringlengths
1
191
file
stringlengths
23
351
code
stringlengths
0
5.32M
file_length
int64
0
5.32M
avg_line_length
float64
0
2.9k
max_line_length
int64
0
288k
extension_type
stringclasses
1 value
tsml-java
tsml-java-master/src/main/java/weka/classifiers/IteratedSingleClassifierEnhancer.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * IteratedSingleClassifierEnhancer.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Enumeration;
import java.util.Vector;

import weka.core.Instances;
import weka.core.Option;
import weka.core.Utils;

/**
 * Abstract utility class for handling settings common to
 * meta classifiers that build an ensemble from a single base learner.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public abstract class IteratedSingleClassifierEnhancer
  extends SingleClassifierEnhancer {

  /** for serialization */
  private static final long serialVersionUID = -6217979135443319724L;

  /** Array for storing the generated base classifiers. */
  protected Classifier[] m_Classifiers;

  /** The number of iterations. */
  protected int m_NumIterations = 10;

  /**
   * Stump method for building the classifiers: checks that a base
   * classifier has been set and fills the member array with copies of it.
   *
   * @param data the training data to be used for generating the
   * bagged classifier.
   * @exception Exception if the classifier could not be built successfully
   */
  public void buildClassifier(Instances data) throws Exception {

    if (m_Classifier == null) {
      throw new Exception("A base classifier has not been specified!");
    }
    m_Classifiers =
      AbstractClassifier.makeCopies(m_Classifier, m_NumIterations);
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {

    Vector result = new Vector(2);

    result.addElement(new Option(
      "\tNumber of iterations.\n"
      + "\t(default 10)",
      "I", 1, "-I <num>"));

    // append every option supported by the superclass
    for (Enumeration superEnum = super.listOptions();
         superEnum.hasMoreElements(); ) {
      result.addElement(superEnum.nextElement());
    }
    return result.elements();
  }

  /**
   * Parses a given list of options. Valid options are:<p>
   *
   * -W classname <br>
   * Specify the full class name of the base learner.<p>
   *
   * -I num <br>
   * Set the number of iterations (default 10). <p>
   *
   * Options after -- are passed to the designated classifier.<p>
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    String numIterationsStr = Utils.getOption('I', options);
    // fall back to the documented default when the option is absent
    setNumIterations(numIterationsStr.length() != 0
                     ? Integer.parseInt(numIterationsStr)
                     : 10);

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {

    String [] superOptions = super.getOptions();
    String [] result = new String [superOptions.length + 2];

    // our own option first, then everything from the superclass
    result[0] = "-I";
    result[1] = "" + getNumIterations();
    System.arraycopy(superOptions, 0, result, 2, superOptions.length);

    return result;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String numIterationsTipText() {
    return "The number of iterations to be performed.";
  }

  /**
   * Sets the number of bagging iterations
   *
   * @param numIterations the number of base classifiers to build
   */
  public void setNumIterations(int numIterations) {
    m_NumIterations = numIterations;
  }

  /**
   * Gets the number of bagging iterations
   *
   * @return the maximum number of bagging iterations
   */
  public int getNumIterations() {
    return m_NumIterations;
  }
}
4,368
26.477987
81
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/IterativeClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * IterativeClassifier.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; import weka.core.Instances; /** * Interface for classifiers that can induce models of growing * complexity one step at a time. * * @author Gabi Schmidberger (gabi@cs.waikato.ac.nz) * @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public interface IterativeClassifier { /** * Inits an iterative classifier. * * @param instances the instances to be used in induction * @exception Exception if the model cannot be initialized */ void initClassifier(Instances instances) throws Exception; /** * Performs one iteration. * * @param iteration the index of the current iteration (0-based) * @exception Exception if this iteration fails */ void next(int iteration) throws Exception; /** * Signal end of iterating, useful for any house-keeping/cleanup * * @exception Exception if cleanup fails */ void done() throws Exception; /** * Performs a deep copy of the classifier, and a reference copy * of the training instances (or a deep copy if required). * * @return a clone of the classifier */ Object clone() throws CloneNotSupportedException; }
1,986
27.797101
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/MultipleClassifiersCombiner.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * MultipleClassifiersCombiner.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Enumeration;
import java.util.Vector;

import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Utils;

/**
 * Abstract utility class for handling settings common to
 * meta classifiers that build an ensemble from multiple classifiers.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public abstract class MultipleClassifiersCombiner extends AbstractClassifier {

  /** for serialization */
  private static final long serialVersionUID = 2776436621129422119L;

  /** Array for storing the generated base classifiers. */
  protected Classifier[] m_Classifiers = {
    new weka.classifiers.rules.ZeroR()
  };

  /**
   * Returns an enumeration describing the available options
   *
   * @return an enumeration of all the available options
   */
  public Enumeration listOptions() {

    Vector newVector = new Vector(1);

    newVector.addElement(new Option(
      "\tFull class name of classifier to include, followed\n"
      + "\tby scheme options. May be specified multiple times.\n"
      + "\t(default: \"weka.classifiers.rules.ZeroR\")",
      "B", 1, "-B <classifier specification>"));

    Enumeration enu = super.listOptions();
    while (enu.hasMoreElements()) {
      newVector.addElement(enu.nextElement());
    }
    return newVector.elements();
  }

  /**
   * Parses a given list of options. Valid options are:<p>
   *
   * -B classifierstring <br>
   * Classifierstring should contain the full class name of a scheme
   * included for selection followed by options to the classifier
   * (required, option should be used once for each classifier).<p>
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    // Iterate through the schemes: each -B occurrence adds one classifier
    Vector classifiers = new Vector();
    while (true) {
      String classifierString = Utils.getOption('B', options);
      if (classifierString.length() == 0) {
        break;
      }
      // first token is the class name, the remainder are its options
      String [] classifierSpec = Utils.splitOptions(classifierString);
      if (classifierSpec.length == 0) {
        throw new IllegalArgumentException("Invalid classifier specification string");
      }
      String classifierName = classifierSpec[0];
      classifierSpec[0] = "";
      classifiers.addElement(AbstractClassifier.forName(classifierName,
                                                        classifierSpec));
    }
    // default to ZeroR when no -B option was supplied
    if (classifiers.size() == 0) {
      classifiers.addElement(new weka.classifiers.rules.ZeroR());
    }
    Classifier [] classifiersArray = new Classifier [classifiers.size()];
    for (int i = 0; i < classifiersArray.length; i++) {
      classifiersArray[i] = (Classifier) classifiers.elementAt(i);
    }
    setClassifiers(classifiersArray);

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {

    String [] superOptions = super.getOptions();
    int current = 0;
    String[] options = new String [superOptions.length
                                   + m_Classifiers.length * 2];

    // one -B pair per base classifier, then the superclass options
    for (int i = 0; i < m_Classifiers.length; i++) {
      options[current++] = "-B";
      options[current++] = "" + getClassifierSpec(i);
    }
    System.arraycopy(superOptions, 0, options, current, superOptions.length);

    return options;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String classifiersTipText() {
    return "The base classifiers to be used.";
  }

  /**
   * Sets the list of possible classifers to choose from.
   *
   * @param classifiers an array of classifiers with all options set.
   */
  public void setClassifiers(Classifier [] classifiers) {
    m_Classifiers = classifiers;
  }

  /**
   * Gets the list of possible classifers to choose from.
   *
   * @return the array of Classifiers
   */
  public Classifier [] getClassifiers() {
    return m_Classifiers;
  }

  /**
   * Gets a single classifier from the set of available classifiers.
   *
   * @param index the index of the classifier wanted
   * @return the Classifier
   */
  public Classifier getClassifier(int index) {
    return m_Classifiers[index];
  }

  /**
   * Gets the classifier specification string, which contains the class name of
   * the classifier and any options to the classifier
   *
   * @param index the index of the classifier string to retrieve, starting from
   * 0.
   * @return the classifier string, or the empty string if no classifier
   * has been assigned (or the index given is out of range).
   */
  protected String getClassifierSpec(int index) {

    // FIX: the original test (m_Classifiers.length < index) let both
    // index == length and negative indices fall through to an
    // ArrayIndexOutOfBoundsException, contradicting the documented
    // "empty string if index out of range" contract.
    if (index < 0 || index >= m_Classifiers.length) {
      return "";
    }
    Classifier c = getClassifier(index);
    return c.getClass().getName() + " "
      + Utils.joinOptions(((OptionHandler)c).getOptions());
  }

  /**
   * Returns combined capabilities of the base classifiers, i.e., the
   * capabilities all of them have in common.
   *
   * @return the capabilities of the base classifiers
   */
  public Capabilities getCapabilities() {
    Capabilities result;
    int i;

    if (getClassifiers().length == 0) {
      result = new Capabilities(this);
      result.disableAll();
    } else {
      // intersect the capabilities of all base classifiers
      result = (Capabilities) getClassifier(0).getCapabilities().clone();
      for (i = 1; i < getClassifiers().length; i++)
        result.and(getClassifier(i).getCapabilities());
    }

    // set dependencies
    for (Capability cap: Capability.values())
      result.enableDependency(cap);

    result.setOwner(this);

    return result;
  }
}
6,651
29.236364
86
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/ParallelIteratedSingleClassifierEnhancer.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ParallelIteratedSingleClassifierEnhancer.java
 * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Enumeration;
import java.util.Vector;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import weka.core.Instances;
import weka.core.Option;
import weka.core.Utils;

/**
 * Abstract utility class for handling settings common to
 * meta classifiers that build an ensemble in parallel from a single
 * base learner.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: 8034 $
 */
public abstract class ParallelIteratedSingleClassifierEnhancer
  extends IteratedSingleClassifierEnhancer {

  /** For serialization */
  private static final long serialVersionUID = -5026378741833046436L;

  /** The number of threads to have executing at any one time */
  protected int m_numExecutionSlots = 1;

  /** Pool of threads to train models with */
  protected transient ThreadPoolExecutor m_executorPool;

  /** The number of classifiers completed so far */
  protected int m_completed;

  /**
   * The number of classifiers that experienced a failure of some sort
   * during construction
   */
  protected int m_failed;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {

    Vector newVector = new Vector(2);

    newVector.addElement(new Option(
              "\tNumber of execution slots.\n"
              + "\t(default 1 - i.e. no parallelism)",
              "num-slots", 1, "-num-slots <num>"));

    Enumeration enu = super.listOptions();
    while (enu.hasMoreElements()) {
      newVector.addElement(enu.nextElement());
    }
    return newVector.elements();
  }

  /**
   * Parses a given list of options. Valid options are:<p>
   *
   * -Z num <br>
   * Set the number of execution slots to use (default 1 - i.e. no parallelism). <p>
   *
   * Options after -- are passed to the designated classifier.<p>
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    String iterations = Utils.getOption("num-slots", options);
    if (iterations.length() != 0) {
      setNumExecutionSlots(Integer.parseInt(iterations));
    } else {
      setNumExecutionSlots(1);
    }

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {

    String [] superOptions = super.getOptions();
    String [] options = new String [superOptions.length + 2];

    int current = 0;
    options[current++] = "-num-slots";
    options[current++] = "" + getNumExecutionSlots();

    System.arraycopy(superOptions, 0, options, current,
        superOptions.length);

    return options;
  }

  /**
   * Set the number of execution slots (threads) to use for building the
   * members of the ensemble.
   *
   * @param numSlots the number of slots to use.
   */
  public void setNumExecutionSlots(int numSlots) {
    m_numExecutionSlots = numSlots;
  }

  /**
   * Get the number of execution slots (threads) to use for building
   * the members of the ensemble.
   *
   * @return the number of slots to use
   */
  public int getNumExecutionSlots() {
    return m_numExecutionSlots;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String numExecutionSlotsTipText() {
    return "The number of execution slots (threads) to use for "
      + "constructing the ensemble.";
  }

  /**
   * Stump method for building the classifiers
   *
   * @param data the training data to be used for generating the ensemble
   * @exception Exception if the classifier could not be built successfully
   */
  public void buildClassifier(Instances data) throws Exception {
    super.buildClassifier(data);

    if (m_numExecutionSlots < 1) {
      throw new Exception("Number of execution slots needs to be >= 1!");
    }

    if (m_numExecutionSlots > 1) {
      startExecutorPool();
    }

    m_completed = 0;
    m_failed = 0;
  }

  /**
   * Start the pool of execution threads, shutting down any pool left over
   * from a previous build first.
   */
  protected void startExecutorPool() {
    if (m_executorPool != null) {
      m_executorPool.shutdownNow();
    }

    m_executorPool = new ThreadPoolExecutor(m_numExecutionSlots, m_numExecutionSlots,
        120, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
  }

  /**
   * Blocks the calling thread until all classifiers have finished training
   * (tf == true), or wakes up any blocked threads (tf == false).
   *
   * @param tf whether to block (true) or release waiters (false)
   */
  private synchronized void block(boolean tf) {
    if (tf) {
      // FIX: guard the wait() with a loop so a spurious wakeup (permitted
      // by the Object.wait() contract) cannot return before all workers
      // have reported in via completedClassifier().
      while (m_numExecutionSlots > 1
             && m_completed + m_failed < m_Classifiers.length) {
        try {
          wait();
        } catch (InterruptedException ex) {
          // FIX: restore the interrupt status instead of swallowing it,
          // then stop waiting (preserves the original give-up behavior).
          Thread.currentThread().interrupt();
          break;
        }
      }
    } else {
      notifyAll();
    }
  }

  /**
   * Does the actual construction of the ensemble: either sequentially, or
   * by submitting one training task per classifier to the executor pool.
   *
   * @throws Exception if something goes wrong during the training
   * process
   */
  protected synchronized void buildClassifiers() throws Exception {

    for (int i = 0; i < m_Classifiers.length; i++) {

      if (m_numExecutionSlots > 1) {

        final Classifier currentClassifier = m_Classifiers[i];
        final int iteration = i;
        if (m_Debug) {
          System.out.print("Training classifier (" + (i +1) + ")");
        }
        Runnable newTask = new Runnable() {
          public void run() {
            try {
              currentClassifier.buildClassifier(getTrainingSet(iteration));
              completedClassifier(iteration, true);
            } catch (Exception ex) {
              ex.printStackTrace();
              completedClassifier(iteration, false);
            }
          }
        };

        // launch this task
        m_executorPool.execute(newTask);
      } else {
        m_Classifiers[i].buildClassifier(getTrainingSet(i));
      }
    }

    if (m_numExecutionSlots > 1
        && m_completed + m_failed < m_Classifiers.length) {
      block(true);
    }
  }

  /**
   * Records the completion of the training of a single classifier. Unblocks if
   * all classifiers have been trained.
   *
   * @param iteration the iteration that has completed
   * @param success whether the classifier trained successfully
   */
  protected synchronized void completedClassifier(int iteration,
      boolean success) {

    if (!success) {
      m_failed++;
      if (m_Debug) {
        System.err.println("Iteration " + iteration + " failed!");
      }
    } else {
      m_completed++;
    }

    if (m_completed + m_failed == m_Classifiers.length) {
      if (m_failed > 0) {
        if (m_Debug) {
          System.err.println("Problem building classifiers - some iterations failed.");
        }
      }

      // have to shut the pool down or program executes as a server
      // and when running from the command line does not return to the
      // prompt
      m_executorPool.shutdown();
      block(false);
    }
  }

  /**
   * Gets a training set for a particular iteration. Implementations need
   * to be careful with thread safety and should probably be synchronized
   * to be on the safe side.
   *
   * @param iteration the number of the iteration for the requested training set
   * @return the training set for the supplied iteration number
   * @throws Exception if something goes wrong.
   */
  protected abstract Instances getTrainingSet(int iteration) throws Exception;
}
8,304
28.038462
87
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/ParallelMultipleClassifiersCombiner.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ParallelMultipleClassifiersCombiner.java
 * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Enumeration;
import java.util.Vector;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import weka.core.Instances;
import weka.core.Option;
import weka.core.Utils;

/**
 * Abstract utility class for handling settings common to
 * meta classifiers that build an ensemble in parallel using multiple
 * classifiers.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: 8034 $
 */
public abstract class ParallelMultipleClassifiersCombiner
  extends MultipleClassifiersCombiner {

  /** For serialization */
  private static final long serialVersionUID = 728109028953726626L;

  /** The number of threads to have executing at any one time */
  protected int m_numExecutionSlots = 1;

  /** Pool of threads to train models with */
  protected transient ThreadPoolExecutor m_executorPool;

  /** The number of classifiers completed so far */
  protected int m_completed;

  /**
   * The number of classifiers that experienced a failure of some sort
   * during construction
   */
  protected int m_failed;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {

    Vector newVector = new Vector(2);

    newVector.addElement(new Option(
              "\tNumber of execution slots.\n"
              + "\t(default 1 - i.e. no parallelism)",
              "num-slots", 1, "-num-slots <num>"));

    Enumeration enu = super.listOptions();
    while (enu.hasMoreElements()) {
      newVector.addElement(enu.nextElement());
    }
    return newVector.elements();
  }

  /**
   * Parses a given list of options. Valid options are:<p>
   *
   * -Z num <br>
   * Set the number of execution slots to use (default 1 - i.e. no parallelism). <p>
   *
   * Options after -- are passed to the designated classifier.<p>
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    String iterations = Utils.getOption("num-slots", options);
    if (iterations.length() != 0) {
      setNumExecutionSlots(Integer.parseInt(iterations));
    } else {
      setNumExecutionSlots(1);
    }

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {

    String [] superOptions = super.getOptions();
    String [] options = new String [superOptions.length + 2];

    int current = 0;
    options[current++] = "-num-slots";
    options[current++] = "" + getNumExecutionSlots();

    System.arraycopy(superOptions, 0, options, current,
        superOptions.length);

    return options;
  }

  /**
   * Set the number of execution slots (threads) to use for building the
   * members of the ensemble.
   *
   * @param numSlots the number of slots to use.
   */
  public void setNumExecutionSlots(int numSlots) {
    m_numExecutionSlots = numSlots;
  }

  /**
   * Get the number of execution slots (threads) to use for building
   * the members of the ensemble.
   *
   * @return the number of slots to use
   */
  public int getNumExecutionSlots() {
    return m_numExecutionSlots;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String numExecutionSlotsTipText() {
    return "The number of execution slots (threads) to use for "
      + "constructing the ensemble.";
  }

  /**
   * Stump method for building the classifiers
   *
   * @param data the training data to be used for generating the ensemble
   * @exception Exception if the classifier could not be built successfully
   */
  public void buildClassifier(Instances data) throws Exception {

    if (m_numExecutionSlots < 1) {
      throw new Exception("Number of execution slots needs to be >= 1!");
    }

    if (m_numExecutionSlots > 1) {
      if (m_Debug) {
        System.out.println("Starting executor pool with " + m_numExecutionSlots
            + " slots...");
      }
      startExecutorPool();
    }

    m_completed = 0;
    m_failed = 0;
  }

  /**
   * Start the pool of execution threads, shutting down any pool left over
   * from a previous build first.
   */
  protected void startExecutorPool() {
    if (m_executorPool != null) {
      m_executorPool.shutdownNow();
    }

    m_executorPool = new ThreadPoolExecutor(m_numExecutionSlots, m_numExecutionSlots,
        120, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
  }

  /**
   * Blocks the calling thread until all classifiers have finished training
   * (tf == true), or wakes up any blocked threads (tf == false).
   *
   * @param tf whether to block (true) or release waiters (false)
   */
  private synchronized void block(boolean tf) {
    if (tf) {
      // FIX: guard the wait() with a loop so a spurious wakeup (permitted
      // by the Object.wait() contract) cannot return before all workers
      // have reported in via completedClassifier().
      while (m_numExecutionSlots > 1
             && m_completed + m_failed < m_Classifiers.length) {
        try {
          wait();
        } catch (InterruptedException ex) {
          // FIX: restore the interrupt status instead of swallowing it,
          // then stop waiting (preserves the original give-up behavior).
          Thread.currentThread().interrupt();
          break;
        }
      }
    } else {
      notifyAll();
    }
  }

  /**
   * Does the actual construction of the ensemble: either sequentially, or
   * by submitting one training task per classifier to the executor pool.
   *
   * @param data the training data
   * @throws Exception if something goes wrong during the training
   * process
   */
  protected synchronized void buildClassifiers(final Instances data) throws Exception {

    for (int i = 0; i < m_Classifiers.length; i++) {

      if (m_numExecutionSlots > 1) {

        final Classifier currentClassifier = m_Classifiers[i];
        final int iteration = i;
        Runnable newTask = new Runnable() {
          public void run() {
            try {
              if (m_Debug) {
                System.out.println("Training classifier (" + (iteration +1) + ")");
              }
              currentClassifier.buildClassifier(data);
              if (m_Debug) {
                System.out.println("Finished classifier (" + (iteration +1) + ")");
              }
              completedClassifier(iteration, true);
            } catch (Exception ex) {
              ex.printStackTrace();
              completedClassifier(iteration, false);
            }
          }
        };

        // launch this task
        m_executorPool.execute(newTask);
      } else {
        m_Classifiers[i].buildClassifier(data);
      }
    }

    if (m_numExecutionSlots > 1
        && m_completed + m_failed < m_Classifiers.length) {
      block(true);
    }
  }

  /**
   * Records the completion of the training of a single classifier. Unblocks if
   * all classifiers have been trained.
   *
   * @param iteration the iteration that has completed
   * @param success whether the classifier trained successfully
   */
  protected synchronized void completedClassifier(int iteration,
      boolean success) {

    if (!success) {
      m_failed++;
      if (m_Debug) {
        System.err.println("Iteration " + iteration + " failed!");
      }
    } else {
      m_completed++;
    }

    if (m_completed + m_failed == m_Classifiers.length) {
      if (m_failed > 0) {
        if (m_Debug) {
          System.err.println("Problem building classifiers - some iterations failed.");
        }
      }

      // have to shut the pool down or program executes as a server
      // and when running from the command line does not return to the
      // prompt
      m_executorPool.shutdown();
      block(false);
    }
  }
}
8,067
27.711744
87
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/RandomizableClassifier.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * RandomizableClassifier.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Enumeration;
import java.util.Vector;

import weka.core.Option;
import weka.core.Randomizable;
import weka.core.Utils;

/**
 * Abstract utility class for handling settings common to randomizable
 * classifiers.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public abstract class RandomizableClassifier
  extends AbstractClassifier
  implements Randomizable {

  /** for serialization */
  private static final long serialVersionUID = -8816375798262351903L;

  /** The random number seed. */
  protected int m_Seed = 1;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {

    Vector result = new Vector(2);

    result.addElement(new Option(
      "\tRandom number seed.\n"
      + "\t(default 1)",
      "S", 1, "-S <num>"));

    // append every option supported by the superclass
    for (Enumeration superEnum = super.listOptions();
         superEnum.hasMoreElements(); ) {
      result.addElement(superEnum.nextElement());
    }
    return result.elements();
  }

  /**
   * Parses a given list of options. Valid options are:<p>
   *
   * -S num <br>
   * Set the random number seed (default 1). <p>
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    String seedStr = Utils.getOption('S', options);
    // fall back to the documented default when the option is absent
    setSeed(seedStr.length() != 0 ? Integer.parseInt(seedStr) : 1);

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {

    String [] superOptions = super.getOptions();
    String [] result = new String [superOptions.length + 2];

    // our own option first, then everything from the superclass
    result[0] = "-S";
    result[1] = "" + getSeed();
    System.arraycopy(superOptions, 0, result, 2, superOptions.length);

    return result;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String seedTipText() {
    return "The random number seed to be used.";
  }

  /**
   * Set the seed for random number generation.
   *
   * @param seed the seed
   */
  public void setSeed(int seed) {
    m_Seed = seed;
  }

  /**
   * Gets the seed for the random number generations
   *
   * @return the seed for the random number generation
   */
  public int getSeed() {
    return m_Seed;
  }
}
3,721
24.493151
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/RandomizableIteratedSingleClassifierEnhancer.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * RandomizableIteratedSingleClassifierEnhancer.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers;

import java.util.Enumeration;
import java.util.Vector;

import weka.core.Option;
import weka.core.Randomizable;
import weka.core.Utils;

/**
 * Abstract utility class for handling settings common to randomizable
 * meta classifiers that build an ensemble from a single base learner.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public abstract class RandomizableIteratedSingleClassifierEnhancer
  extends IteratedSingleClassifierEnhancer
  implements Randomizable {

  /** for serialization */
  private static final long serialVersionUID = 5063351391524938557L;

  /** The random number seed. */
  protected int m_Seed = 1;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {

    Vector result = new Vector(2);

    result.addElement(new Option(
      "\tRandom number seed.\n"
      + "\t(default 1)",
      "S", 1, "-S <num>"));

    // append every option supported by the superclass
    for (Enumeration superEnum = super.listOptions();
         superEnum.hasMoreElements(); ) {
      result.addElement(superEnum.nextElement());
    }
    return result.elements();
  }

  /**
   * Parses a given list of options. Valid options are:<p>
   *
   * -W classname <br>
   * Specify the full class name of the base learner.<p>
   *
   * -I num <br>
   * Set the number of iterations (default 10). <p>
   *
   * -S num <br>
   * Set the random number seed (default 1). <p>
   *
   * Options after -- are passed to the designated classifier.<p>
   *
   * @param options the list of options as an array of strings
   * @exception Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    String seedStr = Utils.getOption('S', options);
    // fall back to the documented default when the option is absent
    setSeed(seedStr.length() != 0 ? Integer.parseInt(seedStr) : 1);

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {

    String [] superOptions = super.getOptions();
    String [] result = new String [superOptions.length + 2];

    // our own option first, then everything from the superclass
    result[0] = "-S";
    result[1] = "" + getSeed();
    System.arraycopy(superOptions, 0, result, 2, superOptions.length);

    return result;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String seedTipText() {
    return "The random number seed to be used.";
  }

  /**
   * Set the seed for random number generation.
   *
   * @param seed the seed
   */
  public void setSeed(int seed) {
    m_Seed = seed;
  }

  /**
   * Gets the seed for the random number generations
   *
   * @return the seed for the random number generation
   */
  public int getSeed() {
    return m_Seed;
  }
}
3,833
25.260274
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/RandomizableMultipleClassifiersCombiner.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RandomizableMultipleClassifiersCombiner.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; import java.util.Enumeration; import java.util.Vector; import weka.core.Option; import weka.core.Randomizable; import weka.core.Utils; /** * Abstract utility class for handling settings common to randomizable * meta classifiers that build an ensemble from multiple classifiers based * on a given random number seed. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public abstract class RandomizableMultipleClassifiersCombiner extends MultipleClassifiersCombiner implements Randomizable { /** for serialization */ private static final long serialVersionUID = 5057936555724785679L; /** The random number seed. */ protected int m_Seed = 1; /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(2); newVector.addElement(new Option( "\tRandom number seed.\n" + "\t(default 1)", "S", 1, "-S <num>")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. 
Valid options are:<p> * * -B classifierstring <br> * Classifierstring should contain the full class name of a scheme * included for selection followed by options to the classifier * (required, option should be used once for each classifier).<p> * * -S num <br> * Set the random number seed (default 1). <p> * * @param options the list of options as an array of strings * @exception Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String seed = Utils.getOption('S', options); if (seed.length() != 0) { setSeed(Integer.parseInt(seed)); } else { setSeed(1); } super.setOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 2]; int current = 0; options[current++] = "-S"; options[current++] = "" + getSeed(); System.arraycopy(superOptions, 0, options, current, superOptions.length); return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String seedTipText() { return "The random number seed to be used."; } /** * Set the seed for random number generation. * * @param seed the seed */ public void setSeed(int seed) { m_Seed = seed; } /** * Gets the seed for the random number generations * * @return the seed for the random number generation */ public int getSeed() { return m_Seed; } }
3,864
25.840278
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/RandomizableParallelIteratedSingleClassifierEnhancer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RandomizableParallelIteratedSingleClassifierEnhancer.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; import java.util.Enumeration; import java.util.Vector; import weka.core.Option; import weka.core.Randomizable; import weka.core.Utils; /** * Abstract utility class for handling settings common to randomizable * meta classifiers that build an ensemble in parallel from a single base * learner. * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: 8034 $ */ public abstract class RandomizableParallelIteratedSingleClassifierEnhancer extends ParallelIteratedSingleClassifierEnhancer implements Randomizable { /** * For serialization */ private static final long serialVersionUID = 1298141000373615374L; /** The random number seed. */ protected int m_Seed = 1; /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(2); newVector.addElement(new Option( "\tRandom number seed.\n" + "\t(default 1)", "S", 1, "-S <num>")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. 
Valid options are:<p> * * -W classname <br> * Specify the full class name of the base learner.<p> * * -I num <br> * Set the number of iterations (default 10). <p> * * -S num <br> * Set the random number seed (default 1). <p> * * Options after -- are passed to the designated classifier.<p> * * @param options the list of options as an array of strings * @exception Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String seed = Utils.getOption('S', options); if (seed.length() != 0) { setSeed(Integer.parseInt(seed)); } else { setSeed(1); } super.setOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 2]; int current = 0; options[current++] = "-S"; options[current++] = "" + getSeed(); System.arraycopy(superOptions, 0, options, current, superOptions.length); return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String seedTipText() { return "The random number seed to be used."; } /** * Set the seed for random number generation. * * @param seed the seed */ public void setSeed(int seed) { m_Seed = seed; } /** * Gets the seed for the random number generations * * @return the seed for the random number generation */ public int getSeed() { return m_Seed; } }
3,914
25.1
78
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/RandomizableParallelMultipleClassifiersCombiner.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RandomizableParallelMultipleClassifiersCombiner.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; import java.util.Enumeration; import java.util.Vector; import weka.core.Option; import weka.core.Randomizable; import weka.core.Utils; /** * Abstract utility class for handling settings common to * meta classifiers that build an ensemble in parallel using multiple * classifiers based on a given random number seed. * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: 8034 $ */ public abstract class RandomizableParallelMultipleClassifiersCombiner extends ParallelMultipleClassifiersCombiner implements Randomizable { /** For serialization */ private static final long serialVersionUID = 8274061943448676943L; /** The random number seed. */ protected int m_Seed = 1; /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(2); newVector.addElement(new Option( "\tRandom number seed.\n" + "\t(default 1)", "S", 1, "-S <num>")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. 
Valid options are:<p> * * -B classifierstring <br> * Classifierstring should contain the full class name of a scheme * included for selection followed by options to the classifier * (required, option should be used once for each classifier).<p> * * -S num <br> * Set the random number seed (default 1). <p> * * @param options the list of options as an array of strings * @exception Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String seed = Utils.getOption('S', options); if (seed.length() != 0) { setSeed(Integer.parseInt(seed)); } else { setSeed(1); } super.setOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 2]; int current = 0; options[current++] = "-S"; options[current++] = "" + getSeed(); System.arraycopy(superOptions, 0, options, current, superOptions.length); return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String seedTipText() { return "The random number seed to be used."; } /** * Set the seed for random number generation. * * @param seed the seed */ public void setSeed(int seed) { m_Seed = seed; } /** * Gets the seed for the random number generations * * @return the seed for the random number generation */ public int getSeed() { return m_Seed; } }
3,921
26.236111
77
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/RandomizableSingleClassifierEnhancer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RandomizableSingleClassifierEnhancer.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; import java.util.Enumeration; import java.util.Vector; import weka.core.Option; import weka.core.Randomizable; import weka.core.Utils; /** * Abstract utility class for handling settings common to randomizable * meta classifiers that build an ensemble from a single base learner. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public abstract class RandomizableSingleClassifierEnhancer extends SingleClassifierEnhancer implements Randomizable { /** for serialization */ private static final long serialVersionUID = 558286687096157160L; /** The random number seed. */ protected int m_Seed = 1; /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(2); newVector.addElement(new Option( "\tRandom number seed.\n" + "\t(default 1)", "S", 1, "-S <num>")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. 
Valid options are:<p> * * -W classname <br> * Specify the full class name of the base learner.<p> * * -I num <br> * Set the number of iterations (default 10). <p> * * -S num <br> * Set the random number seed (default 1). <p> * * Options after -- are passed to the designated classifier.<p> * * @param options the list of options as an array of strings * @exception Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String seed = Utils.getOption('S', options); if (seed.length() != 0) { setSeed(Integer.parseInt(seed)); } else { setSeed(1); } super.setOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + 2]; int current = 0; options[current++] = "-S"; options[current++] = "" + getSeed(); System.arraycopy(superOptions, 0, options, current, superOptions.length); return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String seedTipText() { return "The random number seed to be used."; } /** * Set the seed for random number generation. * * @param seed the seed */ public void setSeed(int seed) { m_Seed = seed; } /** * Gets the seed for the random number generations * * @return the seed for the random number generation */ public int getSeed() { return m_Seed; } }
3,808
25.089041
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/SingleClassifierEnhancer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SingleClassifierEnhancer.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.rules.ZeroR; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Utils; /** * Abstract utility class for handling settings common to meta * classifiers that use a single base learner. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public abstract class SingleClassifierEnhancer extends AbstractClassifier { /** for serialization */ private static final long serialVersionUID = -3665885256363525164L; /** The base classifier to use */ protected Classifier m_Classifier = new ZeroR(); /** * String describing default classifier. */ protected String defaultClassifierString() { return "weka.classifiers.rules.ZeroR"; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector newVector = new Vector(3); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } newVector.addElement(new Option( "\tFull name of base classifier.\n" + "\t(default: " + defaultClassifierString() +")", "W", 1, "-W")); newVector.addElement(new Option( "", "", 0, "\nOptions specific to classifier " + m_Classifier.getClass().getName() + ":")); enu = ((OptionHandler)m_Classifier).listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. Valid options are:<p> * * -W classname <br> * Specify the full class name of the base learner.<p> * * Options after -- are passed to the designated classifier.<p> * * @param options the list of options as an array of strings * @exception Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { super.setOptions(options); String classifierName = Utils.getOption('W', options); if (classifierName.length() > 0) { // This is just to set the classifier in case the option // parsing fails. setClassifier(AbstractClassifier.forName(classifierName, null)); setClassifier(AbstractClassifier.forName(classifierName, Utils.partitionOptions(options))); } else { // This is just to set the classifier in case the option // parsing fails. setClassifier(AbstractClassifier.forName(defaultClassifierString(), null)); setClassifier(AbstractClassifier.forName(defaultClassifierString(), Utils.partitionOptions(options))); } } /** * Gets the current settings of the Classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] classifierOptions = ((OptionHandler)m_Classifier).getOptions(); int extraOptionsLength = classifierOptions.length; if (extraOptionsLength > 0) { extraOptionsLength++; // for the double hyphen } String [] superOptions = super.getOptions(); String [] options = new String [superOptions.length + extraOptionsLength + 2]; int current = 0; options[current++] = "-W"; options[current++] = getClassifier().getClass().getName(); System.arraycopy(superOptions, 0, options, current, superOptions.length); current += superOptions.length; if (classifierOptions.length > 0) { options[current++] = "--"; System.arraycopy(classifierOptions, 0, options, current, classifierOptions.length); } return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String classifierTipText() { return "The base classifier to be used."; } /** * Returns default capabilities of the base classifier. * * @return the capabilities of the base classifier */ public Capabilities getCapabilities() { Capabilities result; if (getClassifier() != null) { result = getClassifier().getCapabilities(); } else { result = new Capabilities(this); result.disableAll(); } // set dependencies for (Capability cap: Capability.values()) result.enableDependency(cap); result.setOwner(this); return result; } /** * Set the base learner. * * @param newClassifier the classifier to use. */ public void setClassifier(Classifier newClassifier) { m_Classifier = newClassifier; } /** * Get the classifier used as the base learner. 
* * @return the classifier used as the classifier */ public Classifier getClassifier() { return m_Classifier; } /** * Gets the classifier specification string, which contains the class name of * the classifier and any options to the classifier * * @return the classifier string */ protected String getClassifierSpec() { Classifier c = getClassifier(); return c.getClass().getName() + " " + Utils.joinOptions(((OptionHandler)c).getOptions()); } }
6,182
26.851351
81
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/Sourcable.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Sourcable.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; /** * Interface for classifiers that can be converted to Java source. * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public interface Sourcable { /** * Returns a string that describes the classifier as source. The * classifier will be contained in a class with the given name (there may * be auxiliary classes), * and will contain a method with the signature: * <pre><code> * public static double classify(Object [] i); * </code></pre> * where the array <code>i</code> contains elements that are either * Double, String, with missing values represented as null. The generated * code is public domain and comes with no warranty. * * @param className the name that should be given to the source class. * @return the object source described by a string * @throws Exception if the source can't be computed */ String toSource(String className) throws Exception; }
1,762
29.396552
75
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/UpdateableClassifier.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * UpdateableClassifier.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers; import weka.core.Instance; /** * Interface to incremental classification models that can learn using * one instance at a time. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public interface UpdateableClassifier { /** * Updates a classifier using the given instance. * * @param instance the instance to included * @exception Exception if instance could not be incorporated * successfully */ void updateClassifier(Instance instance) throws Exception; }
1,334
28.666667
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/AODE.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * AODE.java * Copyright (C) 2003 * Algorithm developed by: Geoff Webb * Code written by: Janice Boughton & Zhihai Wang */ package weka.classifiers.bayes; import weka.classifiers.Classifier; import weka.classifiers.UpdateableClassifier; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * AODE achieves highly accurate classification by averaging over all of a small space of alternative naive-Bayes-like models that have weaker (and hence less detrimental) independence assumptions than naive Bayes. The resulting algorithm is computationally efficient while delivering highly accurate classification on many learning tasks.<br/> * <br/> * For more information, see<br/> * <br/> * G. Webb, J. Boughton, Z. Wang (2005). 
Not So Naive Bayes: Aggregating One-Dependence Estimators. Machine Learning. 58(1):5-24.<br/> * <br/> * Further papers are available at<br/> * http://www.csse.monash.edu.au/~webb/.<br/> * <br/> * Can use an m-estimate for smoothing base probability estimates in place of the Laplace correction (via option -M).<br/> * Default frequency limit set to 1. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;article{Webb2005, * author = {G. Webb and J. Boughton and Z. Wang}, * journal = {Machine Learning}, * number = {1}, * pages = {5-24}, * title = {Not So Naive Bayes: Aggregating One-Dependence Estimators}, * volume = {58}, * year = {2005} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Output debugging information * </pre> * * <pre> -F &lt;int&gt; * Impose a frequency limit for superParents * (default is 1)</pre> * * <pre> -M * Use m-estimate instead of laplace correction * </pre> * * <pre> -W &lt;int&gt; * Specify a weight to use with m-estimate * (default is 1)</pre> * <!-- options-end --> * * @author Janice Boughton (jrbought@csse.monash.edu.au) * @author Zhihai Wang (zhw@csse.monash.edu.au) * @version $Revision: 5516 $ */ public class AODE extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, UpdateableClassifier, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 9197439980415113523L; /** * 3D array (m_NumClasses * m_TotalAttValues * m_TotalAttValues) * of attribute counts, i.e., the number of times an attribute value occurs * in conjunction with another attribute value and a class value. 
*/ private double [][][] m_CondiCounts; /** The number of times each class value occurs in the dataset */ private double [] m_ClassCounts; /** The sums of attribute-class counts * -- if there are no missing values for att, then * m_SumForCounts[classVal][att] will be the same as * m_ClassCounts[classVal] */ private double [][] m_SumForCounts; /** The number of classes */ private int m_NumClasses; /** The number of attributes in dataset, including class */ private int m_NumAttributes; /** The number of instances in the dataset */ private int m_NumInstances; /** The index of the class attribute */ private int m_ClassIndex; /** The dataset */ private Instances m_Instances; /** * The total number of values (including an extra for each attribute's * missing value, which are included in m_CondiCounts) for all attributes * (not including class). E.g., for three atts each with two possible values, * m_TotalAttValues would be 9 (6 values + 3 missing). * This variable is used when allocating space for m_CondiCounts matrix. */ private int m_TotalAttValues; /** The starting index (in the m_CondiCounts matrix) of the values for each * attribute */ private int [] m_StartAttIndex; /** The number of values for each attribute */ private int [] m_NumAttValues; /** The frequency of each attribute value for the dataset */ private double [] m_Frequencies; /** The number of valid class values observed in dataset * -- with no missing classes, this number is the same as m_NumInstances. 
*/ private double m_SumInstances; /** An att's frequency must be this value or more to be a superParent */ private int m_Limit = 1; /** If true, outputs debugging info */ private boolean m_Debug = false; /** flag for using m-estimates */ private boolean m_MEstimates = false; /** value for m in m-estimate */ private int m_Weight = 1; /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "AODE achieves highly accurate classification by averaging over " +"all of a small space of alternative naive-Bayes-like models that have " +"weaker (and hence less detrimental) independence assumptions than " +"naive Bayes. The resulting algorithm is computationally efficient " +"while delivering highly accurate classification on many learning " +"tasks.\n\n" +"For more information, see\n\n" + getTechnicalInformation().toString() + "\n\n" +"Further papers are available at\n" +" http://www.csse.monash.edu.au/~webb/.\n\n" + "Can use an m-estimate for smoothing base probability estimates " + "in place of the Laplace correction (via option -M).\n" + "Default frequency limit set to 1."; } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "G. Webb and J. Boughton and Z. 
Wang"); result.setValue(Field.YEAR, "2005"); result.setValue(Field.TITLE, "Not So Naive Bayes: Aggregating One-Dependence Estimators"); result.setValue(Field.JOURNAL, "Machine Learning"); result.setValue(Field.VOLUME, "58"); result.setValue(Field.NUMBER, "1"); result.setValue(Field.PAGES, "5-24"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @throws Exception if the classifier has not been generated * successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(instances); // remove instances with missing class m_Instances = new Instances(instances); m_Instances.deleteWithMissingClass(); // reset variable for this fold m_SumInstances = 0; m_ClassIndex = instances.classIndex(); m_NumInstances = m_Instances.numInstances(); m_NumAttributes = m_Instances.numAttributes(); m_NumClasses = m_Instances.numClasses(); // allocate space for attribute reference arrays m_StartAttIndex = new int[m_NumAttributes]; m_NumAttValues = new int[m_NumAttributes]; m_TotalAttValues = 0; for(int i = 0; i < m_NumAttributes; i++) { if(i != m_ClassIndex) { m_StartAttIndex[i] = m_TotalAttValues; m_NumAttValues[i] = m_Instances.attribute(i).numValues(); m_TotalAttValues += m_NumAttValues[i] + 1; // + 1 so room for missing value count } else { // m_StartAttIndex[i] = -1; // class isn't included m_NumAttValues[i] = m_NumClasses; } } // allocate space for counts and frequencies m_CondiCounts = new double[m_NumClasses][m_TotalAttValues][m_TotalAttValues]; m_ClassCounts = new double[m_NumClasses]; m_SumForCounts = new double[m_NumClasses][m_NumAttributes]; m_Frequencies = new double[m_TotalAttValues]; // calculate the counts for(int k = 0; k < m_NumInstances; k++) { addToCounts((Instance)m_Instances.instance(k)); } // free up some space m_Instances = new Instances(m_Instances, 0); } /** * Updates the classifier with the given instance. * * @param instance the new training instance to include in the model */ public void updateClassifier(Instance instance) { this.addToCounts(instance); } /** * Puts an instance's values into m_CondiCounts, m_ClassCounts and * m_SumInstances. 
* * @param instance the instance whose values are to be put into the counts * variables */ private void addToCounts(Instance instance) { double [] countsPointer; if(instance.classIsMissing()) return; // ignore instances with missing class int classVal = (int)instance.classValue(); double weight = instance.weight(); m_ClassCounts[classVal] += weight; m_SumInstances += weight; // store instance's att val indexes in an array, b/c accessing it // in loop(s) is more efficient int [] attIndex = new int[m_NumAttributes]; for(int i = 0; i < m_NumAttributes; i++) { if(i == m_ClassIndex) attIndex[i] = -1; // we don't use the class attribute in counts else { if(instance.isMissing(i)) attIndex[i] = m_StartAttIndex[i] + m_NumAttValues[i]; else attIndex[i] = m_StartAttIndex[i] + (int)instance.value(i); } } for(int Att1 = 0; Att1 < m_NumAttributes; Att1++) { if(attIndex[Att1] == -1) continue; // avoid pointless looping as Att1 is currently the class attribute m_Frequencies[attIndex[Att1]] += weight; // if this is a missing value, we don't want to increase sumforcounts if(!instance.isMissing(Att1)) m_SumForCounts[classVal][Att1] += weight; // save time by referencing this now, rather than do it repeatedly in the loop countsPointer = m_CondiCounts[classVal][attIndex[Att1]]; for(int Att2 = 0; Att2 < m_NumAttributes; Att2++) { if(attIndex[Att2] != -1) { countsPointer[attIndex[Att2]] += weight; } } } } /** * Calculates the class membership probabilities for the given test * instance. 
* * @param instance the instance to be classified * @return predicted class probability distribution * @throws Exception if there is a problem generating the prediction */ public double [] distributionForInstance(Instance instance) throws Exception { // accumulates posterior probabilities for each class double [] probs = new double[m_NumClasses]; // index for parent attribute value, and a count of parents used int pIndex, parentCount; // pointers for efficiency // for current class, point to joint frequency for any pair of att values double [][] countsForClass; // for current class & parent, point to joint frequency for any att value double [] countsForClassParent; // store instance's att indexes in an int array, so accessing them // is more efficient in loop(s). int [] attIndex = new int[m_NumAttributes]; for(int att = 0; att < m_NumAttributes; att++) { if(instance.isMissing(att) || att == m_ClassIndex) attIndex[att] = -1; // can't use class or missing values in calculations else attIndex[att] = m_StartAttIndex[att] + (int)instance.value(att); } // calculate probabilities for each possible class value for(int classVal = 0; classVal < m_NumClasses; classVal++) { probs[classVal] = 0; double spodeP = 0; // P(X,y) for current parent and class parentCount = 0; countsForClass = m_CondiCounts[classVal]; // each attribute has a turn of being the parent for(int parent = 0; parent < m_NumAttributes; parent++) { if(attIndex[parent] == -1) continue; // skip class attribute or missing value // determine correct index for the parent in m_CondiCounts matrix pIndex = attIndex[parent]; // check that the att value has a frequency of m_Limit or greater if(m_Frequencies[pIndex] < m_Limit) continue; countsForClassParent = countsForClass[pIndex]; // block the parent from being its own child attIndex[parent] = -1; parentCount++; // joint frequency of class and parent double classparentfreq = countsForClassParent[pIndex]; // find the number of missing values for parent's attribute double 
missing4ParentAtt = m_Frequencies[m_StartAttIndex[parent] + m_NumAttValues[parent]]; // calculate the prior probability -- P(parent & classVal) if (!m_MEstimates) { spodeP = (classparentfreq + 1.0) / ((m_SumInstances - missing4ParentAtt) + m_NumClasses * m_NumAttValues[parent]); } else { spodeP = (classparentfreq + ((double)m_Weight / (double)(m_NumClasses * m_NumAttValues[parent]))) / ((m_SumInstances - missing4ParentAtt) + m_Weight); } // take into account the value of each attribute for(int att = 0; att < m_NumAttributes; att++) { if(attIndex[att] == -1) continue; double missingForParentandChildAtt = countsForClassParent[m_StartAttIndex[att] + m_NumAttValues[att]]; if(!m_MEstimates) { spodeP *= (countsForClassParent[attIndex[att]] + 1.0) / ((classparentfreq - missingForParentandChildAtt) + m_NumAttValues[att]); } else { spodeP *= (countsForClassParent[attIndex[att]] + ((double)m_Weight / (double)m_NumAttValues[att])) / ((classparentfreq - missingForParentandChildAtt) + m_Weight); } } // add this probability to the overall probability probs[classVal] += spodeP; // unblock the parent attIndex[parent] = pIndex; } // check that at least one att was a parent if(parentCount < 1) { // do plain naive bayes conditional prob probs[classVal] = NBconditionalProb(instance, classVal); } else { // divide by number of parent atts to get the mean probs[classVal] /= (double)(parentCount); } } Utils.normalize(probs); return probs; } /** * Calculates the probability of the specified class for the given test * instance, using naive Bayes. 
* * @param instance the instance to be classified * @param classVal the class for which to calculate the probability * @return predicted class probability */ public double NBconditionalProb(Instance instance, int classVal) { double prob; double [][] pointer; // calculate the prior probability if(!m_MEstimates) { prob = (m_ClassCounts[classVal] + 1.0) / (m_SumInstances + m_NumClasses); } else { prob = (m_ClassCounts[classVal] + ((double)m_Weight / (double)m_NumClasses)) / (m_SumInstances + m_Weight); } pointer = m_CondiCounts[classVal]; // consider effect of each att value for(int att = 0; att < m_NumAttributes; att++) { if(att == m_ClassIndex || instance.isMissing(att)) continue; // determine correct index for att in m_CondiCounts int aIndex = m_StartAttIndex[att] + (int)instance.value(att); if(!m_MEstimates) { prob *= (double)(pointer[aIndex][aIndex] + 1.0) / ((double)m_SumForCounts[classVal][att] + m_NumAttValues[att]); } else { prob *= (double)(pointer[aIndex][aIndex] + ((double)m_Weight / (double)m_NumAttValues[att])) / (double)(m_SumForCounts[classVal][att] + m_Weight); } } return prob; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Vector newVector = new Vector(4); newVector.addElement( new Option("\tOutput debugging information\n", "D", 0,"-D")); newVector.addElement( new Option("\tImpose a frequency limit for superParents\n" + "\t(default is 1)", "F", 1,"-F <int>")); newVector.addElement( new Option("\tUse m-estimate instead of laplace correction\n", "M", 0,"-M")); newVector.addElement( new Option("\tSpecify a weight to use with m-estimate\n" + "\t(default is 1)", "W", 1,"-W <int>")); return newVector.elements(); } /** * Parses a given list of options. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Output debugging information * </pre> * * <pre> -F &lt;int&gt; * Impose a frequency limit for superParents * (default is 1)</pre> * * <pre> -M * Use m-estimate instead of laplace correction * </pre> * * <pre> -W &lt;int&gt; * Specify a weight to use with m-estimate * (default is 1)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { m_Debug = Utils.getFlag('D', options); String Freq = Utils.getOption('F', options); if (Freq.length() != 0) m_Limit = Integer.parseInt(Freq); else m_Limit = 1; m_MEstimates = Utils.getFlag('M', options); String weight = Utils.getOption('W', options); if (weight.length() != 0) { if (!m_MEstimates) throw new Exception("Can't use Laplace AND m-estimate weight. Choose one."); m_Weight = Integer.parseInt(weight); } else { if (m_MEstimates) m_Weight = 1; } Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { Vector result = new Vector(); if (m_Debug) result.add("-D"); result.add("-F"); result.add("" + m_Limit); if (m_MEstimates) { result.add("-M"); result.add("-W"); result.add("" + m_Weight); } return (String[]) result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String weightTipText() { return "Set the weight for m-estimate."; } /** * Sets the weight for m-estimate * * @param w the weight */ public void setWeight(int w) { if (!getUseMEstimates()) { System.out.println( "Weight is only used in conjunction with m-estimate - ignored!"); } else { if (w > 0) m_Weight = w; else System.out.println("Weight must be greater than 0!"); } } /** * Gets the weight used in m-estimate * * @return the frequency limit */ public int getWeight() { return m_Weight; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String useMEstimatesTipText() { return "Use m-estimate instead of laplace correction."; } /** * Gets if m-estimaces is being used. * * @return Value of m_MEstimates. */ public boolean getUseMEstimates() { return m_MEstimates; } /** * Sets if m-estimates is to be used. * * @param value Value to assign to m_MEstimates. */ public void setUseMEstimates(boolean value) { m_MEstimates = value; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String frequencyLimitTipText() { return "Attributes with a frequency in the train set below " + "this value aren't used as parents."; } /** * Sets the frequency limit * * @param f the frequency limit */ public void setFrequencyLimit(int f) { m_Limit = f; } /** * Gets the frequency limit. 
* * @return the frequency limit */ public int getFrequencyLimit() { return m_Limit; } /** * Returns a description of the classifier. * * @return a description of the classifier as a string. */ public String toString() { StringBuffer text = new StringBuffer(); text.append("The AODE Classifier"); if (m_Instances == null) { text.append(": No model built yet."); } else { try { for (int i = 0; i < m_NumClasses; i++) { // print to string, the prior probabilities of class values text.append("\nClass " + m_Instances.classAttribute().value(i) + ": Prior probability = " + Utils. doubleToString(((m_ClassCounts[i] + 1) /(m_SumInstances + m_NumClasses)), 4, 2)+"\n\n"); } text.append("Dataset: " + m_Instances.relationName() + "\n" + "Instances: " + m_NumInstances + "\n" + "Attributes: " + m_NumAttributes + "\n" + "Frequency limit for superParents: " + m_Limit + "\n"); text.append("Correction: "); if (!m_MEstimates) text.append("laplace\n"); else text.append("m-estimate (m=" + m_Weight + ")\n"); } catch (Exception ex) { text.append(ex.getMessage()); } } return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5516 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new AODE(), argv); } }
24,353
30.103448
345
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/AODEsr.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * AODEsr.java * Copyright (C) 2007 * Algorithm developed by: Fei ZHENG and Geoff Webb * Code written by: Fei ZHENG and Janice Boughton */ package weka.classifiers.bayes; import weka.classifiers.Classifier; import weka.classifiers.UpdateableClassifier; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** * <!-- globalinfo-start --> * AODEsr augments AODE with Subsumption Resolution.AODEsr detects specializations between two attribute values at classification time and deletes the generalization attribute value.<br/> * For more information, see:<br/> * Fei Zheng, Geoffrey I. Webb: Efficient Lazy Elimination for Averaged-One Dependence Estimators. In: Proceedings of the Twenty-third International Conference on Machine Learning (ICML 2006), 1113-1120, 2006. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Zheng2006, * author = {Fei Zheng and Geoffrey I. Webb}, * booktitle = {Proceedings of the Twenty-third International Conference on Machine Learning (ICML 2006)}, * pages = {1113-1120}, * publisher = {ACM Press}, * title = {Efficient Lazy Elimination for Averaged-One Dependence Estimators}, * year = {2006}, * ISBN = {1-59593-383-2} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Output debugging information * </pre> * * <pre> -C * Impose a critcal value for specialization-generalization relationship * (default is 50)</pre> * * <pre> -F * Impose a frequency limit for superParents * (default is 1)</pre> * * <pre> -L * Using Laplace estimation * (default is m-esimation (m=1))</pre> * * <pre> -M * Weight value for m-estimation * (default is 1.0)</pre> * <!-- options-end --> * * @author Fei Zheng * @author Janice Boughton * @version $Revision: 5516 $ */ public class AODEsr extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, UpdateableClassifier, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 5602143019183068848L; /** * 3D array (m_NumClasses * m_TotalAttValues * m_TotalAttValues) * of attribute counts, i.e. the number of times an attribute value occurs * in conjunction with another attribute value and a class value. */ private double [][][] m_CondiCounts; /** * 2D array (m_TotalAttValues * m_TotalAttValues) of attributes counts. * similar to m_CondiCounts, but ignoring class value. 
*/ private double [][] m_CondiCountsNoClass; /** The number of times each class value occurs in the dataset */ private double [] m_ClassCounts; /** The sums of attribute-class counts * -- if there are no missing values for att, then * m_SumForCounts[classVal][att] will be the same as * m_ClassCounts[classVal] */ private double [][] m_SumForCounts; /** The number of classes */ private int m_NumClasses; /** The number of attributes in dataset, including class */ private int m_NumAttributes; /** The number of instances in the dataset */ private int m_NumInstances; /** The index of the class attribute */ private int m_ClassIndex; /** The dataset */ private Instances m_Instances; /** * The total number of values (including an extra for each attribute's * missing value, which are included in m_CondiCounts) for all attributes * (not including class). Eg. for three atts each with two possible values, * m_TotalAttValues would be 9 (6 values + 3 missing). * This variable is used when allocating space for m_CondiCounts matrix. */ private int m_TotalAttValues; /** The starting index (in the m_CondiCounts matrix) of the values for each attribute */ private int [] m_StartAttIndex; /** The number of values for each attribute */ private int [] m_NumAttValues; /** The frequency of each attribute value for the dataset */ private double [] m_Frequencies; /** The number of valid class values observed in dataset * -- with no missing classes, this number is the same as m_NumInstances. 
*/ private double m_SumInstances; /** An att's frequency must be this value or more to be a superParent */ private int m_Limit = 1; /** If true, outputs debugging info */ private boolean m_Debug = false; /** m value for m-estimation */ protected double m_MWeight = 1.0; /** Using LapLace estimation or not*/ private boolean m_Laplace = false; /** the critical value for the specialization-generalization */ private int m_Critical = 50; /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "AODEsr augments AODE with Subsumption Resolution." +"AODEsr detects specializations between two attribute " +"values at classification time and deletes the generalization " +"attribute value.\n" +"For more information, see:\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Fei Zheng and Geoffrey I. Webb"); result.setValue(Field.YEAR, "2006"); result.setValue(Field.TITLE, "Efficient Lazy Elimination for Averaged-One Dependence Estimators"); result.setValue(Field.PAGES, "1113-1120"); result.setValue(Field.BOOKTITLE, "Proceedings of the Twenty-third International Conference on Machine Learning (ICML 2006)"); result.setValue(Field.PUBLISHER, "ACM Press"); result.setValue(Field.ISBN, "1-59593-383-2"); return result; } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @throws Exception if the classifier has not been generated * successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(instances); // remove instances with missing class m_Instances = new Instances(instances); m_Instances.deleteWithMissingClass(); // reset variable for this fold m_SumInstances = 0; m_ClassIndex = instances.classIndex(); m_NumInstances = m_Instances.numInstances(); m_NumAttributes = instances.numAttributes(); m_NumClasses = instances.numClasses(); // allocate space for attribute reference arrays m_StartAttIndex = new int[m_NumAttributes]; m_NumAttValues = new int[m_NumAttributes]; m_TotalAttValues = 0; for(int i = 0; i < m_NumAttributes; i++) { if(i != m_ClassIndex) { m_StartAttIndex[i] = m_TotalAttValues; m_NumAttValues[i] = m_Instances.attribute(i).numValues(); m_TotalAttValues += m_NumAttValues[i] + 1; // + 1 so room for missing value count } else { // m_StartAttIndex[i] = -1; // class isn't included m_NumAttValues[i] = m_NumClasses; } } // allocate space for counts and frequencies m_CondiCounts = new double[m_NumClasses][m_TotalAttValues][m_TotalAttValues]; m_ClassCounts = new double[m_NumClasses]; m_SumForCounts = new double[m_NumClasses][m_NumAttributes]; m_Frequencies = new double[m_TotalAttValues]; m_CondiCountsNoClass = new double[m_TotalAttValues][m_TotalAttValues]; // calculate the counts for(int k = 0; k < m_NumInstances; k++) { 
addToCounts((Instance)m_Instances.instance(k)); } // free up some space m_Instances = new Instances(m_Instances, 0); } /** * Updates the classifier with the given instance. * * @param instance the new training instance to include in the model * @throws Exception if the instance could not be incorporated in * the model. */ public void updateClassifier(Instance instance) { this.addToCounts(instance); } /** * Puts an instance's values into m_CondiCounts, m_ClassCounts and * m_SumInstances. * * @param instance the instance whose values are to be put into the * counts variables */ private void addToCounts(Instance instance) { double [] countsPointer; double [] countsNoClassPointer; if(instance.classIsMissing()) return; // ignore instances with missing class int classVal = (int)instance.classValue(); double weight = instance.weight(); m_ClassCounts[classVal] += weight; m_SumInstances += weight; // store instance's att val indexes in an array, b/c accessing it // in loop(s) is more efficient int [] attIndex = new int[m_NumAttributes]; for(int i = 0; i < m_NumAttributes; i++) { if(i == m_ClassIndex) attIndex[i] = -1; // we don't use the class attribute in counts else { if(instance.isMissing(i)) attIndex[i] = m_StartAttIndex[i] + m_NumAttValues[i]; else attIndex[i] = m_StartAttIndex[i] + (int)instance.value(i); } } for(int Att1 = 0; Att1 < m_NumAttributes; Att1++) { if(attIndex[Att1] == -1) continue; // avoid pointless looping as Att1 is currently the class attribute m_Frequencies[attIndex[Att1]] += weight; // if this is a missing value, we don't want to increase sumforcounts if(!instance.isMissing(Att1)) m_SumForCounts[classVal][Att1] += weight; // save time by referencing this now, rather than repeatedly in the loop countsPointer = m_CondiCounts[classVal][attIndex[Att1]]; countsNoClassPointer = m_CondiCountsNoClass[attIndex[Att1]]; for(int Att2 = 0; Att2 < m_NumAttributes; Att2++) { if(attIndex[Att2] != -1) { countsPointer[attIndex[Att2]] += weight; 
countsNoClassPointer[attIndex[Att2]] += weight; } } } } /** * Calculates the class membership probabilities for the given test * instance. * * @param instance the instance to be classified * @return predicted class probability distribution * @throws Exception if there is a problem generating the prediction */ public double [] distributionForInstance(Instance instance) throws Exception { // accumulates posterior probabilities for each class double [] probs = new double[m_NumClasses]; // index for parent attribute value, and a count of parents used int pIndex, parentCount; int [] SpecialGeneralArray = new int[m_NumAttributes]; // pointers for efficiency double [][] countsForClass; double [] countsForClassParent; double [] countsForAtti; double [] countsForAttj; // store instance's att values in an int array, so accessing them // is more efficient in loop(s). int [] attIndex = new int[m_NumAttributes]; for(int att = 0; att < m_NumAttributes; att++) { if(instance.isMissing(att) || att == m_ClassIndex) attIndex[att] = -1; // can't use class & missing vals in calculations else attIndex[att] = m_StartAttIndex[att] + (int)instance.value(att); } // -1 indicates attribute is not a generalization of any other attributes for(int i = 0; i < m_NumAttributes; i++) { SpecialGeneralArray[i] = -1; } // calculate the specialization-generalization array for(int i = 0; i < m_NumAttributes; i++){ // skip i if it's the class or is missing if(attIndex[i] == -1) continue; countsForAtti = m_CondiCountsNoClass[attIndex[i]]; for(int j = 0; j < m_NumAttributes; j++) { // skip j if it's the class, missing, is i or a generalization of i if((attIndex[j] == -1) || (i == j) || (SpecialGeneralArray[j] == i)) continue; countsForAttj = m_CondiCountsNoClass[attIndex[j]]; // check j's frequency is above critical value if(countsForAttj[attIndex[j]] > m_Critical) { // skip j if the frequency of i and j together is not equivalent // to the frequency of j alone if(countsForAttj[attIndex[j]] == 
countsForAtti[attIndex[j]]) { // if attributes i and j are both a specialization of each other // avoid deleting both by skipping j if((countsForAttj[attIndex[j]] == countsForAtti[attIndex[i]]) && (i < j)){ continue; } else { // set the specialization relationship SpecialGeneralArray[i] = j; break; // break out of j loop because a specialization has been found } } } } } // calculate probabilities for each possible class value for(int classVal = 0; classVal < m_NumClasses; classVal++) { probs[classVal] = 0; double x = 0; parentCount = 0; countsForClass = m_CondiCounts[classVal]; // each attribute has a turn of being the parent for(int parent = 0; parent < m_NumAttributes; parent++) { if(attIndex[parent] == -1) continue; // skip class attribute or missing value // determine correct index for the parent in m_CondiCounts matrix pIndex = attIndex[parent]; // check that the att value has a frequency of m_Limit or greater if(m_Frequencies[pIndex] < m_Limit) continue; // delete the generalization attributes. if(SpecialGeneralArray[parent] != -1) continue; countsForClassParent = countsForClass[pIndex]; // block the parent from being its own child attIndex[parent] = -1; parentCount++; double classparentfreq = countsForClassParent[pIndex]; // find the number of missing values for parent's attribute double missing4ParentAtt = m_Frequencies[m_StartAttIndex[parent] + m_NumAttValues[parent]]; // calculate the prior probability -- P(parent & classVal) if (m_Laplace){ x = LaplaceEstimate(classparentfreq, m_SumInstances - missing4ParentAtt, m_NumClasses * m_NumAttValues[parent]); } else { x = MEstimate(classparentfreq, m_SumInstances - missing4ParentAtt, m_NumClasses * m_NumAttValues[parent]); } // take into account the value of each attribute for(int att = 0; att < m_NumAttributes; att++) { if(attIndex[att] == -1) // skip class attribute or missing value continue; // delete the generalization attributes. 
if(SpecialGeneralArray[att] != -1) continue; double missingForParentandChildAtt = countsForClassParent[m_StartAttIndex[att] + m_NumAttValues[att]]; if (m_Laplace){ x *= LaplaceEstimate(countsForClassParent[attIndex[att]], classparentfreq - missingForParentandChildAtt, m_NumAttValues[att]); } else { x *= MEstimate(countsForClassParent[attIndex[att]], classparentfreq - missingForParentandChildAtt, m_NumAttValues[att]); } } // add this probability to the overall probability probs[classVal] += x; // unblock the parent attIndex[parent] = pIndex; } // check that at least one att was a parent if(parentCount < 1) { // do plain naive bayes conditional prob probs[classVal] = NBconditionalProb(instance, classVal); //probs[classVal] = Double.NaN; } else { // divide by number of parent atts to get the mean probs[classVal] /= (double)(parentCount); } } Utils.normalize(probs); return probs; } /** * Calculates the probability of the specified class for the given test * instance, using naive Bayes. * * @param instance the instance to be classified * @param classVal the class for which to calculate the probability * @return predicted class probability * @throws Exception if there is a problem generating the prediction */ public double NBconditionalProb(Instance instance, int classVal) throws Exception { double prob; int attIndex; double [][] pointer; // calculate the prior probability if(m_Laplace) { prob = LaplaceEstimate(m_ClassCounts[classVal],m_SumInstances,m_NumClasses); } else { prob = MEstimate(m_ClassCounts[classVal], m_SumInstances, m_NumClasses); } pointer = m_CondiCounts[classVal]; // consider effect of each att value for(int att = 0; att < m_NumAttributes; att++) { if(att == m_ClassIndex || instance.isMissing(att)) continue; // determine correct index for att in m_CondiCounts attIndex = m_StartAttIndex[att] + (int)instance.value(att); if (m_Laplace){ prob *= LaplaceEstimate((double)pointer[attIndex][attIndex], (double)m_SumForCounts[classVal][att], m_NumAttValues[att]); 
} else {
        // m-estimate smoothing for this attribute's conditional count.
        // NOTE(review): the first argument indexes pointer[attIndex][attIndex]
        // (the same index twice) — this looks suspicious; confirm against the
        // full method (its opening is not visible in this chunk) whether a
        // value index was intended for the second subscript.
        prob *= MEstimate((double)pointer[attIndex][attIndex],
                          (double)m_SumForCounts[classVal][att],
                          m_NumAttValues[att]);
      }
    }
    return prob;
  }

  /**
   * Returns the probability estimate, using m-estimate.
   * The m_MWeight pseudo-count is spread uniformly over the possible values.
   *
   * @param frequency frequency of value of interest
   * @param total count of all values
   * @param numValues number of different values
   * @return the probability estimate
   */
  public double MEstimate(double frequency, double total,
                          double numValues) {
    return (frequency + m_MWeight / numValues) / (total + m_MWeight);
  }

  /**
   * Returns the probability estimate, using laplace (add-one) correction.
   *
   * @param frequency frequency of value of interest
   * @param total count of all values
   * @param numValues number of different values
   * @return the probability estimate
   */
  public double LaplaceEstimate(double frequency, double total,
                                double numValues) {
    return (frequency + 1.0) / (total + numValues);
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * NOTE(review): "critcal" and "m-esimation" below are typos in the
   * user-visible help strings; left untouched in this comment-only pass
   * because they are runtime strings.
   *
   * @return an enumeration of all the available options
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(5);
    newVector.addElement(
       new Option("\tOutput debugging information\n",
                  "D", 0,"-D"));
    newVector.addElement(
       new Option("\tImpose a critcal value for specialization-generalization relationship\n"
                  + "\t(default is 50)", "C", 1,"-C"));
    newVector.addElement(
       new Option("\tImpose a frequency limit for superParents\n"
                  + "\t(default is 1)", "F", 2,"-F"));
    newVector.addElement(
       new Option("\tUsing Laplace estimation\n"
                  + "\t(default is m-esimation (m=1))", "L", 3,"-L"));
    newVector.addElement(
       new Option("\tWeight value for m-estimation\n"
                  + "\t(default is 1.0)", "M", 4,"-M"));
    return newVector.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -D
   *  Output debugging information
   * </pre>
   *
   * <pre> -C
   *  Impose a critcal value for specialization-generalization relationship
   *  (default is 50)</pre>
   *
   * <pre> -F
   *  Impose a frequency limit for superParents
   *  (default is 1)</pre>
   *
   * <pre> -L
   *  Using Laplace estimation
   *  (default is m-esimation (m=1))</pre>
   *
   * <pre> -M
   *  Weight value for m-estimation
   *  (default is 1.0)</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    m_Debug = Utils.getFlag('D', options);

    // -C: critical value for the specialization-generalization test
    String Critical = Utils.getOption('C', options);
    if(Critical.length() != 0)
      m_Critical = Integer.parseInt(Critical);
    else
      m_Critical = 50;

    // -F: minimum training-set frequency required of a super-parent
    String Freq = Utils.getOption('F', options);
    if(Freq.length() != 0)
      m_Limit = Integer.parseInt(Freq);
    else
      m_Limit = 1;

    m_Laplace = Utils.getFlag('L', options);

    // -M: m-estimate weight; rejected when -L (Laplace) is also given
    String MWeight = Utils.getOption('M', options);
    if(MWeight.length() != 0) {
      if(m_Laplace)
        throw new Exception("weight for m-estimate is pointless if using laplace estimation!");
      m_MWeight = Double.parseDouble(MWeight);
    } else
      m_MWeight = 1.0;

    Utils.checkForRemainingOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {
    Vector result = new Vector();

    if (m_Debug)
      result.add("-D");

    result.add("-F");
    result.add("" + m_Limit);

    // -L and -M are mutually exclusive (enforced in setOptions)
    if (m_Laplace) {
      result.add("-L");
    } else {
      result.add("-M");
      result.add("" + m_MWeight);
    }

    result.add("-C");
    result.add("" + m_Critical);

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String mestWeightTipText() {
    return "Set the weight for m-estimate.";
  }

  /**
   * Sets the weight for m-estimate. Ignored (with a console warning) when
   * Laplace estimation is active; non-positive weights are also rejected
   * with a console warning rather than an exception.
   *
   * @param w the weight
   */
  public void setMestWeight(double w) {
    if (getUseLaplace()) {
      System.out.println(
         "Weight is only used in conjunction with m-estimate - ignored!");
    } else {
      if(w > 0)
        m_MWeight = w;
      else
        System.out.println("M-Estimate Weight must be greater than 0!");
    }
  }

  /**
   * Gets the weight used in m-estimate
   *
   * @return the weight for m-estimation
   */
  public double getMestWeight() {
    return m_MWeight;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String useLaplaceTipText() {
    return "Use Laplace correction instead of m-estimation.";
  }

  /**
   * Gets if laplace correction is being used.
   *
   * @return Value of m_Laplace.
   */
  public boolean getUseLaplace() {
    return m_Laplace;
  }

  /**
   * Sets if laplace correction is to be used.
   *
   * @param value Value to assign to m_Laplace.
   */
  public void setUseLaplace(boolean value) {
    m_Laplace = value;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String frequencyLimitTipText() {
    return "Attributes with a frequency in the train set below "
           + "this value aren't used as parents.";
  }

  /**
   * Sets the frequency limit
   *
   * @param f the frequency limit
   */
  public void setFrequencyLimit(int f) {
    m_Limit = f;
  }

  /**
   * Gets the frequency limit.
   *
   * @return the frequency limit
   */
  public int getFrequencyLimit() {
    return m_Limit;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String criticalValueTipText() {
    return "Specify critical value for specialization-generalization "
           + "relationship (default 50).";
  }

  /**
   * Sets the critical value
   *
   * @param c the critical value
   */
  public void setCriticalValue(int c) {
    m_Critical = c;
  }

  /**
   * Gets the critical value.
   *
   * @return the critical value
   */
  public int getCriticalValue() {
    return m_Critical;
  }

  /**
   * Returns a description of the classifier: per-class Laplace-smoothed
   * prior probabilities, dataset statistics and the active estimation
   * settings. NOTE(review): "specializtion" and "LapLace" below are typos
   * in the emitted text; left untouched in this comment-only pass.
   *
   * @return a description of the classifier as a string.
   */
  public String toString() {

    StringBuffer text = new StringBuffer();
    text.append("The AODEsr Classifier");
    if (m_Instances == null) {
      text.append(": No model built yet.");
    } else {
      try {
        for (int i = 0; i < m_NumClasses; i++) {
          // print to string, the prior probabilities of class values
          text.append("\nClass " + m_Instances.classAttribute().value(i) +
                      ": Prior probability = " + Utils.
                      doubleToString(((m_ClassCounts[i] + 1)
                      /(m_SumInstances + m_NumClasses)), 4, 2)+"\n\n");
        }
        text.append("Dataset: " + m_Instances.relationName() + "\n"
                    + "Instances: " + m_NumInstances + "\n"
                    + "Attributes: " + m_NumAttributes + "\n"
                    + "Frequency limit for superParents: " + m_Limit + "\n"
                    + "Critical value for the specializtion-generalization "
                    + "relationship: " + m_Critical + "\n");
        if(m_Laplace) {
          text.append("Using LapLace estimation.");
        } else {
          text.append("Using m-estimation, m = " + m_MWeight);
        }
      } catch (Exception ex) {
        // best-effort description: surface the failure text instead of throwing
        text.append(ex.getMessage());
      }
    }

    return text.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 5516 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param argv the options
   */
  public static void main(String [] argv) {
    runClassifier(new AODEsr(), argv);
  }
}
28,221
30.18453
210
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/BayesNet.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * BayesNet.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.bayes.net.ADNode; import weka.classifiers.bayes.net.BIFReader; import weka.classifiers.bayes.net.ParentSet; import weka.classifiers.bayes.net.estimate.BayesNetEstimator; import weka.classifiers.bayes.net.estimate.DiscreteEstimatorBayes; import weka.classifiers.bayes.net.estimate.SimpleEstimator; import weka.classifiers.bayes.net.search.SearchAlgorithm; import weka.classifiers.bayes.net.search.local.K2; import weka.classifiers.bayes.net.search.local.LocalScoreSearchAlgorithm; import weka.classifiers.bayes.net.search.local.Scoreable; import weka.core.AdditionalMeasureProducer; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Drawable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.estimators.Estimator; import weka.filters.Filter; import weka.filters.supervised.attribute.Discretize; import weka.filters.unsupervised.attribute.ReplaceMissingValues; /** <!-- 
globalinfo-start --> * Bayes Network learning using various search algorithms and quality measures.<br/> * Base class for a Bayes Network classifier. Provides datastructures (network structure, conditional probability distributions, etc.) and facilities common to Bayes Network learning algorithms like K2 and B.<br/> * <br/> * For more information see:<br/> * <br/> * http://sourceforge.net/projects/weka/files/documentation/WekaManual-3-7-0.pdf/download * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Do not use ADTree data structure * </pre> * * <pre> -B &lt;BIF file&gt; * BIF file to compare with * </pre> * * <pre> -Q weka.classifiers.bayes.net.search.SearchAlgorithm * Search algorithm * </pre> * * <pre> -E weka.classifiers.bayes.net.estimate.SimpleEstimator * Estimator algorithm * </pre> * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision: 8034 $ */ public class BayesNet extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, Drawable, AdditionalMeasureProducer { /** for serialization */ static final long serialVersionUID = 746037443258775954L; /** * The parent sets. */ protected ParentSet[] m_ParentSets; /** * The attribute estimators containing CPTs. */ public Estimator[][] m_Distributions; /** filter used to quantize continuous variables, if any **/ protected Discretize m_DiscretizeFilter = null; /** attribute index of a non-nominal attribute */ int m_nNonDiscreteAttribute = -1; /** filter used to fill in missing values, if any **/ protected ReplaceMissingValues m_MissingValuesFilter = null; /** * The number of classes */ protected int m_NumClasses; /** * The dataset header for the purposes of printing out a semi-intelligible * model */ public Instances m_Instances; /** * Datastructure containing ADTree representation of the database. * This may result in more efficient access to the data. 
*/ ADNode m_ADTree; /** * Bayes network to compare the structure with. */ protected BIFReader m_otherBayesNet = null; /** * Use the experimental ADTree datastructure for calculating contingency tables */ boolean m_bUseADTree = false; /** * Search algorithm used for learning the structure of a network. */ SearchAlgorithm m_SearchAlgorithm = new K2(); /** * Search algorithm used for learning the structure of a network. */ BayesNetEstimator m_BayesNetEstimator = new SimpleEstimator(); /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @throws Exception if the classifier has not been generated * successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); // ensure we have a data set with discrete variables only and with no missing values instances = normalizeDataSet(instances); // Copy the instances m_Instances = new Instances(instances); // sanity check: need more than 1 variable in datat set m_NumClasses = instances.numClasses(); // initialize ADTree if (m_bUseADTree) { m_ADTree = ADNode.makeADTree(instances); // System.out.println("Oef, done!"); } // build the network structure initStructure(); // build the network structure buildStructure(); // build the set of CPTs estimateCPTs(); // Save space // m_Instances = new Instances(m_Instances, 0); m_ADTree = null; } // buildClassifier /** ensure that all variables are nominal and that there are no missing values * @param instances data set to check and quantize and/or fill in missing values * @return filtered instances * @throws Exception if a filter (Discretize, ReplaceMissingValues) fails */ protected Instances normalizeDataSet(Instances instances) throws Exception { m_DiscretizeFilter = null; m_MissingValuesFilter = null; boolean bHasNonNominal = false; boolean bHasMissingValues = false; Enumeration enu = instances.enumerateAttributes(); while (enu.hasMoreElements()) { Attribute attribute = (Attribute) enu.nextElement(); if (attribute.type() != Attribute.NOMINAL) { m_nNonDiscreteAttribute = attribute.index(); bHasNonNominal = true; //throw new UnsupportedAttributeTypeException("BayesNet handles nominal variables only. 
Non-nominal variable in dataset detected."); } Enumeration enum2 = instances.enumerateInstances(); while (enum2.hasMoreElements()) { if (((Instance) enum2.nextElement()).isMissing(attribute)) { bHasMissingValues = true; // throw new NoSupportForMissingValuesException("BayesNet: no missing values, please."); } } } if (bHasNonNominal) { // System.err.println("Warning: discretizing data set"); m_DiscretizeFilter = new Discretize(); m_DiscretizeFilter.setInputFormat(instances); instances = Filter.useFilter(instances, m_DiscretizeFilter); } if (bHasMissingValues) { // System.err.println("Warning: filling in missing values in data set"); m_MissingValuesFilter = new ReplaceMissingValues(); m_MissingValuesFilter.setInputFormat(instances); instances = Filter.useFilter(instances, m_MissingValuesFilter); } return instances; } // normalizeDataSet /** ensure that all variables are nominal and that there are no missing values * @param instance instance to check and quantize and/or fill in missing values * @return filtered instance * @throws Exception if a filter (Discretize, ReplaceMissingValues) fails */ protected Instance normalizeInstance(Instance instance) throws Exception { if ((m_DiscretizeFilter != null) && (instance.attribute(m_nNonDiscreteAttribute).type() != Attribute.NOMINAL)) { m_DiscretizeFilter.input(instance); instance = m_DiscretizeFilter.output(); } if (m_MissingValuesFilter != null) { m_MissingValuesFilter.input(instance); instance = m_MissingValuesFilter.output(); } else { // is there a missing value in this instance? 
// this can happen when there is no missing value in the training set for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) { if (iAttribute != instance.classIndex() && instance.isMissing(iAttribute)) { // System.err.println("Warning: Found missing value in test set, filling in values."); m_MissingValuesFilter = new ReplaceMissingValues(); m_MissingValuesFilter.setInputFormat(m_Instances); Filter.useFilter(m_Instances, m_MissingValuesFilter); m_MissingValuesFilter.input(instance); instance = m_MissingValuesFilter.output(); iAttribute = m_Instances.numAttributes(); } } } return instance; } // normalizeInstance /** * Init structure initializes the structure to an empty graph or a Naive Bayes * graph (depending on the -N flag). * * @throws Exception in case of an error */ public void initStructure() throws Exception { // initialize topological ordering // m_nOrder = new int[m_Instances.numAttributes()]; // m_nOrder[0] = m_Instances.classIndex(); int nAttribute = 0; for (int iOrder = 1; iOrder < m_Instances.numAttributes(); iOrder++) { if (nAttribute == m_Instances.classIndex()) { nAttribute++; } // m_nOrder[iOrder] = nAttribute++; } // reserve memory m_ParentSets = new ParentSet[m_Instances.numAttributes()]; for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) { m_ParentSets[iAttribute] = new ParentSet(m_Instances.numAttributes()); } } // initStructure /** * buildStructure determines the network structure/graph of the network. * The default behavior is creating a network where all nodes have the first * node as its parent (i.e., a BayesNet that behaves like a naive Bayes classifier). * This method can be overridden by derived classes to restrict the class * of network structures that are acceptable. 
 *
   * @throws Exception in case of an error
   */
  public void buildStructure() throws Exception {
    // Delegate structure learning to the configured search algorithm.
    m_SearchAlgorithm.buildStructure(this, m_Instances);
  } // buildStructure

  /**
   * estimateCPTs estimates the conditional probability tables for the Bayes
   * Net using the network structure.
   *
   * @throws Exception in case of an error
   */
  public void estimateCPTs() throws Exception {
    m_BayesNetEstimator.estimateCPTs(this);
  } // estimateCPTs

  /**
   * initializes the conditional probabilities
   *
   * @throws Exception in case of an error
   */
  public void initCPTs() throws Exception {
    m_BayesNetEstimator.initCPTs(this);
  } // initCPTs

  /**
   * Updates the classifier with the given instance.
   *
   * @param instance the new training instance to include in the model
   * @throws Exception if the instance could not be incorporated in
   * the model.
   */
  public void updateClassifier(Instance instance) throws Exception {
    // Discretize / fill in missing values first so the estimator only ever
    // sees nominal, complete instances.
    instance = normalizeInstance(instance);
    m_BayesNetEstimator.updateClassifier(this, instance);
  } // updateClassifier

  /**
   * Calculates the class membership probabilities for the given test
   * instance.
   *
   * @param instance the instance to be classified
   * @return predicted class probability distribution
   * @throws Exception if there is a problem generating the prediction
   */
  public double[] distributionForInstance(Instance instance) throws Exception {
    instance = normalizeInstance(instance);
    return m_BayesNetEstimator.distributionForInstance(this, instance);
  } // distributionForInstance

  /**
   * Calculates the counts for Dirichlet distribution for the
   * class membership probabilities for the given test instance.
 *
   * @param instance the instance to be classified
   * @return counts for Dirichlet distribution for class probability
   * @throws Exception if there is a problem generating the prediction
   */
  public double[] countsForInstance(Instance instance) throws Exception {
    double[] fCounts = new double[m_NumClasses];

    for (int iClass = 0; iClass < m_NumClasses; iClass++) {
      fCounts[iClass] = 0.0;
    }

    for (int iClass = 0; iClass < m_NumClasses; iClass++) {
      double fCount = 0;
      for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) {
        // Compute the row index into iAttribute's CPT for this parent
        // configuration, substituting the hypothesised class value iClass
        // whenever the class attribute appears among the parents.
        double iCPT = 0;

        for (int iParent = 0; iParent < m_ParentSets[iAttribute].getNrOfParents(); iParent++) {
          int nParent = m_ParentSets[iAttribute].getParent(iParent);
          if (nParent == m_Instances.classIndex()) {
            iCPT = iCPT * m_NumClasses + iClass;
          } else {
            iCPT = iCPT * m_Instances.attribute(nParent).numValues() + instance.value(nParent);
          }
        }

        // Accumulate the raw count the estimator stored for the observed
        // value (or for iClass when at the class node itself).
        if (iAttribute == m_Instances.classIndex()) {
          fCount += ((DiscreteEstimatorBayes) m_Distributions[iAttribute][(int) iCPT]).getCount(iClass);
        } else {
          fCount += ((DiscreteEstimatorBayes) m_Distributions[iAttribute][(int) iCPT]).getCount(
              instance.value(iAttribute));
        }
      }

      fCounts[iClass] += fCount;
    }
    return fCounts;
  } // countsForInstance

  /**
   * Returns an enumeration describing the available options
   *
   * @return an enumeration of all the available options
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(4);

    newVector.addElement(new Option("\tDo not use ADTree data structure\n", "D", 0, "-D"));
    newVector.addElement(new Option("\tBIF file to compare with\n", "B", 1, "-B <BIF file>"));
    newVector.addElement(new Option("\tSearch algorithm\n", "Q", 1,
        "-Q weka.classifiers.bayes.net.search.SearchAlgorithm"));
    newVector.addElement(new Option("\tEstimator algorithm\n", "E", 1,
        "-E weka.classifiers.bayes.net.estimate.SimpleEstimator"));

    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options.
<p> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Do not use ADTree data structure * </pre> * * <pre> -B &lt;BIF file&gt; * BIF file to compare with * </pre> * * <pre> -Q weka.classifiers.bayes.net.search.SearchAlgorithm * Search algorithm * </pre> * * <pre> -E weka.classifiers.bayes.net.estimate.SimpleEstimator * Estimator algorithm * </pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { m_bUseADTree = !(Utils.getFlag('D', options)); String sBIFFile = Utils.getOption('B', options); if (sBIFFile != null && !sBIFFile.equals("")) { setBIFFile(sBIFFile); } String searchAlgorithmName = Utils.getOption('Q', options); if (searchAlgorithmName.length() != 0) { setSearchAlgorithm( (SearchAlgorithm) Utils.forName( SearchAlgorithm.class, searchAlgorithmName, partitionOptions(options))); } else { setSearchAlgorithm(new K2()); } String estimatorName = Utils.getOption('E', options); if (estimatorName.length() != 0) { setEstimator( (BayesNetEstimator) Utils.forName( BayesNetEstimator.class, estimatorName, Utils.partitionOptions(options))); } else { setEstimator(new SimpleEstimator()); } Utils.checkForRemainingOptions(options); } // setOptions /** * Returns the secondary set of options (if any) contained in * the supplied options array. The secondary set is defined to * be any options after the first "--" but before the "-E". These * options are removed from the original options array. 
 *
   * @param options the input array of options
   * @return the array of secondary options
   */
  public static String [] partitionOptions(String [] options) {

    for (int i = 0; i < options.length; i++) {
      if (options[i].equals("--")) {
        // ensure it follows by a -E option
        int j = i;
        while ((j < options.length) && !(options[j].equals("-E"))) {
          j++;
        }
        /*
        if (j >= options.length) {
          return new String[0];
        }
        */

        // Blank out the "--" marker and copy everything after it, up to
        // (but excluding) a later "-E", into the result.
        // NOTE(review): this mutates the caller's array — copied entries
        // are overwritten with "" in place.
        options[i++] = "";
        String [] result = new String [options.length - i];
        j = i;
        while ((j < options.length) && !(options[j].equals("-E"))) {
          result[j - i] = options[j];
          options[j] = "";
          j++;
        }

        // Pad the remainder of the result with empty strings, not nulls.
        while(j < options.length) {
          result[j - i] = "";
          j++;
        }
        return result;
      }
    }
    return new String [0];
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] searchOptions = m_SearchAlgorithm.getOptions();
    String[] estimatorOptions = m_BayesNetEstimator.getOptions();
    // 11 = worst case of fixed tokens (-D, -B + file, -Q + name + --, -E + name + --)
    String[] options = new String[11 + searchOptions.length + estimatorOptions.length];
    int current = 0;

    if (!m_bUseADTree) {
      options[current++] = "-D";
    }

    if (m_otherBayesNet != null) {
      options[current++] = "-B";
      options[current++] = ((BIFReader) m_otherBayesNet).getFileName();
    }

    options[current++] = "-Q";
    options[current++] = "" + getSearchAlgorithm().getClass().getName();
    options[current++] = "--";
    for (int iOption = 0; iOption < searchOptions.length; iOption++) {
      options[current++] = searchOptions[iOption];
    }

    options[current++] = "-E";
    options[current++] = "" + getEstimator().getClass().getName();
    options[current++] = "--";
    for (int iOption = 0; iOption < estimatorOptions.length; iOption++) {
      options[current++] = estimatorOptions[iOption];
    }

    // Fill up rest with empty strings, not nulls!
    while (current < options.length) {
      options[current++] = "";
    }

    return options;
  } // getOptions

  /**
   * Set the SearchAlgorithm used in searching for network structures.
   * @param newSearchAlgorithm the SearchAlgorithm to use.
 */
  public void setSearchAlgorithm(SearchAlgorithm newSearchAlgorithm) {
    m_SearchAlgorithm = newSearchAlgorithm;
  }

  /**
   * Get the SearchAlgorithm used as the search algorithm
   * @return the SearchAlgorithm used as the search algorithm
   */
  public SearchAlgorithm getSearchAlgorithm() {
    return m_SearchAlgorithm;
  }

  /**
   * Set the Estimator Algorithm used in calculating the CPTs
   * @param newBayesNetEstimator the Estimator to use.
   */
  public void setEstimator(BayesNetEstimator newBayesNetEstimator) {
    m_BayesNetEstimator = newBayesNetEstimator;
  }

  /**
   * Get the BayesNetEstimator used for calculating the CPTs
   * @return the BayesNetEstimator used.
   */
  public BayesNetEstimator getEstimator() {
    return m_BayesNetEstimator;
  }

  /**
   * Set whether ADTree structure is used or not
   * @param bUseADTree true if an ADTree structure is used
   */
  public void setUseADTree(boolean bUseADTree) {
    m_bUseADTree = bUseADTree;
  }

  /**
   * Gets whether the ADTree data structure is used for counting.
   * @return whether ADTree structure is used or not
   */
  public boolean getUseADTree() {
    return m_bUseADTree;
  }

  /**
   * Set name of network in BIF file to compare with.
   *
   * NOTE(review): any failure to read/parse the BIF file is silently
   * swallowed (Throwable caught) and the comparison network is simply
   * cleared; callers get no error indication.
   *
   * @param sBIFFile the name of the BIF file
   */
  public void setBIFFile(String sBIFFile) {
    try {
      m_otherBayesNet = new BIFReader().processFile(sBIFFile);
    } catch (Throwable t) {
      m_otherBayesNet = null;
    }
  }

  /**
   * Get name of network in BIF file to compare with
   * @return BIF file name, or "" when no comparison network is loaded
   */
  public String getBIFFile() {
    if (m_otherBayesNet != null) {
      return m_otherBayesNet.getFileName();
    }
    return "";
  }

  /**
   * Returns a description of the classifier.
   *
   * @return a description of the classifier as a string.
   */
  public String toString() {
    StringBuffer text = new StringBuffer();
    text.append("Bayes Network Classifier");
    text.append("\n" + (m_bUseADTree ?
"Using " : "not using ") + "ADTree"); if (m_Instances == null) { text.append(": No model built yet."); } else { // flatten BayesNet down to text text.append("\n#attributes="); text.append(m_Instances.numAttributes()); text.append(" #classindex="); text.append(m_Instances.classIndex()); text.append("\nNetwork structure (nodes followed by parents)\n"); for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) { text.append( m_Instances.attribute(iAttribute).name() + "(" + m_Instances.attribute(iAttribute).numValues() + "): "); for (int iParent = 0; iParent < m_ParentSets[iAttribute].getNrOfParents(); iParent++) { text.append(m_Instances.attribute(m_ParentSets[iAttribute].getParent(iParent)).name() + " "); } text.append("\n"); // Description of distributions tends to be too much detail, so it is commented out here // for (int iParent = 0; iParent < m_ParentSets[iAttribute].GetCardinalityOfParents(); iParent++) { // text.append('(' + m_Distributions[iAttribute][iParent].toString() + ')'); // } // text.append("\n"); } text.append("LogScore Bayes: " + measureBayesScore() + "\n"); text.append("LogScore BDeu: " + measureBDeuScore() + "\n"); text.append("LogScore MDL: " + measureMDLScore() + "\n"); text.append("LogScore ENTROPY: " + measureEntropyScore() + "\n"); text.append("LogScore AIC: " + measureAICScore() + "\n"); if (m_otherBayesNet != null) { text.append( "Missing: " + m_otherBayesNet.missingArcs(this) + " Extra: " + m_otherBayesNet.extraArcs(this) + " Reversed: " + m_otherBayesNet.reversedArcs(this) + "\n"); text.append("Divergence: " + m_otherBayesNet.divergence(this) + "\n"); } } return text.toString(); } // toString /** * Returns the type of graph this classifier * represents. * @return Drawable.TREE */ public int graphType() { return Drawable.BayesNet; } /** * Returns a BayesNet graph in XMLBIF ver 0.3 format. 
* @return String representing this BayesNet in XMLBIF ver 0.3 * @throws Exception in case BIF generation fails */ public String graph() throws Exception { return toXMLBIF03(); } public String getBIFHeader() { StringBuffer text = new StringBuffer(); text.append("<?xml version=\"1.0\"?>\n"); text.append("<!-- DTD for the XMLBIF 0.3 format -->\n"); text.append("<!DOCTYPE BIF [\n"); text.append(" <!ELEMENT BIF ( NETWORK )*>\n"); text.append(" <!ATTLIST BIF VERSION CDATA #REQUIRED>\n"); text.append(" <!ELEMENT NETWORK ( NAME, ( PROPERTY | VARIABLE | DEFINITION )* )>\n"); text.append(" <!ELEMENT NAME (#PCDATA)>\n"); text.append(" <!ELEMENT VARIABLE ( NAME, ( OUTCOME | PROPERTY )* ) >\n"); text.append(" <!ATTLIST VARIABLE TYPE (nature|decision|utility) \"nature\">\n"); text.append(" <!ELEMENT OUTCOME (#PCDATA)>\n"); text.append(" <!ELEMENT DEFINITION ( FOR | GIVEN | TABLE | PROPERTY )* >\n"); text.append(" <!ELEMENT FOR (#PCDATA)>\n"); text.append(" <!ELEMENT GIVEN (#PCDATA)>\n"); text.append(" <!ELEMENT TABLE (#PCDATA)>\n"); text.append(" <!ELEMENT PROPERTY (#PCDATA)>\n"); text.append("]>\n"); return text.toString(); } // getBIFHeader /** * Returns a description of the classifier in XML BIF 0.3 format. * See http://www-2.cs.cmu.edu/~fgcozman/Research/InterchangeFormat/ * for details on XML BIF. * @return an XML BIF 0.3 description of the classifier as a string. 
*/ public String toXMLBIF03() { if (m_Instances == null) { return("<!--No model built yet-->"); } StringBuffer text = new StringBuffer(); text.append(getBIFHeader()); text.append("\n"); text.append("\n"); text.append("<BIF VERSION=\"0.3\">\n"); text.append("<NETWORK>\n"); text.append("<NAME>" + XMLNormalize(m_Instances.relationName()) + "</NAME>\n"); for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) { text.append("<VARIABLE TYPE=\"nature\">\n"); text.append("<NAME>" + XMLNormalize(m_Instances.attribute(iAttribute).name()) + "</NAME>\n"); for (int iValue = 0; iValue < m_Instances.attribute(iAttribute).numValues(); iValue++) { text.append("<OUTCOME>" + XMLNormalize(m_Instances.attribute(iAttribute).value(iValue)) + "</OUTCOME>\n"); } text.append("</VARIABLE>\n"); } for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) { text.append("<DEFINITION>\n"); text.append("<FOR>" + XMLNormalize(m_Instances.attribute(iAttribute).name()) + "</FOR>\n"); for (int iParent = 0; iParent < m_ParentSets[iAttribute].getNrOfParents(); iParent++) { text.append("<GIVEN>" + XMLNormalize(m_Instances.attribute(m_ParentSets[iAttribute].getParent(iParent)).name()) + "</GIVEN>\n"); } text.append("<TABLE>\n"); for (int iParent = 0; iParent < m_ParentSets[iAttribute].getCardinalityOfParents(); iParent++) { for (int iValue = 0; iValue < m_Instances.attribute(iAttribute).numValues(); iValue++) { text.append(m_Distributions[iAttribute][iParent].getProbability(iValue)); text.append(' '); } text.append('\n'); } text.append("</TABLE>\n"); text.append("</DEFINITION>\n"); } text.append("</NETWORK>\n"); text.append("</BIF>\n"); return text.toString(); } // toXMLBIF03 /** XMLNormalize converts the five standard XML entities in a string * g.e. 
the string V&D's is returned as V&amp;D&apos;s
   * @param sStr string to normalize
   * @return normalized string
   */
  protected String XMLNormalize(String sStr) {
    StringBuffer sStr2 = new StringBuffer();
    // Escape the five predefined XML entities; every other character is
    // copied through unchanged.
    for (int iStr = 0; iStr < sStr.length(); iStr++) {
      char c = sStr.charAt(iStr);
      switch (c) {
        case '&':
          sStr2.append("&amp;");
          break;
        case '\'':
          sStr2.append("&apos;");
          break;
        case '\"':
          sStr2.append("&quot;");
          break;
        case '<':
          sStr2.append("&lt;");
          break;
        case '>':
          sStr2.append("&gt;");
          break;
        default:
          sStr2.append(c);
      }
    }
    return sStr2.toString();
  } // XMLNormalize

  /**
   * @return a string to describe the UseADTree option.
   */
  public String useADTreeTipText() {
    return "When ADTree (the data structure for increasing speed on counts,"
      + " not to be confused with the classifier under the same name) is used"
      + " learning time goes down typically. However, because ADTrees are memory"
      + " intensive, memory problems may occur. Switching this option off makes"
      + " the structure learning algorithms slower, and run with less memory."
      + " By default, ADTrees are used.";
  }

  /**
   * @return a string to describe the SearchAlgorithm.
   */
  public String searchAlgorithmTipText() {
    return "Select method used for searching network structures.";
  }

  /**
   * This will return a string describing the BayesNetEstimator.
   * @return The string.
   */
  public String estimatorTipText() {
    return "Select Estimator algorithm for finding the conditional probability tables"
      + " of the Bayes Network.";
  }

  /**
   * @return a string to describe the BIFFile.
   */
  public String BIFFileTipText() {
    return "Set the name of a file in BIF XML format. A Bayes network learned"
      + " from data can be compared with the Bayes network represented by the BIF file."
      + " Statistics calculated are o.a. the number of missing and extra arcs.";
  }

  /**
   * This will return a string describing the classifier.
   * @return The string.
*/ public String globalInfo() { return "Bayes Network learning using various search algorithms and " + "quality measures.\n" + "Base class for a Bayes Network classifier. Provides " + "datastructures (network structure, conditional probability " + "distributions, etc.) and facilities common to Bayes Network " + "learning algorithms like K2 and B.\n\n" + "For more information see:\n\n" + "http://www.cs.waikato.ac.nz/~remco/weka.pdf"; } /** * Main method for testing this class. * * @param argv the options */ public static void main(String[] argv) { runClassifier(new BayesNet(), argv); } // main /** get name of the Bayes network * @return name of the Bayes net */ public String getName() { return m_Instances.relationName(); } /** get number of nodes in the Bayes network * @return number of nodes */ public int getNrOfNodes() { return m_Instances.numAttributes(); } /** get name of a node in the Bayes network * @param iNode index of the node * @return name of the specified node */ public String getNodeName(int iNode) { return m_Instances.attribute(iNode).name(); } /** get number of values a node can take * @param iNode index of the node * @return cardinality of the specified node */ public int getCardinality(int iNode) { return m_Instances.attribute(iNode).numValues(); } /** get name of a particular value of a node * @param iNode index of the node * @param iValue index of the value * @return cardinality of the specified node */ public String getNodeValue(int iNode, int iValue) { return m_Instances.attribute(iNode).value(iValue); } /** get number of parents of a node in the network structure * @param iNode index of the node * @return number of parents of the specified node */ public int getNrOfParents(int iNode) { return m_ParentSets[iNode].getNrOfParents(); } /** get node index of a parent of a node in the network structure * @param iNode index of the node * @param iParent index of the parents, e.g., 0 is the first parent, 1 the second parent, etc. 
* @return node index of the iParent's parent of the specified node */ public int getParent(int iNode, int iParent) { return m_ParentSets[iNode].getParent(iParent); } /** Get full set of parent sets. * @return parent sets; */ public ParentSet[] getParentSets() { return m_ParentSets; } /** Get full set of estimators. * @return estimators; */ public Estimator[][] getDistributions() { return m_Distributions; } /** get number of values the collection of parents of a node can take * @param iNode index of the node * @return cardinality of the parent set of the specified node */ public int getParentCardinality(int iNode) { return m_ParentSets[iNode].getCardinalityOfParents(); } /** get particular probability of the conditional probability distribtion * of a node given its parents. * @param iNode index of the node * @param iParent index of the parent set, 0 <= iParent <= getParentCardinality(iNode) * @param iValue index of the value, 0 <= iValue <= getCardinality(iNode) * @return probability */ public double getProbability(int iNode, int iParent, int iValue) { return m_Distributions[iNode][iParent].getProbability(iValue); } /** get the parent set of a node * @param iNode index of the node * @return Parent set of the specified node. */ public ParentSet getParentSet(int iNode) { return m_ParentSets[iNode]; } /** get ADTree strucrture containing efficient representation of counts. * @return ADTree strucrture */ public ADNode getADTree() { return m_ADTree;} // implementation of AdditionalMeasureProducer interface /** * Returns an enumeration of the measure names. Additional measures * must follow the naming convention of starting with "measure", eg. 
* double measureBlah() * @return an enumeration of the measure names */ public Enumeration enumerateMeasures() { Vector newVector = new Vector(4); newVector.addElement("measureExtraArcs"); newVector.addElement("measureMissingArcs"); newVector.addElement("measureReversedArcs"); newVector.addElement("measureDivergence"); newVector.addElement("measureBayesScore"); newVector.addElement("measureBDeuScore"); newVector.addElement("measureMDLScore"); newVector.addElement("measureAICScore"); newVector.addElement("measureEntropyScore"); return newVector.elements(); } // enumerateMeasures public double measureExtraArcs() { if (m_otherBayesNet != null) { return m_otherBayesNet.extraArcs(this); } return 0; } // measureExtraArcs public double measureMissingArcs() { if (m_otherBayesNet != null) { return m_otherBayesNet.missingArcs(this); } return 0; } // measureMissingArcs public double measureReversedArcs() { if (m_otherBayesNet != null) { return m_otherBayesNet.reversedArcs(this); } return 0; } // measureReversedArcs public double measureDivergence() { if (m_otherBayesNet != null) { return m_otherBayesNet.divergence(this); } return 0; } // measureDivergence public double measureBayesScore() { LocalScoreSearchAlgorithm s = new LocalScoreSearchAlgorithm(this, m_Instances); return s.logScore(Scoreable.BAYES); } // measureBayesScore public double measureBDeuScore() { LocalScoreSearchAlgorithm s = new LocalScoreSearchAlgorithm(this, m_Instances); return s.logScore(Scoreable.BDeu); } // measureBDeuScore public double measureMDLScore() { LocalScoreSearchAlgorithm s = new LocalScoreSearchAlgorithm(this, m_Instances); return s.logScore(Scoreable.MDL); } // measureMDLScore public double measureAICScore() { LocalScoreSearchAlgorithm s = new LocalScoreSearchAlgorithm(this, m_Instances); return s.logScore(Scoreable.AIC); } // measureAICScore public double measureEntropyScore() { LocalScoreSearchAlgorithm s = new LocalScoreSearchAlgorithm(this, m_Instances); return 
s.logScore(Scoreable.ENTROPY); } // measureEntropyScore /** * Returns the value of the named measure * @param measureName the name of the measure to query for its value * @return the value of the named measure * @throws IllegalArgumentException if the named measure is not supported */ public double getMeasure(String measureName) { if (measureName.equals("measureExtraArcs")) { return measureExtraArcs(); } if (measureName.equals("measureMissingArcs")) { return measureMissingArcs(); } if (measureName.equals("measureReversedArcs")) { return measureReversedArcs(); } if (measureName.equals("measureDivergence")) { return measureDivergence(); } if (measureName.equals("measureBayesScore")) { return measureBayesScore(); } if (measureName.equals("measureBDeuScore")) { return measureBDeuScore(); } if (measureName.equals("measureMDLScore")) { return measureMDLScore(); } if (measureName.equals("measureAICScore")) { return measureAICScore(); } if (measureName.equals("measureEntropyScore")) { return measureEntropyScore(); } return 0; } // getMeasure /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // class BayesNet
36,029
30.913198
213
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/BayesianLogisticRegression.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * BayesianLogisticRegression.java * Copyright (C) 2008 Illinois Institute of Technology * */ package weka.classifiers.bayes; import weka.classifiers.Classifier; import weka.classifiers.bayes.blr.GaussianPriorImpl; import weka.classifiers.bayes.blr.LaplacePriorImpl; import weka.classifiers.bayes.blr.Prior; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.SerializedObject; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Normalize; import java.util.Enumeration; import java.util.Random; import java.util.StringTokenizer; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * Implements Bayesian Logistic Regression for both Gaussian and Laplace Priors.<br/> * <br/> * For more information, see<br/> * <br/> * Alexander Genkin, David D. Lewis, David Madigan (2004). 
Large-scale bayesian logistic regression for text categorization. URL http://www.stat.rutgers.edu/~madigan/PAPERS/shortFat-v3a.pdf. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;techreport{Genkin2004, * author = {Alexander Genkin and David D. Lewis and David Madigan}, * institution = {DIMACS}, * title = {Large-scale bayesian logistic regression for text categorization}, * year = {2004}, * URL = {http://www.stat.rutgers.edu/\~madigan/PAPERS/shortFat-v3a.pdf} * } * </pre> * <p/> <!-- technical-bibtex-end --> * * * @author Navendu Garg (gargnav at iit dot edu) * @version $Revision: 5516 $ */ public class BayesianLogisticRegression extends AbstractClassifier implements OptionHandler, TechnicalInformationHandler { static final long serialVersionUID = -8013478897911757631L; /** Log-likelihood values to be used to choose the best hyperparameter. */ public static double[] LogLikelihood; /** Set of values to be used as hyperparameter values during Cross-Validation. */ public static double[] InputHyperparameterValues; /** DEBUG Mode*/ boolean debug = false; /** Choose whether to normalize data or not */ public boolean NormalizeData = false; /** Tolerance criteria for the stopping criterion. 
*/ public double Tolerance = 0.0005; /** Threshold for binary classification of probabilisitic estimate*/ public double Threshold = 0.5; /** Distributions available */ public static final int GAUSSIAN = 1; public static final int LAPLACIAN = 2; public static final Tag[] TAGS_PRIOR = { new Tag(GAUSSIAN, "Gaussian"), new Tag(LAPLACIAN, "Laplacian") }; /** Distribution Prior class */ public int PriorClass = GAUSSIAN; /** NumFolds for CV based Hyperparameters selection*/ public int NumFolds = 2; /** Methods for selecting the hyperparameter value */ public static final int NORM_BASED = 1; public static final int CV_BASED = 2; public static final int SPECIFIC_VALUE = 3; public static final Tag[] TAGS_HYPER_METHOD = { new Tag(NORM_BASED, "Norm-based"), new Tag(CV_BASED, "CV-based"), new Tag(SPECIFIC_VALUE, "Specific value") }; /** Hyperparameter selection method */ public int HyperparameterSelection = NORM_BASED; /** The class index from the training data */ public int ClassIndex = -1; /** Best hyperparameter for test phase */ public double HyperparameterValue = 0.27; /** CV Hyperparameter Range */ public String HyperparameterRange = "R:0.01-316,3.16"; /** Maximum number of iterations */ public int maxIterations = 100; /**Iteration counter */ public int iterationCounter = 0; /** Array for storing coefficients of Bayesian regression model. */ public double[] BetaVector; /** Array to store Regression Coefficient updates. */ public double[] DeltaBeta; /** Trust Region Radius Update*/ public double[] DeltaUpdate; /** Trust Region Radius*/ public double[] Delta; /** Array to store Hyperparameter values for each feature. */ public double[] Hyperparameters; /** R(i)= BetaVector X x(i) X y(i). * This an intermediate value with respect to vector BETA, input values and corresponding class labels*/ public double[] R; /** This vector is used to store the increments on the R(i). 
It is also used to determining the stopping criterion.*/ public double[] DeltaR; /** * This variable is used to keep track of change in * the value of delta summation of r(i). */ public double Change; /** * Bayesian Logistic Regression returns the probability of a given instance will belong to a certain * class (p(y=+1|Beta,X). To obtain a binary value the Threshold value is used. * <pre> * p(y=+1|Beta,X)>Threshold ? 1 : -1 * </pre> */ /** Filter interface used to point to weka.filters.unsupervised.attribute.Normalize object * */ public Filter m_Filter; /** Dataset provided to do Training/Test set.*/ protected Instances m_Instances; /** Prior class object interface*/ protected Prior m_PriorUpdate; public String globalInfo() { return "Implements Bayesian Logistic Regression " + "for both Gaussian and Laplace Priors.\n\n" + "For more information, see\n\n" + getTechnicalInformation(); } /** * <pre> * (1)Initialize m_Beta[j] to 0. * (2)Initialize m_DeltaUpdate[j]. * </pre> * * */ public void initialize() throws Exception { int numOfAttributes; int numOfInstances; int i; int j; Change = 0.0; //Manipulate Data if (NormalizeData) { m_Filter = new Normalize(); m_Filter.setInputFormat(m_Instances); m_Instances = Filter.useFilter(m_Instances, m_Filter); } //Set the intecept coefficient. Attribute att = new Attribute("(intercept)"); Instance instance; m_Instances.insertAttributeAt(att, 0); for (i = 0; i < m_Instances.numInstances(); i++) { instance = m_Instances.instance(i); instance.setValue(0, 1.0); } //Get the number of attributes numOfAttributes = m_Instances.numAttributes(); numOfInstances = m_Instances.numInstances(); ClassIndex = m_Instances.classIndex(); iterationCounter = 0; //Initialize Arrays. 
switch (HyperparameterSelection) { case 1: HyperparameterValue = normBasedHyperParameter(); if (debug) { System.out.println("Norm-based Hyperparameter: " + HyperparameterValue); } break; case 2: HyperparameterValue = CVBasedHyperparameter(); if (debug) { System.out.println("CV-based Hyperparameter: " + HyperparameterValue); } break; } BetaVector = new double[numOfAttributes]; Delta = new double[numOfAttributes]; DeltaBeta = new double[numOfAttributes]; Hyperparameters = new double[numOfAttributes]; DeltaUpdate = new double[numOfAttributes]; for (j = 0; j < numOfAttributes; j++) { BetaVector[j] = 0.0; Delta[j] = 1.0; DeltaBeta[j] = 0.0; DeltaUpdate[j] = 0.0; //TODO: Change the way it takes values. Hyperparameters[j] = HyperparameterValue; } DeltaR = new double[numOfInstances]; R = new double[numOfInstances]; for (i = 0; i < numOfInstances; i++) { DeltaR[i] = 0.0; R[i] = 0.0; } //Set the Prior interface to the appropriate prior implementation. if (PriorClass == GAUSSIAN) { m_PriorUpdate = new GaussianPriorImpl(); } else { m_PriorUpdate = new LaplacePriorImpl(); } } /** * This method tests what kind of data this classifier can handle. * return Capabilities */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.BINARY_ATTRIBUTES); // class result.enable(Capability.BINARY_CLASS); // instances result.setMinimumNumberInstances(0); return result; } /** * <ul> * <li>(1) Set the data to the class attribute m_Instances.</li> * <li>(2)Call the method initialize() to initialize the values.</li> * </ul> * @param data training data * @exception Exception if classifier can't be built successfully. */ public void buildClassifier(Instances data) throws Exception { Instance instance; int i; int j; // can classifier handle the data? getCapabilities().testWithFail(data); //(1) Set the data to the class attribute m_Instances. 
m_Instances = new Instances(data); //(2)Call the method initialize() to initialize the values. initialize(); do { //Compute the prior Trust Region Radius Update; for (j = 0; j < m_Instances.numAttributes(); j++) { if (j != ClassIndex) { DeltaUpdate[j] = m_PriorUpdate.update(j, m_Instances, BetaVector[j], Hyperparameters[j], R, Delta[j]); //limit step to trust region. DeltaBeta[j] = Math.min(Math.max(DeltaUpdate[j], 0 - Delta[j]), Delta[j]); //Update the for (i = 0; i < m_Instances.numInstances(); i++) { instance = m_Instances.instance(i); if (instance.value(j) != 0) { DeltaR[i] = DeltaBeta[j] * instance.value(j) * classSgn(instance.classValue()); R[i] += DeltaR[i]; } } //Updated Beta values. BetaVector[j] += DeltaBeta[j]; //Update size of trust region. Delta[j] = Math.max(2 * Math.abs(DeltaBeta[j]), Delta[j] / 2.0); } } } while (!stoppingCriterion()); m_PriorUpdate.computelogLikelihood(BetaVector, m_Instances); m_PriorUpdate.computePenalty(BetaVector, Hyperparameters); } /** * This class is used to mask the internal class labels. * * @param value internal class label * @return * <pre> * <ul><li> * -1 for internal class label 0 * </li> * <li> * +1 for internal class label 1 * </li> * </ul> * </pre> */ public static double classSgn(double value) { if (value == 0.0) { return -1.0; } else { return 1.0; } } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result = null; result = new TechnicalInformation(Type.TECHREPORT); result.setValue(Field.AUTHOR, "Alexander Genkin and David D. 
Lewis and David Madigan"); result.setValue(Field.YEAR, "2004"); result.setValue(Field.TITLE, "Large-scale bayesian logistic regression for text categorization"); result.setValue(Field.INSTITUTION, "DIMACS"); result.setValue(Field.URL, "http://www.stat.rutgers.edu/~madigan/PAPERS/shortFat-v3a.pdf"); return result; } /** * This is a convient function that defines and upper bound * (Delta>0) for values of r(i) reachable by updates in the * trust region. * * r BetaVector X x(i)y(i). * delta A parameter where sigma > 0 * @return double function value */ public static double bigF(double r, double sigma) { double funcValue = 0.25; double absR = Math.abs(r); if (absR > sigma) { funcValue = 1.0 / (2.0 + Math.exp(absR - sigma) + Math.exp(sigma - absR)); } return funcValue; } /** * This method implements the stopping criterion * function. * * @return boolean whether to stop or not. */ public boolean stoppingCriterion() { int i; double sum_deltaR = 0.0; double sum_R = 1.0; boolean shouldStop; double value = 0.0; double delta; //Summation of changes in R(i) vector. for (i = 0; i < m_Instances.numInstances(); i++) { sum_deltaR += Math.abs(DeltaR[i]); //Numerator (deltaR(i)) sum_R += Math.abs(R[i]); // Denominator (1+sum(R(i)) } delta = Math.abs(sum_deltaR - Change); Change = delta / sum_R; if (debug) { System.out.println(Change + " <= " + Tolerance); } shouldStop = ((Change <= Tolerance) || (iterationCounter >= maxIterations)) ? true : false; iterationCounter++; Change = sum_deltaR; return shouldStop; } /** * This method computes the values for the logistic link function. * <pre>f(r)=exp(r)/(1+exp(r))</pre> * * @return output value */ public static double logisticLinkFunction(double r) { return Math.exp(r) / (1.0 + Math.exp(r)); } /** * Sign for a given value. 
* @param r * @return double +1 if r>0, -1 if r<0 */ public static double sgn(double r) { double sgn = 0.0; if (r > 0) { sgn = 1.0; } else if (r < 0) { sgn = -1.0; } return sgn; } /** * This function computes the norm-based hyperparameters * and stores them in the m_Hyperparameters. */ public double normBasedHyperParameter() { //TODO: Implement this method. Instance instance; double mean = 0.0; for (int i = 0; i < m_Instances.numInstances(); i++) { instance = m_Instances.instance(i); double sqr_sum = 0.0; for (int j = 0; j < m_Instances.numAttributes(); j++) { if (j != ClassIndex) { sqr_sum += (instance.value(j) * instance.value(j)); } } //sqr_sum=Math.sqrt(sqr_sum); mean += sqr_sum; } mean = mean / (double) m_Instances.numInstances(); return ((double) m_Instances.numAttributes()) / mean; } /** * Classifies the given instance using the Bayesian Logistic Regression function. * * @param instance the test instance * @return the classification * @throws Exception if classification can't be done successfully */ public double classifyInstance(Instance instance) throws Exception { //TODO: Implement double sum_R = 0.0; double classification = 0.0; sum_R = BetaVector[0]; for (int j = 0; j < instance.numAttributes(); j++) { if (j != (ClassIndex - 1)) { sum_R += (BetaVector[j + 1] * instance.value(j)); } } sum_R = logisticLinkFunction(sum_R); if (sum_R > Threshold) { classification = 1.0; } else { classification = 0.0; } return classification; } /** * Outputs the linear regression model as a string. 
* * @return the model as string */ public String toString() { if (m_Instances == null) { return "Bayesian logistic regression: No model built yet."; } StringBuffer buf = new StringBuffer(); String text = ""; switch (HyperparameterSelection) { case 1: text = "Norm-Based Hyperparameter Selection: "; break; case 2: text = "Cross-Validation Based Hyperparameter Selection: "; break; case 3: text = "Specified Hyperparameter: "; break; } buf.append(text).append(HyperparameterValue).append("\n\n"); buf.append("Regression Coefficients\n"); buf.append("=========================\n\n"); for (int j = 0; j < m_Instances.numAttributes(); j++) { if (j != ClassIndex) { if (BetaVector[j] != 0.0) { buf.append(m_Instances.attribute(j).name()).append(" : ") .append(BetaVector[j]).append("\n"); } } } buf.append("===========================\n\n"); buf.append("Likelihood: " + m_PriorUpdate.getLoglikelihood() + "\n\n"); buf.append("Penalty: " + m_PriorUpdate.getPenalty() + "\n\n"); buf.append("Regularized Log Posterior: " + m_PriorUpdate.getLogPosterior() + "\n"); buf.append("===========================\n\n"); return buf.toString(); } /** * Method computes the best hyperparameter value by doing cross * -validation on the training data and compute the likelihood. * The method can parse a range of values or a list of values. * @return Best hyperparameter value with the max likelihood value on the training data. * @throws Exception */ public double CVBasedHyperparameter() throws Exception { //TODO: Method incomplete. double start; //TODO: Method incomplete. double end; //TODO: Method incomplete. 
double multiplier; int size = 0; double[] list = null; double MaxHypeValue = 0.0; double MaxLikelihood = 0.0; StringTokenizer tokenizer = new StringTokenizer(HyperparameterRange); String rangeType = tokenizer.nextToken(":"); if (rangeType.equals("R")) { String temp = tokenizer.nextToken(); tokenizer = new StringTokenizer(temp); start = Double.parseDouble(tokenizer.nextToken("-")); tokenizer = new StringTokenizer(tokenizer.nextToken()); end = Double.parseDouble(tokenizer.nextToken(",")); multiplier = Double.parseDouble(tokenizer.nextToken()); int steps = (int) (((Math.log10(end) - Math.log10(start)) / Math.log10(multiplier)) + 1); list = new double[steps]; int count = 0; for (double i = start; i <= end; i *= multiplier) { list[count++] = i; } } else if (rangeType.equals("L")) { Vector vec = new Vector(); while (tokenizer.hasMoreTokens()) { vec.add(tokenizer.nextToken(",")); } list = new double[vec.size()]; for (int i = 0; i < vec.size(); i++) { list[i] = Double.parseDouble((String) vec.get(i)); } } else { //throw exception. 
} // Perform two-fold cross-validation to collect // unbiased predictions if (list != null) { int numFolds = (int) NumFolds; Random random = new Random(); m_Instances.randomize(random); m_Instances.stratify(numFolds); for (int k = 0; k < list.length; k++) { for (int i = 0; i < numFolds; i++) { Instances train = m_Instances.trainCV(numFolds, i, random); SerializedObject so = new SerializedObject(this); BayesianLogisticRegression blr = (BayesianLogisticRegression) so.getObject(); // blr.setHyperparameterSelection(3); blr.setHyperparameterSelection(new SelectedTag(SPECIFIC_VALUE, TAGS_HYPER_METHOD)); blr.setHyperparameterValue(list[k]); // blr.setPriorClass(PriorClass); blr.setPriorClass(new SelectedTag(PriorClass, TAGS_PRIOR)); blr.setThreshold(Threshold); blr.setTolerance(Tolerance); blr.buildClassifier(train); Instances test = m_Instances.testCV(numFolds, i); double val = blr.getLoglikeliHood(blr.BetaVector, test); if (debug) { System.out.println("Fold " + i + "Hyperparameter: " + list[k]); System.out.println("==================================="); System.out.println(" Likelihood: " + val); } if ((k == 0) | (val > MaxLikelihood)) { MaxLikelihood = val; MaxHypeValue = list[k]; } } } } else { return HyperparameterValue; } return MaxHypeValue; } /** * * @return likelihood for a given set of betas and instances */ public double getLoglikeliHood(double[] betas, Instances instances) { m_PriorUpdate.computelogLikelihood(betas, instances); return m_PriorUpdate.getLoglikelihood(); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector newVector = new Vector(); newVector.addElement(new Option("\tShow Debugging Output\n", "D", 0, "-D")); newVector.addElement(new Option("\tDistribution of the Prior " +"(1=Gaussian, 2=Laplacian)" +"\n\t(default: 1=Gaussian)" , "P", 1, "-P <integer>")); newVector.addElement(new Option("\tHyperparameter Selection Method " +"(1=Norm-based, 2=CV-based, 3=specific value)\n" +"\t(default: 1=Norm-based)", "H", 1, "-H <integer>")); newVector.addElement(new Option("\tSpecified Hyperparameter Value (use in conjunction with -H 3)\n" +"\t(default: 0.27)", "V", 1, "-V <double>")); newVector.addElement(new Option( "\tHyperparameter Range (use in conjunction with -H 2)\n" +"\t(format: R:start-end,multiplier OR L:val(1), val(2), ..., val(n))\n" +"\t(default: R:0.01-316,3.16)", "R", 1, "-R <string>")); newVector.addElement(new Option("\tTolerance Value\n\t(default: 0.0005)", "Tl", 1, "-Tl <double>")); newVector.addElement(new Option("\tThreshold Value\n\t(default: 0.5)", "S", 1, "-S <double>")); newVector.addElement(new Option("\tNumber Of Folds (use in conjuction with -H 2)\n" +"\t(default: 2)", "F", 1, "-F <integer>")); newVector.addElement(new Option("\tMax Number of Iterations\n\t(default: 100)", "I", 1, "-I <integer>")); newVector.addElement(new Option("\tNormalize the data", "N", 0, "-N")); return newVector.elements(); } /** * Parses a given list of options. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Show Debugging Output * </pre> * * <pre> -P &lt;integer&gt; * Distribution of the Prior (1=Gaussian, 2=Laplacian) * (default: 1=Gaussian)</pre> * * <pre> -H &lt;integer&gt; * Hyperparameter Selection Method (1=Norm-based, 2=CV-based, 3=specific value) * (default: 1=Norm-based)</pre> * * <pre> -V &lt;double&gt; * Specified Hyperparameter Value (use in conjunction with -H 3) * (default: 0.27)</pre> * * <pre> -R &lt;string&gt; * Hyperparameter Range (use in conjunction with -H 2) * (format: R:start-end,multiplier OR L:val(1), val(2), ..., val(n)) * (default: R:0.01-316,3.16)</pre> * * <pre> -Tl &lt;double&gt; * Tolerance Value * (default: 0.0005)</pre> * * <pre> -S &lt;double&gt; * Threshold Value * (default: 0.5)</pre> * * <pre> -F &lt;integer&gt; * Number Of Folds (use in conjuction with -H 2) * (default: 2)</pre> * * <pre> -I &lt;integer&gt; * Max Number of Iterations * (default: 100)</pre> * * <pre> -N * Normalize the data</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { //Debug Option debug = Utils.getFlag('D', options); // Set Tolerance. String Tol = Utils.getOption("Tl", options); if (Tol.length() != 0) { Tolerance = Double.parseDouble(Tol); } //Set Threshold String Thres = Utils.getOption('S', options); if (Thres.length() != 0) { Threshold = Double.parseDouble(Thres); } //Set Hyperparameter Type String Hype = Utils.getOption('H', options); if (Hype.length() != 0) { HyperparameterSelection = Integer.parseInt(Hype); } //Set Hyperparameter Value String HyperValue = Utils.getOption('V', options); if (HyperValue.length() != 0) { HyperparameterValue = Double.parseDouble(HyperValue); } // Set hyper parameter range or list. String HyperparameterRange = Utils.getOption("R", options); //Set Prior class. 
String strPrior = Utils.getOption('P', options); if (strPrior.length() != 0) { PriorClass = Integer.parseInt(strPrior); } String folds = Utils.getOption('F', options); if (folds.length() != 0) { NumFolds = Integer.parseInt(folds); } String iterations = Utils.getOption('I', options); if (iterations.length() != 0) { maxIterations = Integer.parseInt(iterations); } NormalizeData = Utils.getFlag('N', options); //TODO: Implement this method for other options. Utils.checkForRemainingOptions(options); } /** * */ public String[] getOptions() { Vector result = new Vector(); //Add Debug Mode to options. result.add("-D"); //Add Tolerance value to options result.add("-Tl"); result.add("" + Tolerance); //Add Threshold value to options result.add("-S"); result.add("" + Threshold); //Add Hyperparameter value to options result.add("-H"); result.add("" + HyperparameterSelection); result.add("-V"); result.add("" + HyperparameterValue); result.add("-R"); result.add("" + HyperparameterRange); //Add Prior Class to options result.add("-P"); result.add("" + PriorClass); result.add("-F"); result.add("" + NumFolds); result.add("-I"); result.add("" + maxIterations); result.add("-N"); return (String[]) result.toArray(new String[result.size()]); } /** * Main method for testing this class. 
* * @param argv the options */ public static void main(String[] argv) { runClassifier(new BayesianLogisticRegression(), argv); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String debugTipText() { return "Turns on debugging mode."; } /** * */ public void setDebug(boolean debugMode) { debug = debugMode; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String hyperparameterSelectionTipText() { return "Select the type of Hyperparameter to be used."; } /** * Get the method used to select the hyperparameter * * @return the method used to select the hyperparameter */ public SelectedTag getHyperparameterSelection() { return new SelectedTag(HyperparameterSelection, TAGS_HYPER_METHOD); } /** * Set the method used to select the hyperparameter * * @param newMethod the method used to set the hyperparameter */ public void setHyperparameterSelection(SelectedTag newMethod) { if (newMethod.getTags() == TAGS_HYPER_METHOD) { int c = newMethod.getSelectedTag().getID(); if (c >= 1 && c <= 3) { HyperparameterSelection = c; } else { throw new IllegalArgumentException("Wrong selection type, -H value should be: " + "1 for norm-based, 2 for CV-based and " + "3 for specific value"); } } } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String priorClassTipText() { return "The type of prior to be used."; } /** * Set the type of prior to use. * * @param newMethod the type of prior to use. 
*/ public void setPriorClass(SelectedTag newMethod) { if (newMethod.getTags() == TAGS_PRIOR) { int c = newMethod.getSelectedTag().getID(); if (c == GAUSSIAN || c == LAPLACIAN) { PriorClass = c; } else { throw new IllegalArgumentException("Wrong selection type, -P value should be: " + "1 for Gaussian or 2 for Laplacian"); } } } /** * Get the type of prior to use. * * @return the type of prior to use */ public SelectedTag getPriorClass() { return new SelectedTag(PriorClass, TAGS_PRIOR); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String thresholdTipText() { return "Set the threshold for classifiction. The logistic function doesn't " + "return a class label but an estimate of p(y=+1|B,x(i)). " + "These estimates need to be converted to binary class label predictions. " + "values above the threshold are assigned class +1."; } /** * Return the threshold being used. * * @return the threshold */ public double getThreshold() { return Threshold; } /** * Set the threshold to use. * * @param threshold the threshold to use */ public void setThreshold(double threshold) { Threshold = threshold; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String toleranceTipText() { return "This value decides the stopping criterion."; } /** * Get the tolerance value * * @return the tolerance value */ public double getTolerance() { return Tolerance; } /** * Set the tolerance value * * @param tolerance the tolerance value to use */ public void setTolerance(double tolerance) { Tolerance = tolerance; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String hyperparameterValueTipText() { return "Specific hyperparameter value. 
Used when the hyperparameter " + "selection method is set to specific value"; } /** * Get the hyperparameter value. Used when the hyperparameter * selection method is set to specific value * * @return the hyperparameter value */ public double getHyperparameterValue() { return HyperparameterValue; } /** * Set the hyperparameter value. Used when the hyperparameter * selection method is set to specific value * * @param hyperparameterValue the value of the hyperparameter */ public void setHyperparameterValue(double hyperparameterValue) { HyperparameterValue = hyperparameterValue; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numFoldsTipText() { return "The number of folds to use for CV-based hyperparameter selection."; } /** * Return the number of folds for CV-based hyperparameter selection * * @return the number of CV folds */ public int getNumFolds() { return NumFolds; } /** * Set the number of folds to use for CV-based hyperparameter * selection * * @param numFolds number of folds to select */ public void setNumFolds(int numFolds) { NumFolds = numFolds; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String maxIterationsTipText() { return "The maximum number of iterations to perform."; } /** * Get the maximum number of iterations to perform * * @return the maximum number of iterations */ public int getMaxIterations() { return maxIterations; } /** * Set the maximum number of iterations to perform * * @param maxIterations maximum number of iterations */ public void setMaxIterations(int maxIterations) { this.maxIterations = maxIterations; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String normalizeDataTipText() { return "Normalize the 
data."; } /** * Returns true if the data is to be normalized first * * @return true if the data is to be normalized */ public boolean isNormalizeData() { return NormalizeData; } /** * Set whether to normalize the data or not * * @param normalizeData true if data is to be normalized */ public void setNormalizeData(boolean normalizeData) { NormalizeData = normalizeData; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String hyperparameterRangeTipText() { return "Hyperparameter value range. In case of CV-based Hyperparameters, " + "you can specify the range in two ways: \n" + "Comma-Separated: L: 3,5,6 (This will be a list of possible values.)\n" + "Range: R:0.01-316,3.16 (This will take values from 0.01-316 (inclusive) " + "in multiplications of 3.16"; } /** * Get the range of hyperparameter values to consider * during CV-based selection. * * @return the range of hyperparameters as a Stringe */ public String getHyperparameterRange() { return HyperparameterRange; } /** * Set the range of hyperparameter values to consider * during CV-based selection * * @param hyperparameterRange the range of hyperparameter values */ public void setHyperparameterRange(String hyperparameterRange) { HyperparameterRange = hyperparameterRange; } /** * Returns true if debug is turned on. * * @return true if debug is turned on */ public boolean isDebug() { return debug; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5516 $"); } }
35,453
27.138095
190
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/ComplementNaiveBayes.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * ComplementNaiveBayes.java * Copyright (C) 2003 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.bayes; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.core.Capabilities; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; /** <!-- globalinfo-start --> * Class for building and using a Complement class Naive Bayes classifier.<br/> * <br/> * For more information see, <br/> * <br/> * Jason D. Rennie, Lawrence Shih, Jaime Teevan, David R. Karger: Tackling the Poor Assumptions of Naive Bayes Text Classifiers. In: ICML, 616-623, 2003.<br/> * <br/> * P.S.: TF, IDF and length normalization transforms, as described in the paper, can be performed through weka.filters.unsupervised.StringToWordVector. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Rennie2003, * author = {Jason D. Rennie and Lawrence Shih and Jaime Teevan and David R. Karger}, * booktitle = {ICML}, * pages = {616-623}, * publisher = {AAAI Press}, * title = {Tackling the Poor Assumptions of Naive Bayes Text Classifiers}, * year = {2003} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N * Normalize the word weights for each class * </pre> * * <pre> -S * Smoothing value to avoid zero WordGivenClass probabilities (default=1.0). * </pre> * <!-- options-end --> * * @author Ashraf M. Kibriya (amk14@cs.waikato.ac.nz) * @version $Revision: 5516 $ */ public class ComplementNaiveBayes extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 7246302925903086397L; /** Weight of words for each class. The weight is actually the log of the probability of a word (w) given a class (c) (i.e. log(Pr[w|c])). The format of the matrix is: wordWeights[class][wordAttribute] */ private double[][] wordWeights; /** Holds the smoothing value to avoid word probabilities of zero.<br> P.S.: According to the paper this is the Alpha i parameter */ private double smoothingParameter = 1.0; /** True if the words weights are to be normalized */ private boolean m_normalizeWordWeights = false; /** Holds the number of Class values present in the set of specified instances */ private int numClasses; /** The instances header that'll be used in toString */ private Instances header; /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public java.util.Enumeration listOptions() { FastVector newVector = new FastVector(2); newVector.addElement( new Option("\tNormalize the word weights for each class\n", "N", 0,"-N")); newVector.addElement( new Option("\tSmoothing value to avoid zero WordGivenClass"+ " probabilities (default=1.0).\n", "S", 1,"-S")); return newVector.elements(); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { String options[] = new String[4]; int current=0; if(getNormalizeWordWeights()) options[current++] = "-N"; options[current++] = "-S"; options[current++] = Double.toString(smoothingParameter); while (current < options.length) { options[current++] = ""; } return options; } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N * Normalize the word weights for each class * </pre> * * <pre> -S * Smoothing value to avoid zero WordGivenClass probabilities (default=1.0). 
* </pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setNormalizeWordWeights(Utils.getFlag('N', options)); String val = Utils.getOption('S', options); if(val.length()!=0) setSmoothingParameter(Double.parseDouble(val)); else setSmoothingParameter(1.0); } /** * Returns true if the word weights for each class are to be normalized * * @return true if the word weights are normalized */ public boolean getNormalizeWordWeights() { return m_normalizeWordWeights; } /** * Sets whether if the word weights for each class should be normalized * * @param doNormalize whether the word weights are to be normalized */ public void setNormalizeWordWeights(boolean doNormalize) { m_normalizeWordWeights = doNormalize; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String normalizeWordWeightsTipText() { return "Normalizes the word weights for each class."; } /** * Gets the smoothing value to be used to avoid zero WordGivenClass * probabilities. 
* * @return the smoothing value */ public double getSmoothingParameter() { return smoothingParameter; } /** * Sets the smoothing value used to avoid zero WordGivenClass probabilities * * @param val the new smooting value */ public void setSmoothingParameter(double val) { smoothingParameter = val; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String smoothingParameterTipText() { return "Sets the smoothing parameter to avoid zero WordGivenClass "+ "probabilities (default=1.0)."; } /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building and using a Complement class Naive Bayes "+ "classifier.\n\nFor more information see, \n\n"+ getTechnicalInformation().toString() + "\n\n" + "P.S.: TF, IDF and length normalization transforms, as "+ "described in the paper, can be performed through "+ "weka.filters.unsupervised.StringToWordVector."; } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Jason D. Rennie and Lawrence Shih and Jaime Teevan and David R. Karger"); result.setValue(Field.TITLE, "Tackling the Poor Assumptions of Naive Bayes Text Classifiers"); result.setValue(Field.BOOKTITLE, "ICML"); result.setValue(Field.YEAR, "2003"); result.setValue(Field.PAGES, "616-623"); result.setValue(Field.PUBLISHER, "AAAI Press"); return result; } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @throws Exception if the classifier has not been built successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); numClasses = instances.numClasses(); int numAttributes = instances.numAttributes(); header = new Instances(instances, 0); double [][] ocrnceOfWordInClass = new double[numClasses][numAttributes]; wordWeights = new double[numClasses][numAttributes]; //double [] docsPerClass = new double[numClasses]; double[] wordsPerClass = new double[numClasses]; double totalWordOccurrences = 0; double sumOfSmoothingParams = (numAttributes-1)*smoothingParameter; int classIndex = instances.instance(0).classIndex(); Instance instance; int docClass; double numOccurrences; java.util.Enumeration enumInsts = instances.enumerateInstances(); while (enumInsts.hasMoreElements()) { instance = (Instance) enumInsts.nextElement(); docClass = (int)instance.value(classIndex); //docsPerClass[docClass] += instance.weight(); for(int a = 0; a<instance.numValues(); a++) if(instance.index(a) != instance.classIndex()) { if(!instance.isMissing(a)) { numOccurrences = instance.valueSparse(a) * instance.weight(); if(numOccurrences < 0) throw new Exception("Numeric attribute"+ " values must all be greater"+ " or equal to zero."); totalWordOccurrences += numOccurrences; wordsPerClass[docClass] += numOccurrences; 
ocrnceOfWordInClass[docClass] [instance.index(a)] += numOccurrences; //For the time being wordweights[0][i] //will hold the total occurrence of word // i over all classes wordWeights[0] [instance.index(a)] += numOccurrences; } } } //Calculating the complement class probability for all classes except 0 for(int c=1; c<numClasses; c++) { //total occurrence of words in classes other than c double totalWordOcrnces = totalWordOccurrences - wordsPerClass[c]; for(int w=0; w<numAttributes; w++) { if(w != classIndex ) { //occurrence of w in classes other that c double ocrncesOfWord = wordWeights[0][w] - ocrnceOfWordInClass[c][w]; wordWeights[c][w] = Math.log((ocrncesOfWord+smoothingParameter) / (totalWordOcrnces+sumOfSmoothingParams)); } } } //Now calculating the complement class probability for class 0 for(int w=0; w<numAttributes; w++) { if(w != classIndex) { //occurrence of w in classes other that c double ocrncesOfWord = wordWeights[0][w] - ocrnceOfWordInClass[0][w]; //total occurrence of words in classes other than c double totalWordOcrnces = totalWordOccurrences - wordsPerClass[0]; wordWeights[0][w] = Math.log((ocrncesOfWord+smoothingParameter) / (totalWordOcrnces+sumOfSmoothingParams)); } } //Normalizing weights if(m_normalizeWordWeights==true) for(int c=0; c<numClasses; c++) { double sum=0; for(int w=0; w<numAttributes; w++) { if(w!=classIndex) sum += Math.abs(wordWeights[c][w]); } for(int w=0; w<numAttributes; w++) { if(w!=classIndex) { wordWeights[c][w] = wordWeights[c][w]/sum; } } } } /** * Classifies a given instance. <p> * * The classification rule is: <br> * MinC(forAllWords(ti*Wci)) <br> * where <br> * ti is the frequency of word i in the given instance <br> * Wci is the weight of word i in Class c. <p> * * For more information see section 4.4 of the paper mentioned above * in the classifiers description. * * @param instance the instance to classify * @return the index of the class the instance is most likely to belong. 
* @throws Exception if the classifier has not been built yet. */ public double classifyInstance(Instance instance) throws Exception { if(wordWeights==null) throw new Exception("Error. The classifier has not been built "+ "properly."); double [] valueForClass = new double[numClasses]; double sumOfClassValues=0; for(int c=0; c<numClasses; c++) { double sumOfWordValues=0; for(int w=0; w<instance.numValues(); w++) { if(instance.index(w)!=instance.classIndex()) { double freqOfWordInDoc = instance.valueSparse(w); sumOfWordValues += freqOfWordInDoc * wordWeights[c][instance.index(w)]; } } //valueForClass[c] = Math.log(probOfClass[c]) - sumOfWordValues; valueForClass[c] = sumOfWordValues; sumOfClassValues += valueForClass[c]; } int minidx=0; for(int i=0; i<numClasses; i++) if(valueForClass[i]<valueForClass[minidx]) minidx = i; return minidx; } /** * Prints out the internal model built by the classifier. In this case * it prints out the word weights calculated when building the classifier. */ public String toString() { if(wordWeights==null) { return "The classifier hasn't been built yet."; } int numAttributes = header.numAttributes(); StringBuffer result = new StringBuffer("The word weights for each class are: \n"+ "------------------------------------\n\t"); for(int c = 0; c<numClasses; c++) result.append(header.classAttribute().value(c)).append("\t"); result.append("\n"); for(int w = 0; w<numAttributes; w++) { result.append(header.attribute(w).name()).append("\t"); for(int c = 0; c<numClasses; c++) result.append(Double.toString(wordWeights[c][w])).append("\t"); result.append("\n"); } return result.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5516 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new ComplementNaiveBayes(), argv); } }
17,185
33.440882
158
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/DMNBtext.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Discriminative Multinomial Naive Bayes for Text Classification * Copyright (C) 2008 Jiang Su */ package weka.classifiers.bayes; import weka.classifiers.Classifier; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.classifiers.UpdateableClassifier; import java.util.*; import java.io.Serializable; import weka.classifiers.AbstractClassifier; import weka.core.Capabilities; import weka.core.OptionHandler; /** <!-- globalinfo-start --> * Class for building and using a Discriminative Multinomial Naive Bayes classifier. For more information see,<br/> * <br/> * Jiang Su,Harry Zhang,Charles X. Ling,Stan Matwin: Discriminative Parameter Learning for Bayesian Networks. In: ICML 2008', 2008.<br/> * <br/> * The core equation for this classifier:<br/> * <br/> * P[Ci|D] = (P[D|Ci] x P[Ci]) / P[D] (Bayes rule)<br/> * <br/> * where Ci is class i and D is a document. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{JiangSu2008, * author = {Jiang Su,Harry Zhang,Charles X. Ling,Stan Matwin}, * booktitle = {ICML 2008'}, * title = {Discriminative Parameter Learning for Bayesian Networks}, * year = {2008} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -I &lt;iterations&gt; * The number of iterations that the classifier * will scan the training data (default = 1)</pre> * * <pre> -M * Use the frequency information in data</pre> * <!-- options-end --> * * @author Jiang Su (Jiang.Su@unb.ca) 2008 * @version $Revision: 6363 $ */ public class DMNBtext extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, TechnicalInformationHandler, UpdateableClassifier { /** for serialization */ static final long serialVersionUID = 5932177450183457085L; /** The number of iterations. */ protected int m_NumIterations = 1; protected boolean m_MultinomialWord = false; int m_numClasses=-1; protected Instances m_headerInfo; DNBBinary[] m_binaryClassifiers = null; /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building and using a Discriminative Multinomial Naive Bayes classifier. " + "For more information see,\n\n" + getTechnicalInformation().toString() + "\n\n" + "The core equation for this classifier:\n\n" + "P[Ci|D] = (P[D|Ci] x P[Ci]) / P[D] (Bayes rule)\n\n" + "where Ci is class i and D is a document."; } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Jiang Su,Harry Zhang,Charles X. Ling,Stan Matwin"); result.setValue(Field.YEAR, "2008"); result.setValue(Field.TITLE, "Discriminative Parameter Learning for Bayesian Networks"); result.setValue(Field.BOOKTITLE, "ICML 2008'"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NUMERIC_ATTRIBUTES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @exception Exception if the classifier has not been generated successfully */ public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(data); // remove instances with missing class Instances instances = new Instances(data); instances.deleteWithMissingClass(); m_binaryClassifiers = new DNBBinary[instances.numClasses()]; m_numClasses=instances.numClasses(); m_headerInfo = new Instances(instances, 0); for (int i = 0; i < instances.numClasses(); i++) { m_binaryClassifiers[i] = new DNBBinary(); m_binaryClassifiers[i].setTargetClass(i); m_binaryClassifiers[i].initClassifier(instances); } if (instances.numInstances() == 0) return; //Iterative update Random random = new Random(); for (int it = 0; it < m_NumIterations; it++) { for (int i = 0; i < instances.numInstances(); i++) { updateClassifier(instances.instance(i)); } } // Utils.normalize(m_oldClassDis); // Utils.normalize(m_ClassDis); // m_originalPositive = m_oldClassDis[0]; // m_positive = m_ClassDis[0]; } /** * Updates the classifier with the given instance. * * @param instance the new training instance to include in the model * @exception Exception if the instance could not be incorporated in * the model. */ public void updateClassifier(Instance instance) throws Exception { if (m_numClasses == 2) { m_binaryClassifiers[0].updateClassifier(instance); } else { for (int i = 0; i < instance.numClasses(); i++) m_binaryClassifiers[i].updateClassifier(instance); } } /** * Calculates the class membership probabilities for the given test * instance. 
* * @param instance the instance to be classified * @return predicted class probability distribution * @exception Exception if there is a problem generating the prediction */ public double[] distributionForInstance(Instance instance) throws Exception { if (m_numClasses == 2) { // System.out.println(m_binaryClassifiers[0].getProbForTargetClass(instance)); return m_binaryClassifiers[0].distributionForInstance(instance); } double[] logDocGivenClass = new double[instance.numClasses()]; for (int i = 0; i < m_numClasses; i++) logDocGivenClass[i] = m_binaryClassifiers[i].getLogProbForTargetClass(instance); double max = logDocGivenClass[Utils.maxIndex(logDocGivenClass)]; for(int i = 0; i<m_numClasses; i++) logDocGivenClass[i] = Math.exp(logDocGivenClass[i] - max); try { Utils.normalize(logDocGivenClass); } catch (Exception e) { e.printStackTrace(); } return logDocGivenClass; } /** * Returns a string representation of the classifier. * * @return a string representation of the classifier */ public String toString() { StringBuffer result = new StringBuffer(""); result.append("The log ratio of two conditional probabilities of a word w_i: log(p(w_i)|+)/p(w_i)|-)) in decent order based on their absolute values\n"); result.append("Can be used to measure the discriminative power of each word.\n"); if (m_numClasses == 2) { // System.out.println(m_binaryClassifiers[0].getProbForTargetClass(instance)); return result.append(m_binaryClassifiers[0].toString()).toString(); } for (int i = 0; i < m_numClasses; i++) { result.append(i+" against the rest classes\n"); result.append(m_binaryClassifiers[i].toString()+"\n"); } return result.toString(); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(); newVector.add(new Option("\tThe number of iterations that the classifier " + "\n\twill scan the training data (default = 1)", "I", 1, "-I <iterations>")); newVector.add(new Option("\tUse the frequency information in data" , "M", 0, "-M")); return newVector.elements(); } /* * Options after -- are passed to the designated classifier.<p> * * @param options the list of options as an array of strings * @exception Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String iterations = Utils.getOption('I', options); if (iterations.length() != 0) { setNumIterations(Integer.parseInt(iterations)); } setMultinomialWord(Utils.getFlag('M', options)); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { ArrayList<String> options = new ArrayList<String>(); options.add("-I"); options.add("" + getNumIterations()); if (getMultinomialWord()) { options.add("-M"); } return options.toArray(new String[1]); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numIterationsTipText() { return "The number of iterations that the classifier will scan the training data"; } /** * Sets the number of iterations to be performed */ public void setNumIterations(int numIterations) { m_NumIterations = numIterations; } /** * Gets the number of iterations to be performed * * @return the iterations to be performed */ public int getNumIterations() { return m_NumIterations; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String multinomialWordTipText() { return "Make use of frequency information in data"; } /** * Sets whether use binary text representation 
*/ public void setMultinomialWord(boolean val) { m_MultinomialWord = val; } /** * Gets whether use binary text representation * * @return whether use binary text representation */ public boolean getMultinomialWord() { return m_MultinomialWord; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return "$Revision: 1.0"; } public class DNBBinary implements Serializable { /** The number of iterations. */ private double[][] m_perWordPerClass; private double[] m_wordsPerClass; int m_classIndex = -1; private double[] m_classDistribution; /** number of unique words */ private int m_numAttributes; //set the target class private int m_targetClass = -1; private double m_WordLaplace=1; private double[] m_coefficient; private double m_classRatio; private double m_wordRatio; public void initClassifier(Instances instances) throws Exception { m_numAttributes = instances.numAttributes(); m_perWordPerClass = new double[2][m_numAttributes]; m_coefficient = new double[m_numAttributes]; m_wordsPerClass = new double[2]; m_classDistribution = new double[2]; m_WordLaplace = Math.log(m_numAttributes); m_classIndex = instances.classIndex(); //Laplace for (int c = 0; c < 2; c++) { m_classDistribution[c] = 1; m_wordsPerClass[c] = m_WordLaplace * m_numAttributes; java.util.Arrays.fill(m_perWordPerClass[c], m_WordLaplace); } } public void updateClassifier(Instance ins) throws Exception { //c=0 is 1, which is the target class, and c=1 is the rest int classIndex = 0; if (ins.value(ins.classIndex()) != m_targetClass) classIndex = 1; double prob = 1 - distributionForInstance(ins)[classIndex]; double weight = prob * ins.weight(); for (int a = 0; a < ins.numValues(); a++) { if (ins.index(a) != m_classIndex ) { if (!m_MultinomialWord) { if (ins.valueSparse(a) > 0) { m_wordsPerClass[classIndex] += weight; m_perWordPerClass[classIndex][ins. 
index(a)] += weight; } } else { double t = ins.valueSparse(a) * weight; m_wordsPerClass[classIndex] += t; m_perWordPerClass[classIndex][ins.index(a)] += t; } //update coefficient m_coefficient[ins.index(a)] = Math.log(m_perWordPerClass[0][ ins.index(a)] / m_perWordPerClass[1][ins.index(a)]); } } m_wordRatio = Math.log(m_wordsPerClass[0] / m_wordsPerClass[1]); m_classDistribution[classIndex] += weight; m_classRatio = Math.log(m_classDistribution[0] / m_classDistribution[1]); } /** * Calculates the class membership probabilities for the given test * instance. * * @param instance the instance to be classified * @return predicted class probability distribution * @exception Exception if there is a problem generating the prediction */ public double getLogProbForTargetClass(Instance ins) throws Exception { double probLog = m_classRatio; for (int a = 0; a < ins.numValues(); a++) { if (ins.index(a) != m_classIndex ) { if (!m_MultinomialWord) { if (ins.valueSparse(a) > 0) { probLog += m_coefficient[ins.index(a)] - m_wordRatio; } } else { probLog += ins.valueSparse(a) * (m_coefficient[ins.index(a)] - m_wordRatio); } } } return probLog; } /** * Calculates the class membership probabilities for the given test * instance. * * @param instance the instance to be classified * @return predicted class probability distribution * @exception Exception if there is a problem generating the prediction */ public double[] distributionForInstance(Instance instance) throws Exception { double[] probOfClassGivenDoc = new double[2]; double ratio=getLogProbForTargetClass(instance); if (ratio > 709) probOfClassGivenDoc[0]=1; else { ratio = Math.exp(ratio); probOfClassGivenDoc[0]=ratio / (1 + ratio); } probOfClassGivenDoc[1] = 1 - probOfClassGivenDoc[0]; return probOfClassGivenDoc; } /** * Returns a string representation of the classifier. 
* * @return a string representation of the classifier */ public String toString() { // StringBuffer result = new StringBuffer("The cofficiency of a naive Bayes classifier, can be considered as the discriminative power of a word\n--------------------------------------\n"); StringBuffer result = new StringBuffer(); result.append("\n"); TreeMap sort=new TreeMap(); double[] absCoeff=new double[m_numAttributes]; for(int w = 0; w<m_numAttributes; w++) { if(w==m_headerInfo.classIndex())continue; String val= m_headerInfo.attribute(w).name()+": "+m_coefficient[w]; sort.put((-1)*Math.abs(m_coefficient[w]),val); } Iterator it=sort.values().iterator(); while(it.hasNext()) { result.append((String)it.next()); result.append("\n"); } return result.toString(); } /** * Sets the Target Class */ public void setTargetClass(int targetClass) { m_targetClass = targetClass; } /** * Gets the Target Class * * @return the Target Class Index */ public int getTargetClass() { return m_targetClass; } } /** * Main method for testing this class. * * @param argv the options */ public static void main(String[] argv) { DMNBtext c = new DMNBtext(); runClassifier(c, argv); } }
17,355
29.289703
205
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/HNB.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * HNB.java
 * Copyright (C) 2004 Liangxiao Jiang
 */

package weka.classifiers.bayes;

import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;

/**
 <!-- globalinfo-start -->
 * Contructs Hidden Naive Bayes classification model with high classification accuracy and AUC.<br/>
 * <br/>
 * For more information refer to:<br/>
 * <br/>
 * H. Zhang, L. Jiang, J. Su: Hidden Naive Bayes. In: Twentieth National Conference on Artificial Intelligence, 919-924, 2005.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;inproceedings{Zhang2005,
 *    author = {H. Zhang and L. Jiang and J. Su},
 *    booktitle = {Twentieth National Conference on Artificial Intelligence},
 *    pages = {919-924},
 *    publisher = {AAAI Press},
 *    title = {Hidden Naive Bayes},
 *    year = {2005}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 <!-- options-end -->
 *
 * @author H. Zhang (hzhang@unb.ca)
 * @author Liangxiao Jiang (ljiang@cug.edu.cn)
 * @version $Revision: 5516 $
 */
public class HNB
  extends AbstractClassifier
  implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = -4503874444306113214L;

  /** The number of each class value occurs in the dataset */
  private double [] m_ClassCounts;

  /**
   * The number of class and two attributes values occurs in the dataset.
   * Indexed as [class][flattened att-value 1][flattened att-value 2]; the
   * diagonal entry [c][v][v] therefore holds the plain count of value v with
   * class c.
   */
  private double [][][] m_ClassAttAttCounts;

  /** The number of values for each attribute in the dataset */
  private int [] m_NumAttValues;

  /** The number of values for all attributes in the dataset */
  private int m_TotalAttValues;

  /** The number of classes in the dataset */
  private int m_NumClasses;

  /** The number of attributes including class in the dataset */
  private int m_NumAttributes;

  /** The number of instances in the dataset */
  private int m_NumInstances;

  /** The index of the class attribute in the dataset */
  private int m_ClassIndex;

  /**
   * The starting index of each attribute in the dataset
   * (offset into the flattened attribute-value space; -1 for the class
   * attribute, which is excluded from that space)
   */
  private int[] m_StartAttIndex;

  /** The 2D array of conditional mutual information of each pair attributes */
  private double[][] m_condiMutualInfo;

  /**
   * Returns a string describing this classifier.
   *
   * @return a description of the data generator suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "Contructs Hidden Naive Bayes classification model with high "
      + "classification accuracy and AUC.\n\n"
      + "For more information refer to:\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "H. Zhang and L. Jiang and J. Su");
    result.setValue(Field.TITLE, "Hidden Naive Bayes");
    result.setValue(Field.BOOKTITLE, "Twentieth National Conference on Artificial Intelligence");
    result.setValue(Field.YEAR, "2005");
    result.setValue(Field.PAGES, "919-924");
    result.setValue(Field.PUBLISHER, "AAAI Press");

    return result;
  }

  /**
   * Returns default capabilities of the classifier.
   * HNB only supports nominal attributes and a nominal class.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);

    // class
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * Generates the classifier.
   * Builds the joint count cube over (class, att-value, att-value) pairs and
   * precomputes the conditional mutual information of every attribute pair,
   * which later serves as the hidden-parent weights at prediction time.
   *
   * @param instances set of instances serving as training data
   * @exception Exception if the classifier has not been generated successfully
   */
  public void buildClassifier(Instances instances) throws Exception {
    // can classifier handle the data?
    getCapabilities().testWithFail(instances);

    // remove instances with missing class
    instances = new Instances(instances);
    instances.deleteWithMissingClass();

    // reset variable
    m_NumClasses = instances.numClasses();
    m_ClassIndex = instances.classIndex();
    m_NumAttributes = instances.numAttributes();
    m_NumInstances = instances.numInstances();
    m_TotalAttValues = 0;

    // allocate space for attribute reference arrays
    m_StartAttIndex = new int[m_NumAttributes];
    m_NumAttValues = new int[m_NumAttributes];

    // set the starting index of each attribute and the number of values for
    // each attribute and the total number of values for all attributes (not including class).
    for(int i = 0; i < m_NumAttributes; i++) {
      if(i != m_ClassIndex) {
        m_StartAttIndex[i] = m_TotalAttValues;
        m_NumAttValues[i] = instances.attribute(i).numValues();
        m_TotalAttValues += m_NumAttValues[i];
      }
      else {
        // the class attribute gets no slot in the flattened value space
        m_StartAttIndex[i] = -1;
        m_NumAttValues[i] = m_NumClasses;
      }
    }

    // allocate space for counts and frequencies
    m_ClassCounts = new double[m_NumClasses];
    m_ClassAttAttCounts = new double[m_NumClasses][m_TotalAttValues][m_TotalAttValues];

    // Calculate the counts
    for(int k = 0; k < m_NumInstances; k++) {
      int classVal=(int)instances.instance(k).classValue();
      m_ClassCounts[classVal] ++;
      // map each attribute of this instance to its flattened value index
      int[] attIndex = new int[m_NumAttributes];
      for(int i = 0; i < m_NumAttributes; i++) {
        if(i == m_ClassIndex) attIndex[i] = -1;
        else attIndex[i] = m_StartAttIndex[i] + (int)instances.instance(k).value(i);
      }
      // count every ordered pair of attribute values (including Att1 == Att2,
      // so the diagonal accumulates the single-value counts)
      for(int Att1 = 0; Att1 < m_NumAttributes; Att1++) {
        if(attIndex[Att1] == -1) continue;
        for(int Att2 = 0; Att2 < m_NumAttributes; Att2++) {
          if((attIndex[Att2] != -1)) {
            m_ClassAttAttCounts[classVal][attIndex[Att1]][attIndex[Att2]] ++;
          }
        }
      }
    }

    //compute conditional mutual information of each pair attributes (not including class)
    m_condiMutualInfo=new double[m_NumAttributes][m_NumAttributes];
    for(int son=0;son<m_NumAttributes;son++){
      if(son == m_ClassIndex) continue;
      for(int parent=0;parent<m_NumAttributes;parent++){
        if(parent == m_ClassIndex || son==parent) continue;
        m_condiMutualInfo[son][parent]=conditionalMutualInfo(son,parent);
      }
    }
  }

  /**
   * Computes conditional mutual information between a pair of attributes.
   * All probabilities are empirical frequencies read from the count cube
   * (diagonal entries give the single-attribute counts).
   *
   * @param son the son attribute
   * @param parent the parent attribute
   * @return the conditional mutual information between son and parent given class
   * @throws Exception if computation fails
   */
  private double conditionalMutualInfo(int son, int parent) throws Exception{
    double CondiMutualInfo=0;
    int sIndex=m_StartAttIndex[son];
    int pIndex=m_StartAttIndex[parent];
    double[] PriorsClass = new double[m_NumClasses];
    double[][] PriorsClassSon=new double[m_NumClasses][m_NumAttValues[son]];
    double[][] PriorsClassParent=new double[m_NumClasses][m_NumAttValues[parent]];
    double[][][] PriorsClassParentSon=new double[m_NumClasses][m_NumAttValues[parent]][m_NumAttValues[son]];

    // P(c)
    for(int i=0;i<m_NumClasses;i++){
      PriorsClass[i]=m_ClassCounts[i]/m_NumInstances;
    }
    // P(c, son=j) -- from the diagonal of the count cube
    for(int i=0;i<m_NumClasses;i++){
      for(int j=0;j<m_NumAttValues[son];j++){
        PriorsClassSon[i][j]=m_ClassAttAttCounts[i][sIndex+j][sIndex+j]/m_NumInstances;
      }
    }
    // P(c, parent=j) -- from the diagonal of the count cube
    for(int i=0;i<m_NumClasses;i++){
      for(int j=0;j<m_NumAttValues[parent];j++){
        PriorsClassParent[i][j]=m_ClassAttAttCounts[i][pIndex+j][pIndex+j]/m_NumInstances;
      }
    }
    // P(c, parent=j, son=k)
    for(int i=0;i<m_NumClasses;i++){
      for(int j=0;j<m_NumAttValues[parent];j++){
        for(int k=0;k<m_NumAttValues[son];k++){
          PriorsClassParentSon[i][j][k]=m_ClassAttAttCounts[i][pIndex+j][sIndex+k]/m_NumInstances;
        }
      }
    }
    // sum P(c,p,s) * log2( P(c,p,s)P(c) / (P(c,p)P(c,s)) ); zero-probability
    // terms contribute nothing because log2() returns 0 for tiny operands
    for(int i=0;i<m_NumClasses;i++){
      for(int j=0;j<m_NumAttValues[parent];j++){
        for(int k=0;k<m_NumAttValues[son];k++){
          CondiMutualInfo+=PriorsClassParentSon[i][j][k]*log2(PriorsClassParentSon[i][j][k]*PriorsClass[i],PriorsClassParent[i][j]*PriorsClassSon[i][k]);
        }
      }
    }
    return CondiMutualInfo;
  }

  /**
   * Computes the base-2 logarithm of the fraction x/y.
   * Returns 0 when either operand is (near) zero, which makes the
   * 0 * log(0) terms of the mutual-information sum vanish as intended.
   *
   * @param x numerator of the fraction.
   * @param y denominator of the fraction.
   * @return the base-2 logarithm of this fraction, or 0 if x or y is below 1e-6.
   */
  private double log2(double x,double y){
    if(x<1e-6||y<1e-6)
      return 0.0;
    else
      return Math.log(x/y)/Math.log(2);
  }

  /**
   * Calculates the class membership probabilities for the given test instance.
   * Each attribute's contribution is a CMI-weighted average over all other
   * attributes acting as its "hidden parent"; when the CMI weights sum to
   * zero the term falls back to the plain naive-Bayes conditional
   * probability. Laplace-style corrections (the 1.0/... additions) smooth
   * every estimate.
   *
   * @param instance the instance to be classified
   * @return predicted class probability distribution
   * @exception Exception if there is a problem generating the prediction
   */
  public double[] distributionForInstance(Instance instance) throws Exception {
    //Definition of local variables
    double[] probs = new double[m_NumClasses];
    int sIndex;
    double prob;
    double condiMutualInfoSum;

    // store instance's att values in an int array
    int[] attIndex = new int[m_NumAttributes];
    for(int att = 0; att < m_NumAttributes; att++) {
      if(att == m_ClassIndex) attIndex[att] = -1;
      else attIndex[att] = m_StartAttIndex[att] + (int)instance.value(att);
    }

    // calculate probabilities for each possible class value
    for(int classVal = 0; classVal < m_NumClasses; classVal++) {
      // smoothed class prior
      probs[classVal]=(m_ClassCounts[classVal]+1.0/m_NumClasses)/(m_NumInstances+1.0);
      for(int son = 0; son < m_NumAttributes; son++) {
        if(attIndex[son]==-1) continue;
        // temporarily mask the son so the parent loop skips it
        sIndex=attIndex[son];
        attIndex[son]=-1;
        prob=0;
        condiMutualInfoSum=0;
        for(int parent=0; parent<m_NumAttributes; parent++) {
          if(attIndex[parent]==-1) continue;
          condiMutualInfoSum+=m_condiMutualInfo[son][parent];
          // CMI-weighted smoothed estimate of P(son | parent, class)
          prob+=m_condiMutualInfo[son][parent]*(m_ClassAttAttCounts[classVal][attIndex[parent]][sIndex]+1.0/m_NumAttValues[son])/(m_ClassAttAttCounts[classVal][attIndex[parent]][attIndex[parent]] + 1.0);
        }
        if(condiMutualInfoSum>0){
          prob=prob/condiMutualInfoSum;
          probs[classVal] *= prob;
        }
        else{
          // no informative parents: fall back to naive P(son | class)
          prob=(m_ClassAttAttCounts[classVal][sIndex][sIndex]+1.0/m_NumAttValues[son])/(m_ClassCounts[classVal]+1.0);
          probs[classVal]*= prob;
        }
        // restore the son's value for subsequent iterations
        attIndex[son] = sIndex;
      }
    }
    Utils.normalize(probs);
    return probs;
  }

  /**
   * returns a string representation of the classifier
   *
   * @return a representation of the classifier
   */
  public String toString() {
    return "HNB (Hidden Naive Bayes)";
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 5516 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param args the options
   */
  public static void main(String[] args) {
    runClassifier(new HNB(), args);
  }
}
12,689
31.372449
203
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/NaiveBayes.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NaiveBayes.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Aggregateable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.estimators.DiscreteEstimator; import weka.estimators.Estimator; import weka.estimators.KernelEstimator; import weka.estimators.NormalEstimator; /** <!-- globalinfo-start --> * Class for a Naive Bayes classifier using estimator classes. Numeric estimator precision values are chosen based on analysis of the training data. For this reason, the classifier is not an UpdateableClassifier (which in typical usage are initialized with zero training instances) -- if you need the UpdateableClassifier functionality, use the NaiveBayesUpdateable classifier. 
The NaiveBayesUpdateable classifier will use a default precision of 0.1 for numeric attributes when buildClassifier is called with zero training instances.<br/> * <br/> * For more information on Naive Bayes classifiers, see<br/> * <br/> * George H. John, Pat Langley: Estimating Continuous Distributions in Bayesian Classifiers. In: Eleventh Conference on Uncertainty in Artificial Intelligence, San Mateo, 338-345, 1995. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{John1995, * address = {San Mateo}, * author = {George H. John and Pat Langley}, * booktitle = {Eleventh Conference on Uncertainty in Artificial Intelligence}, * pages = {338-345}, * publisher = {Morgan Kaufmann}, * title = {Estimating Continuous Distributions in Bayesian Classifiers}, * year = {1995} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -K * Use kernel density estimator rather than normal * distribution for numeric attributes</pre> * * <pre> -D * Use supervised discretization to process numeric attributes * </pre> * * <pre> -O * Display model in old format (good when there are many classes) * </pre> * <!-- options-end --> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 9785 $ */ public class NaiveBayes extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, TechnicalInformationHandler, Aggregateable<NaiveBayes> { /** for serialization */ static final long serialVersionUID = 5995231201785697655L; /** The attribute estimators. */ protected Estimator [][] m_Distributions; /** The class estimator. 
*/ protected Estimator m_ClassDistribution; /** * Whether to use kernel density estimator rather than normal distribution * for numeric attributes */ protected boolean m_UseKernelEstimator = false; /** * Whether to use discretization than normal distribution * for numeric attributes */ protected boolean m_UseDiscretization = false; /** The number of classes (or 1 for numeric class) */ protected int m_NumClasses; /** * The dataset header for the purposes of printing out a semi-intelligible * model */ protected Instances m_Instances; /*** The precision parameter used for numeric attributes */ protected static final double DEFAULT_NUM_PRECISION = 0.01; /** * The discretization filter. */ protected weka.filters.supervised.attribute.Discretize m_Disc = null; protected boolean m_displayModelInOldFormat = false; /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for a Naive Bayes classifier using estimator classes. Numeric" +" estimator precision values are chosen based on analysis of the " +" training data. For this reason, the classifier is not an" +" UpdateableClassifier (which in typical usage are initialized with zero" +" training instances) -- if you need the UpdateableClassifier functionality," +" use the NaiveBayesUpdateable classifier. The NaiveBayesUpdateable" +" classifier will use a default precision of 0.1 for numeric attributes" +" when buildClassifier is called with zero training instances.\n\n" +"For more information on Naive Bayes classifiers, see\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "George H. John and Pat Langley"); result.setValue(Field.TITLE, "Estimating Continuous Distributions in Bayesian Classifiers"); result.setValue(Field.BOOKTITLE, "Eleventh Conference on Uncertainty in Artificial Intelligence"); result.setValue(Field.YEAR, "1995"); result.setValue(Field.PAGES, "338-345"); result.setValue(Field.PUBLISHER, "Morgan Kaufmann"); result.setValue(Field.ADDRESS, "San Mateo"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @exception Exception if the classifier has not been generated * successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); m_NumClasses = instances.numClasses(); // Copy the instances m_Instances = new Instances(instances); // Discretize instances if required if (m_UseDiscretization) { m_Disc = new weka.filters.supervised.attribute.Discretize(); m_Disc.setInputFormat(m_Instances); m_Instances = weka.filters.Filter.useFilter(m_Instances, m_Disc); } else { m_Disc = null; } // Reserve space for the distributions m_Distributions = new Estimator[m_Instances.numAttributes() - 1] [m_Instances.numClasses()]; m_ClassDistribution = new DiscreteEstimator(m_Instances.numClasses(), true); int attIndex = 0; Enumeration enu = m_Instances.enumerateAttributes(); while (enu.hasMoreElements()) { Attribute attribute = (Attribute) enu.nextElement(); // If the attribute is numeric, determine the estimator // numeric precision from differences between adjacent values double numPrecision = DEFAULT_NUM_PRECISION; if (attribute.type() == Attribute.NUMERIC) { m_Instances.sort(attribute); if ((m_Instances.numInstances() > 0) && !m_Instances.instance(0).isMissing(attribute)) { double lastVal = m_Instances.instance(0).value(attribute); double currentVal, deltaSum = 0; int distinct = 0; for (int i = 1; i < m_Instances.numInstances(); i++) { Instance currentInst = m_Instances.instance(i); if (currentInst.isMissing(attribute)) { break; } currentVal = currentInst.value(attribute); if (currentVal != lastVal) { deltaSum += currentVal - lastVal; lastVal = currentVal; distinct++; } } if (distinct > 0) { numPrecision = deltaSum / distinct; } } } for (int j = 0; j < m_Instances.numClasses(); j++) { switch (attribute.type()) { case Attribute.NUMERIC: if (m_UseKernelEstimator) { m_Distributions[attIndex][j] = new KernelEstimator(numPrecision); } else { m_Distributions[attIndex][j] = new NormalEstimator(numPrecision); } break; case Attribute.NOMINAL: 
m_Distributions[attIndex][j] = new DiscreteEstimator(attribute.numValues(), true); break; default: throw new Exception("Attribute type unknown to NaiveBayes"); } } attIndex++; } // Compute counts Enumeration enumInsts = m_Instances.enumerateInstances(); while (enumInsts.hasMoreElements()) { Instance instance = (Instance) enumInsts.nextElement(); updateClassifier(instance); } // Save space m_Instances = new Instances(m_Instances, 0); } /** * Updates the classifier with the given instance. * * @param instance the new training instance to include in the model * @exception Exception if the instance could not be incorporated in * the model. */ public void updateClassifier(Instance instance) throws Exception { if (!instance.classIsMissing()) { Enumeration enumAtts = m_Instances.enumerateAttributes(); int attIndex = 0; while (enumAtts.hasMoreElements()) { Attribute attribute = (Attribute) enumAtts.nextElement(); if (!instance.isMissing(attribute)) { m_Distributions[attIndex][(int)instance.classValue()]. addValue(instance.value(attribute), instance.weight()); } attIndex++; } m_ClassDistribution.addValue(instance.classValue(), instance.weight()); } } /** * Calculates the class membership probabilities for the given test * instance. 
* * @param instance the instance to be classified * @return predicted class probability distribution * @exception Exception if there is a problem generating the prediction */ public double [] distributionForInstance(Instance instance) throws Exception { if (m_UseDiscretization) { m_Disc.input(instance); instance = m_Disc.output(); } double [] probs = new double[m_NumClasses]; for (int j = 0; j < m_NumClasses; j++) { probs[j] = m_ClassDistribution.getProbability(j); } Enumeration enumAtts = instance.enumerateAttributes(); int attIndex = 0; while (enumAtts.hasMoreElements()) { Attribute attribute = (Attribute) enumAtts.nextElement(); if (!instance.isMissing(attribute)) { double temp, max = 0; for (int j = 0; j < m_NumClasses; j++) { temp = Math.max(1e-75, Math.pow(m_Distributions[attIndex][j]. getProbability(instance.value(attribute)), m_Instances.attribute(attIndex).weight())); probs[j] *= temp; if (probs[j] > max) { max = probs[j]; } if (Double.isNaN(probs[j])) { throw new Exception("NaN returned from estimator for attribute " + attribute.name() + ":\n" + m_Distributions[attIndex][j].toString()); } } if ((max > 0) && (max < 1e-75)) { // Danger of probability underflow for (int j = 0; j < m_NumClasses; j++) { probs[j] *= 1e75; } } } attIndex++; } // Display probabilities Utils.normalize(probs); return probs; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(3); newVector.addElement( new Option("\tUse kernel density estimator rather than normal\n" +"\tdistribution for numeric attributes", "K", 0,"-K")); newVector.addElement( new Option("\tUse supervised discretization to process numeric attributes\n", "D", 0,"-D")); newVector.addElement( new Option("\tDisplay model in old format (good when there are " + "many classes)\n", "O", 0, "-O")); return newVector.elements(); } /** * Parses a given list of options. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -K * Use kernel density estimator rather than normal * distribution for numeric attributes</pre> * * <pre> -D * Use supervised discretization to process numeric attributes * </pre> * * <pre> -O * Display model in old format (good when there are many classes) * </pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @exception Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { boolean k = Utils.getFlag('K', options); boolean d = Utils.getFlag('D', options); if (k && d) { throw new IllegalArgumentException("Can't use both kernel density " + "estimation and discretization!"); } setUseSupervisedDiscretization(d); setUseKernelEstimator(k); setDisplayModelInOldFormat(Utils.getFlag('O', options)); Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] options = new String [3]; int current = 0; if (m_UseKernelEstimator) { options[current++] = "-K"; } if (m_UseDiscretization) { options[current++] = "-D"; } if (m_displayModelInOldFormat) { options[current++] = "-O"; } while (current < options.length) { options[current++] = ""; } return options; } /** * Returns a description of the classifier. * * @return a description of the classifier as a string. 
*/ public String toString() { if (m_displayModelInOldFormat) { return toStringOriginal(); } StringBuffer temp = new StringBuffer(); temp.append("Naive Bayes Classifier"); if (m_Instances == null) { temp.append(": No model built yet."); } else { int maxWidth = 0; int maxAttWidth = 0; boolean containsKernel = false; // set up max widths // class values for (int i = 0; i < m_Instances.numClasses(); i++) { if (m_Instances.classAttribute().value(i).length() > maxWidth) { maxWidth = m_Instances.classAttribute().value(i).length(); } } // attributes for (int i = 0; i < m_Instances.numAttributes(); i++) { if (i != m_Instances.classIndex()) { Attribute a = m_Instances.attribute(i); if (a.name().length() > maxAttWidth) { maxAttWidth = m_Instances.attribute(i).name().length(); } if (a.isNominal()) { // check values for (int j = 0; j < a.numValues(); j++) { String val = a.value(j) + " "; if (val.length() > maxAttWidth) { maxAttWidth = val.length(); } } } } } for (int i = 0; i < m_Distributions.length; i++) { for (int j = 0; j < m_Instances.numClasses(); j++) { if (m_Distributions[i][0] instanceof NormalEstimator) { // check mean/precision dev against maxWidth NormalEstimator n = (NormalEstimator)m_Distributions[i][j]; double mean = Math.log(Math.abs(n.getMean())) / Math.log(10.0); double precision = Math.log(Math.abs(n.getPrecision())) / Math.log(10.0); double width = (mean > precision) ? 
mean : precision; if (width < 0) { width = 1; } // decimal + # decimal places + 1 width += 6.0; if ((int)width > maxWidth) { maxWidth = (int)width; } } else if (m_Distributions[i][0] instanceof KernelEstimator) { containsKernel = true; KernelEstimator ke = (KernelEstimator)m_Distributions[i][j]; int numK = ke.getNumKernels(); String temps = "K" + numK + ": mean (weight)"; if (maxAttWidth < temps.length()) { maxAttWidth = temps.length(); } // check means + weights against maxWidth if (ke.getNumKernels() > 0) { double[] means = ke.getMeans(); double[] weights = ke.getWeights(); for (int k = 0; k < ke.getNumKernels(); k++) { String m = Utils.doubleToString(means[k], maxWidth, 4).trim(); m += " (" + Utils.doubleToString(weights[k], maxWidth, 1).trim() + ")"; if (maxWidth < m.length()) { maxWidth = m.length(); } } } } else if (m_Distributions[i][0] instanceof DiscreteEstimator) { DiscreteEstimator d = (DiscreteEstimator)m_Distributions[i][j]; for (int k = 0; k < d.getNumSymbols(); k++) { String size = "" + d.getCount(k); if (size.length() > maxWidth) { maxWidth = size.length(); } } int sum = ("" + d.getSumOfCounts()).length(); if (sum > maxWidth) { maxWidth = sum; } } } } // Check width of class labels for (int i = 0; i < m_Instances.numClasses(); i++) { String cSize = m_Instances.classAttribute().value(i); if (cSize.length() > maxWidth) { maxWidth = cSize.length(); } } // Check width of class priors for (int i = 0; i < m_Instances.numClasses(); i++) { String priorP = Utils.doubleToString(((DiscreteEstimator)m_ClassDistribution).getProbability(i), maxWidth, 2).trim(); priorP = "(" + priorP + ")"; if (priorP.length() > maxWidth) { maxWidth = priorP.length(); } } if (maxAttWidth < "Attribute".length()) { maxAttWidth = "Attribute".length(); } if (maxAttWidth < " weight sum".length()) { maxAttWidth = " weight sum".length(); } if (containsKernel) { if (maxAttWidth < " [precision]".length()) { maxAttWidth = " [precision]".length(); } } maxAttWidth += 2; temp.append("\n\n"); 
temp.append(pad("Class", " ", (maxAttWidth + maxWidth + 1) - "Class".length(), true)); temp.append("\n"); temp.append(pad("Attribute", " ", maxAttWidth - "Attribute".length(), false)); // class labels for (int i = 0; i < m_Instances.numClasses(); i++) { String classL = m_Instances.classAttribute().value(i); temp.append(pad(classL, " ", maxWidth + 1 - classL.length(), true)); } temp.append("\n"); // class priors temp.append(pad("", " ", maxAttWidth, true)); for (int i = 0; i < m_Instances.numClasses(); i++) { String priorP = Utils.doubleToString(((DiscreteEstimator)m_ClassDistribution).getProbability(i), maxWidth, 2).trim(); priorP = "(" + priorP + ")"; temp.append(pad(priorP, " ", maxWidth + 1 - priorP.length(), true)); } temp.append("\n"); temp.append(pad("", "=", maxAttWidth + (maxWidth * m_Instances.numClasses()) + m_Instances.numClasses() + 1, true)); temp.append("\n"); // loop over the attributes int counter = 0; for (int i = 0; i < m_Instances.numAttributes(); i++) { if (i == m_Instances.classIndex()) { continue; } String attName = m_Instances.attribute(i).name(); temp.append(attName + "\n"); if (m_Distributions[counter][0] instanceof NormalEstimator) { String meanL = " mean"; temp.append(pad(meanL, " ", maxAttWidth + 1 - meanL.length(), false)); for (int j = 0; j < m_Instances.numClasses(); j++) { // means NormalEstimator n = (NormalEstimator)m_Distributions[counter][j]; String mean = Utils.doubleToString(n.getMean(), maxWidth, 4).trim(); temp.append(pad(mean, " ", maxWidth + 1 - mean.length(), true)); } temp.append("\n"); // now do std deviations String stdDevL = " std. 
dev."; temp.append(pad(stdDevL, " ", maxAttWidth + 1 - stdDevL.length(), false)); for (int j = 0; j < m_Instances.numClasses(); j++) { NormalEstimator n = (NormalEstimator)m_Distributions[counter][j]; String stdDev = Utils.doubleToString(n.getStdDev(), maxWidth, 4).trim(); temp.append(pad(stdDev, " ", maxWidth + 1 - stdDev.length(), true)); } temp.append("\n"); // now the weight sums String weightL = " weight sum"; temp.append(pad(weightL, " ", maxAttWidth + 1 - weightL.length(), false)); for (int j = 0; j < m_Instances.numClasses(); j++) { NormalEstimator n = (NormalEstimator)m_Distributions[counter][j]; String weight = Utils.doubleToString(n.getSumOfWeights(), maxWidth, 4).trim(); temp.append(pad(weight, " ", maxWidth + 1 - weight.length(), true)); } temp.append("\n"); // now the precisions String precisionL = " precision"; temp.append(pad(precisionL, " ", maxAttWidth + 1 - precisionL.length(), false)); for (int j = 0; j < m_Instances.numClasses(); j++) { NormalEstimator n = (NormalEstimator)m_Distributions[counter][j]; String precision = Utils.doubleToString(n.getPrecision(), maxWidth, 4).trim(); temp.append(pad(precision, " ", maxWidth + 1 - precision.length(), true)); } temp.append("\n\n"); } else if (m_Distributions[counter][0] instanceof DiscreteEstimator) { Attribute a = m_Instances.attribute(i); for (int j = 0; j < a.numValues(); j++) { String val = " " + a.value(j); temp.append(pad(val, " ", maxAttWidth + 1 - val.length(), false)); for (int k = 0; k < m_Instances.numClasses(); k++) { DiscreteEstimator d = (DiscreteEstimator)m_Distributions[counter][k]; String count = "" + d.getCount(j); temp.append(pad(count, " ", maxWidth + 1 - count.length(), true)); } temp.append("\n"); } // do the totals String total = " [total]"; temp.append(pad(total, " ", maxAttWidth + 1 - total.length(), false)); for (int k = 0; k < m_Instances.numClasses(); k++) { DiscreteEstimator d = (DiscreteEstimator)m_Distributions[counter][k]; String count = "" + d.getSumOfCounts(); 
temp.append(pad(count, " ", maxWidth + 1 - count.length(), true)); } temp.append("\n\n"); } else if (m_Distributions[counter][0] instanceof KernelEstimator) { String kL = " [# kernels]"; temp.append(pad(kL, " ", maxAttWidth + 1 - kL.length(), false)); for (int k = 0; k < m_Instances.numClasses(); k++) { KernelEstimator ke = (KernelEstimator)m_Distributions[counter][k]; String nk = "" + ke.getNumKernels(); temp.append(pad(nk, " ", maxWidth + 1 - nk.length(), true)); } temp.append("\n"); // do num kernels, std. devs and precisions String stdDevL = " [std. dev]"; temp.append(pad(stdDevL, " ", maxAttWidth + 1 - stdDevL.length(), false)); for (int k = 0; k < m_Instances.numClasses(); k++) { KernelEstimator ke = (KernelEstimator)m_Distributions[counter][k]; String stdD = Utils.doubleToString(ke.getStdDev(), maxWidth, 4).trim(); temp.append(pad(stdD, " ", maxWidth + 1 - stdD.length(), true)); } temp.append("\n"); String precL = " [precision]"; temp.append(pad(precL, " ", maxAttWidth + 1 - precL.length(), false)); for (int k = 0; k < m_Instances.numClasses(); k++) { KernelEstimator ke = (KernelEstimator)m_Distributions[counter][k]; String prec = Utils.doubleToString(ke.getPrecision(), maxWidth, 4).trim(); temp.append(pad(prec, " ", maxWidth + 1 - prec.length(), true)); } temp.append("\n"); // first determine max number of kernels accross the classes int maxK = 0; for (int k = 0; k < m_Instances.numClasses(); k++) { KernelEstimator ke = (KernelEstimator)m_Distributions[counter][k]; if (ke.getNumKernels() > maxK) { maxK = ke.getNumKernels(); } } for (int j = 0; j < maxK; j++) { // means first String meanL = " K" + (j+1) + ": mean (weight)"; temp.append(pad(meanL, " ", maxAttWidth + 1 - meanL.length(), false)); for (int k = 0; k < m_Instances.numClasses(); k++) { KernelEstimator ke = (KernelEstimator)m_Distributions[counter][k]; double[] means = ke.getMeans(); double[] weights = ke.getWeights(); String m = "--"; if (ke.getNumKernels() == 0) { m = "" + 0; } else if (j < 
ke.getNumKernels()) { m = Utils.doubleToString(means[j], maxWidth, 4).trim(); m += " (" + Utils.doubleToString(weights[j], maxWidth, 1).trim() + ")"; } temp.append(pad(m, " ", maxWidth + 1 - m.length(), true)); } temp.append("\n"); } temp.append("\n"); } counter++; } } return temp.toString(); } /** * Returns a description of the classifier in the old format. * * @return a description of the classifier as a string. */ protected String toStringOriginal() { StringBuffer text = new StringBuffer(); text.append("Naive Bayes Classifier"); if (m_Instances == null) { text.append(": No model built yet."); } else { try { for (int i = 0; i < m_Distributions[0].length; i++) { text.append("\n\nClass " + m_Instances.classAttribute().value(i) + ": Prior probability = " + Utils. doubleToString(m_ClassDistribution.getProbability(i), 4, 2) + "\n\n"); Enumeration enumAtts = m_Instances.enumerateAttributes(); int attIndex = 0; while (enumAtts.hasMoreElements()) { Attribute attribute = (Attribute) enumAtts.nextElement(); if (attribute.weight() > 0) { text.append(attribute.name() + ": " + m_Distributions[attIndex][i]); } attIndex++; } } } catch (Exception ex) { text.append(ex.getMessage()); } } return text.toString(); } private String pad(String source, String padChar, int length, boolean leftPad) { StringBuffer temp = new StringBuffer(); if (leftPad) { for (int i = 0; i< length; i++) { temp.append(padChar); } temp.append(source); } else { temp.append(source); for (int i = 0; i< length; i++) { temp.append(padChar); } } return temp.toString(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String useKernelEstimatorTipText() { return "Use a kernel estimator for numeric attributes rather than a " +"normal distribution."; } /** * Gets if kernel estimator is being used. * * @return Value of m_UseKernelEstimatory. 
*/ public boolean getUseKernelEstimator() { return m_UseKernelEstimator; } /** * Sets if kernel estimator is to be used. * * @param v Value to assign to m_UseKernelEstimatory. */ public void setUseKernelEstimator(boolean v) { m_UseKernelEstimator = v; if (v) { setUseSupervisedDiscretization(false); } } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String useSupervisedDiscretizationTipText() { return "Use supervised discretization to convert numeric attributes to nominal " +"ones."; } /** * Get whether supervised discretization is to be used. * * @return true if supervised discretization is to be used. */ public boolean getUseSupervisedDiscretization() { return m_UseDiscretization; } /** * Set whether supervised discretization is to be used. * * @param newblah true if supervised discretization is to be used. */ public void setUseSupervisedDiscretization(boolean newblah) { m_UseDiscretization = newblah; if (newblah) { setUseKernelEstimator(false); } } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String displayModelInOldFormatTipText() { return "Use old format for model output. The old format is " + "better when there are many class values. The new format " + "is better when there are fewer classes and many attributes."; } /** * Set whether to display model output in the old, original * format. * * @param d true if model ouput is to be shown in the old format */ public void setDisplayModelInOldFormat(boolean d) { m_displayModelInOldFormat = d; } /** * Get whether to display model output in the old, original * format. * * @return true if model ouput is to be shown in the old format */ public boolean getDisplayModelInOldFormat() { return m_displayModelInOldFormat; } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9785 $"); } @Override public NaiveBayes aggregate(NaiveBayes toAggregate) throws Exception { // Highly unlikely that discretization intervals will match between the // two classifiers if (m_UseDiscretization || toAggregate.getUseSupervisedDiscretization()) { throw new Exception("Unable to aggregate when supervised discretization " + "has been turned on"); } if (!m_Instances.equalHeaders(toAggregate.m_Instances)) { throw new Exception("Can't aggregate - data headers don't match: " + m_Instances.equalHeadersMsg(toAggregate.m_Instances)); } ((Aggregateable) m_ClassDistribution). aggregate((Aggregateable) toAggregate.m_ClassDistribution); // aggregate all conditional estimators for (int i = 0; i < m_Distributions.length; i++) { for (int j = 0; j < m_Distributions[i].length; j++) { ((Aggregateable) m_Distributions[i][j]). aggregate((Aggregateable) toAggregate.m_Distributions[i][j]); } } return this; } @Override public void finalizeAggregation() throws Exception { // nothing to do } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new NaiveBayes(), argv); } }
33,326
32.937882
540
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/NaiveBayesMultinomial.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NaiveBayesMultinomial.java * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.bayes; import weka.classifiers.AbstractClassifier; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** <!-- globalinfo-start --> * Class for building and using a multinomial Naive Bayes classifier. For more information see,<br/> * <br/> * Andrew Mccallum, Kamal Nigam: A Comparison of Event Models for Naive Bayes Text Classification. In: AAAI-98 Workshop on 'Learning for Text Categorization', 1998.<br/> * <br/> * The core equation for this classifier:<br/> * <br/> * P[Ci|D] = (P[D|Ci] x P[Ci]) / P[D] (Bayes rule)<br/> * <br/> * where Ci is class i and D is a document. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Mccallum1998, * author = {Andrew Mccallum and Kamal Nigam}, * booktitle = {AAAI-98 Workshop on 'Learning for Text Categorization'}, * title = {A Comparison of Event Models for Naive Bayes Text Classification}, * year = {1998} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Andrew Golightly (acg4@cs.waikato.ac.nz) * @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class NaiveBayesMultinomial extends AbstractClassifier implements WeightedInstancesHandler,TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 5932177440181257085L; /** * probability that a word (w) exists in a class (H) (i.e. Pr[w|H]) * The matrix is in the this format: probOfWordGivenClass[class][wordAttribute] * NOTE: the values are actually the log of Pr[w|H] */ protected double[][] m_probOfWordGivenClass; /** the probability of a class (i.e. Pr[H]) */ protected double[] m_probOfClass; /** number of unique words */ protected int m_numAttributes; /** number of class values */ protected int m_numClasses; /** cache lnFactorial computations */ protected double[] m_lnFactorialCache = new double[]{0.0,0.0}; /** copy of header information for use in toString method */ protected Instances m_headerInfo; /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building and using a multinomial Naive Bayes classifier. 
" + "For more information see,\n\n" + getTechnicalInformation().toString() + "\n\n" + "The core equation for this classifier:\n\n" + "P[Ci|D] = (P[D|Ci] x P[Ci]) / P[D] (Bayes rule)\n\n" + "where Ci is class i and D is a document."; } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Andrew Mccallum and Kamal Nigam"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "A Comparison of Event Models for Naive Bayes Text Classification"); result.setValue(Field.BOOKTITLE, "AAAI-98 Workshop on 'Learning for Text Categorization'"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NUMERIC_ATTRIBUTES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @throws Exception if the classifier has not been generated successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); m_headerInfo = new Instances(instances, 0); m_numClasses = instances.numClasses(); m_numAttributes = instances.numAttributes(); m_probOfWordGivenClass = new double[m_numClasses][]; /* initialising the matrix of word counts NOTE: Laplace estimator introduced in case a word that does not appear for a class in the training set does so for the test set */ for(int c = 0; c<m_numClasses; c++) { m_probOfWordGivenClass[c] = new double[m_numAttributes]; for(int att = 0; att<m_numAttributes; att++) { m_probOfWordGivenClass[c][att] = 1; } } //enumerate through the instances Instance instance; int classIndex; double numOccurences; double[] docsPerClass = new double[m_numClasses]; double[] wordsPerClass = new double[m_numClasses]; java.util.Enumeration enumInsts = instances.enumerateInstances(); while (enumInsts.hasMoreElements()) { instance = (Instance) enumInsts.nextElement(); classIndex = (int)instance.value(instance.classIndex()); docsPerClass[classIndex] += instance.weight(); for(int a = 0; a<instance.numValues(); a++) if(instance.index(a) != instance.classIndex()) { if(!instance.isMissing(a)) { numOccurences = instance.valueSparse(a) * instance.weight(); if(numOccurences < 0) throw new Exception("Numeric attribute values must all be greater or equal to zero."); wordsPerClass[classIndex] += numOccurences; m_probOfWordGivenClass[classIndex][instance.index(a)] += numOccurences; } } } /* normalising probOfWordGivenClass values and saving each value as the log of each value */ for(int c = 0; c<m_numClasses; c++) for(int v = 0; v<m_numAttributes; v++) m_probOfWordGivenClass[c][v] = Math.log(m_probOfWordGivenClass[c][v] / (wordsPerClass[c] + m_numAttributes - 1)); /* calculating Pr(H) NOTE: Laplace estimator introduced in case a class does not get mentioned in the set of training instances */ final double numDocs = 
instances.sumOfWeights() + m_numClasses; m_probOfClass = new double[m_numClasses]; for(int h=0; h<m_numClasses; h++) m_probOfClass[h] = (double)(docsPerClass[h] + 1)/numDocs; } /** * Calculates the class membership probabilities for the given test * instance. * * @param instance the instance to be classified * @return predicted class probability distribution * @throws Exception if there is a problem generating the prediction */ public double [] distributionForInstance(Instance instance) throws Exception { double[] probOfClassGivenDoc = new double[m_numClasses]; //calculate the array of log(Pr[D|C]) double[] logDocGivenClass = new double[m_numClasses]; for(int h = 0; h<m_numClasses; h++) logDocGivenClass[h] = probOfDocGivenClass(instance, h); double max = logDocGivenClass[Utils.maxIndex(logDocGivenClass)]; double probOfDoc = 0.0; for(int i = 0; i<m_numClasses; i++) { probOfClassGivenDoc[i] = Math.exp(logDocGivenClass[i] - max) * m_probOfClass[i]; probOfDoc += probOfClassGivenDoc[i]; } Utils.normalize(probOfClassGivenDoc,probOfDoc); return probOfClassGivenDoc; } /** * log(N!) + (for all the words)(log(Pi^ni) - log(ni!)) * * where * N is the total number of words * Pi is the probability of obtaining word i * ni is the number of times the word at index i occurs in the document * * @param inst The instance to be classified * @param classIndex The index of the class we are calculating the probability with respect to * * @return The log of the probability of the document occuring given the class */ private double probOfDocGivenClass(Instance inst, int classIndex) { double answer = 0; //double totalWords = 0; //no need as we are not calculating the factorial at all. 
double freqOfWordInDoc; //should be double for(int i = 0; i<inst.numValues(); i++) if(inst.index(i) != inst.classIndex()) { freqOfWordInDoc = inst.valueSparse(i); //totalWords += freqOfWordInDoc; answer += (freqOfWordInDoc * m_probOfWordGivenClass[classIndex][inst.index(i)] ); //- lnFactorial(freqOfWordInDoc)); } //answer += lnFactorial(totalWords);//The factorial terms don't make //any difference to the classifier's //accuracy, so not needed. return answer; } /** * Fast computation of ln(n!) for non-negative ints * * negative ints are passed on to the general gamma-function * based version in weka.core.SpecialFunctions * * if the current n value is higher than any previous one, * the cache is extended and filled to cover it * * the common case is reduced to a simple array lookup * * @param n the integer * @return ln(n!) */ public double lnFactorial(int n) { if (n < 0) return weka.core.SpecialFunctions.lnFactorial(n); if (m_lnFactorialCache.length <= n) { double[] tmp = new double[n+1]; System.arraycopy(m_lnFactorialCache,0,tmp,0,m_lnFactorialCache.length); for(int i = m_lnFactorialCache.length; i < tmp.length; i++) tmp[i] = tmp[i-1] + Math.log(i); m_lnFactorialCache = tmp; } return m_lnFactorialCache[n]; } /** * Returns a string representation of the classifier. 
* * @return a string representation of the classifier */ public String toString() { StringBuffer result = new StringBuffer("The independent probability of a class\n--------------------------------------\n"); for(int c = 0; c<m_numClasses; c++) result.append(m_headerInfo.classAttribute().value(c)).append("\t").append(Double.toString(m_probOfClass[c])).append("\n"); result.append("\nThe probability of a word given the class\n-----------------------------------------\n\t"); for(int c = 0; c<m_numClasses; c++) result.append(m_headerInfo.classAttribute().value(c)).append("\t"); result.append("\n"); for(int w = 0; w<m_numAttributes; w++) { if (w != m_headerInfo.classIndex()) { result.append(m_headerInfo.attribute(w).name()).append("\t"); for(int c = 0; c<m_numClasses; c++) result.append(Double.toString(Math.exp(m_probOfWordGivenClass[c][w]))).append("\t"); result.append("\n"); } } return result.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClassifier(new NaiveBayesMultinomial(), argv); } }
12,525
31.366925
169
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/NaiveBayesMultinomialUpdateable.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NaiveBayesMultinomialUpdateable.java * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand * Copyright (C) 2007 Jiang Su (incremental version) */ package weka.classifiers.bayes; import weka.classifiers.UpdateableClassifier; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; /** <!-- globalinfo-start --> * Class for building and using a multinomial Naive Bayes classifier. For more information see,<br/> * <br/> * Andrew Mccallum, Kamal Nigam: A Comparison of Event Models for Naive Bayes Text Classification. In: AAAI-98 Workshop on 'Learning for Text Categorization', 1998.<br/> * <br/> * The core equation for this classifier:<br/> * <br/> * P[Ci|D] = (P[D|Ci] x P[Ci]) / P[D] (Bayes rule)<br/> * <br/> * where Ci is class i and D is a document.<br/> * <br/> * Incremental version of the algorithm. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Mccallum1998, * author = {Andrew Mccallum and Kamal Nigam}, * booktitle = {AAAI-98 Workshop on 'Learning for Text Categorization'}, * title = {A Comparison of Event Models for Naive Bayes Text Classification}, * year = {1998} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Andrew Golightly (acg4@cs.waikato.ac.nz) * @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz) * @author Jiang Su * @version $Revision: 9412 $ */ public class NaiveBayesMultinomialUpdateable extends NaiveBayesMultinomial implements UpdateableClassifier { /** for serialization */ private static final long serialVersionUID = -7204398796974263186L; /** the word count per class */ protected double[] m_wordsPerClass; /** * Returns a string describing this classifier * * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return super.globalInfo() + "\n\n" + "Incremental version of the algorithm."; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @throws Exception if the classifier has not been generated successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); m_headerInfo = new Instances(instances, 0); m_numClasses = instances.numClasses(); m_numAttributes = instances.numAttributes(); m_probOfWordGivenClass = new double[m_numClasses][]; m_wordsPerClass = new double[m_numClasses]; m_probOfClass = new double[m_numClasses]; // initialising the matrix of word counts // NOTE: Laplace estimator introduced in case a word that does not // appear for a class in the training set does so for the test set double laplace = 1; for (int c = 0; c < m_numClasses; c++) { m_probOfWordGivenClass[c] = new double[m_numAttributes]; m_probOfClass[c] = laplace; m_wordsPerClass[c] = laplace * m_numAttributes; for(int att = 0; att<m_numAttributes; att++) { m_probOfWordGivenClass[c][att] = laplace; } } for (int i = 0; i < instances.numInstances(); i++) updateClassifier(instances.instance(i)); } /** * Updates the classifier with the given instance. * * @param instance the new training instance to include in the model * @throws Exception if the instance could not be incorporated in * the model. 
*/ public void updateClassifier(Instance instance) throws Exception { int classIndex = (int) instance.value(instance.classIndex()); m_probOfClass[classIndex] += instance.weight(); for (int a = 0; a < instance.numValues(); a++) { if (instance.index(a) == instance.classIndex() || instance.isMissing(a)) continue; double numOccurences = instance.valueSparse(a) * instance.weight(); /*if (numOccurences < 0) throw new Exception( "Numeric attribute values must all be greater or equal to zero."); */ m_wordsPerClass[classIndex] += numOccurences; if (m_wordsPerClass[classIndex] < 0) { throw new Exception("Can't have a negative number of words for class " + (classIndex + 1)); } m_probOfWordGivenClass[classIndex][instance.index(a)] += numOccurences; if (m_probOfWordGivenClass[classIndex][instance.index(a)] < 0) { throw new Exception("Can't have a negative conditional sum for attribute " + instance.index(a)); } } } /** * Calculates the class membership probabilities for the given test * instance. * * @param instance the instance to be classified * @return predicted class probability distribution * @throws Exception if there is a problem generating the prediction */ public double[] distributionForInstance(Instance instance) throws Exception { double[] probOfClassGivenDoc = new double[m_numClasses]; // calculate the array of log(Pr[D|C]) double[] logDocGivenClass = new double[m_numClasses]; for (int c = 0; c < m_numClasses; c++) { logDocGivenClass[c] += Math.log(m_probOfClass[c]); int allWords = 0; for (int i = 0; i < instance.numValues(); i++) { if (instance.index(i) == instance.classIndex()) continue; double frequencies = instance.valueSparse(i); allWords += frequencies; logDocGivenClass[c] += frequencies * Math.log(m_probOfWordGivenClass[c][instance.index(i)]); } logDocGivenClass[c] -= allWords * Math.log(m_wordsPerClass[c]); } double max = logDocGivenClass[Utils.maxIndex(logDocGivenClass)]; for (int i = 0; i < m_numClasses; i++) probOfClassGivenDoc[i] = 
Math.exp(logDocGivenClass[i] - max); Utils.normalize(probOfClassGivenDoc); return probOfClassGivenDoc; } /** * Returns a string representation of the classifier. * * @return a string representation of the classifier */ public String toString() { StringBuffer result = new StringBuffer(); result.append("The independent probability of a class\n"); result.append("--------------------------------------\n"); for (int c = 0; c < m_numClasses; c++) result.append(m_headerInfo.classAttribute().value(c)).append("\t"). append(Double.toString(m_probOfClass[c])).append("\n"); result.append("\nThe probability of a word given the class\n"); result.append("-----------------------------------------\n\t"); for (int c = 0; c < m_numClasses; c++) result.append(m_headerInfo.classAttribute().value(c)).append("\t"); result.append("\n"); for (int w = 0; w < m_numAttributes; w++) { result.append(m_headerInfo.attribute(w).name()).append("\t"); for (int c = 0; c < m_numClasses; c++) result.append( Double.toString(Math.exp(m_probOfWordGivenClass[c][w]))).append("\t"); result.append("\n"); } return result.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9412 $"); } /** * Main method for testing this class. * * @param args the options */ public static void main(String[] args) { runClassifier(new NaiveBayesMultinomialUpdateable(), args); } }
8,395
32.055118
169
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/NaiveBayesSimple.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * NaiveBayesSimple.java * Copyright (C) 1999 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes; import weka.classifiers.Classifier; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.util.Enumeration; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * Class for building and using a simple Naive Bayes classifier.Numeric attributes are modelled by a normal distribution.<br/> * <br/> * For more information, see<br/> * <br/> * Richard Duda, Peter Hart (1973). Pattern Classification and Scene Analysis. Wiley, New York. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;book{Duda1973, * address = {New York}, * author = {Richard Duda and Peter Hart}, * publisher = {Wiley}, * title = {Pattern Classification and Scene Analysis}, * year = {1973} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 5516 $ */ public class NaiveBayesSimple extends AbstractClassifier implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -1478242251770381214L; /** All the counts for nominal attributes. */ protected double [][][] m_Counts; /** The means for numeric attributes. */ protected double [][] m_Means; /** The standard deviations for numeric attributes. */ protected double [][] m_Devs; /** The prior probabilities of the classes. */ protected double [] m_Priors; /** The instances used for training. */ protected Instances m_Instances; /** Constant for normal distribution. */ protected static double NORM_CONST = Math.sqrt(2 * Math.PI); /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building and using a simple Naive Bayes classifier." + "Numeric attributes are modelled by a normal distribution.\n\n" + "For more information, see\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.BOOK); result.setValue(Field.AUTHOR, "Richard Duda and Peter Hart"); result.setValue(Field.YEAR, "1973"); result.setValue(Field.TITLE, "Pattern Classification and Scene Analysis"); result.setValue(Field.PUBLISHER, "Wiley"); result.setValue(Field.ADDRESS, "New York"); return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Generates the classifier. * * @param instances set of instances serving as training data * @exception Exception if the classifier has not been generated successfully */ public void buildClassifier(Instances instances) throws Exception { int attIndex = 0; double sum; // can classifier handle the data? 
getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); m_Instances = new Instances(instances, 0); // Reserve space m_Counts = new double[instances.numClasses()] [instances.numAttributes() - 1][0]; m_Means = new double[instances.numClasses()] [instances.numAttributes() - 1]; m_Devs = new double[instances.numClasses()] [instances.numAttributes() - 1]; m_Priors = new double[instances.numClasses()]; Enumeration enu = instances.enumerateAttributes(); while (enu.hasMoreElements()) { Attribute attribute = (Attribute) enu.nextElement(); if (attribute.isNominal()) { for (int j = 0; j < instances.numClasses(); j++) { m_Counts[j][attIndex] = new double[attribute.numValues()]; } } else { for (int j = 0; j < instances.numClasses(); j++) { m_Counts[j][attIndex] = new double[1]; } } attIndex++; } // Compute counts and sums Enumeration enumInsts = instances.enumerateInstances(); while (enumInsts.hasMoreElements()) { Instance instance = (Instance) enumInsts.nextElement(); if (!instance.classIsMissing()) { Enumeration enumAtts = instances.enumerateAttributes(); attIndex = 0; while (enumAtts.hasMoreElements()) { Attribute attribute = (Attribute) enumAtts.nextElement(); if (!instance.isMissing(attribute)) { if (attribute.isNominal()) { m_Counts[(int)instance.classValue()][attIndex] [(int)instance.value(attribute)]++; } else { m_Means[(int)instance.classValue()][attIndex] += instance.value(attribute); m_Counts[(int)instance.classValue()][attIndex][0]++; } } attIndex++; } m_Priors[(int)instance.classValue()]++; } } // Compute means Enumeration enumAtts = instances.enumerateAttributes(); attIndex = 0; while (enumAtts.hasMoreElements()) { Attribute attribute = (Attribute) enumAtts.nextElement(); if (attribute.isNumeric()) { for (int j = 0; j < instances.numClasses(); j++) { if (m_Counts[j][attIndex][0] < 2) { throw new Exception("attribute " + attribute.name() + ": less than two values for 
class " + instances.classAttribute().value(j)); } m_Means[j][attIndex] /= m_Counts[j][attIndex][0]; } } attIndex++; } // Compute standard deviations enumInsts = instances.enumerateInstances(); while (enumInsts.hasMoreElements()) { Instance instance = (Instance) enumInsts.nextElement(); if (!instance.classIsMissing()) { enumAtts = instances.enumerateAttributes(); attIndex = 0; while (enumAtts.hasMoreElements()) { Attribute attribute = (Attribute) enumAtts.nextElement(); if (!instance.isMissing(attribute)) { if (attribute.isNumeric()) { m_Devs[(int)instance.classValue()][attIndex] += (m_Means[(int)instance.classValue()][attIndex]- instance.value(attribute))* (m_Means[(int)instance.classValue()][attIndex]- instance.value(attribute)); } } attIndex++; } } } enumAtts = instances.enumerateAttributes(); attIndex = 0; while (enumAtts.hasMoreElements()) { Attribute attribute = (Attribute) enumAtts.nextElement(); if (attribute.isNumeric()) { for (int j = 0; j < instances.numClasses(); j++) { if (m_Devs[j][attIndex] <= 0) { throw new Exception("attribute " + attribute.name() + ": standard deviation is 0 for class " + instances.classAttribute().value(j)); } else { m_Devs[j][attIndex] /= m_Counts[j][attIndex][0] - 1; m_Devs[j][attIndex] = Math.sqrt(m_Devs[j][attIndex]); } } } attIndex++; } // Normalize counts enumAtts = instances.enumerateAttributes(); attIndex = 0; while (enumAtts.hasMoreElements()) { Attribute attribute = (Attribute) enumAtts.nextElement(); if (attribute.isNominal()) { for (int j = 0; j < instances.numClasses(); j++) { sum = Utils.sum(m_Counts[j][attIndex]); for (int i = 0; i < attribute.numValues(); i++) { m_Counts[j][attIndex][i] = (m_Counts[j][attIndex][i] + 1) / (sum + (double)attribute.numValues()); } } } attIndex++; } // Normalize priors sum = Utils.sum(m_Priors); for (int j = 0; j < instances.numClasses(); j++) m_Priors[j] = (m_Priors[j] + 1) / (sum + (double)instances.numClasses()); } /** * Calculates the class membership probabilities for the given 
test instance.
 *
 * @param instance the instance to be classified
 * @return predicted class probability distribution
 * @exception Exception if distribution can't be computed
 */
  public double[] distributionForInstance(Instance instance) throws Exception {

    double [] probs = new double[instance.numClasses()];
    int attIndex;

    // Naive Bayes: P(c|x) is proportional to P(c) * product over attributes
    // of P(x_i|c); missing attribute values are simply skipped.
    for (int j = 0; j < instance.numClasses(); j++) {
      probs[j] = 1;
      Enumeration enumAtts = instance.enumerateAttributes();
      attIndex = 0;
      while (enumAtts.hasMoreElements()) {
        Attribute attribute = (Attribute) enumAtts.nextElement();
        if (!instance.isMissing(attribute)) {
          if (attribute.isNominal()) {
            // Nominal attribute: multiply in the conditional frequency
            // estimated during training.
            probs[j] *= m_Counts[j][attIndex][(int)instance.value(attribute)];
          } else {
            // Numeric attribute: multiply in the normal density fitted
            // during training.
            probs[j] *= normalDens(instance.value(attribute),
                                   m_Means[j][attIndex],
                                   m_Devs[j][attIndex]);
          }
        }
        attIndex++;
      }
      probs[j] *= m_Priors[j];
    }

    // Normalize probabilities so they sum to one.
    Utils.normalize(probs);
    return probs;
  }

  /**
   * Returns a description of the classifier.
   *
   * @return a description of the classifier as a string.
   */
  public String toString() {

    if (m_Instances == null) {
      return "Naive Bayes (simple): No model built yet.";
    }
    try {
      StringBuffer text = new StringBuffer("Naive Bayes (simple)");
      int attIndex;

      // One section per class: prior followed by per-attribute parameters.
      for (int i = 0; i < m_Instances.numClasses(); i++) {
        text.append("\n\nClass " + m_Instances.classAttribute().value(i)
                    + ": P(C) = " + Utils.doubleToString(m_Priors[i], 10, 8)
                    + "\n\n");
        Enumeration enumAtts = m_Instances.enumerateAttributes();
        attIndex = 0;
        while (enumAtts.hasMoreElements()) {
          Attribute attribute = (Attribute) enumAtts.nextElement();
          text.append("Attribute " + attribute.name() + "\n");
          if (attribute.isNominal()) {
            // Nominal attribute: header row of value labels, then the
            // smoothed conditional frequencies.
            for (int j = 0; j < attribute.numValues(); j++) {
              text.append(attribute.value(j) + "\t");
            }
            text.append("\n");
            for (int j = 0; j < attribute.numValues(); j++)
              text.append(Utils.
                          doubleToString(m_Counts[i][attIndex][j], 10, 8)
                          + "\t");
          } else {
            // Numeric attribute: fitted normal-distribution parameters.
            text.append("Mean: " + Utils.
                        doubleToString(m_Means[i][attIndex], 10, 8) + "\t");
            text.append("Standard Deviation: "
                        + Utils.doubleToString(m_Devs[i][attIndex], 10, 8));
          }
          text.append("\n\n");
          attIndex++;
        }
      }

      return text.toString();
    } catch (Exception e) {
      return "Can't print Naive Bayes classifier!";
    }
  }

  /**
   * Density function of normal distribution.
   *
   * @param x the value to get the density for
   * @param mean the mean
   * @param stdDev the standard deviation
   * @return the density
   */
  protected double normalDens(double x, double mean, double stdDev) {

    double diff = x - mean;
    // Gaussian pdf: exp(-(x-mean)^2 / (2*sigma^2)) / (NORM_CONST * sigma).
    // NORM_CONST is presumably sqrt(2*pi) — confirm against the field's
    // declaration earlier in this file.
    return (1 / (NORM_CONST * stdDev))
      * Math.exp(-(diff * diff / (2 * stdDev * stdDev)));
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 5516 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param argv the options
   */
  public static void main(String [] argv) {
    runClassifier(new NaiveBayesSimple(), argv);
  }
}
12,997
29.157773
126
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/NaiveBayesUpdateable.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * NaiveBayesUpdateable.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers.bayes;

import weka.classifiers.UpdateableClassifier;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;

/**
 <!-- globalinfo-start -->
 * Class for a Naive Bayes classifier using estimator classes. This is the updateable version of NaiveBayes.<br/>
 * This classifier will use a default precision of 0.1 for numeric attributes when buildClassifier is called with zero training instances.<br/>
 * <br/>
 * For more information on Naive Bayes classifiers, see<br/>
 * <br/>
 * George H. John, Pat Langley: Estimating Continuous Distributions in Bayesian Classifiers. In: Eleventh Conference on Uncertainty in Artificial Intelligence, San Mateo, 338-345, 1995.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;inproceedings{John1995,
 *    address = {San Mateo},
 *    author = {George H. John and Pat Langley},
 *    booktitle = {Eleventh Conference on Uncertainty in Artificial Intelligence},
 *    pages = {338-345},
 *    publisher = {Morgan Kaufmann},
 *    title = {Estimating Continuous Distributions in Bayesian Classifiers},
 *    year = {1995}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -K
 *  Use kernel density estimator rather than normal
 *  distribution for numeric attributes</pre>
 *
 * <pre> -D
 *  Use supervised discretization to process numeric attributes
 * </pre>
 *
 * <pre> -O
 *  Display model in old format (good when there are many classes)
 * </pre>
 *
 <!-- options-end -->
 *
 * @author Len Trigg (trigg@cs.waikato.ac.nz)
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public class NaiveBayesUpdateable extends NaiveBayes
  implements UpdateableClassifier {

  /** for serialization */
  static final long serialVersionUID = -5354015843807192221L;

  /**
   * Returns a string describing this classifier.
   *
   * @return a description of the classifier suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    StringBuilder description = new StringBuilder();
    description.append("Class for a Naive Bayes classifier using estimator classes. This is the ");
    description.append("updateable version of NaiveBayes.\n");
    description.append("This classifier will use a default precision of 0.1 for numeric attributes ");
    description.append("when buildClassifier is called with zero training instances.\n\n");
    description.append("For more information on Naive Bayes classifiers, see\n\n");
    description.append(getTechnicalInformation().toString());
    return description.toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    // Identical to the reference cited by the base NaiveBayes class.
    return super.getTechnicalInformation();
  }

  /**
   * Set whether supervised discretization is to be used.
   * The updateable variant cannot discretize incrementally, so any
   * attempt to enable discretization is rejected.
   *
   * @param newblah true if supervised discretization is to be used.
   * @throws IllegalArgumentException if discretization is requested
   */
  public void setUseSupervisedDiscretization(boolean newblah) {
    if (!newblah) {
      m_UseDiscretization = false;
      return;
    }
    throw new IllegalArgumentException("Can't use discretization "
        + "in NaiveBayesUpdateable!");
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param argv the options
   */
  public static void main(String [] argv) {
    runClassifier(new NaiveBayesUpdateable(), argv);
  }
}
4,414
30.535714
185
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/WAODE.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * WAODE.java
 * Copyright 2006 Liangxiao Jiang
 */

package weka.classifiers.bayes;

import weka.classifiers.Classifier;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;

import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.AbstractClassifier;

/**
 <!-- globalinfo-start -->
 * WAODE contructs the model called Weightily Averaged One-Dependence Estimators.<br/>
 * <br/>
 * For more information, see<br/>
 * <br/>
 * L. Jiang, H. Zhang: Weightily Averaged One-Dependence Estimators. In: Proceedings of the 9th Biennial Pacific Rim International Conference on Artificial Intelligence, PRICAI 2006, 970-974, 2006.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;inproceedings{Jiang2006,
 *    author = {L. Jiang and H. Zhang},
 *    booktitle = {Proceedings of the 9th Biennial Pacific Rim International Conference on Artificial Intelligence, PRICAI 2006},
 *    pages = {970-974},
 *    series = {LNAI},
 *    title = {Weightily Averaged One-Dependence Estimators},
 *    volume = {4099},
 *    year = {2006}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 * <pre> -I
 *  Whether to print some more internals.
 *  (default: no)</pre>
 *
 <!-- options-end -->
 *
 * @author Liangxiao Jiang (ljiang@cug.edu.cn)
 * @author H. Zhang (hzhang@unb.ca)
 * @version $Revision: 5516 $
 */
public class WAODE extends AbstractClassifier implements TechnicalInformationHandler {

  /** for serialization */
  private static final long serialVersionUID = 2170978824284697882L;

  /** The number of each class value occurs in the dataset */
  private double[] m_ClassCounts;

  /** The number of each attribute value occurs in the dataset */
  private double[] m_AttCounts;

  /** The number of two attributes values occurs in the dataset */
  private double[][] m_AttAttCounts;

  /** The number of class and two attributes values occurs in the dataset */
  private double[][][] m_ClassAttAttCounts;

  /** The number of values for each attribute in the dataset */
  private int[] m_NumAttValues;

  /** The number of values for all attributes in the dataset */
  private int m_TotalAttValues;

  /** The number of classes in the dataset */
  private int m_NumClasses;

  /** The number of attributes including class in the dataset */
  private int m_NumAttributes;

  /** The number of instances in the dataset */
  private int m_NumInstances;

  /** The index of the class attribute in the dataset */
  private int m_ClassIndex;

  /** The starting index of each attribute in the dataset */
  private int[] m_StartAttIndex;

  /** The array of mutual information between each attribute and class */
  private double[] m_mutualInformation;

  /** the header information of the training data */
  private Instances m_Header = null;

  /** whether to print more internals in the toString method
   * @see #toString() */
  private boolean m_Internals = false;

  /** a ZeroR model in case no model can be built from the data */
  private Classifier m_ZeroR;

  /**
   * Returns a string describing this classifier
   *
   * @return a description of the classifier suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "WAODE contructs the model called Weightily Averaged One-Dependence "
      + "Estimators.\n\n"
      + "For more information, see\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Gets an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector result = new Vector();

    // inherit the options of the superclass (e.g. -D debug flag)
    Enumeration enm = super.listOptions();
    while (enm.hasMoreElements())
      result.add(enm.nextElement());

    result.addElement(new Option(
        "\tWhether to print some more internals.\n"
        + "\t(default: no)",
        "I", 0, "-I"));

    return result.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -D
   *  If set, classifier is run in debug mode and
   *  may output additional info to the console</pre>
   *
   * <pre> -I
   *  Whether to print some more internals.
   *  (default: no)</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    super.setOptions(options);

    setInternals(Utils.getFlag('I', options));
  }

  /**
   * Gets the current settings of the filter.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector result;
    String[] options;
    int i;

    result = new Vector();

    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);

    if (getInternals())
      result.add("-I");

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String internalsTipText() {
    return "Prints more internals of the classifier.";
  }

  /**
   * Sets whether internals about classifier are printed via toString().
   *
   * @param value if internals should be printed
   * @see #toString()
   */
  public void setInternals(boolean value) {
    m_Internals = value;
  }

  /**
   * Gets whether more internals of the classifier are printed.
   *
   * @return true if more internals are printed
   */
  public boolean getInternals() {
    return m_Internals;
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "L. Jiang and H. Zhang");
    result.setValue(Field.TITLE, "Weightily Averaged One-Dependence Estimators");
    result.setValue(Field.BOOKTITLE, "Proceedings of the 9th Biennial Pacific Rim International Conference on Artificial Intelligence, PRICAI 2006");
    result.setValue(Field.YEAR, "2006");
    result.setValue(Field.PAGES, "970-974");
    result.setValue(Field.SERIES, "LNAI");
    result.setValue(Field.VOLUME, "4099");

    return result;
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes: only nominal attributes are supported
    result.enable(Capability.NOMINAL_ATTRIBUTES);

    // class: only nominal classes are supported
    result.enable(Capability.NOMINAL_CLASS);

    return result;
  }

  /**
   * Generates the classifier.
   *
   * @param instances set of instances serving as training data
   * @throws Exception if the classifier has not been generated successfully
   */
  public void buildClassifier(Instances instances) throws Exception {
    // can classifier handle the data?
    getCapabilities().testWithFail(instances);

    // only class? -> build ZeroR model
    if (instances.numAttributes() == 1) {
      System.err.println(
          "Cannot build model (only class attribute present in data!), "
          + "using ZeroR model instead!");
      m_ZeroR = new weka.classifiers.rules.ZeroR();
      m_ZeroR.buildClassifier(instances);
      return;
    }
    else {
      m_ZeroR = null;
    }

    // reset variable
    m_NumClasses = instances.numClasses();
    m_ClassIndex = instances.classIndex();
    m_NumAttributes = instances.numAttributes();
    m_NumInstances = instances.numInstances();
    m_TotalAttValues = 0;

    // allocate space for attribute reference arrays
    m_StartAttIndex = new int[m_NumAttributes];
    m_NumAttValues = new int[m_NumAttributes];

    // set the starting index of each attribute and the number of values for
    // each attribute and the total number of values for all attributes (not
    // including class). All attribute values are laid out in one flat index
    // space; m_StartAttIndex[i] is attribute i's offset into that space.
    for (int i = 0; i < m_NumAttributes; i++) {
      if (i != m_ClassIndex) {
        m_StartAttIndex[i] = m_TotalAttValues;
        m_NumAttValues[i] = instances.attribute(i).numValues();
        m_TotalAttValues += m_NumAttValues[i];
      }
      else {
        // the class attribute gets no slot in the flat index space
        m_StartAttIndex[i] = -1;
        m_NumAttValues[i] = m_NumClasses;
      }
    }

    // allocate space for counts and frequencies
    m_ClassCounts = new double[m_NumClasses];
    m_AttCounts = new double[m_TotalAttValues];
    m_AttAttCounts = new double[m_TotalAttValues][m_TotalAttValues];
    m_ClassAttAttCounts = new double[m_NumClasses][m_TotalAttValues][m_TotalAttValues];
    m_Header = new Instances(instances, 0);

    // Calculate the counts: for every instance, bump the class count, the
    // per-attribute-value counts and the pairwise (and class-conditional
    // pairwise) attribute-value co-occurrence counts.
    for (int k = 0; k < m_NumInstances; k++) {
      int classVal = (int) instances.instance(k).classValue();
      m_ClassCounts[classVal]++;
      int[] attIndex = new int[m_NumAttributes];
      for (int i = 0; i < m_NumAttributes; i++) {
        if (i == m_ClassIndex) {
          attIndex[i] = -1;
        }
        else {
          attIndex[i] = m_StartAttIndex[i] + (int) instances.instance(k).value(i);
          m_AttCounts[attIndex[i]]++;
        }
      }
      for (int Att1 = 0; Att1 < m_NumAttributes; Att1++) {
        if (attIndex[Att1] == -1)
          continue;
        for (int Att2 = 0; Att2 < m_NumAttributes; Att2++) {
          if ((attIndex[Att2] != -1)) {
            m_AttAttCounts[attIndex[Att1]][attIndex[Att2]]++;
            m_ClassAttAttCounts[classVal][attIndex[Att1]][attIndex[Att2]]++;
          }
        }
      }
    }

    // compute mutual information between each attribute and class;
    // these become the averaging weights at prediction time
    m_mutualInformation = new double[m_NumAttributes];
    for (int att = 0; att < m_NumAttributes; att++) {
      if (att == m_ClassIndex)
        continue;
      m_mutualInformation[att] = mutualInfo(att);
    }
  }

  /**
   * Computes mutual information between each attribute and class attribute.
   *
   * @param att is the attribute
   * @return the conditional mutual information between son and parent given class
   */
  private double mutualInfo(int att) {
    double mutualInfo = 0;
    int attIndex = m_StartAttIndex[att];
    double[] PriorsClass = new double[m_NumClasses];
    double[] PriorsAttribute = new double[m_NumAttValues[att]];
    double[][] PriorsClassAttribute = new double[m_NumClasses][m_NumAttValues[att]];

    // empirical marginals P(c) and P(a_j)
    for (int i = 0; i < m_NumClasses; i++) {
      PriorsClass[i] = m_ClassCounts[i] / m_NumInstances;
    }

    for (int j = 0; j < m_NumAttValues[att]; j++) {
      PriorsAttribute[j] = m_AttCounts[attIndex + j] / m_NumInstances;
    }

    // empirical joint P(c, a_j); the diagonal of the pairwise count table
    // holds the single-attribute-value counts
    for (int i = 0; i < m_NumClasses; i++) {
      for (int j = 0; j < m_NumAttValues[att]; j++) {
        PriorsClassAttribute[i][j] = m_ClassAttAttCounts[i][attIndex + j][attIndex + j] / m_NumInstances;
      }
    }

    // MI(C; A) = sum P(c,a) * log2(P(c,a) / (P(c)P(a)))
    for (int i = 0; i < m_NumClasses; i++) {
      for (int j = 0; j < m_NumAttValues[att]; j++) {
        mutualInfo += PriorsClassAttribute[i][j] * log2(PriorsClassAttribute[i][j], PriorsClass[i] * PriorsAttribute[j]);
      }
    }
    return mutualInfo;
  }

  /**
   * compute the logarithm whose base is 2.
   *
   * @param x numerator of the fraction.
   * @param y denominator of the fraction.
   * @return the natual logarithm of this fraction.
   */
  private double log2(double x, double y) {
    // guard against log of (near-)zero: treat as contributing nothing
    if (x < Utils.SMALL || y < Utils.SMALL)
      return 0.0;
    else
      return Math.log(x / y) / Math.log(2);
  }

  /**
   * Calculates the class membership probabilities for the given test instance
   *
   * @param instance the instance to be classified
   * @return predicted class probability distribution
   * @throws Exception if there is a problem generating the prediction
   */
  public double[] distributionForInstance(Instance instance) throws Exception {
    // default model?
    if (m_ZeroR != null) {
      return m_ZeroR.distributionForInstance(instance);
    }

    // Definition of local variables
    double[] probs = new double[m_NumClasses];
    double prob;
    double mutualInfoSum;

    // store instance's att values in an int array (flat index space)
    int[] attIndex = new int[m_NumAttributes];
    for (int att = 0; att < m_NumAttributes; att++) {
      if (att == m_ClassIndex)
        attIndex[att] = -1;
      else
        attIndex[att] = m_StartAttIndex[att] + (int) instance.value(att);
    }

    // calculate probabilities for each possible class value: a weighted
    // average of one-dependence estimators, each attribute acting in turn
    // as the "parent" with the mutual-information weights; Laplace-style
    // smoothing is applied to all counts
    for (int classVal = 0; classVal < m_NumClasses; classVal++) {
      probs[classVal] = 0;
      prob = 1;
      mutualInfoSum = 0.0;
      for (int parent = 0; parent < m_NumAttributes; parent++) {
        if (attIndex[parent] == -1)
          continue;
        // P(c, a_parent), smoothed
        prob = (m_ClassAttAttCounts[classVal][attIndex[parent]][attIndex[parent]] + 1.0 / (m_NumClasses * m_NumAttValues[parent])) / (m_NumInstances + 1.0);
        for (int son = 0; son < m_NumAttributes; son++) {
          if (attIndex[son] == -1 || son == parent)
            continue;
          // P(a_son | c, a_parent), smoothed
          prob *= (m_ClassAttAttCounts[classVal][attIndex[parent]][attIndex[son]] + 1.0 / m_NumAttValues[son]) / (m_ClassAttAttCounts[classVal][attIndex[parent]][attIndex[parent]] + 1.0);
        }
        mutualInfoSum += m_mutualInformation[parent];
        probs[classVal] += m_mutualInformation[parent] * prob;
      }
      probs[classVal] /= mutualInfoSum;
    }
    if (!Double.isNaN(Utils.sum(probs)))
      Utils.normalize(probs);
    return probs;
  }

  /**
   * returns a string representation of the classifier
   *
   * @return string representation of the classifier
   */
  public String toString() {
    StringBuffer result;
    String classname;
    int i;

    // only ZeroR model?
    if (m_ZeroR != null) {
      result = new StringBuffer();
      result.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n");
      result.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n");
      result.append("Warning: No model could be built, hence ZeroR model is used:\n\n");
      result.append(m_ZeroR.toString());
    }
    else {
      classname = this.getClass().getName().replaceAll(".*\\.", "");
      result = new StringBuffer();
      result.append(classname + "\n");
      result.append(classname.replaceAll(".", "=") + "\n\n");

      if (m_Header == null) {
        result.append("No Model built yet.\n");
      }
      else {
        if (getInternals()) {
          result.append("Mutual information of attributes with class attribute:\n");
          for (i = 0; i < m_Header.numAttributes(); i++) {
            // skip class
            if (i == m_Header.classIndex())
              continue;
            result.append(
                (i + 1) + ". " + m_Header.attribute(i).name() + ": "
                + Utils.doubleToString(m_mutualInformation[i], 6) + "\n");
          }
        }
        else {
          result.append("Model built successfully.\n");
        }
      }
    }

    return result.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 5516 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param argv the commandline options, use -h to list all options
   */
  public static void main(String[] argv) {
    runClassifier(new WAODE(), argv);
  }
}
16,512
29.410681
197
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/blr/GaussianPriorImpl.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * GaussianPrior.java * Copyright (C) 2008 Illinois Institute of Technology * */ package weka.classifiers.bayes.blr; import weka.classifiers.bayes.BayesianLogisticRegression; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; /** * Implementation of the Gaussian Prior update function based on * CLG Algorithm with a certain Trust Region Update. * * The values are updated in the BayesianLogisticRegressionV variables * used by the algorithm. * * * @author Navendu Garg(gargnav@iit.edu) * @version $Revision: 1.2 $ */ public class GaussianPriorImpl extends Prior { /** for serialization. */ private static final long serialVersionUID = -2995684220141159223L; /** * Update function specific to Laplace Prior. */ public double update(int j, Instances instances, double beta, double hyperparameter, double[] r, double deltaV) { int i; double numerator = 0.0; double denominator = 0.0; double value = 0.0; Instance instance; m_Instances = instances; Beta = beta; Hyperparameter = hyperparameter; Delta = deltaV; R = r; //Compute First Derivative i.e. Numerator //Compute the Second Derivative i.e. 
for (i = 0; i < m_Instances.numInstances(); i++) { instance = m_Instances.instance(i); if (instance.value(j) != 0) { //Compute Numerator (Note: (0.0-1.0/(1.0+Math.exp(R[i]) numerator += ((instance.value(j) * BayesianLogisticRegression.classSgn(instance.classValue())) * (0.0 - (1.0 / (1.0 + Math.exp(R[i]))))); //Compute Denominator denominator += (instance.value(j) * instance.value(j) * BayesianLogisticRegression.bigF(R[i], Delta * Math.abs(instance.value(j)))); } } numerator += ((2.0 * Beta) / Hyperparameter); denominator += (2.0 / Hyperparameter); value = numerator / denominator; return (0 - (value)); } /** * This method calls the log-likelihood implemented in the Prior * abstract class. * @param betas * @param instances */ public void computeLoglikelihood(double[] betas, Instances instances) { super.computelogLikelihood(betas, instances); } /** * This function computes the penalty term specific to Gaussian distribution. * @param betas * @param hyperparameters */ public void computePenalty(double[] betas, double[] hyperparameters) { penalty = 0.0; for (int j = 0; j < betas.length; j++) { penalty += (Math.log(Math.sqrt(hyperparameters[j])) + (Math.log(2 * Math.PI) / 2) + ((betas[j] * betas[j]) / (2 * hyperparameters[j]))); } penalty = 0 - penalty; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.2 $"); } }
3,591
28.203252
111
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/blr/LaplacePriorImpl.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * LaplacePriorImpl.java (header previously mislabelled "GaussianPrior.java")
 * Copyright (C) 2008 Illinois Institute of Technology
 */

package weka.classifiers.bayes.blr;

import weka.classifiers.bayes.BayesianLogisticRegression;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;

/**
 * Implementation of the Laplace prior update function, based on the modified
 * CLG Algorithm (CLG-Lasso) with a certain Trust Region Update.
 *
 * @author Navendu Garg(gargnav@iit.edu)
 * @version $Revision: 1.2 $
 */
public class LaplacePriorImpl extends Prior {

  /** for serialization. */
  private static final long serialVersionUID = 2353576123257012607L;

  // NOTE(review): every field below shadows an identically-named protected
  // field inherited from Prior. Behavior is unchanged (this class only ever
  // reads back what it writes here), but the duplication is confusing and
  // should be consolidated; removing the fields would alter the serialized
  // form of this Serializable class, so it is only flagged, not changed.
  Instances m_Instances;
  double Beta;
  double Hyperparameter;
  double DeltaUpdate;
  double[] R;
  double Delta;

  /**
   * Update function specific to Laplace Prior. For a zero coefficient, tries
   * the positive direction first and falls back to the negative direction;
   * for a non-zero coefficient, the update is clamped so the coefficient
   * cannot cross zero (the lasso-style sign constraint).
   */
  public double update(int j, Instances instances, double beta,
    double hyperparameter, double[] r, double deltaV) {
    double sign = 0.0;
    double change = 0.0;
    DeltaUpdate = 0.0;

    // cache call arguments for laplaceUpdate
    m_Instances = instances;
    Beta = beta;
    Hyperparameter = hyperparameter;
    R = r;
    Delta = deltaV;

    if (Beta == 0) {
      // coefficient currently at zero: probe the positive direction first
      sign = 1.0;
      DeltaUpdate = laplaceUpdate(j, sign);

      if (DeltaUpdate <= 0.0) { // positive direction failed.
        sign = -1.0;
        DeltaUpdate = laplaceUpdate(j, sign);

        // neither direction improves: stay at zero
        if (DeltaUpdate >= 0.0) {
          DeltaUpdate = 0;
        }
      }
    } else {
      sign = Beta / Math.abs(Beta);
      DeltaUpdate = laplaceUpdate(j, sign);
      change = Beta + DeltaUpdate;
      change = change / Math.abs(change);

      // if the update would flip the coefficient's sign, truncate the step
      // so the coefficient lands exactly on zero
      if (change < 0) {
        DeltaUpdate = 0 - Beta;
      }
    }

    return DeltaUpdate;
  }

  /**
   * This is the CLG-lasso update function described in the
   * <pre>
   * &#64;TechReport{blrtext04,
   * author = {Alexander Genkin and David D. Lewis and David Madigan},
   * title = {Large-scale bayesian logistic regression for text categorization},
   * institution = {DIMACS},
   * year = {2004},
   * url = "http://www.stat.rutgers.edu/~madigan/PAPERS/shortFat-v3a.pdf",
   * OPTannote = {}
   * }</pre>
   *
   * @param j the coordinate (attribute index) being updated
   * @param sign the direction (+1/-1) in which the step is taken
   * @return double value
   */
  public double laplaceUpdate(int j, double sign) {
    double value = 0.0;
    double numerator = 0.0;
    double denominator = 0.0;
    Instance instance;

    // accumulate first/second derivative terms over instances with a
    // non-zero value for attribute j
    for (int i = 0; i < m_Instances.numInstances(); i++) {
      instance = m_Instances.instance(i);

      if (instance.value(j) != 0) {
        numerator += (instance.value(j) * BayesianLogisticRegression.classSgn(
            instance.classValue()) * (1.0 / (1.0 + Math.exp(R[i]))));

        denominator += (instance.value(j) * instance.value(j) * BayesianLogisticRegression.bigF(
            R[i], Delta * instance.value(j)));
      }
    }

    // Laplace-prior (L1) regularisation term, directed by sign
    numerator -= (Math.sqrt(2.0 / Hyperparameter) * sign);

    // leave value at 0.0 if the curvature term vanished entirely
    if (denominator != 0.0) {
      value = numerator / denominator;
    }

    return value;
  }

  /**
   * Computes the log-likelihood values using the implementation in the Prior class.
   * NOTE(review): the capital-L name differs from Prior.computelogLikelihood
   * and from GaussianPriorImpl.computeLoglikelihood — this is not an override.
   *
   * @param betas current coefficient vector
   * @param instances the training data
   */
  public void computeLogLikelihood(double[] betas, Instances instances) {
    //Basic implementation done in the prior class.
    super.computelogLikelihood(betas, instances);
  }

  /**
   * This function computes the penalty term specific to Laplacian distribution.
   *
   * @param betas current coefficient vector
   * @param hyperparameters per-coordinate hyperparameters (lambda^2)
   */
  public void computePenalty(double[] betas, double[] hyperparameters) {
    penalty = 0.0;

    double lambda = 0.0;

    // negative log of the Laplace prior density, summed over coordinates
    for (int j = 0; j < betas.length; j++) {
      lambda = Math.sqrt(hyperparameters[j]);
      penalty += (Math.log(2) - Math.log(lambda) + (lambda * Math.abs(betas[j])));
    }

    penalty = 0 - penalty;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 1.2 $");
  }
}
4,665
26.127907
116
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/blr/Prior.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Prior.java
 * Copyright (C) 2008 Illinois Institute of Technology
 */

package weka.classifiers.bayes.blr;

import weka.classifiers.bayes.BayesianLogisticRegression;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;

import java.io.Serializable;

/**
 * This is an interface to plug various priors into
 * the Bayesian Logistic Regression Model.
 *
 * @version $Revision: 1.2 $
 * @author Navendu Garg (gargnav@iit.edu)
 */
public abstract class Prior implements Serializable, RevisionHandler {

  // Shared working state for subclasses' update functions.
  protected Instances m_Instances;
  protected double Beta = 0.0;
  protected double Hyperparameter = 0.0;
  protected double DeltaUpdate;
  protected double[] R;
  protected double Delta = 0.0;

  // Cached results of the likelihood/penalty computations below.
  protected double log_posterior = 0.0;
  protected double log_likelihood = 0.0;
  protected double penalty = 0.0;

  /**
   * Interface for the update functions for different types of
   * priors. The default implementation is a no-op returning 0.0;
   * concrete priors (Gaussian, Laplace) override it.
   */
  public double update(int j, Instances instances, double beta,
    double hyperparameter, double[] r, double deltaV) {
    return 0.0;
  }

  /**
   * Function computes the log-likelihood value:
   * -sum{1 to n}{ln(1+exp(-Beta*x(i)*y(i))}
   *
   * The result is stored in the log_likelihood field (retrieve it via
   * getLoglikelihood()), not returned.
   *
   * @param betas current coefficient vector
   * @param instances the training data
   */
  public void computelogLikelihood(double[] betas, Instances instances) {
    Instance instance;
    log_likelihood = 0.0;

    for (int i = 0; i < instances.numInstances(); i++) {
      instance = instances.instance(i);

      double log_row = 0.0;

      // accumulate the per-instance score over non-zero attribute values
      for (int j = 0; j < instance.numAttributes(); j++) {
        if (instance.value(j) != 0.0) {
          // NOTE(review): the attribute value appears SQUARED here
          // (betas[j] * x_j * x_j) rather than the usual dot product
          // betas[j] * x_j implied by the formula in the Javadoc above.
          // Verify against BayesianLogisticRegression before changing.
          log_row += (betas[j] * instance.value(j) * instance.value(j));
        }
      }

      // multiply by the class sign y(i) in {-1, +1}
      log_row = log_row * BayesianLogisticRegression.classSgn(instance.classValue());
      log_likelihood += Math.log(1.0 + Math.exp(0.0 - log_row));
    }

    log_likelihood = 0 - log_likelihood;
  }

  /**
   * Skeleton function to compute penalty terms. Intentionally a no-op here;
   * prior-specific penalties are implemented in the subclasses.
   *
   * @param betas current coefficient vector
   * @param hyperparameters per-coordinate hyperparameters
   */
  public void computePenalty(double[] betas, double[] hyperparameters) {
    //implement specific penalties in the prior implmentation.
  }

  /**
   * @return log-likelihood value (as computed by the last call to
   *         computelogLikelihood).
   */
  public double getLoglikelihood() {
    return log_likelihood;
  }

  /**
   * @return regularized log posterior value (log-likelihood plus the
   *         most recently computed penalty).
   */
  public double getLogPosterior() {
    log_posterior = log_likelihood + penalty;

    return log_posterior;
  }

  /**
   * @return penalty term (as computed by the last call to computePenalty).
   */
  public double getPenalty() {
    return penalty;
  }
}
3,276
25.216
85
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/ADNode.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ADNode.java
 * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
 *
 */
package weka.classifiers.bayes.net;

import java.io.FileReader;
import java.io.Serializable;

import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;

/**
 * The ADNode class implements the ADTree datastructure which increases
 * the speed with which sub-contingency tables can be constructed from
 * a data set in an Instances object. For details, see: <p/>
 * <!-- technical-plaintext-start -->
 * Andrew W. Moore, Mary S. Lee (1998). Cached Sufficient Statistics for
 * Efficient Machine Learning with Large Datasets. Journal of Artificial
 * Intelligence Research. 8:67-91.
 * <!-- technical-plaintext-end -->
 * <p/>
 * <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;article{Moore1998,
 *    author = {Andrew W. Moore and Mary S. Lee},
 *    journal = {Journal of Artificial Intelligence Research},
 *    pages = {67-91},
 *    title = {Cached Sufficient Statistics for Efficient Machine Learning with Large Datasets},
 *    volume = {8},
 *    year = {1998}
 * }
 * </pre>
 * <p/>
 * <!-- technical-bibtex-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision: 8034 $
 */
public class ADNode implements Serializable, TechnicalInformationHandler, RevisionHandler {
  /** for serialization */
  static final long serialVersionUID = 397409728366910204L;

  // Record sets smaller than this are stored as a flat Instance list
  // instead of being expanded into further VaryNodes. With the value 0
  // the flat-list branch in makeADTree is never taken.
  final static int MIN_RECORD_SIZE = 0;

  /** list of VaryNode children **/
  public VaryNode [] m_VaryNodes;

  /** list of Instance children (either m_Instances or m_VaryNodes is instantiated) **/
  public Instance [] m_Instances;

  /** count **/
  public int m_nCount;

  /** first node in VaryNode array **/
  public int m_nStartNode;

  /** Creates new ADNode */
  public ADNode() {
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.ARTICLE);
    result.setValue(Field.AUTHOR, "Andrew W. Moore and Mary S. Lee");
    result.setValue(Field.YEAR, "1998");
    result.setValue(Field.TITLE, "Cached Sufficient Statistics for Efficient Machine Learning with Large Datasets");
    result.setValue(Field.JOURNAL, "Journal of Artificial Intelligence Research");
    result.setValue(Field.VOLUME, "8");
    result.setValue(Field.PAGES, "67-91");

    return result;
  }

  /** create sub tree
   * @param iNode index of the lowest node in the tree
   * @param nRecords set of records in instances to be considered
   * @param instances data set
   * @return VaryNode representing part of an ADTree
   **/
  public static VaryNode makeVaryNode(int iNode, FastVector nRecords, Instances instances) {
    VaryNode _VaryNode = new VaryNode(iNode);
    int nValues = instances.attribute(iNode).numValues();

    // reserve memory and initialize
    FastVector [] nChildRecords = new FastVector[nValues];
    for (int iChild = 0; iChild < nValues; iChild++) {
      nChildRecords[iChild] = new FastVector();
    }

    // divide the records among children, keyed on the value of
    // attribute iNode in each record
    for (int iRecord = 0; iRecord < nRecords.size(); iRecord++) {
      int iInstance = ((Integer) nRecords.elementAt(iRecord)).intValue();
      nChildRecords[(int) instances.instance(iInstance).value(iNode)].addElement(new Integer(iInstance));
    }

    // find most common value (MCV); its subtree is left null below and
    // its counts are reconstructed from the siblings, per the ADTree scheme
    int nCount = nChildRecords[0].size();
    int nMCV = 0;
    for (int iChild = 1; iChild < nValues; iChild++) {
      if (nChildRecords[iChild].size() > nCount) {
        nCount = nChildRecords[iChild].size();
        nMCV = iChild;
      }
    }
    _VaryNode.m_nMCV = nMCV;

    // determine child nodes: MCV and empty children get no subtree
    _VaryNode.m_ADNodes = new ADNode[nValues];
    for (int iChild = 0; iChild < nValues; iChild++) {
      if (iChild == nMCV || nChildRecords[iChild].size() == 0) {
        _VaryNode.m_ADNodes[iChild] = null;
      } else {
        _VaryNode.m_ADNodes[iChild] = makeADTree(iNode + 1, nChildRecords[iChild], instances);
      }
    }
    return _VaryNode;
  } // MakeVaryNode

  /**
   * create sub tree
   *
   * @param iNode index of the lowest node in the tree
   * @param nRecords set of records in instances to be considered
   * @param instances data set
   * @return ADNode representing an ADTree
   */
  public static ADNode makeADTree(int iNode, FastVector nRecords, Instances instances) {
    ADNode _ADNode = new ADNode();
    _ADNode.m_nCount = nRecords.size();
    _ADNode.m_nStartNode = iNode;
    if (nRecords.size() < MIN_RECORD_SIZE) {
      // small record set: keep a flat Instance list instead of subtrees
      // (dead branch while MIN_RECORD_SIZE == 0)
      _ADNode.m_Instances = new Instance[nRecords.size()];
      for (int iInstance = 0; iInstance < nRecords.size(); iInstance++) {
        _ADNode.m_Instances[iInstance] = instances.instance(((Integer) nRecords.elementAt(iInstance)).intValue());
      }
    } else {
      // one VaryNode per remaining attribute, starting at iNode
      _ADNode.m_VaryNodes = new VaryNode[instances.numAttributes() - iNode];
      for (int iNode2 = iNode; iNode2 < instances.numAttributes(); iNode2++) {
        _ADNode.m_VaryNodes[iNode2 - iNode] = makeVaryNode(iNode2, nRecords, instances);
      }
    }
    return _ADNode;
  } // MakeADTree

  /**
   * create AD tree from set of instances
   *
   * @param instances data set
   * @return ADNode representing an ADTree
   */
  public static ADNode makeADTree(Instances instances) {
    FastVector nRecords = new FastVector(instances.numInstances());
    for (int iRecord = 0; iRecord < instances.numInstances(); iRecord++) {
      nRecords.addElement(new Integer(iRecord));
    }
    return makeADTree(0, nRecords, instances);
  } // MakeADTree

  /**
   * get counts for specific instantiation of a set of nodes
   *
   * @param nCounts - array for storing counts
   * @param nNodes - array of node indexes
   * @param nOffsets - offset for nodes in nNodes in nCounts
   * @param iNode - index into nNode indicating current node
   * @param iOffset - Offset into nCounts due to nodes below iNode
   * @param bSubstract - indicate whether counts should be added or substracted
   */
  public void getCounts(
      int [] nCounts,
      int [] nNodes,
      int [] nOffsets,
      int iNode,
      int iOffset,
      boolean bSubstract
  ) {
    //for (int iNode2 = 0; iNode2 < nCounts.length; iNode2++) {
    //  System.out.print(nCounts[iNode2] + " ");
    //}
    //System.out.println();
    if (iNode >= nNodes.length) {
      // all query nodes consumed: this node's count applies at iOffset
      if (bSubstract) {
        nCounts[iOffset] -= m_nCount;
      } else {
        nCounts[iOffset] += m_nCount;
      }
      return;
    } else {
      if (m_VaryNodes != null) {
        // recurse into the VaryNode for the current query attribute;
        // `this` is passed so the VaryNode can reconstruct MCV counts
        m_VaryNodes[nNodes[iNode] - m_nStartNode].getCounts(nCounts, nNodes, nOffsets, iNode, iOffset, this, bSubstract);
      } else {
        // flat leaf: tally each stored instance directly
        for (int iInstance = 0; iInstance < m_Instances.length; iInstance++) {
          int iOffset2 = iOffset;
          Instance instance = m_Instances[iInstance];
          for (int iNode2 = iNode; iNode2 < nNodes.length; iNode2++) {
            iOffset2 = iOffset2 + nOffsets[iNode2] * (int) instance.value(nNodes[iNode2]);
          }
          if (bSubstract) {
            nCounts[iOffset2]--;
          } else {
            nCounts[iOffset2]++;
          }
        }
      }
    }
  } // getCounts

  /**
   * print is used for debugging only and shows the ADTree in ASCII graphics
   */
  public void print() {
    String sTab = new String();
    for (int i = 0; i < m_nStartNode; i++) {
      sTab = sTab + "  ";
    }
    System.out.println(sTab + "Count = " + m_nCount);
    if (m_VaryNodes != null) {
      for (int iNode = 0; iNode < m_VaryNodes.length; iNode++) {
        System.out.println(sTab + "Node " + (iNode + m_nStartNode));
        m_VaryNodes[iNode].print(sTab);
      }
    } else {
      System.out.println(m_Instances);
    }
  }

  /**
   * for testing only
   *
   * Debug harness: note the hard-coded data file path below.
   *
   * @param argv the commandline options
   */
  public static void main(String [] argv) {
    try {
      Instances instances = new Instances(new FileReader("\\iris.2.arff"));
      ADNode ADTree = ADNode.makeADTree(instances);
      int [] nCounts = new int[12];
      int [] nNodes = new int[3];
      int [] nOffsets = new int[3];
      nNodes[0] = 0;
      nNodes[1] = 3;
      nNodes[2] = 4;
      nOffsets[0] = 2;
      nOffsets[1] = 1;
      nOffsets[2] = 4;
      ADTree.print();
      ADTree.getCounts(nCounts, nNodes, nOffsets, 0, 0, false);
    } catch (Throwable t) {
      t.printStackTrace();
    }
  } // main

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
} // class ADNode
10,742
35.171717
174
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/BIFReader.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * BIFReader.java
 * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand
 *
 */
package weka.classifiers.bayes.net;

import java.io.File;
import java.io.StringReader;
import java.util.StringTokenizer;

import javax.xml.parsers.DocumentBuilderFactory;

import org.w3c.dom.CharacterData;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.estimate.DiscreteEstimatorBayes;
import weka.core.FastVector;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.estimators.Estimator;

/**
 <!-- globalinfo-start -->
 * Builds a description of a Bayes Net classifier stored in XML BIF 0.3 format.<br/>
 * <br/>
 * For more details on XML BIF see:<br/>
 * <br/>
 * Fabio Cozman, Marek Druzdzel, Daniel Garcia (1998). XML BIF version 0.3. URL http://www-2.cs.cmu.edu/~fgcozman/Research/InterchangeFormat/.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;misc{Cozman1998,
 *    author = {Fabio Cozman and Marek Druzdzel and Daniel Garcia},
 *    title = {XML BIF version 0.3},
 *    year = {1998},
 *    URL = {http://www-2.cs.cmu.edu/\~fgcozman/Research/InterchangeFormat/}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -D
 *  Do not use ADTree data structure
 * </pre>
 *
 * <pre> -B &lt;BIF file&gt;
 *  BIF file to compare with
 * </pre>
 *
 * <pre> -Q weka.classifiers.bayes.net.search.SearchAlgorithm
 *  Search algorithm
 * </pre>
 *
 * <pre> -E weka.classifiers.bayes.net.estimate.SimpleEstimator
 *  Estimator algorithm
 * </pre>
 *
 <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision: 8034 $
 */
public class BIFReader extends BayesNet implements TechnicalInformationHandler {
  // Layout coordinates for each node, parsed from "position = (x, y)"
  // PROPERTY elements in the BIF document.
  protected int [] m_nPositionX;
  protected int [] m_nPositionY;
  // Maps this network's node index to the corresponding node index in
  // another network; filled by Sync().
  private int [] m_order;

  /** for serialization */
  static final long serialVersionUID = -8358864680379881429L;

  /**
   * This will return a string describing the classifier.
   * @return The string.
   */
  public String globalInfo() {
    return "Builds a description of a Bayes Net classifier stored in XML "
      + "BIF 0.3 format.\n\n"
      + "For more details on XML BIF see:\n\n"
      + getTechnicalInformation().toString();
  }

  /** processFile reads a BIFXML file and initializes a Bayes Net
   * @param sFile name of the file to parse
   * @return the BIFReader
   * @throws Exception if processing fails
   */
  public BIFReader processFile(String sFile) throws Exception {
    m_sFile = sFile;
    DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
    factory.setValidating(true);
    Document doc = factory.newDocumentBuilder().parse(new File(sFile));
    doc.normalize();

    buildInstances(doc, sFile);
    buildStructure(doc);
    return this;
  } // processFile

  /** processString parses a BIFXML document held in a string and
   * initializes a Bayes Net from it.
   * @param sStr the XML BIF document text
   * @return the BIFReader
   * @throws Exception if processing fails
   */
  public BIFReader processString(String sStr) throws Exception {
    DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
    factory.setValidating(true);
    Document doc = factory.newDocumentBuilder().parse(new org.xml.sax.InputSource(new StringReader(sStr)));
    doc.normalize();
    buildInstances(doc, "from-string");
    buildStructure(doc);
    return this;
  } // processString

  /** the current filename */
  String m_sFile;

  /**
   * returns the current filename
   *
   * @return the current filename
   */
  public String getFileName() {
    return m_sFile;
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.MISC);
    result.setValue(Field.AUTHOR, "Fabio Cozman and Marek Druzdzel and Daniel Garcia");
    result.setValue(Field.YEAR, "1998");
    result.setValue(Field.TITLE, "XML BIF version 0.3");
    result.setValue(Field.URL, "http://www-2.cs.cmu.edu/~fgcozman/Research/InterchangeFormat/");

    return result;
  }

  /** buildStructure parses the BIF document in the DOM tree contained
   * in the doc parameter and specifies the the network structure and
   * probability tables.
   * It assumes that buildInstances has been called before
   * @param doc DOM document containing BIF document in DOM tree
   * @throws Exception if building of structure fails
   */
  void buildStructure(Document doc) throws Exception {
    // Get the name of the network
    // initialize conditional distribution tables
    m_Distributions = new Estimator[m_Instances.numAttributes()][];
    for (int iNode = 0; iNode < m_Instances.numAttributes(); iNode++) {
      // find definition that goes with this node
      String sName = m_Instances.attribute(iNode).name();
      Element definition = getDefinition(doc, sName);
/*
      if (nodelist.getLength() == 0) {
        throw new Exception("No definition found for node " + sName);
      }
      if (nodelist.getLength() > 1) {
        System.err.println("More than one definition found for node " + sName + ". Using first definition.");
      }
      Element definition = (Element) nodelist.item(0);
*/

      // get the parents for this node
      // resolve structure
      FastVector nodelist = getParentNodes(definition);
      for (int iParent = 0; iParent < nodelist.size(); iParent++) {
        Node parentName = ((Node) nodelist.elementAt(iParent)).getFirstChild();
        String sParentName = ((CharacterData) (parentName)).getData();
        int nParent = getNode(sParentName);
        m_ParentSets[iNode].addParent(nParent, m_Instances);
      }

      // resolve conditional probability table; one estimator per
      // parent-value configuration
      int nCardinality = m_ParentSets[iNode].getCardinalityOfParents();
      int nValues = m_Instances.attribute(iNode).numValues();
      m_Distributions[iNode] = new Estimator[nCardinality];
      for (int i = 0; i < nCardinality; i++) {
        m_Distributions[iNode][i] = new DiscreteEstimatorBayes(nValues, 0.0f);
      }
/*
      StringBuffer sTable = new StringBuffer();
      for (int iText = 0; iText < nodelist.getLength(); iText++) {
        sTable.append(((CharacterData) (nodelist.item(iText))).getData());
        sTable.append(' ');
      }
      StringTokenizer st = new StringTokenizer(sTable.toString());
*/
      // TABLE text is whitespace-separated probabilities, row-major over
      // parent configurations, then node values
      String sTable = getTable(definition);
      StringTokenizer st = new StringTokenizer(sTable.toString());
      for (int i = 0; i < nCardinality; i++) {
        DiscreteEstimatorBayes d = (DiscreteEstimatorBayes) m_Distributions[iNode][i];
        for (int iValue = 0; iValue < nValues; iValue++) {
          String sWeight = st.nextToken();
          d.addValue(iValue, new Double(sWeight).doubleValue());
        }
      }
    }
  } // buildStructure

  /** synchronizes the node ordering of this Bayes network with
   * those in the other network (if possible).
   * @param other Bayes network to synchronize with
   * @throws Exception if nr of attributes differs or not all of the variables have the same name.
   */
  public void Sync(BayesNet other) throws Exception {
    int nAtts = m_Instances.numAttributes();
    if (nAtts != other.m_Instances.numAttributes()) {
      throw new Exception ("Cannot synchronize networks: different number of attributes.");
    }
    m_order = new int[nAtts];
    for (int iNode = 0; iNode < nAtts; iNode++) {
      String sName = other.getNodeName(iNode);
      // getNode throws if a name from `other` has no match here
      m_order[getNode(sName)] = iNode;
    }
  } // Sync

  /**
   * Returns all TEXT children of the given node in one string. Between
   * the node values new lines are inserted.
   *
   * @param node the node to return the content for
   * @return the content of the node
   */
  public String getContent(Element node) {
    NodeList list;
    Node item;
    int i;
    String result;

    result = "";
    list = node.getChildNodes();

    for (i = 0; i < list.getLength(); i++) {
      item = list.item(i);
      if (item.getNodeType() == Node.TEXT_NODE)
        result += "\n" + item.getNodeValue();
    }

    return result;
  }

  /** buildInstances parses the BIF document and creates a Bayes Net with its
   * nodes specified, but leaves the network structure and probability tables empty.
   * @param doc DOM document containing BIF document in DOM tree
   * @param sName default name to give to the Bayes Net. Will be overridden if specified in the BIF document.
   * @throws Exception if building fails
   */
  void buildInstances(Document doc, String sName) throws Exception {
    NodeList nodelist;

    // Get the name of the network
    nodelist = selectAllNames(doc);
    if (nodelist.getLength() > 0) {
      sName = ((CharacterData) (nodelist.item(0).getFirstChild())).getData();
    }

    // Process variables
    nodelist = selectAllVariables(doc);
    int nNodes = nodelist.getLength();

    // initialize structure
    FastVector attInfo = new FastVector(nNodes);

    // Initialize
    m_nPositionX = new int[nodelist.getLength()];
    m_nPositionY = new int[nodelist.getLength()];

    // Process variables
    for (int iNode = 0; iNode < nodelist.getLength(); iNode++) {
      // Get element
      FastVector valueslist;
      // Get the OUTCOME elements (the variable's values)
      valueslist = selectOutCome(nodelist.item(iNode));

      int nValues = valueslist.size();
      // generate value strings
      FastVector nomStrings = new FastVector(nValues + 1);
      for (int iValue = 0; iValue < nValues; iValue++) {
        Node node = ((Node) valueslist.elementAt(iValue)).getFirstChild();
        String sValue = ((CharacterData) (node)).getData();
        if (sValue == null) {
          sValue = "Value" + (iValue + 1);
        }
        nomStrings.addElement(sValue);
      }
      FastVector nodelist2;
      // Get the NAME element of the variable
      nodelist2 = selectName(nodelist.item(iNode));
      if (nodelist2.size() == 0) {
        throw new Exception ("No name specified for variable");
      }
      String sNodeName = ((CharacterData) (((Node) nodelist2.elementAt(0)).getFirstChild())).getData();

      weka.core.Attribute att = new weka.core.Attribute(sNodeName, nomStrings);
      attInfo.addElement(att);

      valueslist = selectProperty(nodelist.item(iNode));
      nValues = valueslist.size();
      // scan PROPERTY elements for layout information
      for (int iValue = 0; iValue < nValues; iValue++) {
        // parsing for strings of the form "position = (73, 165)"
        Node node = ((Node) valueslist.elementAt(iValue)).getFirstChild();
        String sValue = ((CharacterData) (node)).getData();
        if (sValue.startsWith("position")) {
          int i0 = sValue.indexOf('(');
          int i1 = sValue.indexOf(',');
          int i2 = sValue.indexOf(')');
          String sX = sValue.substring(i0 + 1, i1).trim();
          String sY = sValue.substring(i1 + 1, i2).trim();
          try {
            m_nPositionX[iNode] = (int) Integer.parseInt(sX);
            m_nPositionY[iNode] = (int) Integer.parseInt(sY);
          } catch (NumberFormatException e) {
            // malformed coordinates are reported and default to (0, 0)
            System.err.println("Wrong number format in position :(" + sX + "," + sY +")");
            m_nPositionX[iNode] = 0;
            m_nPositionY[iNode] = 0;
          }
        }
      }
    }

    m_Instances = new Instances(sName, attInfo, 100);
    // the last attribute serves as class attribute
    m_Instances.setClassIndex(nNodes - 1);
    setUseADTree(false);
    initStructure();
  } // buildInstances

//  /** selectNodeList selects list of nodes from document specified in XPath expression
//   * @param doc : document (or node) to query
//   * @param sXPath : XPath expression
//   * @return list of nodes conforming to XPath expression in doc
//   * @throws Exception
//   */
//  private NodeList selectNodeList(Node doc, String sXPath) throws Exception {
//    NodeList nodelist = org.apache.xpath.XPathAPI.selectNodeList(doc, sXPath);
//    return nodelist;
//  } // selectNodeList

  /** Returns all NAME elements in the document. */
  NodeList selectAllNames(Document doc) throws Exception {
    //NodeList nodelist = selectNodeList(doc, "//NAME");
    NodeList nodelist = doc.getElementsByTagName("NAME");
    return nodelist;
  } // selectAllNames

  /** Returns all VARIABLE elements in the document. */
  NodeList selectAllVariables(Document doc) throws Exception {
    //NodeList nodelist = selectNodeList(doc, "//VARIABLE");
    NodeList nodelist = doc.getElementsByTagName("VARIABLE");
    return nodelist;
  } // selectAllVariables

  /** Finds the DEFINITION element whose FOR child names sName.
   * Linear scan over all DEFINITION elements; the first match wins. */
  Element getDefinition(Document doc, String sName) throws Exception {
    //NodeList nodelist = selectNodeList(doc, "//DEFINITION[normalize-space(FOR/text())=\"" + sName + "\"]");

    NodeList nodelist = doc.getElementsByTagName("DEFINITION");
    for (int iNode = 0; iNode < nodelist.getLength(); iNode++) {
      Node node = nodelist.item(iNode);
      FastVector list = selectElements(node, "FOR");
      if (list.size() > 0) {
        Node forNode = (Node) list.elementAt(0);
        if (getContent((Element) forNode).trim().equals(sName)) {
          return (Element) node;
        }
      }
    }
    throw new Exception("Could not find definition for ((" + sName + "))");
  } // getDefinition

  /** Returns the GIVEN children (parent variables) of a DEFINITION. */
  FastVector getParentNodes(Node definition) throws Exception {
    //NodeList nodelist = selectNodeList(definition, "GIVEN");
    FastVector nodelist = selectElements(definition, "GIVEN");
    return nodelist;
  } // getParentNodes

  /** Returns the text of the first TABLE child, newlines flattened to spaces. */
  String getTable(Node definition) throws Exception {
    //NodeList nodelist = selectNodeList(definition, "TABLE/text()");
    FastVector nodelist = selectElements(definition, "TABLE");
    String sTable = getContent((Element) nodelist.elementAt(0));
    sTable = sTable.replaceAll("\\n"," ");
    return sTable;
  } // getTable

  /** Returns the OUTCOME children of a VARIABLE element. */
  FastVector selectOutCome(Node item) throws Exception {
    //NodeList nodelist = selectNodeList(item, "OUTCOME");
    FastVector nodelist = selectElements(item, "OUTCOME");
    return nodelist;
  } // selectOutCome

  /** Returns the NAME children of a VARIABLE element. */
  FastVector selectName(Node item) throws Exception {
    //NodeList nodelist = selectNodeList(item, "NAME");
    FastVector nodelist = selectElements(item, "NAME");
    return nodelist;
  } // selectName

  /** Returns the PROPERTY children of a VARIABLE element. */
  FastVector selectProperty(Node item) throws Exception {
    // NodeList nodelist = selectNodeList(item, "PROPERTY");
    FastVector nodelist = selectElements(item, "PROPERTY");
    return nodelist;
  } // selectProperty

  /** Returns the direct children of item with tag name sElement. */
  FastVector selectElements(Node item, String sElement) throws Exception {
    NodeList children = item.getChildNodes();
    FastVector nodelist = new FastVector();
    for (int iNode = 0; iNode < children.getLength(); iNode++) {
      Node node = children.item(iNode);
      if ((node.getNodeType() == Node.ELEMENT_NODE) && node.getNodeName().equals(sElement)) {
        nodelist.addElement(node);
      }
    }
    return nodelist;
  } // selectElements

  /** Count nr of arcs missing from other network compared to current network
   * Note that an arc is not 'missing' if it is reversed.
   * @param other network to compare with
   * @return nr of missing arcs
   */
  public int missingArcs(BayesNet other) {
    try {
      Sync(other);
      int nMissing = 0;
      for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) {
        for (int iParent = 0; iParent < m_ParentSets[iAttribute].getNrOfParents(); iParent++) {
          int nParent = m_ParentSets[iAttribute].getParent(iParent);
          // neither this arc nor its reverse is present in `other`
          if (!other.getParentSet(m_order[iAttribute]).contains(m_order[nParent]) && !other.getParentSet(m_order[nParent]).contains(m_order[iAttribute])) {
            nMissing++;
          }
        }
      }
      return nMissing;
    } catch (Exception e) {
      System.err.println(e.getMessage());
      return 0;
    }
  } // missingArcs

  /** Count nr of exta arcs from other network compared to current network
   * Note that an arc is not 'extra' if it is reversed.
   * @param other network to compare with
   * @return nr of missing arcs
   */
  public int extraArcs(BayesNet other) {
    try {
      Sync(other);
      int nExtra = 0;
      for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) {
        for (int iParent = 0; iParent < other.getParentSet(m_order[iAttribute]).getNrOfParents(); iParent++) {
          int nParent = m_order[other.getParentSet(m_order[iAttribute]).getParent(iParent)];
          // the arc from `other` appears here in neither direction
          if (!m_ParentSets[iAttribute].contains(nParent) && !m_ParentSets[nParent].contains(iAttribute)) {
            nExtra++;
          }
        }
      }
      return nExtra;
    } catch (Exception e) {
      System.err.println(e.getMessage());
      return 0;
    }
  } // extraArcs

  /** calculates the divergence between the probability distribution
   * represented by this network and that of another, that is,
   * \sum_{x\in X} P(x)log P(x)/Q(x)
   * where X is the set of values the nodes in the network can take,
   * P(x) the probability of this network for configuration x
   * Q(x) the probability of the other network for configuration x
   * @param other network to compare with
   * @return divergence between this and other Bayes Network
   */
  public double divergence(BayesNet other) {
    try {
      Sync(other);
      // D: divergence
      double D = 0.0;
      int nNodes = m_Instances.numAttributes();
      int [] nCard = new int[nNodes];
      for (int iNode = 0; iNode < nNodes; iNode++) {
        nCard[iNode] = m_Instances.attribute(iNode).numValues();
      }
      // x: holds current configuration of nodes
      int [] x = new int[nNodes];
      // simply sum over all configurations to calc divergence D
      // (odometer-style enumeration; exponential in the number of nodes)
      int i = 0;
      while (i < nNodes) {
        // update configuration
        x[i]++;
        while (i < nNodes && x[i] == m_Instances.attribute(i).numValues()) {
          x[i] = 0;
          i++;
          if (i < nNodes) {
            x[i]++;
          }
        }
        if (i < nNodes) {
          i = 0;
          // calc P(x) and Q(x)
          double P = 1.0;
          for (int iNode = 0; iNode < nNodes; iNode++) {
            int iCPT = 0;
            for (int iParent = 0; iParent < m_ParentSets[iNode].getNrOfParents(); iParent++) {
              int nParent = m_ParentSets[iNode].getParent(iParent);
              iCPT = iCPT * nCard[nParent] + x[nParent];
            }
            P = P * m_Distributions[iNode][iCPT].getProbability(x[iNode]);
          }
          double Q = 1.0;
          for (int iNode = 0; iNode < nNodes; iNode++) {
            int iCPT = 0;
            for (int iParent = 0; iParent < other.getParentSet(m_order[iNode]).getNrOfParents(); iParent++) {
              int nParent = m_order[other.getParentSet(m_order[iNode]).getParent(iParent)];
              iCPT = iCPT * nCard[nParent] + x[nParent];
            }
            Q = Q * other.m_Distributions[m_order[iNode]][iCPT].getProbability(x[iNode]);
          }
          // update divergence if probabilities are positive
          if (P > 0.0 && Q > 0.0) {
            D = D + P * Math.log(Q / P);
          }
        }
      }
      return D;
    } catch (Exception e) {
      System.err.println(e.getMessage());
      return 0;
    }
  } // divergence

  /** Count nr of reversed arcs from other network compared to current network
   * @param other network to compare with
   * @return nr of missing arcs
   */
  public int reversedArcs(BayesNet other) {
    try {
      Sync(other);
      int nReversed = 0;
      for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) {
        for (int iParent = 0; iParent < m_ParentSets[iAttribute].getNrOfParents(); iParent++) {
          int nParent = m_ParentSets[iAttribute].getParent(iParent);
          // arc exists here as parent->child but in `other` only as child->parent
          if (!other.getParentSet(m_order[iAttribute]).contains(m_order[nParent]) && other.getParentSet(m_order[nParent]).contains(m_order[iAttribute])) {
            nReversed++;
          }
        }
      }
      return nReversed;
    } catch (Exception e) {
      System.err.println(e.getMessage());
      return 0;
    }
  } // reversedArcs

  /** getNode finds the index of the node with name sNodeName
   * and throws an exception if no such node can be found.
   * @param sNodeName name of the node to get the index from
   * @return index of the node with name sNodeName
   * @throws Exception if node cannot be found
   */
  public int getNode(String sNodeName) throws Exception {
    int iNode = 0;
    while (iNode < m_Instances.numAttributes()) {
      if (m_Instances.attribute(iNode).name().equals(sNodeName)) {
        return iNode;
      }
      iNode++;
    }
    throw new Exception("Could not find node [[" + sNodeName + "]]");
  } // getNode

  /**
   * the default constructor
   */
  public BIFReader() {
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Loads the file specified as first parameter and prints it to stdout.
   *
   * @param args the command line parameters
   */
  public static void main(String[] args) {
    try {
      BIFReader br = new BIFReader();
      br.processFile(args[0]);
      System.out.println(br.toString());
    } catch (Throwable t) {
      t.printStackTrace();
    }
  } // main
} // class BIFReader
22,276
33.645412
150
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/BayesNetGenerator.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * BayesNet.java * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.bayes.net.estimate.DiscreteEstimatorBayes; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.estimators.Estimator; /** <!-- globalinfo-start --> * Bayes Network learning using various search algorithms and quality measures.<br/> * Base class for a Bayes Network classifier. Provides datastructures (network structure, conditional probability distributions, etc.) 
and facilities common to Bayes Network learning algorithms like K2 and B.<br/> * <br/> * For more information see:<br/> * <br/> * http://www.cs.waikato.ac.nz/~remco/weka.pdf * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -B * Generate network (instead of instances) * </pre> * * <pre> -N &lt;integer&gt; * Nr of nodes * </pre> * * <pre> -A &lt;integer&gt; * Nr of arcs * </pre> * * <pre> -M &lt;integer&gt; * Nr of instances * </pre> * * <pre> -C &lt;integer&gt; * Cardinality of the variables * </pre> * * <pre> -S &lt;integer&gt; * Seed for random number generator * </pre> * * <pre> -F &lt;file&gt; * The BIF file to obtain the structure from. * </pre> * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision: 8034 $ */ public class BayesNetGenerator extends EditableBayesNet { /** the seed value */ int m_nSeed = 1; /** the random number generator */ Random random; /** for serialization */ static final long serialVersionUID = -7462571170596157720L; /** * Constructor for BayesNetGenerator. */ public BayesNetGenerator() { super(); } // c'tor /** * Generate random connected Bayesian network with discrete nodes * having all the same cardinality. 
* * @throws Exception if something goes wrong */ public void generateRandomNetwork () throws Exception { if (m_otherBayesNet == null) { // generate from scratch Init(m_nNrOfNodes, m_nCardinality); generateRandomNetworkStructure(m_nNrOfNodes, m_nNrOfArcs); generateRandomDistributions(m_nNrOfNodes, m_nCardinality); } else { // read from file, just copy parent sets and distributions m_nNrOfNodes = m_otherBayesNet.getNrOfNodes(); m_ParentSets = m_otherBayesNet.getParentSets(); m_Distributions = m_otherBayesNet.getDistributions(); random = new Random(m_nSeed); // initialize m_Instances FastVector attInfo = new FastVector(m_nNrOfNodes); // generate value strings for (int iNode = 0; iNode < m_nNrOfNodes; iNode++) { int nValues = m_otherBayesNet.getCardinality(iNode); FastVector nomStrings = new FastVector(nValues + 1); for (int iValue = 0; iValue < nValues; iValue++) { nomStrings.addElement(m_otherBayesNet.getNodeValue(iNode, iValue)); } Attribute att = new Attribute(m_otherBayesNet.getNodeName(iNode), nomStrings); attInfo.addElement(att); } m_Instances = new Instances(m_otherBayesNet.getName(), attInfo, 100); m_Instances.setClassIndex(m_nNrOfNodes - 1); } } // GenerateRandomNetwork /** * Init defines a minimal Bayes net with no arcs * @param nNodes number of nodes in the Bayes net * @param nValues number of values each of the nodes can take * @throws Exception if something goes wrong */ public void Init(int nNodes, int nValues) throws Exception { random = new Random(m_nSeed); // initialize structure FastVector attInfo = new FastVector(nNodes); // generate value strings FastVector nomStrings = new FastVector(nValues + 1); for (int iValue = 0; iValue < nValues; iValue++) { nomStrings.addElement("Value" + (iValue + 1)); } for (int iNode = 0; iNode < nNodes; iNode++) { Attribute att = new Attribute("Node" + (iNode + 1), nomStrings); attInfo.addElement(att); } m_Instances = new Instances("RandomNet", attInfo, 100); m_Instances.setClassIndex(nNodes - 1); setUseADTree(false); 
// m_bInitAsNaiveBayes = false; // m_bMarkovBlanketClassifier = false; initStructure(); // initialize conditional distribution tables m_Distributions = new Estimator[nNodes][1]; for (int iNode = 0; iNode < nNodes; iNode++) { m_Distributions[iNode][0] = new DiscreteEstimatorBayes(nValues, getEstimator().getAlpha()); } m_nEvidence = new FastVector(nNodes); for (int i = 0; i < nNodes; i++) { m_nEvidence.addElement(-1); } m_fMarginP = new FastVector(nNodes); for (int i = 0; i < nNodes; i++) { double[] P = new double[getCardinality(i)]; m_fMarginP.addElement(P); } m_nPositionX = new FastVector(nNodes); m_nPositionY = new FastVector(nNodes); for (int iNode = 0; iNode < nNodes; iNode++) { m_nPositionX.addElement(iNode%10 * 50); m_nPositionY.addElement(((int)(iNode/10)) * 50); } } // DefineNodes /** * GenerateRandomNetworkStructure generate random connected Bayesian network * @param nNodes number of nodes in the Bayes net to generate * @param nArcs number of arcs to generate. Must be between nNodes - 1 and nNodes * (nNodes-1) / 2 * @throws Exception if number of arcs is incorrect */ public void generateRandomNetworkStructure(int nNodes, int nArcs) throws Exception { if (nArcs < nNodes - 1) { throw new Exception("Number of arcs should be at least (nNodes - 1) = " + (nNodes - 1) + " instead of " + nArcs); } if (nArcs > nNodes * (nNodes - 1) / 2) { throw new Exception("Number of arcs should be at most nNodes * (nNodes - 1) / 2 = "+ (nNodes * (nNodes - 1) / 2) + " instead of " + nArcs); } if (nArcs == 0) {return;} // deal with patalogical case for nNodes = 1 // first generate tree connecting all nodes generateTree(nNodes); // The tree contains nNodes - 1 arcs, so there are // nArcs - (nNodes-1) to add at random. // All arcs point from lower to higher ordered nodes // so that acyclicity is ensured. 
for (int iArc = nNodes - 1; iArc < nArcs; iArc++) { boolean bDone = false; while (!bDone) { int nNode1 = random.nextInt(nNodes); int nNode2 = random.nextInt(nNodes); if (nNode1 == nNode2) {nNode2 = (nNode1 + 1) % nNodes;} if (nNode2 < nNode1) {int h = nNode1; nNode1 = nNode2; nNode2 = h;} if (!m_ParentSets[nNode2].contains(nNode1)) { m_ParentSets[nNode2].addParent(nNode1, m_Instances); bDone = true; } } } } // GenerateRandomNetworkStructure /** * GenerateTree creates a tree-like network structure (actually a * forest) by starting with a randomly selected pair of nodes, add * an arc between. Then keep on selecting one of the connected nodes * and one of the unconnected ones and add an arrow between them, * till all nodes are connected. * @param nNodes number of nodes in the Bayes net to generate */ void generateTree(int nNodes) { boolean [] bConnected = new boolean [nNodes]; // start adding an arc at random int nNode1 = random.nextInt(nNodes); int nNode2 = random.nextInt(nNodes); if (nNode1 == nNode2) {nNode2 = (nNode1 + 1) % nNodes;} if (nNode2 < nNode1) {int h = nNode1; nNode1 = nNode2; nNode2 = h;} m_ParentSets[nNode2].addParent(nNode1, m_Instances); bConnected[nNode1] = true; bConnected[nNode2] = true; // Repeatedly, select one of the connected nodes, and one of // the unconnected nodes and add an arc. // All arcs point from lower to higher ordered nodes // so that acyclicity is ensured. 
for (int iArc = 2; iArc < nNodes; iArc++ ) { int nNode = random.nextInt(nNodes); nNode1 = 0; // one of the connected nodes while (nNode >= 0) { nNode1 = (nNode1 + 1) % nNodes; while (!bConnected[nNode1]) { nNode1 = (nNode1 + 1) % nNodes; } nNode--; } nNode = random.nextInt(nNodes); nNode2 = 0; // one of the unconnected nodes while (nNode >= 0) { nNode2 = (nNode2 + 1) % nNodes; while (bConnected[nNode2]) { nNode2 = (nNode2 + 1) % nNodes; } nNode--; } if (nNode2 < nNode1) {int h = nNode1; nNode1 = nNode2; nNode2 = h;} m_ParentSets[nNode2].addParent(nNode1, m_Instances); bConnected[nNode1] = true; bConnected[nNode2] = true; } } // GenerateTree /** * GenerateRandomDistributions generates discrete conditional distribution tables * for all nodes of a Bayes network once a network structure has been determined. * @param nNodes number of nodes in the Bayes net * @param nValues number of values each of the nodes can take */ void generateRandomDistributions(int nNodes, int nValues) { // Reserve space for CPTs int nMaxParentCardinality = 1; for (int iAttribute = 0; iAttribute < nNodes; iAttribute++) { if (m_ParentSets[iAttribute].getCardinalityOfParents() > nMaxParentCardinality) { nMaxParentCardinality = m_ParentSets[iAttribute].getCardinalityOfParents(); } } // Reserve plenty of memory m_Distributions = new Estimator[m_Instances.numAttributes()][nMaxParentCardinality]; // estimate CPTs for (int iAttribute = 0; iAttribute < nNodes; iAttribute++) { int [] nPs = new int [nValues + 1]; nPs[0] = 0; nPs[nValues] = 1000; for (int iParent = 0; iParent < m_ParentSets[iAttribute].getCardinalityOfParents(); iParent++) { // fill array with random nr's for (int iValue = 1; iValue < nValues; iValue++) { nPs[iValue] = random.nextInt(1000); } // sort for (int iValue = 1; iValue < nValues; iValue++) { for (int iValue2 = iValue + 1; iValue2 < nValues; iValue2++) { if (nPs[iValue2] < nPs[iValue]) { int h = nPs[iValue2]; nPs[iValue2] = nPs[iValue]; nPs[iValue] = h; } } } // assign to 
probability tables DiscreteEstimatorBayes d = new DiscreteEstimatorBayes(nValues, getEstimator().getAlpha()); for (int iValue = 0; iValue < nValues; iValue++) { d.addValue(iValue, nPs[iValue + 1] - nPs[iValue]); } m_Distributions[iAttribute][iParent] = d; } } } // GenerateRandomDistributions /** * GenerateInstances generates random instances sampling from the * distribution represented by the Bayes network structure. It assumes * a Bayes network structure has been initialized * * @throws Exception if something goes wrong */ public void generateInstances () throws Exception { int [] order = getOrder(); for (int iInstance = 0; iInstance < m_nNrOfInstances; iInstance++) { int nNrOfAtts = m_Instances.numAttributes(); Instance instance = new DenseInstance(nNrOfAtts); instance.setDataset(m_Instances); for (int iAtt2 = 0; iAtt2 < nNrOfAtts; iAtt2++) { int iAtt = order[iAtt2]; double iCPT = 0; for (int iParent = 0; iParent < m_ParentSets[iAtt].getNrOfParents(); iParent++) { int nParent = m_ParentSets[iAtt].getParent(iParent); iCPT = iCPT * m_Instances.attribute(nParent).numValues() + instance.value(nParent); } double fRandom = random.nextInt(1000) / 1000.0f; int iValue = 0; while (fRandom > m_Distributions[iAtt][(int) iCPT].getProbability(iValue)) { fRandom = fRandom - m_Distributions[iAtt][(int) iCPT].getProbability(iValue); iValue++ ; } instance.setValue(iAtt, iValue); } m_Instances.add(instance); } } // GenerateInstances /** * @throws Exception if there's a cycle in the graph */ int [] getOrder() throws Exception { int nNrOfAtts = m_Instances.numAttributes(); int [] order = new int[nNrOfAtts]; boolean [] bDone = new boolean[nNrOfAtts]; for (int iAtt = 0; iAtt < nNrOfAtts; iAtt++) { int iAtt2 = 0; boolean allParentsDone = false; while (!allParentsDone && iAtt2 < nNrOfAtts) { if (!bDone[iAtt2]) { allParentsDone = true; int iParent = 0; while (allParentsDone && iParent < m_ParentSets[iAtt2].getNrOfParents()) { allParentsDone = 
bDone[m_ParentSets[iAtt2].getParent(iParent++)]; } if (allParentsDone && iParent == m_ParentSets[iAtt2].getNrOfParents()) { order[iAtt] = iAtt2; bDone[iAtt2] = true; } else { iAtt2++; } } else { iAtt2++; } } if (!allParentsDone && iAtt2 == nNrOfAtts) { throw new Exception("There appears to be a cycle in the graph"); } } return order; } // getOrder /** * Returns either the net (if BIF format) or the generated instances * * @return either the net or the generated instances */ public String toString() { if (m_bGenerateNet) { return toXMLBIF03(); } return m_Instances.toString(); } // toString boolean m_bGenerateNet = false; int m_nNrOfNodes = 10; int m_nNrOfArcs = 10; int m_nNrOfInstances = 10; int m_nCardinality = 2; String m_sBIFFile = ""; void setNrOfNodes(int nNrOfNodes) {m_nNrOfNodes = nNrOfNodes;} void setNrOfArcs(int nNrOfArcs) {m_nNrOfArcs = nNrOfArcs;} void setNrOfInstances(int nNrOfInstances) {m_nNrOfInstances = nNrOfInstances;} void setCardinality(int nCardinality) {m_nCardinality = nCardinality;} void setSeed(int nSeed) {m_nSeed = nSeed;} /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Vector newVector = new Vector(6); newVector.addElement(new Option("\tGenerate network (instead of instances)\n", "B", 0, "-B")); newVector.addElement(new Option("\tNr of nodes\n", "N", 1, "-N <integer>")); newVector.addElement(new Option("\tNr of arcs\n", "A", 1, "-A <integer>")); newVector.addElement(new Option("\tNr of instances\n", "M", 1, "-M <integer>")); newVector.addElement(new Option("\tCardinality of the variables\n", "C", 1, "-C <integer>")); newVector.addElement(new Option("\tSeed for random number generator\n", "S", 1, "-S <integer>")); newVector.addElement(new Option("\tThe BIF file to obtain the structure from.\n", "F", 1, "-F <file>")); return newVector.elements(); } // listOptions /** * Parses a given list of options. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -B * Generate network (instead of instances) * </pre> * * <pre> -N &lt;integer&gt; * Nr of nodes * </pre> * * <pre> -A &lt;integer&gt; * Nr of arcs * </pre> * * <pre> -M &lt;integer&gt; * Nr of instances * </pre> * * <pre> -C &lt;integer&gt; * Cardinality of the variables * </pre> * * <pre> -S &lt;integer&gt; * Seed for random number generator * </pre> * * <pre> -F &lt;file&gt; * The BIF file to obtain the structure from. * </pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @exception Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { m_bGenerateNet = Utils.getFlag('B', options); String sNrOfNodes = Utils.getOption('N', options); if (sNrOfNodes.length() != 0) { setNrOfNodes(Integer.parseInt(sNrOfNodes)); } else { setNrOfNodes(10); } String sNrOfArcs = Utils.getOption('A', options); if (sNrOfArcs.length() != 0) { setNrOfArcs(Integer.parseInt(sNrOfArcs)); } else { setNrOfArcs(10); } String sNrOfInstances = Utils.getOption('M', options); if (sNrOfInstances.length() != 0) { setNrOfInstances(Integer.parseInt(sNrOfInstances)); } else { setNrOfInstances(10); } String sCardinality = Utils.getOption('C', options); if (sCardinality.length() != 0) { setCardinality(Integer.parseInt(sCardinality)); } else { setCardinality(2); } String sSeed = Utils.getOption('S', options); if (sSeed.length() != 0) { setSeed(Integer.parseInt(sSeed)); } else { setSeed(1); } String sBIFFile = Utils.getOption('F', options); if ((sBIFFile != null) && (sBIFFile != "")) { setBIFFile(sBIFFile); } } // setOptions /** * Gets the current settings of the classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { String[] options = new String[13]; int current = 0; if (m_bGenerateNet) { options[current++] = "-B"; } options[current++] = "-N"; options[current++] = "" + m_nNrOfNodes; options[current++] = "-A"; options[current++] = "" + m_nNrOfArcs; options[current++] = "-M"; options[current++] = "" + m_nNrOfInstances; options[current++] = "-C"; options[current++] = "" + m_nCardinality; options[current++] = "-S"; options[current++] = "" + m_nSeed; if (m_sBIFFile.length() != 0) { options[current++] = "-F"; options[current++] = "" + m_sBIFFile; } // Fill up rest with empty strings, not nulls! while (current < options.length) { options[current++] = ""; } return options; } // getOptions /** * prints all the options to stdout */ protected static void printOptions(OptionHandler o) { Enumeration enm = o.listOptions(); System.out.println("Options for " + o.getClass().getName() + ":\n"); while (enm.hasMoreElements()) { Option option = (Option) enm.nextElement(); System.out.println(option.synopsis()); System.out.println(option.description()); } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method * * @param args the commandline parameters */ static public void main(String [] args) { BayesNetGenerator b = new BayesNetGenerator(); try { if ( (args.length == 0) || (Utils.getFlag('h', args)) ) { printOptions(b); return; } b.setOptions(args); b.generateRandomNetwork(); if (!b.m_bGenerateNet) { // skip if not required b.generateInstances(); } System.out.println(b.toString()); } catch (Exception e) { e.printStackTrace(); printOptions(b); } } // main } // class BayesNetGenerator
19,389
30.579805
213
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/EditableBayesNet.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * EditableBayesNet.java * Copyright (C) 2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net; import java.io.Serializable; import java.io.StringReader; import java.util.StringTokenizer; import javax.xml.parsers.DocumentBuilderFactory; import org.w3c.dom.CharacterData; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.estimate.DiscreteEstimatorBayes; import weka.core.Attribute; import weka.core.FastVector; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.SerializedObject; import weka.estimators.Estimator; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Reorder; /** <!-- globalinfo-start --> * Bayes Network learning using various search algorithms and quality measures.<br/> * Base class for a Bayes Network classifier. Provides datastructures (network structure, conditional probability distributions, etc.) 
and facilities common to Bayes Network learning algorithms like K2 and B.<br/> * <br/> * For more information see:<br/> * <br/> * http://www.cs.waikato.ac.nz/~remco/weka.pdf * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Do not use ADTree data structure * </pre> * * <pre> -B &lt;BIF file&gt; * BIF file to compare with * </pre> * * <pre> -Q weka.classifiers.bayes.net.search.SearchAlgorithm * Search algorithm * </pre> * * <pre> -E weka.classifiers.bayes.net.estimate.SimpleEstimator * Estimator algorithm * </pre> * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision: 8034 $ */ public class EditableBayesNet extends BayesNet { /** for serialization */ static final long serialVersionUID = 746037443258735954L; /** location of nodes, used for graph drawing * */ protected FastVector m_nPositionX; protected FastVector m_nPositionY; /** marginal distributions * */ protected FastVector m_fMarginP; /** evidence values, used for evidence propagation * */ protected FastVector m_nEvidence; /** standard constructor * */ public EditableBayesNet() { super(); m_nEvidence = new FastVector(0); m_fMarginP = new FastVector(0); m_nPositionX = new FastVector(); m_nPositionY = new FastVector(); clearUndoStack(); } // c'tor /** constructor, creates empty network with nodes based on the attributes in a data set */ public EditableBayesNet(Instances instances) { try { if (instances.classIndex() < 0) { instances.setClassIndex(instances.numAttributes() - 1); } m_Instances = normalizeDataSet(instances); } catch (Exception e) { e.printStackTrace(); } int nNodes = getNrOfNodes(); m_ParentSets = new ParentSet[nNodes]; for (int i = 0; i < nNodes; i++) { m_ParentSets[i] = new ParentSet(); } m_Distributions = new Estimator[nNodes][]; for (int iNode = 0; iNode < nNodes; iNode++) { m_Distributions[iNode] = new Estimator[1]; m_Distributions[iNode][0] = new DiscreteEstimatorBayes(getCardinality(iNode), 0.5); } m_nEvidence = 
new FastVector(nNodes); for (int i = 0; i < nNodes; i++) { m_nEvidence.addElement(-1); } m_fMarginP = new FastVector(nNodes); for (int i = 0; i < nNodes; i++) { double[] P = new double[getCardinality(i)]; m_fMarginP.addElement(P); } m_nPositionX = new FastVector(nNodes); m_nPositionY = new FastVector(nNodes); for (int iNode = 0; iNode < nNodes; iNode++) { m_nPositionX.addElement(iNode%10 * 50); m_nPositionY.addElement(((int)(iNode/10)) * 50); } } // c'tor /** constructor, copies Bayesian network structure from a Bayesian network * encapsulated in a BIFReader */ public EditableBayesNet(BIFReader other) { m_Instances = other.m_Instances; m_ParentSets = other.getParentSets(); m_Distributions = other.getDistributions(); int nNodes = getNrOfNodes(); m_nPositionX = new FastVector(nNodes); m_nPositionY = new FastVector(nNodes); for (int i = 0; i < nNodes; i++) { m_nPositionX.addElement(other.m_nPositionX[i]); m_nPositionY.addElement(other.m_nPositionY[i]); } m_nEvidence = new FastVector(nNodes); for (int i = 0; i < nNodes; i++) { m_nEvidence.addElement(-1); } m_fMarginP = new FastVector(nNodes); for (int i = 0; i < nNodes; i++) { double[] P = new double[getCardinality(i)]; m_fMarginP.addElement(P); } clearUndoStack(); } // c'tor /** * constructor that potentially initializes instances as well * * @param bSetInstances * flag indicating whether to initialize instances or not */ public EditableBayesNet(boolean bSetInstances) { super(); m_nEvidence = new FastVector(0); m_fMarginP = new FastVector(0); m_nPositionX = new FastVector(); m_nPositionY = new FastVector(); clearUndoStack(); if (bSetInstances) { m_Instances = new Instances("New Network", new FastVector(0), 0); } } // c'tor /** Assuming a network structure is defined and we want to learn from data, * the data set must be put if correct order first and possibly discretized/missing * values filled in before proceeding to CPT learning. 
* @param instances data set to learn from * @exception Exception when data sets are not compatible, e.g., a variable is missing * or a variable has different nr of values. */ public void setData(Instances instances) throws Exception { // sync order of variables int [] order = new int [getNrOfNodes()]; for (int iNode = 0; iNode < getNrOfNodes(); iNode++) { String sName = getNodeName(iNode); int nNode = 0; while (nNode < getNrOfNodes() && !sName.equals(instances.attribute(nNode).name())) { nNode++; } if (nNode >= getNrOfNodes()) { throw new Exception("Cannot find node named [[[" + sName + "]]] in the data"); } order[iNode] = nNode; } Reorder reorderFilter = new Reorder(); reorderFilter.setAttributeIndicesArray(order); reorderFilter.setInputFormat(instances); instances = Filter.useFilter(instances, reorderFilter); // filter using discretization/missing values filter Instances newInstances = new Instances(m_Instances, 0); if (m_DiscretizeFilter == null && m_MissingValuesFilter == null) { newInstances = normalizeDataSet(instances); } else { for (int iInstance = 0; iInstance < instances.numInstances(); iInstance++) { newInstances.add(normalizeInstance(instances.instance(iInstance))); } } //sanity check for (int iNode = 0; iNode < getNrOfNodes(); iNode++) { if (newInstances.attribute(iNode).numValues() != getCardinality(iNode)) { throw new Exception("Number of values of node [[[" + getNodeName(iNode) + "]]] differs in (discretized) dataset." ); } } // if we got this far, all is ok with the data set and // we can replace data set of Bayes net m_Instances = newInstances; } // setData /** returns index of node with given name, or -1 if no such node exists * @param sNodeName name of the node to get index for */ public int getNode2(String sNodeName) { int iNode = 0; while (iNode < m_Instances.numAttributes()) { if (m_Instances.attribute(iNode).name().equals(sNodeName)) { return iNode; } iNode++; } return -1; } // getNode2 /** returns index of node with given name. 
Throws exception if no such node exists * @param sNodeName name of the node to get index for */ public int getNode(String sNodeName) throws Exception { int iNode = getNode2(sNodeName); if (iNode < 0) { throw new Exception("Could not find node [[" + sNodeName + "]]"); } return iNode; } // getNode /** * Add new node to the network, initializing instances, parentsets, * distributions. Used for manual manipulation of the Bayesian network. * * @param sName * name of the node. If the name already exists, an x is appended * to the name * @param nCardinality * number of values for this node * @throws Exception */ public void addNode(String sName, int nCardinality) throws Exception { addNode(sName, nCardinality, 100 + getNrOfNodes() * 10, 100 + getNrOfNodes() * 10); } // addNode /** Add node to network at a given position, initializing instances, parentsets, * distributions. Used for manual manipulation of the Bayesian network. * * @param sName * name of the node. If the name already exists, an x is appended * to the name * @param nCardinality * number of values for this node * @param nPosX x-coordiate of the position to place this node * @param nPosY y-coordiate of the position to place this node * @throws Exception */ public void addNode(String sName, int nCardinality, int nPosX, int nPosY) throws Exception { if (getNode2(sName) >= 0) { addNode(sName + "x", nCardinality); return ; } // update instances FastVector values = new FastVector(nCardinality); for (int iValue = 0; iValue < nCardinality; iValue++) { values.addElement("Value" + (iValue + 1)); } Attribute att = new Attribute(sName, values); m_Instances.insertAttributeAt(att, m_Instances.numAttributes()); int nAtts = m_Instances.numAttributes(); // update parentsets ParentSet[] parentSets = new ParentSet[nAtts]; for (int iParentSet = 0; iParentSet < nAtts - 1; iParentSet++) { parentSets[iParentSet] = m_ParentSets[iParentSet]; } parentSets[nAtts - 1] = new ParentSet(); m_ParentSets = parentSets; // update distributions 
Estimator[][] distributions = new Estimator[nAtts][]; for (int iNode = 0; iNode < nAtts - 1; iNode++) { distributions[iNode] = m_Distributions[iNode]; } distributions[nAtts - 1] = new Estimator[1]; distributions[nAtts - 1][0] = new DiscreteEstimatorBayes(nCardinality, 0.5); m_Distributions = distributions; // update positions m_nPositionX.addElement(nPosX); m_nPositionY.addElement(nPosY); // update evidence & margins m_nEvidence.addElement(-1); double[] fMarginP = new double[nCardinality]; for (int iValue = 0; iValue < nCardinality; iValue++) { fMarginP[iValue] = 1.0 / nCardinality; } m_fMarginP.addElement(fMarginP); // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new AddNodeAction(sName, nCardinality, nPosX, nPosY)); } } // addNode /** * Delete node from the network, updating instances, parentsets, * distributions Conditional distributions are condensed by taking the * values for the target node to be its first value. Used for manual * manipulation of the Bayesian network. * * @param sName * name of the node. If the name does not exists an exception is * thrown * @throws Exception */ public void deleteNode(String sName) throws Exception { int nTargetNode = getNode(sName); deleteNode(nTargetNode); } // deleteNode /** * Delete node from the network, updating instances, parentsets, * distributions Conditional distributions are condensed by taking the * values for the target node to be its first value. Used for manual * manipulation of the Bayesian network. * * @param nTargetNode * index of the node to delete. 
* @throws Exception */ public void deleteNode(int nTargetNode) throws Exception { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new DeleteNodeAction(nTargetNode)); } int nAtts = m_Instances.numAttributes() - 1; int nTargetCard = m_Instances.attribute(nTargetNode).numValues(); // update distributions Estimator[][] distributions = new Estimator[nAtts][]; for (int iNode = 0; iNode < nAtts; iNode++) { int iNode2 = iNode; if (iNode >= nTargetNode) { iNode2++; } Estimator[] distribution = m_Distributions[iNode2]; if (m_ParentSets[iNode2].contains(nTargetNode)) { // condense distribution, use values for targetnode = 0 int nParentCard = m_ParentSets[iNode2].getCardinalityOfParents(); nParentCard = nParentCard / nTargetCard; Estimator[] distribution2 = new Estimator[nParentCard]; for (int iParent = 0; iParent < nParentCard; iParent++) { distribution2[iParent] = distribution[iParent]; } distribution = distribution2; } distributions[iNode] = distribution; } m_Distributions = distributions; // update parentsets ParentSet[] parentSets = new ParentSet[nAtts]; for (int iParentSet = 0; iParentSet < nAtts; iParentSet++) { int iParentSet2 = iParentSet; if (iParentSet >= nTargetNode) { iParentSet2++; } ParentSet parentset = m_ParentSets[iParentSet2]; parentset.deleteParent(nTargetNode, m_Instances); for (int iParent = 0; iParent < parentset.getNrOfParents(); iParent++) { int nParent = parentset.getParent(iParent); if (nParent > nTargetNode) { parentset.SetParent(iParent, nParent - 1); } } parentSets[iParentSet] = parentset; } m_ParentSets = parentSets; // update instances m_Instances.setClassIndex(-1); m_Instances.deleteAttributeAt(nTargetNode); m_Instances.setClassIndex(nAtts - 1); // update positions m_nPositionX.removeElementAt(nTargetNode); m_nPositionY.removeElementAt(nTargetNode); // update evidence & margins m_nEvidence.removeElementAt(nTargetNode); m_fMarginP.removeElementAt(nTargetNode); } // deleteNode /** * Delete nodes with indexes in selection from the 
network, updating instances, parentsets, * distributions Conditional distributions are condensed by taking the * values for the target node to be its first value. Used for manual * manipulation of the Bayesian network. * * @param nodes * array of indexes of nodes to delete. * @throws Exception */ public void deleteSelection(FastVector nodes) { // sort before proceeding for (int i = 0; i < nodes.size(); i++) { for (int j = i + 1; j < nodes.size(); j++) { if ((Integer) nodes.elementAt(i) > (Integer) nodes.elementAt(j)) { int h = (Integer) nodes.elementAt(i); nodes.setElementAt(nodes.elementAt(j), i); nodes.setElementAt(h, j); } } } // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new DeleteSelectionAction(nodes)); } boolean bNeedsUndoAction = m_bNeedsUndoAction; m_bNeedsUndoAction = false; try { for (int iNode = nodes.size() - 1; iNode >= 0; iNode--) { deleteNode((Integer) nodes.elementAt(iNode)); } } catch (Exception e) { e.printStackTrace(); } m_bNeedsUndoAction = bNeedsUndoAction; } // deleteSelection /** XML helper function for selecting elements under a node with a given name * @param item XMLNode to select items from * @param sElement name of the element to return */ FastVector selectElements(Node item, String sElement) throws Exception { NodeList children = item.getChildNodes(); FastVector nodelist = new FastVector(); for (int iNode = 0; iNode < children.getLength(); iNode++) { Node node = children.item(iNode); if ((node.getNodeType() == Node.ELEMENT_NODE) && node.getNodeName().equals(sElement)) { nodelist.addElement(node); } } return nodelist; } // selectElements /** * XML helper function. Returns all TEXT children of the given node in one string. Between the * node values new lines are inserted. 
* * @param node * the node to return the content for * @return the content of the node */ public String getContent(Element node) { NodeList list; Node item; int i; String result; result = ""; list = node.getChildNodes(); for (i = 0; i < list.getLength(); i++) { item = list.item(i); if (item.getNodeType() == Node.TEXT_NODE) result += "\n" + item.getNodeValue(); } return result; } /** XML helper function that returns DEFINITION element from a XMLBIF document * for a node with a given name. * @param doc XMLBIF document * @param sName name of the node to get the definition for */ Element getDefinition(Document doc, String sName) throws Exception { NodeList nodelist = doc.getElementsByTagName("DEFINITION"); for (int iNode = 0; iNode < nodelist.getLength(); iNode++) { Node node = nodelist.item(iNode); FastVector list = selectElements(node, "FOR"); if (list.size() > 0) { Node forNode = (Node) list.elementAt(0); if (getContent((Element) forNode).trim().equals(sName)) { return (Element) node; } } } throw new Exception("Could not find definition for ((" + sName + "))"); } // getDefinition /** Paste modes. This allows for verifying that a past action does not cause * any problems before actually performing the paste operation. */ final static int TEST = 0; final static int EXECUTE = 1; /** Apply paste operation with XMLBIF fragment. This adds nodes in the XMLBIF fragment * to the network, together with its parents. First, paste in test mode to verify * no problems occur, then execute paste operation. If a problem occurs (e.g. parent * does not exist) then a exception is thrown. * @param sXML XMLBIF fragment to paste into the network */ public void paste(String sXML) throws Exception { try { paste(sXML, TEST); } catch (Exception e) { throw e; } paste(sXML, EXECUTE); } // paste /** Apply paste operation with XMLBIF fragment. Depending on the paste mode, the * nodes are actually added to the network or it is just tested that the nodes can * be added to the network. 
* @param sXML XMLBIF fragment to paste into the network * @param mode paste mode TEST or EXECUTE */ void paste(String sXML, int mode) throws Exception { DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); factory.setValidating(true); Document doc = factory.newDocumentBuilder().parse(new org.xml.sax.InputSource(new StringReader(sXML))); doc.normalize(); // create nodes first NodeList nodelist = doc.getElementsByTagName("VARIABLE"); FastVector sBaseNames = new FastVector(); Instances instances = new Instances(m_Instances, 0); int nBase = instances.numAttributes(); for (int iNode = 0; iNode < nodelist.getLength(); iNode++) { // Get element FastVector valueslist; // Get the name of the node valueslist = selectElements(nodelist.item(iNode), "OUTCOME"); int nValues = valueslist.size(); // generate value strings FastVector nomStrings = new FastVector(nValues + 1); for (int iValue = 0; iValue < nValues; iValue++) { Node node = ((Node) valueslist.elementAt(iValue)).getFirstChild(); String sValue = ((CharacterData) (node)).getData(); if (sValue == null) { sValue = "Value" + (iValue + 1); } nomStrings.addElement(sValue); } FastVector nodelist2; // Get the name of the network nodelist2 = selectElements(nodelist.item(iNode), "NAME"); if (nodelist2.size() == 0) { throw new Exception("No name specified for variable"); } String sBaseName = ((CharacterData) (((Node) nodelist2.elementAt(0)).getFirstChild())).getData(); sBaseNames.addElement(sBaseName); String sNodeName = sBaseName; if (getNode2(sNodeName) >= 0) { sNodeName = "Copy of " + sBaseName; } int iAttempt = 2; while (getNode2(sNodeName) >= 0) { sNodeName = "Copy (" + iAttempt + ") of " + sBaseName; iAttempt++; } Attribute att = new Attribute(sNodeName, nomStrings); instances.insertAttributeAt(att, instances.numAttributes()); valueslist = selectElements(nodelist.item(iNode), "PROPERTY"); nValues = valueslist.size(); // generate value strings int nPosX = iAttempt * 10; int nPosY = iAttempt * 10; for (int 
iValue = 0; iValue < nValues; iValue++) { // parsing for strings of the form "position = (73, 165)" Node node = ((Node) valueslist.elementAt(iValue)).getFirstChild(); String sValue = ((CharacterData) (node)).getData(); if (sValue.startsWith("position")) { int i0 = sValue.indexOf('('); int i1 = sValue.indexOf(','); int i2 = sValue.indexOf(')'); String sX = sValue.substring(i0 + 1, i1).trim(); String sY = sValue.substring(i1 + 1, i2).trim(); try { nPosX = (Integer.parseInt(sX) + iAttempt * 10); nPosY = (Integer.parseInt(sY) + iAttempt * 10); } catch (NumberFormatException e) { System.err.println("Wrong number format in position :(" + sX + "," + sY + ")"); } } } if (mode == EXECUTE) { m_nPositionX.addElement(nPosX); m_nPositionY.addElement(nPosY); } } FastVector nodelist2; Estimator[][] distributions = new Estimator[nBase + sBaseNames.size()][]; ParentSet[] parentsets = new ParentSet[nBase + sBaseNames.size()]; for (int iNode = 0; iNode < nBase; iNode++) { distributions[iNode] = m_Distributions[iNode]; parentsets[iNode] = m_ParentSets[iNode]; } if (mode == EXECUTE) { m_Instances = instances; } // create arrows & create distributions for (int iNode = 0; iNode < sBaseNames.size(); iNode++) { // find definition that goes with this node String sName = (String) sBaseNames.elementAt(iNode); Element definition = getDefinition(doc, sName); parentsets[nBase + iNode] = new ParentSet(); // get the parents for this node // resolve structure nodelist2 = selectElements(definition, "GIVEN"); for (int iParent = 0; iParent < nodelist2.size(); iParent++) { Node parentName = ((Node) nodelist2.elementAt(iParent)).getFirstChild(); String sParentName = ((CharacterData) (parentName)).getData(); int nParent = -1; for (int iBase = 0; iBase < sBaseNames.size(); iBase++) { if (sParentName.equals((String) sBaseNames.elementAt(iBase))) { nParent = nBase + iBase; } } if (nParent < 0) { nParent = getNode(sParentName); } parentsets[nBase + iNode].addParent(nParent, instances); } // resolve 
conditional probability table int nCardinality = parentsets[nBase + iNode].getCardinalityOfParents(); int nValues = instances.attribute(nBase + iNode).numValues(); distributions[nBase + iNode] = new Estimator[nCardinality]; for (int i = 0; i < nCardinality; i++) { distributions[nBase + iNode][i] = new DiscreteEstimatorBayes(nValues, 0.0f); } String sTable = getContent((Element) selectElements(definition, "TABLE").elementAt(0)); sTable = sTable.replaceAll("\\n", " "); StringTokenizer st = new StringTokenizer(sTable.toString()); for (int i = 0; i < nCardinality; i++) { DiscreteEstimatorBayes d = (DiscreteEstimatorBayes) distributions[nBase + iNode][i]; for (int iValue = 0; iValue < nValues; iValue++) { String sWeight = st.nextToken(); d.addValue(iValue, new Double(sWeight).doubleValue()); } } if (mode == EXECUTE) { m_nEvidence.insertElementAt(-1, nBase + iNode); m_fMarginP.insertElementAt(new double[getCardinality(nBase + iNode)], nBase + iNode); } } if (mode == EXECUTE) { m_Distributions = distributions; m_ParentSets = parentsets; } // update undo stack if (mode == EXECUTE && m_bNeedsUndoAction) { addUndoAction(new PasteAction(sXML, nBase)); } } // paste /** * Add arc between two nodes Distributions are updated by duplication for * every value of the parent node. * * @param sParent * name of the parent node * @param sChild * name of the child node * @throws Exception * if parent or child cannot be found in network */ public void addArc(String sParent, String sChild) throws Exception { int nParent = getNode(sParent); int nChild = getNode(sChild); addArc(nParent, nChild); } // addArc /** * Add arc between two nodes Distributions are updated by duplication for * every value of the parent node. 
* * @param nParent * index of the parent node * @param nChild * index of the child node * @throws Exception */ public void addArc(int nParent, int nChild) throws Exception { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new AddArcAction(nParent, nChild)); } int nOldCard = m_ParentSets[nChild].getCardinalityOfParents(); // update parentsets m_ParentSets[nChild].addParent(nParent, m_Instances); // update distributions int nNewCard = m_ParentSets[nChild].getCardinalityOfParents(); Estimator[] ds = new Estimator[nNewCard]; for (int iParent = 0; iParent < nNewCard; iParent++) { ds[iParent] = Estimator.clone(m_Distributions[nChild][iParent % nOldCard]); } m_Distributions[nChild] = ds; } // addArc /** * Add arc between parent node and each of the nodes in a given list. * Distributions are updated as above. * * @param sParent * name of the parent node * @param nodes * array of indexes of child nodes * @throws Exception */ public void addArc(String sParent, FastVector nodes) throws Exception { int nParent = getNode(sParent); // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new AddArcAction(nParent, nodes)); } boolean bNeedsUndoAction = m_bNeedsUndoAction; m_bNeedsUndoAction = false; for (int iNode = 0; iNode < nodes.size(); iNode++) { int nNode = (Integer) nodes.elementAt(iNode); addArc(nParent, nNode); } m_bNeedsUndoAction = bNeedsUndoAction; } // addArc /** * Delete arc between two nodes. Distributions are updated by condensing for * the parent node taking its first value. * * @param sParent * name of the parent node * @param sChild * name of the child node * @throws Exception * if parent or child cannot be found in network */ public void deleteArc(String sParent, String sChild) throws Exception { int nParent = getNode(sParent); int nChild = getNode(sChild); deleteArc(nParent, nChild); } // deleteArc /** * Delete arc between two nodes. Distributions are updated by condensing for * the parent node taking its first value. 
* * @param nParent * index of the parent node * @param nChild * index of the child node * @throws Exception */ public void deleteArc(int nParent, int nChild) throws Exception { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new DeleteArcAction(nParent, nChild)); } // update distributions // condense distribution, use values for targetnode = 0 int nParentCard = m_ParentSets[nChild].getCardinalityOfParents(); int nTargetCard = m_Instances.attribute(nChild).numValues(); nParentCard = nParentCard / nTargetCard; Estimator[] distribution2 = new Estimator[nParentCard]; for (int iParent = 0; iParent < nParentCard; iParent++) { distribution2[iParent] = m_Distributions[nChild][iParent]; } m_Distributions[nChild] = distribution2; // update parentsets m_ParentSets[nChild].deleteParent(nParent, m_Instances); } // deleteArc /** specify distribution of a node * @param sName name of the node to specify distribution for * @param P matrix representing distribution with P[i][j] = P(node = j | parent configuration = i) * @throws Exception * if parent or child cannot be found in network */ public void setDistribution(String sName, double[][] P) throws Exception { int nTargetNode = getNode(sName); setDistribution(nTargetNode, P); } // setDistribution /** specify distribution of a node * @param nTargetNode index of the node to specify distribution for * @param P matrix representing distribution with P[i][j] = P(node = j | parent configuration = i) * @throws Exception * if parent or child cannot be found in network */ public void setDistribution(int nTargetNode, double[][] P) throws Exception { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new SetDistributionAction(nTargetNode, P)); } Estimator[] distributions = m_Distributions[nTargetNode]; for (int iParent = 0; iParent < distributions.length; iParent++) { DiscreteEstimatorBayes distribution = new DiscreteEstimatorBayes(P[0].length, 0); for (int iValue = 0; iValue < distribution.getNumSymbols(); iValue++) { 
distribution.addValue(iValue, P[iParent][iValue]); } distributions[iParent] = distribution; } // m_Distributions[nTargetNode] = distributions; } // setDistribution /** returns distribution of a node in matrix form with matrix representing distribution * with P[i][j] = P(node = j | parent configuration = i) * @param sName name of the node to get distribution from */ public double[][] getDistribution(String sName) { int nTargetNode = getNode2(sName); return getDistribution(nTargetNode); } // getDistribution /** returns distribution of a node in matrix form with matrix representing distribution * with P[i][j] = P(node = j | parent configuration = i) * @param nTargetNode index of the node to get distribution from */ public double[][] getDistribution(int nTargetNode) { int nParentCard = m_ParentSets[nTargetNode].getCardinalityOfParents(); int nCard = m_Instances.attribute(nTargetNode).numValues(); double[][] P = new double[nParentCard][nCard]; for (int iParent = 0; iParent < nParentCard; iParent++) { for (int iValue = 0; iValue < nCard; iValue++) { P[iParent][iValue] = m_Distributions[nTargetNode][iParent].getProbability(iValue); } } return P; } // getDistribution /** returns array of values of a node * @param sName name of the node to get values from */ public String[] getValues(String sName) { int nTargetNode = getNode2(sName); return getValues(nTargetNode); } // getValues /** returns array of values of a node * @param nTargetNode index of the node to get values from */ public String[] getValues(int nTargetNode) { String[] values = new String[getCardinality(nTargetNode)]; for (int iValue = 0; iValue < values.length; iValue++) { values[iValue] = m_Instances.attribute(nTargetNode).value(iValue); } return values; } // getValues /** returns value of a node * @param nTargetNode index of the node to get values from * @param iValue index of the value */ public String getValueName(int nTargetNode, int iValue) { return m_Instances.attribute(nTargetNode).value(iValue); } // 
getNodeValue /** change the name of a node * @param nTargetNode index of the node to set name for * @param sName new name to assign */ public void setNodeName(int nTargetNode, String sName) { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new RenameAction(nTargetNode, getNodeName(nTargetNode), sName)); } Attribute att = m_Instances.attribute(nTargetNode); int nCardinality = att.numValues(); FastVector values = new FastVector(nCardinality); for (int iValue = 0; iValue < nCardinality; iValue++) { values.addElement(att.value(iValue)); } replaceAtt(nTargetNode, sName, values); } // setNodeName /** change the name of a value of a node * @param nTargetNode index of the node to set name for * @param sValue current name of the value * @param sNewValue new name of the value */ public void renameNodeValue(int nTargetNode, String sValue, String sNewValue) { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new RenameValueAction(nTargetNode, sValue, sNewValue)); } Attribute att = m_Instances.attribute(nTargetNode); int nCardinality = att.numValues(); FastVector values = new FastVector(nCardinality); for (int iValue = 0; iValue < nCardinality; iValue++) { if (att.value(iValue).equals(sValue)) { values.addElement(sNewValue); } else { values.addElement(att.value(iValue)); } } replaceAtt(nTargetNode, att.name(), values); } // renameNodeValue /** Add node value to a node. Distributions for the node assign zero probability * to the new value. Child nodes duplicate CPT conditioned on the new value. 
 * @param nTargetNode index of the node to add value for
 * @param sNewValue name of the value
 */
public void addNodeValue(int nTargetNode, String sNewValue) {
  // update undo stack
  if (m_bNeedsUndoAction) {
    addUndoAction(new AddValueAction(nTargetNode, sNewValue));
  }
  // rebuild the attribute with the new value appended
  Attribute att = m_Instances.attribute(nTargetNode);
  int nCardinality = att.numValues();
  FastVector values = new FastVector(nCardinality);
  for (int iValue = 0; iValue < nCardinality; iValue++) {
    values.addElement(att.value(iValue));
  }
  values.addElement(sNewValue);
  replaceAtt(nTargetNode, att.name(), values);
  // update distributions of this node: copy old probabilities, the new
  // value implicitly gets zero probability
  Estimator[] distributions = m_Distributions[nTargetNode];
  int nNewCard = values.size();
  for (int iParent = 0; iParent < distributions.length; iParent++) {
    DiscreteEstimatorBayes distribution = new DiscreteEstimatorBayes(nNewCard, 0);
    for (int iValue = 0; iValue < nNewCard - 1; iValue++) {
      distribution.addValue(iValue, distributions[iParent].getProbability(iValue));
    }
    distributions[iParent] = distribution;
  }
  // update distributions of all children: their CPTs grow by one block of
  // rows for the new parent value, duplicated from the existing rows
  for (int iNode = 0; iNode < getNrOfNodes(); iNode++) {
    if (m_ParentSets[iNode].contains(nTargetNode)) {
      distributions = m_Distributions[iNode];
      ParentSet parentSet = m_ParentSets[iNode];
      int nParentCard = parentSet.getFreshCardinalityOfParents(m_Instances);
      Estimator[] newDistributions = new Estimator[nParentCard];
      int nCard = getCardinality(iNode);
      int nParents = parentSet.getNrOfParents();
      // values2 enumerates parent configurations odometer-style
      int[] values2 = new int[nParents];
      int iOldPos = 0;
      int iTargetNode = 0;
      while (parentSet.getParent(iTargetNode) != nTargetNode) {
        iTargetNode++;
      }
      for (int iPos = 0; iPos < nParentCard; iPos++) {
        DiscreteEstimatorBayes distribution = new DiscreteEstimatorBayes(nCard, 0);
        for (int iValue = 0; iValue < nCard; iValue++) {
          distribution.addValue(iValue, distributions[iOldPos].getProbability(iValue));
        }
        newDistributions[iPos] = distribution;
        // update values (advance the odometer over parent configurations)
        int i = 0;
        values2[i]++;
        while (i < nParents && values2[i] == getCardinality(parentSet.getParent(i))) {
          values2[i] = 0;
          i++;
          if (i < nParents) {
            values2[i]++;
          }
        }
        // only advance in the old (smaller) table while the target parent is
        // not at the freshly added value — that value reuses the last old row
        if (values2[iTargetNode] != nNewCard - 1) {
          iOldPos++;
        }
      }
      m_Distributions[iNode] = newDistributions;
    }
  }
} // addNodeValue

/** Delete node value from a node. Distributions for the node are scaled
 * up proportional to existing distribution
 * (or made uniform if zero probability is assigned to remainder of values).
 * Child nodes delete CPTs conditioned on the new value.
 * @param nTargetNode index of the node to delete value from
 * @param sValue name of the value to delete
 */
public void delNodeValue(int nTargetNode, String sValue) throws Exception {
  // update undo stack
  if (m_bNeedsUndoAction) {
    addUndoAction(new DelValueAction(nTargetNode, sValue));
  }
  // rebuild the attribute without the deleted value, remembering its index
  Attribute att = m_Instances.attribute(nTargetNode);
  int nCardinality = att.numValues();
  FastVector values = new FastVector(nCardinality);
  int nValue = -1;
  for (int iValue = 0; iValue < nCardinality; iValue++) {
    if (att.value(iValue).equals(sValue)) {
      nValue = iValue;
    } else {
      values.addElement(att.value(iValue));
    }
  }
  if (nValue < 0) {
    // could not find value
    throw new Exception("Node " + nTargetNode + " does not have value (" + sValue + ")");
  }
  replaceAtt(nTargetNode, att.name(), values);
  // update distributions: renormalize the remaining probabilities, falling
  // back to uniform when they sum to zero
  Estimator[] distributions = m_Distributions[nTargetNode];
  int nCard = values.size();
  for (int iParent = 0; iParent < distributions.length; iParent++) {
    DiscreteEstimatorBayes distribution = new DiscreteEstimatorBayes(nCard, 0);
    double sum = 0;
    for (int iValue = 0; iValue < nCard; iValue++) {
      sum += distributions[iParent].getProbability(iValue);
    }
    if (sum > 0) {
      for (int iValue = 0; iValue < nCard; iValue++) {
        distribution.addValue(iValue, distributions[iParent].getProbability(iValue) / sum);
      }
    } else {
      for (int iValue = 0; iValue < nCard; iValue++) {
        distribution.addValue(iValue, 1.0 / nCard);
      }
    }
    distributions[iParent] = distribution;
  }
  // update distributions of all children: drop the CPT rows that were
  // conditioned on the deleted parent value
  for (int iNode = 0; iNode < getNrOfNodes(); iNode++) {
    if (m_ParentSets[iNode].contains(nTargetNode)) {
      ParentSet parentSet = m_ParentSets[iNode];
      distributions = m_Distributions[iNode];
      Estimator[] newDistributions = new Estimator[distributions.length * nCard / (nCard + 1)];
      int iCurrentDist = 0;
      int nParents = parentSet.getNrOfParents();
      int[] values2 = new int[nParents];
      // fill in the values; nParentCard is the OLD cardinality (attribute was
      // already replaced, so scale the fresh cardinality back up)
      int nParentCard = parentSet.getFreshCardinalityOfParents(m_Instances) * (nCard + 1) / nCard;
      int iTargetNode = 0;
      while (parentSet.getParent(iTargetNode) != nTargetNode) {
        iTargetNode++;
      }
      int[] nCards = new int[nParents];
      for (int iParent = 0; iParent < nParents; iParent++) {
        nCards[iParent] = getCardinality(parentSet.getParent(iParent));
      }
      nCards[iTargetNode]++;
      for (int iPos = 0; iPos < nParentCard; iPos++) {
        // keep only rows whose target-parent value is not the deleted one
        if (values2[iTargetNode] != nValue) {
          newDistributions[iCurrentDist++] = distributions[iPos];
        }
        // update values (advance the odometer over parent configurations)
        int i = 0;
        values2[i]++;
        while (i < nParents && values2[i] == nCards[i]) {
          values2[i] = 0;
          i++;
          if (i < nParents) {
            values2[i]++;
          }
        }
      }
      m_Distributions[iNode] = newDistributions;
    }
  }
  // update evidence: indexes above the deleted value shift down by one
  if (getEvidence(nTargetNode) > nValue) {
    setEvidence(nTargetNode, getEvidence(nTargetNode) - 1);
  }
} // delNodeValue

/** set position of node
 * @param iNode index of node to set position for
 * @param nX x position of new position
 * @param nY y position of new position
 */
public void setPosition(int iNode, int nX, int nY) {
  // update undo stack
  if (m_bNeedsUndoAction) {
    boolean isUpdate = false;
    UndoAction undoAction = null;
    try {
      // if the last undo action moved the same node, merge into it instead
      // of pushing a new action for every mouse-drag increment
      if (m_undoStack.size() > 0) {
        undoAction = (UndoAction) m_undoStack.elementAt(m_undoStack.size() - 1);
        SetPositionAction posAction = (SetPositionAction) undoAction;
        if (posAction.m_nTargetNode == iNode) {
          isUpdate = true;
          posAction.setUndoPosition(nX, nY);
        }
      }
    } catch (Exception e) {
      // ignore.
it's not a SetPositionAction } if (!isUpdate) { addUndoAction(new SetPositionAction(iNode, nX, nY)); } } m_nPositionX.setElementAt(nX, iNode); m_nPositionY.setElementAt(nY, iNode); } // setPosition /** Set position of node. Move set of nodes with the same displacement * as a specified node. * @param nNode index of node to set position for * @param nX x position of new position * @param nY y position of new position * @param nodes array of indexes of nodes to move */ public void setPosition(int nNode, int nX, int nY, FastVector nodes) { int dX = nX - getPositionX(nNode); int dY = nY - getPositionY(nNode); // update undo stack if (m_bNeedsUndoAction) { boolean isUpdate = false; try { UndoAction undoAction = null; if (m_undoStack.size() > 0) { undoAction = (UndoAction) m_undoStack.elementAt(m_undoStack.size() - 1); SetGroupPositionAction posAction = (SetGroupPositionAction) undoAction; isUpdate = true; int iNode = 0; while (isUpdate && iNode < posAction.m_nodes.size()) { if ((Integer)posAction.m_nodes.elementAt(iNode) != (Integer) nodes.elementAt(iNode)) { isUpdate = false; } iNode++; } if (isUpdate == true) { posAction.setUndoPosition(dX, dY); } } } catch (Exception e) { // ignore. 
it's not a SetPositionAction } if (!isUpdate) { addUndoAction(new SetGroupPositionAction(nodes, dX, dY)); } } for (int iNode = 0; iNode < nodes.size(); iNode++) { nNode = (Integer) nodes.elementAt(iNode); m_nPositionX.setElementAt(getPositionX(nNode) + dX, nNode); m_nPositionY.setElementAt(getPositionY(nNode) + dY, nNode); } } // setPosition /** set positions of all nodes * @param nPosX new x positions for all nodes * @param nPosY new y positions for all nodes */ public void layoutGraph(FastVector nPosX, FastVector nPosY) { if (m_bNeedsUndoAction) { addUndoAction(new LayoutGraphAction(nPosX, nPosY)); } m_nPositionX = nPosX; m_nPositionY = nPosY; } // layoutGraph /** get x position of a node * @param iNode index of node of interest */ public int getPositionX(int iNode) { return (Integer) (m_nPositionX.elementAt(iNode)); } /** get y position of a node * @param iNode index of node of interest */ public int getPositionY(int iNode) { return (Integer) (m_nPositionY.elementAt(iNode)); } /** align set of nodes with the left most node in the list * @param nodes list of indexes of nodes to align */ public void alignLeft(FastVector nodes) { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new alignLeftAction(nodes)); } int nMinX = -1; for (int iNode = 0; iNode < nodes.size(); iNode++) { int nX = getPositionX((Integer) nodes.elementAt(iNode)); if (nX < nMinX || iNode == 0) { nMinX = nX; } } for (int iNode = 0; iNode < nodes.size(); iNode++) { int nNode = (Integer) nodes.elementAt(iNode); m_nPositionX.setElementAt(nMinX, nNode); } } // alignLeft /** align set of nodes with the right most node in the list * @param nodes list of indexes of nodes to align */ public void alignRight(FastVector nodes) { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new alignRightAction(nodes)); } int nMaxX = -1; for (int iNode = 0; iNode < nodes.size(); iNode++) { int nX = getPositionX((Integer) nodes.elementAt(iNode)); if (nX > nMaxX || iNode == 0) { nMaxX = nX; } } for 
(int iNode = 0; iNode < nodes.size(); iNode++) { int nNode = (Integer) nodes.elementAt(iNode); m_nPositionX.setElementAt(nMaxX, nNode); } } // alignRight /** align set of nodes with the top most node in the list * @param nodes list of indexes of nodes to align */ public void alignTop(FastVector nodes) { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new alignTopAction(nodes)); } int nMinY = -1; for (int iNode = 0; iNode < nodes.size(); iNode++) { int nY = getPositionY((Integer) nodes.elementAt(iNode)); if (nY < nMinY || iNode == 0) { nMinY = nY; } } for (int iNode = 0; iNode < nodes.size(); iNode++) { int nNode = (Integer) nodes.elementAt(iNode); m_nPositionY.setElementAt(nMinY, nNode); } } // alignTop /** align set of nodes with the bottom most node in the list * @param nodes list of indexes of nodes to align */ public void alignBottom(FastVector nodes) { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new alignBottomAction(nodes)); } int nMaxY = -1; for (int iNode = 0; iNode < nodes.size(); iNode++) { int nY = getPositionY((Integer) nodes.elementAt(iNode)); if (nY > nMaxY || iNode == 0) { nMaxY = nY; } } for (int iNode = 0; iNode < nodes.size(); iNode++) { int nNode = (Integer) nodes.elementAt(iNode); m_nPositionY.setElementAt(nMaxY, nNode); } } // alignBottom /** center set of nodes half way between left and right most node in the list * @param nodes list of indexes of nodes to center */ public void centerHorizontal(FastVector nodes) { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new centerHorizontalAction(nodes)); } int nMinY = -1; int nMaxY = -1; for (int iNode = 0; iNode < nodes.size(); iNode++) { int nY = getPositionY((Integer) nodes.elementAt(iNode)); if (nY < nMinY || iNode == 0) { nMinY = nY; } if (nY > nMaxY || iNode == 0) { nMaxY = nY; } } for (int iNode = 0; iNode < nodes.size(); iNode++) { int nNode = (Integer) nodes.elementAt(iNode); m_nPositionY.setElementAt((nMinY + nMaxY) / 2, nNode); } } // 
centerHorizontal /** center set of nodes half way between top and bottom most node in the list * @param nodes list of indexes of nodes to center */ public void centerVertical(FastVector nodes) { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new centerVerticalAction(nodes)); } int nMinX = -1; int nMaxX = -1; for (int iNode = 0; iNode < nodes.size(); iNode++) { int nX = getPositionX((Integer) nodes.elementAt(iNode)); if (nX < nMinX || iNode == 0) { nMinX = nX; } if (nX > nMaxX || iNode == 0) { nMaxX = nX; } } for (int iNode = 0; iNode < nodes.size(); iNode++) { int nNode = (Integer) nodes.elementAt(iNode); m_nPositionX.setElementAt((nMinX + nMaxX) / 2, nNode); } } // centerVertical /** space out set of nodes evenly between left and right most node in the list * @param nodes list of indexes of nodes to space out */ public void spaceHorizontal(FastVector nodes) { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new spaceHorizontalAction(nodes)); } int nMinX = -1; int nMaxX = -1; for (int iNode = 0; iNode < nodes.size(); iNode++) { int nX = getPositionX((Integer) nodes.elementAt(iNode)); if (nX < nMinX || iNode == 0) { nMinX = nX; } if (nX > nMaxX || iNode == 0) { nMaxX = nX; } } for (int iNode = 0; iNode < nodes.size(); iNode++) { int nNode = (Integer) nodes.elementAt(iNode); m_nPositionX.setElementAt((int) (nMinX + iNode * (nMaxX - nMinX) / (nodes.size() - 1.0)), nNode); } } // spaceHorizontal /** space out set of nodes evenly between top and bottom most node in the list * @param nodes list of indexes of nodes to space out */ public void spaceVertical(FastVector nodes) { // update undo stack if (m_bNeedsUndoAction) { addUndoAction(new spaceVerticalAction(nodes)); } int nMinY = -1; int nMaxY = -1; for (int iNode = 0; iNode < nodes.size(); iNode++) { int nY = getPositionY((Integer) nodes.elementAt(iNode)); if (nY < nMinY || iNode == 0) { nMinY = nY; } if (nY > nMaxY || iNode == 0) { nMaxY = nY; } } for (int iNode = 0; iNode < nodes.size(); 
iNode++) { int nNode = (Integer) nodes.elementAt(iNode); m_nPositionY.setElementAt((int) (nMinY + iNode * (nMaxY - nMinY) / (nodes.size() - 1.0)), nNode); } } // spaceVertical /** replace attribute with specified name and values * @param nTargetNode index of node the replace specification for * @param sName new name of the node * @param values array of values of the node */ void replaceAtt(int nTargetNode, String sName, FastVector values) { Attribute newAtt = new Attribute(sName, values); if (m_Instances.classIndex() == nTargetNode) { m_Instances.setClassIndex(-1); /*m_Instances.insertAttributeAt(newAtt, nTargetNode); m_Instances.deleteAttributeAt(nTargetNode + 1); m_Instances.setClassIndex(nTargetNode); */ m_Instances.deleteAttributeAt(nTargetNode); m_Instances.insertAttributeAt(newAtt, nTargetNode); m_Instances.setClassIndex(nTargetNode); } else { /*m_Instances.insertAttributeAt(newAtt, nTargetNode); m_Instances.deleteAttributeAt(nTargetNode + 1); */ m_Instances.deleteAttributeAt(nTargetNode); m_Instances.insertAttributeAt(newAtt, nTargetNode); } } // replaceAtt /** return marginal distibution for a node * @param iNode index of node of interest */ public double[] getMargin(int iNode) { return (double[]) m_fMarginP.elementAt(iNode); }; /** set marginal distibution for a node * @param iNode index of node to set marginal distribution for * @param fMarginP marginal distribution */ public void setMargin(int iNode, double[] fMarginP) { m_fMarginP.setElementAt(fMarginP, iNode); } /** get evidence state of a node. -1 represents no evidence set, otherwise * the index of a value of the node * @param iNode index of node of interest */ public int getEvidence(int iNode) { return (Integer) m_nEvidence.elementAt(iNode); } /** set evidence state of a node. 
 * -1 represents no evidence set, otherwise
 * the index of a value of the node
 * @param iNode index of node of interest
 * @param iValue evidence value to set
 */
public void setEvidence(int iNode, int iValue) {
  m_nEvidence.setElementAt(iValue, iNode);
}

/** return list of children of a node
 * @param nTargetNode index of node of interest
 */
public FastVector getChildren(int nTargetNode) {
  FastVector children = new FastVector();
  for (int iNode = 0; iNode < getNrOfNodes(); iNode++) {
    if (m_ParentSets[iNode].contains(nTargetNode)) {
      children.addElement(iNode);
    }
  }
  return children;
} // getChildren

/** returns network in XMLBIF format */
public String toXMLBIF03() {
  if (m_Instances == null) {
    return ("<!--No model built yet-->");
  }
  StringBuffer text = new StringBuffer();
  text.append(getBIFHeader());
  text.append("\n");
  text.append("\n");
  text.append("<BIF VERSION=\"0.3\">\n");
  text.append("<NETWORK>\n");
  text.append("<NAME>" + XMLNormalize(m_Instances.relationName()) + "</NAME>\n");
  // one VARIABLE element per node, including its editor position
  for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) {
    text.append("<VARIABLE TYPE=\"nature\">\n");
    text.append("<NAME>" + XMLNormalize(m_Instances.attribute(iAttribute).name()) + "</NAME>\n");
    for (int iValue = 0; iValue < m_Instances.attribute(iAttribute).numValues(); iValue++) {
      text.append("<OUTCOME>" + XMLNormalize(m_Instances.attribute(iAttribute).value(iValue)) + "</OUTCOME>\n");
    }
    text.append("<PROPERTY>position = (" + getPositionX(iAttribute) + "," + getPositionY(iAttribute) + ")</PROPERTY>\n");
    text.append("</VARIABLE>\n");
  }
  // one DEFINITION element per node: parents and CPT rows
  for (int iAttribute = 0; iAttribute < m_Instances.numAttributes(); iAttribute++) {
    text.append("<DEFINITION>\n");
    text.append("<FOR>" + XMLNormalize(m_Instances.attribute(iAttribute).name()) + "</FOR>\n");
    for (int iParent = 0; iParent < m_ParentSets[iAttribute].getNrOfParents(); iParent++) {
      text.append("<GIVEN>" + XMLNormalize(m_Instances.attribute(m_ParentSets[iAttribute].getParent(iParent)).name()) + "</GIVEN>\n");
    }
    text.append("<TABLE>\n");
    for (int iParent = 0; iParent < m_ParentSets[iAttribute].getCardinalityOfParents(); iParent++) {
      for (int iValue = 0; iValue < m_Instances.attribute(iAttribute).numValues(); iValue++) {
        text.append(m_Distributions[iAttribute][iParent].getProbability(iValue));
        text.append(' ');
      }
      text.append('\n');
    }
    text.append("</TABLE>\n");
    text.append("</DEFINITION>\n");
  }
  text.append("</NETWORK>\n");
  text.append("</BIF>\n");
  return text.toString();
} // toXMLBIF03

/** return fragment of network in XMLBIF format
 * @param nodes array of indexes of nodes that should be in the fragment
 */
public String toXMLBIF03(FastVector nodes) {
  StringBuffer text = new StringBuffer();
  text.append(getBIFHeader());
  text.append("\n");
  text.append("\n");
  text.append("<BIF VERSION=\"0.3\">\n");
  text.append("<NETWORK>\n");
  text.append("<NAME>" + XMLNormalize(m_Instances.relationName()) + "</NAME>\n");
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nNode = (Integer) nodes.elementAt(iNode);
    text.append("<VARIABLE TYPE=\"nature\">\n");
    text.append("<NAME>" + XMLNormalize(m_Instances.attribute(nNode).name()) + "</NAME>\n");
    for (int iValue = 0; iValue < m_Instances.attribute(nNode).numValues(); iValue++) {
      text.append("<OUTCOME>" + XMLNormalize(m_Instances.attribute(nNode).value(iValue)) + "</OUTCOME>\n");
    }
    text.append("<PROPERTY>position = (" + getPositionX(nNode) + "," + getPositionY(nNode) + ")</PROPERTY>\n");
    text.append("</VARIABLE>\n");
  }
  for (int iNode = 0; iNode < nodes.size(); iNode++) {
    int nNode = (Integer) nodes.elementAt(iNode);
    text.append("<DEFINITION>\n");
    text.append("<FOR>" + XMLNormalize(m_Instances.attribute(nNode).name()) + "</FOR>\n");
    // note: GIVEN may reference parents outside the selection; they are
    // resolved against the full network on paste
    for (int iParent = 0; iParent < m_ParentSets[nNode].getNrOfParents(); iParent++) {
      text.append("<GIVEN>" + XMLNormalize(m_Instances.attribute(m_ParentSets[nNode].getParent(iParent)).name()) + "</GIVEN>\n");
    }
    text.append("<TABLE>\n");
    for (int iParent = 0; iParent < m_ParentSets[nNode].getCardinalityOfParents(); iParent++) {
      for (int iValue = 0; iValue < m_Instances.attribute(nNode).numValues(); iValue++) {
        text.append(m_Distributions[nNode][iParent].getProbability(iValue));
        text.append(' ');
      }
      text.append('\n');
    }
    text.append("</TABLE>\n");
    text.append("</DEFINITION>\n");
  }
  text.append("</NETWORK>\n");
  text.append("</BIF>\n");
  return text.toString();
} // toXMLBIF03

/** undo stack for undoing edit actions, or redo edit actions */
FastVector m_undoStack = new FastVector();

/** current action in undo stack */
int m_nCurrentEditAction = -1;

/** position in the undo stack at which the network was last saved */
int m_nSavedPointer = -1;

/***************************************************************************
 * flag to indicate whether an edit action needs to introduce an undo
 * action. This is only false when an undo or redo action is performed.
 **************************************************************************/
boolean m_bNeedsUndoAction = true;

/** return whether there is something on the undo stack that can be performed */
public boolean canUndo() {
  return m_nCurrentEditAction >= 0;
}

/** return whether there is something on the undo stack that can be performed */
public boolean canRedo() {
  return m_nCurrentEditAction < m_undoStack.size() - 1;
}

/** return true when current state differs from the state the network was last saved */
public boolean isChanged() {
  return m_nCurrentEditAction != m_nSavedPointer;
}

/** indicate the network state was saved */
public void isSaved() {
  m_nSavedPointer = m_nCurrentEditAction;
}

/** get message representing the last action performed on the network */
public String lastActionMsg() {
  if (m_undoStack.size() == 0) {
    return "";
  }
  return ((UndoAction) m_undoStack.lastElement()).getRedoMsg();
} // lastActionMsg

/** undo the last edit action performed on the network.
 * returns message representing the action performed.
*/ public String undo() { if (!canUndo()) { return ""; } UndoAction undoAction = (UndoAction) m_undoStack.elementAt(m_nCurrentEditAction); m_bNeedsUndoAction = false; undoAction.undo(); m_bNeedsUndoAction = true; m_nCurrentEditAction--; // undo stack debugging /* if (m_nCurrentEditAction>0) { String sXML = (String) m_sXMLStack.elementAt(m_nCurrentEditAction); String sXMLCurrent = toXMLBIF03(); if (!sXML.equals(sXMLCurrent)) { String sDiff = ""; String sDiff2 = ""; for (int i = 0; i < sXML.length() && sDiff.length() < 80; i++) { if (sXML.charAt(i) != sXMLCurrent.charAt(i)) { sDiff += sXML.charAt(i); sDiff2 += sXMLCurrent.charAt(i); } } JOptionPane.showMessageDialog(null,"Undo error\n" + sDiff + " \n" + sDiff2); } } */ return undoAction.getUndoMsg(); } // undo /** redo the last edit action performed on the network. * returns message representing the action performed. */ public String redo() { if (!canRedo()) { return ""; } m_nCurrentEditAction++; UndoAction undoAction = (UndoAction) m_undoStack.elementAt(m_nCurrentEditAction); m_bNeedsUndoAction = false; undoAction.redo(); m_bNeedsUndoAction = true; // undo stack debugging /* if (m_nCurrentEditAction < m_sXMLStack.size()) { String sXML = (String) m_sXMLStack.elementAt(m_nCurrentEditAction); String sXMLCurrent = toXMLBIF03(); if (!sXML.equals(sXMLCurrent)) { String sDiff = ""; String sDiff2 = ""; for (int i = 0; i < sXML.length() && sDiff.length() < 80; i++) { if (sXML.charAt(i) != sXMLCurrent.charAt(i)) { sDiff += sXML.charAt(i); sDiff2 += sXMLCurrent.charAt(i); } } JOptionPane.showMessageDialog(null,"redo error\n" + sDiff + " \n" + sDiff2); } } */ return undoAction.getRedoMsg(); } // redo /** add undo action to the undo stack. 
 * @param action operation that needs to be added to the undo stack
 */
void addUndoAction(UndoAction action) {
  // discard any redoable actions beyond the current position
  int iAction = m_undoStack.size() - 1;
  while (iAction > m_nCurrentEditAction) {
    m_undoStack.removeElementAt(iAction--);
  }
  // the saved state is no longer reachable once its actions are discarded
  if (m_nSavedPointer > m_nCurrentEditAction) {
    m_nSavedPointer = -2;
  }
  m_undoStack.addElement(action);
  //m_sXMLStack.addElement(toXMLBIF03());
  m_nCurrentEditAction++;
} // addUndoAction

/** remove all actions from the undo stack */
public void clearUndoStack() {
  m_undoStack = new FastVector();
  //m_sXMLStack = new FastVector();
  m_nCurrentEditAction = -1;
  m_nSavedPointer = -1;
} // clearUndoStack

/** base class for actions representing operations on the Bayesian network
 * that can be undone/redone
 */
class UndoAction implements Serializable {
  /** for serialization */
  static final long serialVersionUID = 1;

  public void undo() {
  }

  public void redo() {
  }

  public String getUndoMsg() {
    return getMsg();
  }

  public String getRedoMsg() {
    return getMsg();
  }

  /** derives a human readable message from the runtime class name: takes the
   * text between '$' and '@' in toString() and spaces out the camel case */
  String getMsg() {
    String sStr = toString();
    int iStart = sStr.indexOf('$');
    int iEnd = sStr.indexOf('@');
    StringBuffer sBuffer = new StringBuffer();
    for(int i= iStart + 1; i < iEnd; i++) {
      char c = sStr.charAt(i);
      if (Character.isUpperCase(c)) {
        sBuffer.append(' ');
      }
      sBuffer.append(sStr.charAt(i));
    }
    return sBuffer.toString();
  } // getMsg
} // class UndoAction

/** undo/redo support for adding a node: undo deletes the (last added) node,
 * redo re-adds it with the recorded name, cardinality and position */
class AddNodeAction extends UndoAction {
  /** for serialization */
  static final long serialVersionUID = 1;
  String m_sName;
  int m_nPosX;
  int m_nPosY;
  int m_nCardinality;

  AddNodeAction(String sName, int nCardinality, int nPosX, int nPosY) {
    m_sName = sName;
    m_nCardinality = nCardinality;
    m_nPosX = nPosX;
    m_nPosY = nPosY;
  } // c'tor

  public void undo() {
    try {
      // an added node is always the last one in the network
      deleteNode(getNrOfNodes() - 1);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // undo

  public void redo() {
    try {
      addNode(m_sName, m_nCardinality, m_nPosX, m_nPosY);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo
} // class AddNodeAction

/** undo/redo support for deleting a node: snapshots the attribute, CPT,
 * parent set, position and all incoming/outgoing arcs so undo can restore
 * them (deep copies via serialization) */
class DeleteNodeAction extends UndoAction {
  /** for serialization */
  static final long serialVersionUID = 1;
  int m_nTargetNode;
  Attribute m_att;
  Estimator[] m_CPT;
  ParentSet m_ParentSet;
  FastVector m_deleteArcActions;
  int m_nPosX;
  int m_nPosY;

  DeleteNodeAction(int nTargetNode) {
    m_nTargetNode = nTargetNode;
    m_att = m_Instances.attribute(nTargetNode);
    try {
      // deep-copy the CPT and parent set so later edits cannot corrupt the snapshot
      SerializedObject so = new SerializedObject(m_Distributions[nTargetNode]);
      m_CPT = (Estimator[]) so.getObject();
      ;
      so = new SerializedObject(m_ParentSets[nTargetNode]);
      m_ParentSet = (ParentSet) so.getObject();
    } catch (Exception e) {
      e.printStackTrace();
    }
    // remember the arcs from this node to its children so undo can restore them
    m_deleteArcActions = new FastVector();
    for (int iNode = 0; iNode < getNrOfNodes(); iNode++) {
      if (m_ParentSets[iNode].contains(nTargetNode)) {
        m_deleteArcActions.addElement(new DeleteArcAction(nTargetNode, iNode));
      }
    }
    m_nPosX = getPositionX(m_nTargetNode);
    m_nPosY = getPositionY(m_nTargetNode);
  } // c'tor

  public void undo() {
    try {
      m_Instances.insertAttributeAt(m_att, m_nTargetNode);
      int nAtts = m_Instances.numAttributes();
      // update parentsets: reinsert the saved set and shift parent indexes
      // at or above the restored position up by one
      ParentSet[] parentSets = new ParentSet[nAtts];
      int nX = 0;
      for (int iParentSet = 0; iParentSet < nAtts; iParentSet++) {
        if (iParentSet == m_nTargetNode) {
          SerializedObject so = new SerializedObject(m_ParentSet);
          parentSets[iParentSet] = (ParentSet) so.getObject();
          nX = 1;
        } else {
          parentSets[iParentSet] = m_ParentSets[iParentSet - nX];
          for (int iParent = 0; iParent < parentSets[iParentSet].getNrOfParents(); iParent++) {
            int nParent = parentSets[iParentSet].getParent(iParent);
            if (nParent >= m_nTargetNode) {
              parentSets[iParentSet].SetParent(iParent, nParent + 1);
            }
          }
        }
      }
      m_ParentSets = parentSets;
      // update distributions: reinsert the saved CPT at the restored position
      Estimator[][] distributions = new Estimator[nAtts][];
      nX = 0;
      for (int iNode = 0; iNode < nAtts; iNode++) {
        if (iNode == m_nTargetNode) {
          SerializedObject so = new SerializedObject(m_CPT);
          distributions[iNode] = (Estimator[]) so.getObject();
          nX = 1;
        } else {
          distributions[iNode] = m_Distributions[iNode - nX];
        }
      }
      m_Distributions = distributions;
      // restore the arcs from the node to its former children
      for (int deletedArc = 0; deletedArc < m_deleteArcActions.size(); deletedArc++) {
        DeleteArcAction action = (DeleteArcAction) m_deleteArcActions.elementAt(deletedArc);
        action.undo();
      }
      m_nPositionX.insertElementAt(m_nPosX, m_nTargetNode);
      m_nPositionY.insertElementAt(m_nPosY, m_nTargetNode);
      m_nEvidence.insertElementAt(-1, m_nTargetNode);
      m_fMarginP.insertElementAt(new double[getCardinality(m_nTargetNode)], m_nTargetNode);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // undo

  public void redo() {
    try {
      deleteNode(m_nTargetNode);
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // redo
} // class DeleteNodeAction

/** undo/redo support for deleting a set of nodes: per-node snapshots of
 * attribute, CPT, parent set and position, plus the arcs into children
 * outside the selection */
class DeleteSelectionAction extends UndoAction {
  /** for serialization */
  static final long serialVersionUID = 1;
  FastVector m_nodes;
  Attribute[] m_att;
  Estimator[][] m_CPT;
  ParentSet[] m_ParentSet;
  FastVector m_deleteArcActions;
  int[] m_nPosX;
  int[] m_nPosY;

  public DeleteSelectionAction(FastVector nodes) {
    m_nodes = new FastVector();
    int nNodes = nodes.size();
    m_att = new Attribute[nNodes];
    m_CPT = new Estimator[nNodes][];
    m_ParentSet = new ParentSet[nNodes];
    m_nPosX = new int[nNodes];
    m_nPosY = new int[nNodes];
    m_deleteArcActions = new FastVector();
    for (int iNode = 0; iNode < nodes.size(); iNode++) {
      int nTargetNode = (Integer) nodes.elementAt(iNode);
      m_nodes.addElement(nTargetNode);
      m_att[iNode] = m_Instances.attribute(nTargetNode);
      try {
        // deep-copy CPT and parent set, as in DeleteNodeAction
        SerializedObject so = new SerializedObject(m_Distributions[nTargetNode]);
        m_CPT[iNode] = (Estimator[]) so.getObject();
        ;
        so = new SerializedObject(m_ParentSets[nTargetNode]);
        m_ParentSet[iNode] = (ParentSet) so.getObject();
      } catch (Exception e) {
        e.printStackTrace();
      }
      m_nPosX[iNode] = getPositionX(nTargetNode);
      m_nPosY[iNode] = getPositionY(nTargetNode);
      // only arcs to children OUTSIDE the selection need separate restore
      // actions; arcs within the selection are covered by the snapshots
      for (int iNode2 = 0; iNode2 < getNrOfNodes(); iNode2++) {
        if (!nodes.contains(iNode2) && m_ParentSets[iNode2].contains(nTargetNode)) {
          m_deleteArcActions.addElement(new DeleteArcAction(nTargetNode, iNode2));
        }
      }
    }
  } // c'tor

  public void undo() {
    try {
      for (int iNode
= 0; iNode < m_nodes.size(); iNode++) { int nTargetNode = (Integer) m_nodes.elementAt(iNode); m_Instances.insertAttributeAt(m_att[iNode], nTargetNode); } int nAtts = m_Instances.numAttributes(); // update parentsets ParentSet[] parentSets = new ParentSet[nAtts]; int[] offset = new int[nAtts]; for (int iNode = 0; iNode < nAtts; iNode++) { offset[iNode] = iNode; } for (int iNode = m_nodes.size() - 1; iNode >= 0; iNode--) { int nTargetNode = (Integer) m_nodes.elementAt(iNode); for (int i = nTargetNode; i < nAtts - 1; i++) { offset[i] = offset[i + 1]; } } int iTargetNode = 0; for (int iParentSet = 0; iParentSet < nAtts; iParentSet++) { if (iTargetNode < m_nodes.size() && (Integer) m_nodes.elementAt(iTargetNode) == (Integer) iParentSet) { SerializedObject so = new SerializedObject(m_ParentSet[iTargetNode]); parentSets[iParentSet] = (ParentSet) so.getObject(); iTargetNode++; } else { parentSets[iParentSet] = m_ParentSets[iParentSet - iTargetNode]; for (int iParent = 0; iParent < parentSets[iParentSet].getNrOfParents(); iParent++) { int nParent = parentSets[iParentSet].getParent(iParent); parentSets[iParentSet].SetParent(iParent, offset[nParent]); } } } m_ParentSets = parentSets; // update distributions Estimator[][] distributions = new Estimator[nAtts][]; iTargetNode = 0; for (int iNode = 0; iNode < nAtts; iNode++) { if (iTargetNode < m_nodes.size() && (Integer) m_nodes.elementAt(iTargetNode) == (Integer) iNode) { SerializedObject so = new SerializedObject(m_CPT[iTargetNode]); distributions[iNode] = (Estimator[]) so.getObject(); iTargetNode++; } else { distributions[iNode] = m_Distributions[iNode - iTargetNode]; } } m_Distributions = distributions; for (int iNode = 0; iNode < m_nodes.size(); iNode++) { int nTargetNode = (Integer) m_nodes.elementAt(iNode); m_nPositionX.insertElementAt(m_nPosX[iNode], nTargetNode); m_nPositionY.insertElementAt(m_nPosY[iNode], nTargetNode); m_nEvidence.insertElementAt(-1, nTargetNode); m_fMarginP.insertElementAt(new 
double[getCardinality(nTargetNode)], nTargetNode); } for (int deletedArc = 0; deletedArc < m_deleteArcActions.size(); deletedArc++) { DeleteArcAction action = (DeleteArcAction) m_deleteArcActions.elementAt(deletedArc); action.undo(); } } catch (Exception e) { e.printStackTrace(); } } // undo public void redo() { try { for (int iNode = m_nodes.size() - 1; iNode >= 0; iNode--) { int nNode = (Integer) m_nodes.elementAt(iNode); deleteNode(nNode); } } catch (Exception e) { e.printStackTrace(); } } // redo } // class DeleteSelectionAction class AddArcAction extends UndoAction { /** for serialization */ static final long serialVersionUID = 1; //int m_nChild; FastVector m_children; int m_nParent; Estimator[][] m_CPT; AddArcAction(int nParent, int nChild) { try { m_nParent = nParent; m_children = new FastVector(); m_children.addElement(nChild); //m_nChild = nChild; SerializedObject so = new SerializedObject(m_Distributions[nChild]); m_CPT = new Estimator[1][]; m_CPT[0] = (Estimator[]) so.getObject(); ; } catch (Exception e) { e.printStackTrace(); } } // c'tor AddArcAction(int nParent, FastVector children) { try { m_nParent = nParent; m_children = new FastVector(); m_CPT = new Estimator[children.size()][]; for (int iChild = 0; iChild < children.size(); iChild++) { int nChild = (Integer) children.elementAt(iChild); m_children.addElement(nChild); SerializedObject so = new SerializedObject(m_Distributions[nChild]); m_CPT[iChild] = (Estimator[]) so.getObject(); } } catch (Exception e) { e.printStackTrace(); } } // c'tor public void undo() { try { for (int iChild = 0; iChild < m_children.size(); iChild++) { int nChild = (Integer) m_children.elementAt(iChild); deleteArc(m_nParent, nChild); SerializedObject so = new SerializedObject(m_CPT[iChild]); m_Distributions[nChild] = (Estimator[]) so.getObject(); } } catch (Exception e) { e.printStackTrace(); } } // undo public void redo() { try { for (int iChild = 0; iChild < m_children.size(); iChild++) { int nChild = (Integer) 
m_children.elementAt(iChild); addArc(m_nParent, nChild); } } catch (Exception e) { e.printStackTrace(); } } // redo } // class AddArcAction class DeleteArcAction extends UndoAction { /** for serialization */ static final long serialVersionUID = 1; int[] m_nParents; int m_nChild; int m_nParent; Estimator[] m_CPT; DeleteArcAction(int nParent, int nChild) { try { m_nChild = nChild; m_nParent = nParent; m_nParents = new int[getNrOfParents(nChild)]; for (int iParent = 0; iParent < m_nParents.length; iParent++) { m_nParents[iParent] = getParent(nChild, iParent); } SerializedObject so = new SerializedObject(m_Distributions[nChild]); m_CPT = (Estimator[]) so.getObject(); } catch (Exception e) { e.printStackTrace(); } } // c'tor public void undo() { try { SerializedObject so = new SerializedObject(m_CPT); m_Distributions[m_nChild] = (Estimator[]) so.getObject(); ParentSet parentSet = new ParentSet(); for (int iParent = 0; iParent < m_nParents.length; iParent++) { parentSet.addParent(m_nParents[iParent], m_Instances); } m_ParentSets[m_nChild] = parentSet; } catch (Exception e) { e.printStackTrace(); } } // undo public void redo() { try { deleteArc(m_nParent, m_nChild); } catch (Exception e) { e.printStackTrace(); } } // redo } // class DeleteArcAction class SetDistributionAction extends UndoAction { /** for serialization */ static final long serialVersionUID = 1; int m_nTargetNode; Estimator[] m_CPT; double[][] m_P; SetDistributionAction(int nTargetNode, double[][] P) { try { m_nTargetNode = nTargetNode; SerializedObject so = new SerializedObject(m_Distributions[nTargetNode]); m_CPT = (Estimator[]) so.getObject(); ; m_P = P; } catch (Exception e) { e.printStackTrace(); } } // c'tor public void undo() { try { SerializedObject so = new SerializedObject(m_CPT); m_Distributions[m_nTargetNode] = (Estimator[]) so.getObject(); } catch (Exception e) { e.printStackTrace(); } } // undo public void redo() { try { setDistribution(m_nTargetNode, m_P); } catch (Exception e) { 
e.printStackTrace(); } } // redo public String getUndoMsg() { return "Distribution of node " + getNodeName(m_nTargetNode) + " changed"; } public String getRedoMsg() { return "Distribution of node " + getNodeName(m_nTargetNode) + " changed"; } } // class SetDistributionAction class RenameAction extends UndoAction { /** for serialization */ static final long serialVersionUID = 1; int m_nTargetNode; String m_sNewName; String m_sOldName; RenameAction(int nTargetNode, String sOldName, String sNewName) { m_nTargetNode = nTargetNode; m_sNewName = sNewName; m_sOldName = sOldName; } // c'tor public void undo() { setNodeName(m_nTargetNode, m_sOldName); } // undo public void redo() { setNodeName(m_nTargetNode, m_sNewName); } // redo } // class RenameAction class RenameValueAction extends RenameAction { /** for serialization */ static final long serialVersionUID = 1; RenameValueAction(int nTargetNode, String sOldName, String sNewName) { super(nTargetNode, sOldName, sNewName); } // c'tor public void undo() { renameNodeValue(m_nTargetNode, m_sNewName, m_sOldName); } // undo public void redo() { renameNodeValue(m_nTargetNode, m_sOldName, m_sNewName); } // redo public String getUndoMsg() { return "Value of node " + getNodeName(m_nTargetNode) + " changed from " + m_sNewName + " to " + m_sOldName; } public String getRedoMsg() { return "Value of node " + getNodeName(m_nTargetNode) + " changed from " + m_sOldName + " to " + m_sNewName; } } // class RenameValueAction class AddValueAction extends UndoAction { /** for serialization */ static final long serialVersionUID = 1; int m_nTargetNode; String m_sValue; AddValueAction(int nTargetNode, String sValue) { m_nTargetNode = nTargetNode; m_sValue = sValue; } // c'tor public void undo() { try { delNodeValue(m_nTargetNode, m_sValue); } catch (Exception e) { e.printStackTrace(); } } // undo public void redo() { addNodeValue(m_nTargetNode, m_sValue); } // redo public String getUndoMsg() { return "Value " + m_sValue + " removed from node " + 
getNodeName(m_nTargetNode); } public String getRedoMsg() { return "Value " + m_sValue + " added to node " + getNodeName(m_nTargetNode); } } // class AddValueAction class DelValueAction extends UndoAction { /** for serialization */ static final long serialVersionUID = 1; int m_nTargetNode; String m_sValue; Estimator[] m_CPT; FastVector m_children; Estimator[][] m_childAtts; Attribute m_att; DelValueAction(int nTargetNode, String sValue) { try { m_nTargetNode = nTargetNode; m_sValue = sValue; m_att = m_Instances.attribute(nTargetNode); SerializedObject so = new SerializedObject(m_Distributions[nTargetNode]); m_CPT = (Estimator[]) so.getObject(); ; m_children = new FastVector(); for (int iNode = 0; iNode < getNrOfNodes(); iNode++) { if (m_ParentSets[iNode].contains(nTargetNode)) { m_children.addElement(iNode); } } m_childAtts = new Estimator[m_children.size()][]; for (int iChild = 0; iChild < m_children.size(); iChild++) { int nChild = (Integer) m_children.elementAt(iChild); m_childAtts[iChild] = m_Distributions[nChild]; } } catch (Exception e) { e.printStackTrace(); } } // c'tor public void undo() { try { m_Instances.insertAttributeAt(m_att, m_nTargetNode); SerializedObject so = new SerializedObject(m_CPT); m_Distributions[m_nTargetNode] = (Estimator[]) so.getObject(); for (int iChild = 0; iChild < m_children.size(); iChild++) { int nChild = (Integer) m_children.elementAt(iChild); m_Instances.insertAttributeAt(m_att, m_nTargetNode); so = new SerializedObject(m_childAtts[iChild]); m_Distributions[nChild] = (Estimator[]) so.getObject(); } } catch (Exception e) { e.printStackTrace(); } } // undo public void redo() { try { delNodeValue(m_nTargetNode, m_sValue); } catch (Exception e) { e.printStackTrace(); } } // redo public String getUndoMsg() { return "Value " + m_sValue + " added to node " + getNodeName(m_nTargetNode); } public String getRedoMsg() { return "Value " + m_sValue + " removed from node " + getNodeName(m_nTargetNode); } } // class DelValueAction class 
alignAction extends UndoAction { /** for serialization */ static final long serialVersionUID = 1; FastVector m_nodes; FastVector m_posX; FastVector m_posY; alignAction(FastVector nodes) { m_nodes = new FastVector(nodes.size()); m_posX = new FastVector(nodes.size()); m_posY = new FastVector(nodes.size()); for (int iNode = 0; iNode < nodes.size(); iNode++) { int nNode = (Integer) nodes.elementAt(iNode); m_nodes.addElement(nNode); m_posX.addElement(getPositionX(nNode)); m_posY.addElement(getPositionY(nNode)); } } // c'tor public void undo() { try { for (int iNode = 0; iNode < m_nodes.size(); iNode++) { int nNode = (Integer) m_nodes.elementAt(iNode); setPosition(nNode, (Integer) m_posX.elementAt(iNode), (Integer) m_posY.elementAt(iNode)); } } catch (Exception e) { e.printStackTrace(); } } // undo } // class alignAction class alignLeftAction extends alignAction { /** for serialization */ static final long serialVersionUID = 1; public alignLeftAction(FastVector nodes) { super(nodes); } // c'tor public void redo() { try { alignLeft(m_nodes); } catch (Exception e) { e.printStackTrace(); } } // redo public String getUndoMsg() { return "Returning " + m_nodes.size() + " from aliging nodes to the left."; } public String getRedoMsg() { return "Aligning " + m_nodes.size() + " nodes to the left."; } } // class alignLeftAction class alignRightAction extends alignAction { /** for serialization */ static final long serialVersionUID = 1; public alignRightAction(FastVector nodes) { super(nodes); } // c'tor public void redo() { try { alignRight(m_nodes); } catch (Exception e) { e.printStackTrace(); } } // redo public String getUndoMsg() { return "Returning " + m_nodes.size() + " from aliging nodes to the right."; } public String getRedoMsg() { return "Aligning " + m_nodes.size() + " nodes to the right."; } } // class alignLeftAction class alignTopAction extends alignAction { /** for serialization */ static final long serialVersionUID = 1; public alignTopAction(FastVector nodes) { 
super(nodes); } // c'tor public void redo() { try { alignTop(m_nodes); } catch (Exception e) { e.printStackTrace(); } } // redo public String getUndoMsg() { return "Returning " + m_nodes.size() + " from aliging nodes to the top."; } public String getRedoMsg() { return "Aligning " + m_nodes.size() + " nodes to the top."; } } // class alignTopAction class alignBottomAction extends alignAction { /** for serialization */ static final long serialVersionUID = 1; public alignBottomAction(FastVector nodes) { super(nodes); } // c'tor public void redo() { try { alignBottom(m_nodes); } catch (Exception e) { e.printStackTrace(); } } // redo public String getUndoMsg() { return "Returning " + m_nodes.size() + " from aliging nodes to the bottom."; } public String getRedoMsg() { return "Aligning " + m_nodes.size() + " nodes to the bottom."; } } // class alignBottomAction class centerHorizontalAction extends alignAction { /** for serialization */ static final long serialVersionUID = 1; public centerHorizontalAction(FastVector nodes) { super(nodes); } // c'tor public void redo() { try { centerHorizontal(m_nodes); } catch (Exception e) { e.printStackTrace(); } } // redo public String getUndoMsg() { return "Returning " + m_nodes.size() + " from centering horizontally."; } public String getRedoMsg() { return "Centering " + m_nodes.size() + " nodes horizontally."; } } // class centerHorizontalAction class centerVerticalAction extends alignAction { /** for serialization */ static final long serialVersionUID = 1; public centerVerticalAction(FastVector nodes) { super(nodes); } // c'tor public void redo() { try { centerVertical(m_nodes); } catch (Exception e) { e.printStackTrace(); } } // redo public String getUndoMsg() { return "Returning " + m_nodes.size() + " from centering vertically."; } public String getRedoMsg() { return "Centering " + m_nodes.size() + " nodes vertically."; } } // class centerVerticalAction class spaceHorizontalAction extends alignAction { /** for serialization */ 
static final long serialVersionUID = 1; public spaceHorizontalAction(FastVector nodes) { super(nodes); } // c'tor public void redo() { try { spaceHorizontal(m_nodes); } catch (Exception e) { e.printStackTrace(); } } // redo public String getUndoMsg() { return "Returning " + m_nodes.size() + " from spaceing horizontally."; } public String getRedoMsg() { return "spaceing " + m_nodes.size() + " nodes horizontally."; } } // class spaceHorizontalAction class spaceVerticalAction extends alignAction { /** for serialization */ static final long serialVersionUID = 1; public spaceVerticalAction(FastVector nodes) { super(nodes); } // c'tor public void redo() { try { spaceVertical(m_nodes); } catch (Exception e) { e.printStackTrace(); } } // redo public String getUndoMsg() { return "Returning " + m_nodes.size() + " from spaceng vertically."; } public String getRedoMsg() { return "Spaceng " + m_nodes.size() + " nodes vertically."; } } // class spaceVerticalAction class SetPositionAction extends UndoAction { /** for serialization */ static final long serialVersionUID = 1; int m_nTargetNode; int m_nX; int m_nY; int m_nX2; int m_nY2; SetPositionAction(int nTargetNode, int nX, int nY) { m_nTargetNode = nTargetNode; m_nX2 = nX; m_nY2 = nY; m_nX = getPositionX(nTargetNode); m_nY = getPositionY(nTargetNode); } // c'tor public void undo() { setPosition(m_nTargetNode, m_nX, m_nY); } // undo public void redo() { setPosition(m_nTargetNode, m_nX2, m_nY2); } // redo public void setUndoPosition(int nX, int nY) { m_nX2 = nX; m_nY2 = nY; } // setPosition } // class SetPositionAction class SetGroupPositionAction extends UndoAction { /** for serialization */ static final long serialVersionUID = 1; FastVector m_nodes; int m_dX; int m_dY; SetGroupPositionAction(FastVector nodes, int dX, int dY) { m_nodes = new FastVector(nodes.size()); for (int iNode = 0; iNode < nodes.size(); iNode++) { m_nodes.addElement(nodes.elementAt(iNode)); } m_dX = dX; m_dY = dY; } // c'tor public void undo() { for (int 
iNode = 0; iNode < m_nodes.size(); iNode++) { int nNode = (Integer) m_nodes.elementAt(iNode); setPosition(nNode, getPositionX(nNode) - m_dX, getPositionY(nNode) - m_dY); } } // undo public void redo() { for (int iNode = 0; iNode < m_nodes.size(); iNode++) { int nNode = (Integer) m_nodes.elementAt(iNode); setPosition(nNode, getPositionX(nNode) + m_dX, getPositionY(nNode) + m_dY); } } // redo public void setUndoPosition(int dX, int dY) { m_dX += dX; m_dY += dY; } // setPosition } // class SetGroupPositionAction class LayoutGraphAction extends UndoAction { /** for serialization */ static final long serialVersionUID = 1; FastVector m_nPosX; FastVector m_nPosY; FastVector m_nPosX2; FastVector m_nPosY2; LayoutGraphAction(FastVector nPosX, FastVector nPosY) { m_nPosX = new FastVector(nPosX.size()); m_nPosY = new FastVector(nPosX.size()); m_nPosX2 = new FastVector(nPosX.size()); m_nPosY2 = new FastVector(nPosX.size()); for (int iNode = 0; iNode < nPosX.size(); iNode++) { m_nPosX.addElement(m_nPositionX.elementAt(iNode)); m_nPosY.addElement(m_nPositionY.elementAt(iNode)); m_nPosX2.addElement(nPosX.elementAt(iNode)); m_nPosY2.addElement(nPosY.elementAt(iNode)); } } // c'tor public void undo() { for (int iNode = 0; iNode < m_nPosX.size(); iNode++) { setPosition(iNode, (Integer) m_nPosX.elementAt(iNode), (Integer) m_nPosY.elementAt(iNode)); } } // undo public void redo() { for (int iNode = 0; iNode < m_nPosX.size(); iNode++) { setPosition(iNode, (Integer) m_nPosX2.elementAt(iNode), (Integer) m_nPosY2.elementAt(iNode)); } } // redo } // class LayoutGraphAction class PasteAction extends UndoAction { /** for serialization */ static final long serialVersionUID = 1; int m_nBase; String m_sXML; PasteAction(String sXML, int nBase) { m_sXML = sXML; m_nBase = nBase; } // c'tor public void undo() { try { int iNode = getNrOfNodes() - 1; while (iNode >= m_nBase) { deleteNode(iNode); iNode--; } } catch (Exception e) { e.printStackTrace(); } } // undo public void redo() { try { 
paste(m_sXML, EXECUTE); } catch (Exception e) { e.printStackTrace(); } } // redo } // class PasteAction /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * @param args */ public static void main(String[] args) { } // main } // class EditableBayesNet
83,525
30.189694
213
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/MarginCalculator.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * MarginCalculator.java
 * Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net;

import weka.classifiers.bayes.BayesNet;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

import java.io.Serializable;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.Vector;

/** Computes marginal distributions of the nodes of a Bayes network via a
 * junction tree: moralize, triangulate (fill-in), extract cliques and
 * separators, then propagate potentials up and down the tree. */
public class MarginCalculator implements Serializable, RevisionHandler {
  /** for serialization */
  private static final long serialVersionUID = 650278019241175534L;

  // when true, process() prints the clique tree to stdout
  boolean m_debug = false;
  // root of the junction tree built by process()
  public JunctionTreeNode m_root = null;
  // all junction tree nodes, indexed by clique index (null where no clique)
  JunctionTreeNode[] jtNodes;

  /** Looks up a node index by attribute name in the root's Bayes net.
   * @param sNodeName name to search for
   * @return the node index, or -1 if no attribute has that name */
  public int getNode(String sNodeName) {
    int iNode = 0;
    while (iNode < m_root.m_bayesNet.m_Instances.numAttributes()) {
      if (m_root.m_bayesNet.m_Instances.attribute(iNode).name().equals(sNodeName)) {
        return iNode;
      }
      iNode++;
    }
    //throw new Exception("Could not find node [[" + sNodeName + "]]");
    return -1;
  }

  /** Delegates XML BIF 0.3 serialization to the underlying Bayes net. */
  public String toXMLBIF03() {
    return m_root.m_bayesNet.toXMLBIF03();
  }

  /**
   * Calc marginal distributions of nodes in Bayesian network
   * Note that a connected network is assumed.
   * Unconnected networks may give unexpected results.
   * @param bayesNet
   */
  public void calcMargins(BayesNet bayesNet) throws Exception {
    //System.out.println(bayesNet.toString());
    boolean[][] bAdjacencyMatrix = moralize(bayesNet);
    process(bAdjacencyMatrix, bayesNet);
  } // calcMargins

  /** Like calcMargins, but starts from a fully-connected adjacency matrix,
   * forcing all nodes into a single clique. */
  public void calcFullMargins(BayesNet bayesNet) throws Exception {
    //System.out.println(bayesNet.toString());
    int nNodes = bayesNet.getNrOfNodes();
    boolean[][] bAdjacencyMatrix = new boolean[nNodes][nNodes];
    for (int iNode = 0; iNode < nNodes; iNode++) {
      for (int iNode2 = 0; iNode2 < nNodes; iNode2++) {
        bAdjacencyMatrix[iNode][iNode2] = true;
      }
    }
    process(bAdjacencyMatrix, bayesNet);
  } // calcMargins

  /** Builds the junction tree from an adjacency matrix and runs the initial
   * propagation: max-cardinality ordering, fill-in, cliques, separators,
   * clique tree, then initialize() to push potentials through the tree. */
  public void process(boolean[][] bAdjacencyMatrix, BayesNet bayesNet) throws Exception {
    int[] order = getMaxCardOrder(bAdjacencyMatrix);
    bAdjacencyMatrix = fillIn(order, bAdjacencyMatrix);
    // re-derive the ordering on the triangulated graph
    order = getMaxCardOrder(bAdjacencyMatrix);
    Set[] cliques = getCliques(order, bAdjacencyMatrix);
    Set[] separators = getSeparators(order, cliques);
    int[] parentCliques = getCliqueTree(order, cliques, separators);
    // report cliques
    int nNodes = bAdjacencyMatrix.length;
    if (m_debug) {
      for (int i = 0; i < nNodes; i++) {
        int iNode = order[i];
        if (cliques[iNode] != null) {
          System.out.print("Clique " + iNode + " (");
          Iterator nodes = cliques[iNode].iterator();
          while (nodes.hasNext()) {
            int iNode2 = (Integer) nodes.next();
            System.out.print(iNode2 + " " + bayesNet.getNodeName(iNode2));
            if (nodes.hasNext()) {
              System.out.print(",");
            }
          }
          System.out.print(") S(");
          nodes = separators[iNode].iterator();
          while (nodes.hasNext()) {
            int iNode2 = (Integer) nodes.next();
            System.out.print(iNode2 + " " + bayesNet.getNodeName(iNode2));
            if (nodes.hasNext()) {
              System.out.print(",");
            }
          }
          System.out.println(") parent clique " + parentCliques[iNode]);
        }
      }
    }
    jtNodes = getJunctionTree(cliques, separators, parentCliques, order, bayesNet);
    // the root is the (first) clique without a parent clique
    m_root = null;
    for (int iNode = 0; iNode < nNodes; iNode++) {
      if (parentCliques[iNode] < 0 && jtNodes[iNode] != null) {
        m_root = jtNodes[iNode];
        break;
      }
    }
    m_Margins = new double[nNodes][];
    initialize(jtNodes, order, cliques, separators, parentCliques);
    // sanity check: a root clique must have an empty separator
    for (int i = 0; i < nNodes; i++) {
      int iNode = order[i];
      if (cliques[iNode] != null) {
        if (parentCliques[iNode] == -1 && separators[iNode].size() > 0) {
          throw new Exception("Something wrong in clique tree");
        }
      }
    }
    if (m_debug) {
      //System.out.println(m_root.toString());
    }
  } // process

  /** Runs one full propagation pass: collect (initializeUp, leaves to root in
   * reverse elimination order) then distribute (initializeDown, root to leaves). */
  void initialize(JunctionTreeNode[] jtNodes, int[] order, Set[] cliques, Set[] separators, int[] parentCliques) {
    int nNodes = order.length;
    for (int i = nNodes - 1; i >= 0; i--) {
      int iNode = order[i];
      if (jtNodes[iNode] != null) {
        jtNodes[iNode].initializeUp();
      }
    }
    for (int i = 0; i < nNodes; i++) {
      int iNode = order[i];
      if (jtNodes[iNode] != null) {
        jtNodes[iNode].initializeDown(false);
      }
    }
  } // initialize

  /** Materializes the clique tree as JunctionTreeNode/JunctionTreeSeparator
   * objects, wiring each non-root clique to its parent via a separator.
   * @return one JunctionTreeNode per clique (null entries where no clique) */
  JunctionTreeNode[] getJunctionTree(Set[] cliques, Set[] separators, int[] parentCliques, int[] order, BayesNet bayesNet) {
    int nNodes = order.length;
    JunctionTreeNode root = null;
    JunctionTreeNode[] jtns = new JunctionTreeNode[nNodes];
    boolean[] bDone = new boolean[nNodes];
    // create junction tree nodes
    for (int i = 0; i < nNodes; i++) {
      int iNode = order[i];
      if (cliques[iNode] != null) {
        jtns[iNode] = new JunctionTreeNode(cliques[iNode], bayesNet, bDone);
      }
    }
    // create junction tree separators
    for (int i = 0; i < nNodes; i++) {
      int iNode = order[i];
      if (cliques[iNode] != null) {
        JunctionTreeNode parent = null;
        if (parentCliques[iNode] > 0) {
          parent = jtns[parentCliques[iNode]];
          JunctionTreeSeparator jts = new JunctionTreeSeparator(separators[iNode], bayesNet, jtns[iNode], parent);
          jtns[iNode].setParentSeparator(jts);
          jtns[parentCliques[iNode]].addChildClique(jtns[iNode]);
        } else {
          root = jtns[iNode];
        }
      }
    }
    return jtns;
  } // getJunctionTree

  /** Separator between a parent and a child clique in the junction tree;
   * holds the messages passed in each direction (m_fiParent / m_fiChild). */
  public class JunctionTreeSeparator implements Serializable, RevisionHandler {
    private static final long serialVersionUID = 6502780192411755343L;
    int[] m_nNodes;         // nodes in the separator set
    int m_nCardinality;     // product of cardinalities of separator nodes
    double[] m_fiParent;    // message from parent clique (normalized)
    double[] m_fiChild;     // message from child clique (normalized)
    JunctionTreeNode m_parentNode;
    JunctionTreeNode m_childNode;
    BayesNet m_bayesNet;

    JunctionTreeSeparator(Set separator, BayesNet bayesNet, JunctionTreeNode childNode, JunctionTreeNode parentNode) {
      //////////////////////
      // initialize node set
      m_nNodes = new int[separator.size()];
      int iPos = 0;
      m_nCardinality = 1;
      for (Iterator nodes = separator.iterator(); nodes.hasNext();) {
        int iNode = (Integer) nodes.next();
        m_nNodes[iPos++] = iNode;
        m_nCardinality *= bayesNet.getCardinality(iNode);
      }
      m_parentNode = parentNode;
      m_childNode = childNode;
      m_bayesNet = bayesNet;
    } // c'tor

    /** marginalize junciontTreeNode node over all nodes outside the separator set
     * of the parent clique
     * */
    public void updateFromParent() {
      double[] fis = update(m_parentNode);
      if (fis == null) {
        m_fiParent = null;
      } else {
        m_fiParent = fis;
        // normalize
        double sum = 0;
        for (int iPos = 0; iPos < m_nCardinality; iPos++) {
          sum += m_fiParent[iPos];
        }
        for (int iPos = 0; iPos < m_nCardinality; iPos++) {
          m_fiParent[iPos] /= sum;
        }
      }
    } // updateFromParent

    /** marginalize junciontTreeNode node over all nodes outside the separator set
     * of the child clique
     * */
    public void updateFromChild() {
      double[] fis = update(m_childNode);
      if (fis == null) {
        m_fiChild = null;
      } else {
        m_fiChild = fis;
        // normalize
        double sum = 0;
        for (int iPos = 0; iPos < m_nCardinality; iPos++) {
          sum += m_fiChild[iPos];
        }
        for (int iPos = 0; iPos < m_nCardinality; iPos++) {
          m_fiChild[iPos] /= sum;
        }
      }
    } // updateFromChild

    /** marginalize junciontTreeNode node over all nodes outside the separator set
     *
     * @param node one of the neighboring junciont tree nodes of this separator
     * @return the (unnormalized) marginal over the separator set, or null if
     *         the node's distribution has not been computed yet */
    public double[] update(JunctionTreeNode node) {
      if (node.m_P == null) {
        return null;
      }
      double[] fi = new double[m_nCardinality];
      int[] values = new int[node.m_nNodes.length];
      int[] order = new int[m_bayesNet.getNrOfNodes()];
      for (int iNode = 0; iNode < node.m_nNodes.length; iNode++) {
        order[node.m_nNodes[iNode]] = iNode;
      }
      // fill in the values
      // odometer-style enumeration of all value combinations in the clique,
      // accumulating the clique probability into the matching separator cell.
      for (int iPos = 0; iPos < node.m_nCardinality; iPos++) {
        int iNodeCPT = getCPT(node.m_nNodes, node.m_nNodes.length, values, order, m_bayesNet);
        int iSepCPT = getCPT(m_nNodes, m_nNodes.length, values, order, m_bayesNet);
        fi[iSepCPT] += node.m_P[iNodeCPT];
        // update values
        int i = 0;
        values[i]++;
        while (i < node.m_nNodes.length && values[i] == m_bayesNet.getCardinality(node.m_nNodes[i])) {
          values[i] = 0;
          i++;
          if (i < node.m_nNodes.length) {
            values[i]++;
          }
        }
      }
      return fi;
    } // update

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 8064 $");
    }
  } // class JunctionTreeSeparator

  /** A clique node in the junction tree: holds its member nodes, potential,
   * current joint distribution, and the per-node marginals computed from it. */
  public class JunctionTreeNode implements Serializable, RevisionHandler {
    private static final long serialVersionUID = 650278019241175536L;
    /** reference Bayes net for information about variables like name, cardinality, etc.
     * but not for relations between nodes **/
    BayesNet m_bayesNet;
    /** nodes of the Bayes net in this junction node **/
    public int[] m_nNodes;
    /** cardinality of the instances of variables in this junction node **/
    int m_nCardinality;
    /** potentials for first network **/
    double[] m_fi;
    /** distribution over this junction node according to first Bayes network **/
    double[] m_P;
    // per-node marginal distributions within this clique
    double[][] m_MarginalP;
    // separator linking this clique to its parent (null for the root)
    JunctionTreeSeparator m_parentSeparator;

    public void setParentSeparator(JunctionTreeSeparator parentSeparator) {
      m_parentSeparator = parentSeparator;
    }

    public Vector m_children;

    public void addChildClique(JunctionTreeNode child) {
      m_children.add(child);
    }

    /** Collect phase: combine this clique's potential with the child messages,
     * normalize, and send the result upward through the parent separator. */
    public void initializeUp() {
      m_P = new double[m_nCardinality];
      for (int iPos = 0; iPos < m_nCardinality; iPos++) {
        m_P[iPos] = m_fi[iPos];
      }
      int[] values = new int[m_nNodes.length];
      int[] order = new int[m_bayesNet.getNrOfNodes()];
      for (int iNode = 0; iNode < m_nNodes.length; iNode++) {
        order[m_nNodes[iNode]] = iNode;
      }
      for (Iterator child = m_children.iterator(); child.hasNext();) {
        JunctionTreeNode childNode = (JunctionTreeNode) child.next();
        JunctionTreeSeparator separator = childNode.m_parentSeparator;
        // Update the values
        for (int iPos = 0; iPos < m_nCardinality; iPos++) {
          int iSepCPT = getCPT(separator.m_nNodes, separator.m_nNodes.length, values, order, m_bayesNet);
          int iNodeCPT = getCPT(m_nNodes, m_nNodes.length, values, order, m_bayesNet);
          m_P[iNodeCPT] *= separator.m_fiChild[iSepCPT];
          // update values
          int i = 0;
          values[i]++;
          while (i < m_nNodes.length && values[i] == m_bayesNet.getCardinality(m_nNodes[i])) {
            values[i] = 0;
            i++;
            if (i < m_nNodes.length) {
              values[i]++;
            }
          }
        }
      }
      // normalize
      double sum = 0;
      for (int iPos = 0; iPos < m_nCardinality; iPos++) {
        sum += m_P[iPos];
      }
      for (int iPos = 0; iPos < m_nCardinality; iPos++) {
        m_P[iPos] /= sum;
      }
      if (m_parentSeparator != null) { // not a root node
        m_parentSeparator.updateFromChild();
      }
    } // initializeUp

    /** Distribute phase: absorb the parent message (ratio of parent to child
     * message), renormalize, refresh the separator, and recompute marginals.
     * @param recursively if true, continue down into all child cliques */
    public void initializeDown(boolean recursively) {
      if (m_parentSeparator == null) { // a root node
        calcMarginalProbabilities();
      } else {
        m_parentSeparator.updateFromParent();
        int[] values = new int[m_nNodes.length];
        int[] order = new int[m_bayesNet.getNrOfNodes()];
        for (int iNode = 0; iNode < m_nNodes.length; iNode++) {
          order[m_nNodes[iNode]] = iNode;
        }
        // Update the values
        for (int iPos = 0; iPos < m_nCardinality; iPos++) {
          int iSepCPT = getCPT(m_parentSeparator.m_nNodes, m_parentSeparator.m_nNodes.length, values, order, m_bayesNet);
          int iNodeCPT = getCPT(m_nNodes, m_nNodes.length, values, order, m_bayesNet);
          if (m_parentSeparator.m_fiChild[iSepCPT] > 0) {
            m_P[iNodeCPT] *= m_parentSeparator.m_fiParent[iSepCPT] / m_parentSeparator.m_fiChild[iSepCPT];
          } else {
            m_P[iNodeCPT] = 0;
          }
          // update values
          int i = 0;
          values[i]++;
          while (i < m_nNodes.length && values[i] == m_bayesNet.getCardinality(m_nNodes[i])) {
            values[i] = 0;
            i++;
            if (i < m_nNodes.length) {
              values[i]++;
            }
          }
        }
        // normalize
        double sum = 0;
        for (int iPos = 0; iPos < m_nCardinality; iPos++) {
          sum += m_P[iPos];
        }
        for (int iPos = 0; iPos < m_nCardinality; iPos++) {
          m_P[iPos] /= sum;
        }
        m_parentSeparator.updateFromChild();
        calcMarginalProbabilities();
      }
      if (recursively) {
        for (Iterator child = m_children.iterator(); child.hasNext();) {
          JunctionTreeNode childNode = (JunctionTreeNode) child.next();
          childNode.initializeDown(true);
        }
      }
    } // initializeDown

    /** calculate marginal probabilities for the individual nodes in the clique.
     * Store results in m_MarginalP (and publish them into m_Margins). */
    void calcMarginalProbabilities() {
      // calculate marginal probabilities
      int[] values = new int[m_nNodes.length];
      int[] order = new int[m_bayesNet.getNrOfNodes()];
      m_MarginalP = new double[m_nNodes.length][];
      for (int iNode = 0; iNode < m_nNodes.length; iNode++) {
        order[m_nNodes[iNode]] = iNode;
        m_MarginalP[iNode] = new double[m_bayesNet.getCardinality(m_nNodes[iNode])];
      }
      for (int iPos = 0; iPos < m_nCardinality; iPos++) {
        int iNodeCPT = getCPT(m_nNodes, m_nNodes.length, values, order, m_bayesNet);
        for (int iNode = 0; iNode < m_nNodes.length; iNode++) {
          m_MarginalP[iNode][values[iNode]] += m_P[iNodeCPT];
        }
        // update values
        int i = 0;
        values[i]++;
        while (i < m_nNodes.length && values[i] == m_bayesNet.getCardinality(m_nNodes[i])) {
          values[i] = 0;
          i++;
          if (i < m_nNodes.length) {
            values[i]++;
          }
        }
      }
      for (int iNode = 0; iNode < m_nNodes.length; iNode++) {
        m_Margins[m_nNodes[iNode]] = m_MarginalP[iNode];
      }
    } // calcMarginalProbabilities

    /** Pretty-prints this clique's marginals and recurses into children. */
    public String toString() {
      StringBuffer buf = new StringBuffer();
      for (int iNode = 0; iNode < m_nNodes.length; iNode++) {
        buf.append(m_bayesNet.getNodeName(m_nNodes[iNode]) + ": ");
        for (int iValue = 0; iValue < m_MarginalP[iNode].length; iValue++) {
          buf.append(m_MarginalP[iNode][iValue] + " ");
        }
        buf.append('\n');
      }
      for (Iterator child = m_children.iterator(); child.hasNext();) {
        JunctionTreeNode childNode = (JunctionTreeNode) child.next();
        buf.append("----------------\n");
        buf.append(childNode.toString());
      }
      return buf.toString();
    } // toString

    void calculatePotentials(BayesNet bayesNet, Set clique, boolean[] bDone) {
      m_fi = new double[m_nCardinality];
      int[]
values = new int[m_nNodes.length]; int [] order = new int[bayesNet.getNrOfNodes()]; for (int iNode = 0; iNode < m_nNodes.length; iNode++) { order[m_nNodes[iNode]] = iNode; } // find conditional probabilities that need to be taken in account boolean [] bIsContained = new boolean[m_nNodes.length]; for (int iNode = 0; iNode < m_nNodes.length; iNode++) { int nNode = m_nNodes[iNode]; bIsContained[iNode] = !bDone[nNode]; for (int iParent = 0; iParent < bayesNet.getNrOfParents(nNode); iParent++) { int nParent = bayesNet.getParent(nNode, iParent); if (!clique.contains(nParent)) { bIsContained[iNode] = false; } } if (bIsContained[iNode]) { bDone[nNode] = true; if (m_debug) { System.out.println("adding node " +nNode); } } } // fill in the values for (int iPos = 0; iPos < m_nCardinality; iPos++) { int iCPT = getCPT(m_nNodes, m_nNodes.length, values, order, bayesNet); m_fi[iCPT] = 1.0; for (int iNode = 0; iNode < m_nNodes.length; iNode++) { if (bIsContained[iNode]) { int nNode = m_nNodes[iNode]; int [] nNodes = bayesNet.getParentSet(nNode).getParents(); int iCPT2 = getCPT(nNodes, bayesNet.getNrOfParents(nNode), values, order, bayesNet); double f = bayesNet.getDistributions()[nNode][iCPT2].getProbability(values[iNode]); m_fi[iCPT] *= f; } } // update values int i = 0; values[i]++; while (i < m_nNodes.length && values[i] == bayesNet.getCardinality(m_nNodes[i])) { values[i] = 0; i++; if (i < m_nNodes.length) { values[i]++; } } } } // calculatePotentials JunctionTreeNode(Set clique, BayesNet bayesNet, boolean [] bDone) { m_bayesNet = bayesNet; m_children = new Vector(); ////////////////////// // initialize node set m_nNodes = new int[clique.size()]; int iPos = 0; m_nCardinality = 1; for(Iterator nodes = clique.iterator(); nodes.hasNext();) { int iNode = (Integer) nodes.next(); m_nNodes[iPos++] = iNode; m_nCardinality *= bayesNet.getCardinality(iNode); } //////////////////////////////// // initialize potential function calculatePotentials(bayesNet, clique, bDone); } // 
JunctionTreeNode c'tor /* check whether this junciton tree node contains node nNode * */ boolean contains(int nNode) { for (int iNode = 0; iNode < m_nNodes.length; iNode++) { if (m_nNodes[iNode]== nNode){ return true; } } return false; } // contains public void setEvidence(int nNode, int iValue) throws Exception { int [] values = new int[m_nNodes.length]; int [] order = new int[m_bayesNet.getNrOfNodes()]; int nNodeIdx = -1; for (int iNode = 0; iNode < m_nNodes.length; iNode++) { order[m_nNodes[iNode]] = iNode; if (m_nNodes[iNode] == nNode) { nNodeIdx = iNode; } } if (nNodeIdx < 0) { throw new Exception("setEvidence: Node " + nNode + " not found in this clique"); } for (int iPos = 0; iPos < m_nCardinality; iPos++) { if (values[nNodeIdx] != iValue) { int iNodeCPT = getCPT(m_nNodes, m_nNodes.length, values, order, m_bayesNet); m_P[iNodeCPT] = 0; } // update values int i = 0; values[i]++; while (i < m_nNodes.length && values[i] == m_bayesNet.getCardinality(m_nNodes[i])) { values[i] = 0; i++; if (i < m_nNodes.length) { values[i]++; } } } // normalize double sum = 0; for (int iPos = 0; iPos < m_nCardinality; iPos++) { sum += m_P[iPos]; } for (int iPos = 0; iPos < m_nCardinality; iPos++) { m_P[iPos] /= sum; } calcMarginalProbabilities(); updateEvidence(this); } // setEvidence void updateEvidence(JunctionTreeNode source) { if (source != this) { int [] values = new int[m_nNodes.length]; int [] order = new int[m_bayesNet.getNrOfNodes()]; for (int iNode = 0; iNode < m_nNodes.length; iNode++) { order[m_nNodes[iNode]] = iNode; } int [] nChildNodes = source.m_parentSeparator.m_nNodes; int nNumChildNodes = nChildNodes.length; for (int iPos = 0; iPos < m_nCardinality; iPos++) { int iNodeCPT = getCPT(m_nNodes, m_nNodes.length, values, order, m_bayesNet); int iChildCPT = getCPT(nChildNodes, nNumChildNodes, values, order, m_bayesNet); if (source.m_parentSeparator.m_fiParent[iChildCPT] != 0) { m_P[iNodeCPT] *= 
source.m_parentSeparator.m_fiChild[iChildCPT]/source.m_parentSeparator.m_fiParent[iChildCPT]; } else { m_P[iNodeCPT] = 0; } // update values int i = 0; values[i]++; while (i < m_nNodes.length && values[i] == m_bayesNet.getCardinality(m_nNodes[i])) { values[i] = 0; i++; if (i < m_nNodes.length) { values[i]++; } } } // normalize double sum = 0; for (int iPos = 0; iPos < m_nCardinality; iPos++) { sum += m_P[iPos]; } for (int iPos = 0; iPos < m_nCardinality; iPos++) { m_P[iPos] /= sum; } calcMarginalProbabilities(); } for (Iterator child = m_children.iterator(); child.hasNext(); ) { JunctionTreeNode childNode = (JunctionTreeNode) child.next(); if (childNode != source) { childNode.initializeDown(true); } } if (m_parentSeparator != null) { m_parentSeparator.updateFromChild(); m_parentSeparator.m_parentNode.updateEvidence(this); m_parentSeparator.updateFromParent(); } } // updateEvidence /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8064 $"); } } // class JunctionTreeNode int getCPT(int [] nodeSet, int nNodes, int[] values, int[] order, BayesNet bayesNet) { int iCPTnew = 0; for (int iNode = 0; iNode < nNodes; iNode++) { int nNode = nodeSet[iNode]; iCPTnew = iCPTnew * bayesNet.getCardinality(nNode); iCPTnew += values[order[nNode]]; } return iCPTnew; } // getCPT int [] getCliqueTree(int [] order, Set [] cliques, Set [] separators) { int nNodes = order.length; int [] parentCliques = new int[nNodes]; //for (int i = nNodes - 1; i >= 0; i--) { for (int i = 0; i < nNodes; i++) { int iNode = order[i]; parentCliques[iNode] = -1; if (cliques[iNode] != null && separators[iNode].size() > 0) { //for (int j = nNodes - 1; j > i; j--) { for (int j = 0; j < nNodes; j++) { int iNode2 = order[j]; if (iNode!= iNode2 && cliques[iNode2] != null && cliques[iNode2].containsAll(separators[iNode])) { parentCliques[iNode] = iNode2; j = i; j = 0; j = nNodes; } } } } return parentCliques; } // getCliqueTree /** 
calculate separator sets in clique tree * * @param order: maximum cardinality ordering of the graph * @param cliques: set of cliques * @return set of separator sets */ Set [] getSeparators(int [] order, Set [] cliques) { int nNodes = order.length; Set [] separators = new HashSet[nNodes]; Set processedNodes = new HashSet(); //for (int i = nNodes - 1; i >= 0; i--) { for (int i = 0; i < nNodes; i++) { int iNode = order[i]; if (cliques[iNode] != null) { Set separator = new HashSet(); separator.addAll(cliques[iNode]); separator.retainAll(processedNodes); separators[iNode] = separator; processedNodes.addAll(cliques[iNode]); } } return separators; } // getSeparators /** * get cliques in a decomposable graph represented by an adjacency matrix * * @param order: maximum cardinality ordering of the graph * @param bAdjacencyMatrix: decomposable graph * @return set of cliques */ Set [] getCliques(int[] order, boolean[][] bAdjacencyMatrix) throws Exception { int nNodes = bAdjacencyMatrix.length; Set [] cliques = new HashSet[nNodes]; //int[] inverseOrder = new int[nNodes]; //for (int iNode = 0; iNode < nNodes; iNode++) { //inverseOrder[order[iNode]] = iNode; //} // consult nodes in reverse order for (int i = nNodes - 1; i >= 0; i--) { int iNode = order[i]; if (iNode == 22) { int h = 3; h ++; } Set clique = new HashSet(); clique.add(iNode); for (int j = 0; j < i; j++) { int iNode2 = order[j]; if (bAdjacencyMatrix[iNode][iNode2]) { clique.add(iNode2); } } //for (int iNode2 = 0; iNode2 < nNodes; iNode2++) { //if (bAdjacencyMatrix[iNode][iNode2] && inverseOrder[iNode2] < inverseOrder[iNode]) { //clique.add(iNode2); //} //} cliques[iNode] = clique; } for (int iNode = 0; iNode < nNodes; iNode++) { for (int iNode2 = 0; iNode2 < nNodes; iNode2++) { if (iNode != iNode2 && cliques[iNode]!= null && cliques[iNode2]!= null && cliques[iNode].containsAll(cliques[iNode2])) { cliques[iNode2] = null; } } } // sanity check if (m_debug) { int [] nNodeSet = new int[nNodes]; for (int iNode = 0; iNode 
< nNodes; iNode++) { if (cliques[iNode] != null) { Iterator it = cliques[iNode].iterator(); int k = 0; while (it.hasNext()) { nNodeSet[k++] = (Integer) it.next(); } for (int i = 0; i < cliques[iNode].size(); i++) { for (int j = 0; j < cliques[iNode].size(); j++) { if (i!=j && !bAdjacencyMatrix[nNodeSet[i]][nNodeSet[j]]) { throw new Exception("Non clique" + i + " " + j); } } } } } } return cliques; } // getCliques /** * moralize DAG and calculate * adjacency matrix representation for a Bayes Network, effecively * converting the directed acyclic graph to an undirected graph. * * @param bayesNet * Bayes Network to process * @return adjacencies in boolean matrix format */ public boolean[][] moralize(BayesNet bayesNet) { int nNodes = bayesNet.getNrOfNodes(); boolean[][] bAdjacencyMatrix = new boolean[nNodes][nNodes]; for (int iNode = 0; iNode < nNodes; iNode++) { ParentSet parents = bayesNet.getParentSets()[iNode]; moralizeNode(parents, iNode, bAdjacencyMatrix); } return bAdjacencyMatrix; } // moralize private void moralizeNode(ParentSet parents, int iNode, boolean[][] bAdjacencyMatrix) { for (int iParent = 0; iParent < parents.getNrOfParents(); iParent++) { int nParent = parents.getParent(iParent); if ( m_debug && !bAdjacencyMatrix[iNode][nParent]) System.out.println("Insert " + iNode + "--" + nParent); bAdjacencyMatrix[iNode][nParent] = true; bAdjacencyMatrix[nParent][iNode] = true; for (int iParent2 = iParent + 1; iParent2 < parents.getNrOfParents(); iParent2++) { int nParent2 = parents.getParent(iParent2); if (m_debug && !bAdjacencyMatrix[nParent2][nParent]) System.out.println("Mary " + nParent + "--" + nParent2); bAdjacencyMatrix[nParent2][nParent] = true; bAdjacencyMatrix[nParent][nParent2] = true; } } } // moralizeNode /** * Apply Tarjan and Yannakakis (1984) fill in algorithm for graph * triangulation. In reverse order, insert edges between any non-adjacent * neighbors that are lower numbered in the ordering. 
* * Side effect: input matrix is used as output * * @param order * node ordering * @param bAdjacencyMatrix * boolean matrix representing the graph * @return boolean matrix representing the graph with fill ins */ public boolean[][] fillIn(int[] order, boolean[][] bAdjacencyMatrix) { int nNodes = bAdjacencyMatrix.length; int[] inverseOrder = new int[nNodes]; for (int iNode = 0; iNode < nNodes; iNode++) { inverseOrder[order[iNode]] = iNode; } // consult nodes in reverse order for (int i = nNodes - 1; i >= 0; i--) { int iNode = order[i]; // find pairs of neighbors with lower order for (int j = 0; j < i; j++) { int iNode2 = order[j]; if (bAdjacencyMatrix[iNode][iNode2]) { for (int k = j+1; k < i; k++) { int iNode3 = order[k]; if (bAdjacencyMatrix[iNode][iNode3]) { // fill in if (m_debug && (!bAdjacencyMatrix[iNode2][iNode3] || !bAdjacencyMatrix[iNode3][iNode2]) ) System.out.println("Fill in " + iNode2 + "--" + iNode3); bAdjacencyMatrix[iNode2][iNode3] = true; bAdjacencyMatrix[iNode3][iNode2] = true; } } } } } return bAdjacencyMatrix; } // fillIn /** * calculate maximum cardinality ordering; start with first node add node * that has most neighbors already ordered till all nodes are in the * ordering * * This implementation does not assume the graph is connected * * @param bAdjacencyMatrix: * n by n matrix with adjacencies in graph of n nodes * @return maximum cardinality ordering */ int[] getMaxCardOrder(boolean[][] bAdjacencyMatrix) { int nNodes = bAdjacencyMatrix.length; int[] order = new int[nNodes]; if (nNodes==0) {return order;} boolean[] bDone = new boolean[nNodes]; // start with node 0 order[0] = 0; bDone[0] = true; // order remaining nodes for (int iNode = 1; iNode < nNodes; iNode++) { int nMaxCard = -1; int iBestNode = -1; // find node with higest cardinality of previously ordered nodes for (int iNode2 = 0; iNode2 < nNodes; iNode2++) { if (!bDone[iNode2]) { int nCard = 0; // calculate cardinality for node iNode2 for (int iNode3 = 0; iNode3 < nNodes; iNode3++) { 
if (bAdjacencyMatrix[iNode2][iNode3] && bDone[iNode3]) { nCard++; } } if (nCard > nMaxCard) { nMaxCard = nCard; iBestNode = iNode2; } } } order[iNode] = iBestNode; bDone[iBestNode] = true; } return order; } // getMaxCardOrder public void setEvidence(int nNode, int iValue) throws Exception { if (m_root == null) { throw new Exception("Junction tree not initialize yet"); } int iJtNode = 0; while (iJtNode < jtNodes.length && (jtNodes[iJtNode] == null ||!jtNodes[iJtNode].contains(nNode))) { iJtNode++; } if (jtNodes.length == iJtNode) { throw new Exception("Could not find node " + nNode + " in junction tree"); } jtNodes[iJtNode].setEvidence(nNode, iValue); } // setEvidence public String toString() { return m_root.toString(); } // toString double [][] m_Margins; public double [] getMargin(int iNode) { return m_Margins[iNode]; } // getMargin /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8064 $"); } public static void main(String[] args) { try { BIFReader bayesNet = new BIFReader(); bayesNet.processFile(args[0]); MarginCalculator dc = new MarginCalculator(); dc.calcMargins(bayesNet); int iNode = 2; int iValue = 0; int iNode2 = 4; int iValue2 = 0; dc.setEvidence(iNode, iValue); dc.setEvidence(iNode2, iValue2); System.out.print(dc.toString()); dc.calcFullMargins(bayesNet); dc.setEvidence(iNode, iValue); dc.setEvidence(iNode2, iValue2); System.out.println("=============="); System.out.print(dc.toString()); } catch (Exception e) { e.printStackTrace(); } } // main } // class MarginCalculator
30,361
30.268795
128
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/ParentSet.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ParentSet.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net; import java.io.Serializable; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * Helper class for Bayes Network classifiers. Provides datastructures to * represent a set of parents in a graph. 
* * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision: 8034 $ */ public class ParentSet implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = 4155021284407181838L; /** * Holds indexes of parents */ private int[] m_nParents; /** * returns index parent of parent specified by index * * @param iParent Index of parent * @return index of parent */ public int getParent(int iParent) { return m_nParents[iParent]; } public int [] getParents() {return m_nParents;} /** * sets index parent of parent specified by index * * @param iParent Index of parent * @param nNode index of the node that becomes parent */ public void SetParent(int iParent, int nNode) { m_nParents[iParent] = nNode; } // SetParent /** * Holds number of parents */ private int m_nNrOfParents = 0; /** * returns number of parents * @return number of parents */ public int getNrOfParents() { return m_nNrOfParents; } /** * test if node is contained in parent set * @param iNode node to test for * @return number of parents */ public boolean contains(int iNode) { for (int iParent = 0; iParent < m_nNrOfParents; iParent++) { if (m_nParents[iParent] == iNode) { return true; } } return false; } /** * Holds cardinality of parents (= number of instantiations the parents can take) */ private int m_nCardinalityOfParents = 1; /** * returns cardinality of parents * * @return the cardinality */ public int getCardinalityOfParents() { return m_nCardinalityOfParents; } /** * returns cardinality of parents after recalculation * * @return the cardinality */ public int getFreshCardinalityOfParents(Instances _Instances) { m_nCardinalityOfParents = 1; for (int iParent = 0; iParent < m_nNrOfParents; iParent++) { m_nCardinalityOfParents *= _Instances.attribute(m_nParents[iParent]).numValues(); } return m_nCardinalityOfParents; } /** * default constructor */ public ParentSet() { m_nParents = new int[10]; m_nNrOfParents = 0; m_nCardinalityOfParents = 1; } // ParentSet /** * 
constructor * @param nMaxNrOfParents upper bound on nr of parents */ public ParentSet(int nMaxNrOfParents) { m_nParents = new int[nMaxNrOfParents]; m_nNrOfParents = 0; m_nCardinalityOfParents = 1; } // ParentSet /** * copy constructor * @param other other parent set */ public ParentSet(ParentSet other) { m_nNrOfParents = other.m_nNrOfParents; m_nCardinalityOfParents = other.m_nCardinalityOfParents; m_nParents = new int[m_nNrOfParents]; for (int iParent = 0; iParent < m_nNrOfParents; iParent++) { m_nParents[iParent] = other.m_nParents[iParent]; } } // ParentSet /** * reserve memory for parent set * * @param nSize maximum size of parent set to reserver memory for */ public void maxParentSetSize(int nSize) { m_nParents = new int[nSize]; } // MaxParentSetSize /** * Add parent to parent set and update internals (specifically the cardinality of the parent set) * * @param nParent parent to add * @param _Instances used for updating the internals */ public void addParent(int nParent, Instances _Instances) { if (m_nNrOfParents == m_nParents.length) { // 10) { // reserve more memory int [] nParents = new int[2 * m_nParents.length]; // 50]; for (int i = 0; i < m_nNrOfParents; i++) { nParents[i] = m_nParents[i]; } m_nParents = nParents; } m_nParents[m_nNrOfParents] = nParent; m_nNrOfParents++; m_nCardinalityOfParents *= _Instances.attribute(nParent).numValues(); } // AddParent /** * Add parent to parent set at specific location * and update internals (specifically the cardinality of the parent set) * * @param nParent parent to add * @param iParent location to add parent in parent set * @param _Instances used for updating the internals */ public void addParent(int nParent, int iParent, Instances _Instances) { if (m_nNrOfParents == m_nParents.length) { // 10) { // reserve more memory int [] nParents = new int[2 * m_nParents.length]; // 50]; for (int i = 0; i < m_nNrOfParents; i++) { nParents[i] = m_nParents[i]; } m_nParents = nParents; } for (int iParent2 = m_nNrOfParents; 
iParent2 > iParent; iParent2--) { m_nParents[iParent2] = m_nParents[iParent2 - 1]; } m_nParents[iParent] = nParent; m_nNrOfParents++; m_nCardinalityOfParents *= _Instances.attribute(nParent).numValues(); } // AddParent /** delete node from parent set * @param nParent node number of the parent to delete * @param _Instances data set * @return location of the parent in the parent set. This information can be * used to restore the parent set using the addParent method. */ public int deleteParent(int nParent, Instances _Instances) { int iParent = 0; while ((m_nParents[iParent] != nParent) && (iParent < m_nNrOfParents)) { iParent++; } int iParent2 = -1; if (iParent < m_nNrOfParents) { iParent2 = iParent; } if (iParent < m_nNrOfParents) { while (iParent < m_nNrOfParents - 1) { m_nParents[iParent] = m_nParents[iParent + 1]; iParent++; } m_nNrOfParents--; m_nCardinalityOfParents /= _Instances.attribute(nParent).numValues(); } return iParent2; } // DeleteParent /** * Delete last added parent from parent set and update internals (specifically the cardinality of the parent set) * * @param _Instances used for updating the internals */ public void deleteLastParent(Instances _Instances) { m_nNrOfParents--; m_nCardinalityOfParents = m_nCardinalityOfParents / _Instances.attribute(m_nParents[m_nNrOfParents]).numValues(); } // DeleteLastParent /** Copy makes current parents set equal to other parent set * * @param other : parent set to make a copy from */ public void copy(ParentSet other) { m_nCardinalityOfParents = other.m_nCardinalityOfParents; m_nNrOfParents = other.m_nNrOfParents; for (int iParent = 0; iParent < m_nNrOfParents; iParent++) { m_nParents[iParent] = other.m_nParents[iParent]; } } // Copy /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // class ParentSet
7,690
27.697761
115
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/VaryNode.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * VaryNode.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net; import java.io.Serializable; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * Part of ADTree implementation. See ADNode.java for more details. * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision: 8034 $ */ public class VaryNode implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -6196294370675872424L; /** index of the node varied **/ public int m_iNode; /** most common value **/ public int m_nMCV; /** list of ADNode children **/ public ADNode [] m_ADNodes; /** Creates new VaryNode */ public VaryNode(int iNode) { m_iNode = iNode; } /** * get counts for specific instantiation of a set of nodes * * @param nCounts array for storing counts * @param nNodes array of node indexes * @param nOffsets offset for nodes in nNodes in nCounts * @param iNode index into nNode indicating current node * @param iOffset Offset into nCounts due to nodes below iNode * @param parent parant ADNode of this VaryNode * @param bSubstract indicate whether counts should be added or substracted */ public void getCounts( int [] nCounts, int [] nNodes, int [] nOffsets, int iNode, int iOffset, ADNode parent, boolean bSubstract) { int nCurrentNode = 
nNodes[iNode]; for (int iValue = 0 ; iValue < m_ADNodes.length; iValue++) { if (iValue != m_nMCV) { if (m_ADNodes[iValue] != null) { m_ADNodes[iValue].getCounts(nCounts, nNodes, nOffsets, iNode + 1, iOffset + nOffsets[iNode] * iValue, bSubstract); } } else { parent.getCounts(nCounts, nNodes, nOffsets, iNode + 1, iOffset + nOffsets[iNode] * iValue, bSubstract); for (int iValue2 = 0; iValue2 < m_ADNodes.length; iValue2++) { if (iValue2 != m_nMCV && m_ADNodes[iValue2] != null) { m_ADNodes[iValue2].getCounts(nCounts, nNodes, nOffsets, iNode + 1, iOffset + nOffsets[iNode] * iValue, !bSubstract); } } } } } /** * print is used for debugging only, called from ADNode * * @param sTab amount of space. */ public void print(String sTab) { for (int iValue = 0; iValue < m_ADNodes.length; iValue++) { System.out.print(sTab + iValue + ": "); if (m_ADNodes[iValue] == null) { if (iValue == m_nMCV) { System.out.println("MCV"); } else { System.out.println("null"); } } else { System.out.println(); m_ADNodes[iValue].print(); } } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
3,562
25.198529
77
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/estimate/BMAEstimator.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * BayesNet.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.estimate; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.search.local.K2; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.Statistics; import weka.core.Utils; import weka.estimators.Estimator; /** <!-- globalinfo-start --> * BMAEstimator estimates conditional probability tables of a Bayes network using Bayes Model Averaging (BMA). * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -k2 * Whether to use K2 prior. 
* </pre>
*
* <pre> -A &lt;alpha&gt;
*  Initial count (alpha)
* </pre>
*
* <!-- options-end -->
*
* @author Remco Bouckaert (rrb@xm.co.nz)
* @version $Revision: 8034 $
*/
public class BMAEstimator extends SimpleEstimator {

  /** for serialization */
  static final long serialVersionUID = -1846028304233257309L;

  /** whether to use the K2 (Cooper and Herskovits) prior; otherwise the BDe prior is used */
  protected boolean m_bUseK2Prior = false;

  /**
   * Returns a string describing this object
   *
   * @return a description of the classifier suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "BMAEstimator estimates conditional probability tables of a Bayes " + "network using Bayes Model Averaging (BMA).";
  }

  /**
   * estimateCPTs estimates the conditional probability tables for the Bayes
   * Net using the network structure. Two reference networks are trained on the
   * same data -- an empty (independence) network and a naive-Bayes network --
   * and each CPT is set to a weighted average of the two, with weights derived
   * from the networks' marginal-likelihood scores.
   *
   * @param bayesNet the bayes net to use
   * @throws Exception if an error occurs (in particular when any node has
   * more than one parent, which this estimator cannot handle)
   */
  public void estimateCPTs(BayesNet bayesNet) throws Exception {
    initCPTs(bayesNet);
    Instances instances = bayesNet.m_Instances;

    // sanity check to see if nodes have not more than one parent
    for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
      if (bayesNet.getParentSet(iAttribute).getNrOfParents() > 1) {
        throw new Exception("Cannot handle networks with nodes with more than 1 parent (yet).");
      }
    }

    // model 1: empty network (K2 search, no parents allowed)
    BayesNet EmptyNet = new BayesNet();
    K2 oSearchAlgorithm = new K2();
    oSearchAlgorithm.setInitAsNaiveBayes(false);
    oSearchAlgorithm.setMaxNrOfParents(0);
    EmptyNet.setSearchAlgorithm(oSearchAlgorithm);
    EmptyNet.buildClassifier(instances);

    // model 2: naive-Bayes network (same search object, reconfigured)
    BayesNet NBNet = new BayesNet();
    oSearchAlgorithm.setInitAsNaiveBayes(true);
    oSearchAlgorithm.setMaxNrOfParents(1);
    NBNet.setSearchAlgorithm(oSearchAlgorithm);
    NBNet.buildClassifier(instances);

    // estimate CPTs: w1 accumulates the log-score of the empty network,
    // w2 that of the naive-Bayes network, per attribute
    for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
      if (iAttribute != instances.classIndex()) {
        double w1 = 0.0, w2 = 0.0;
        int nAttValues = instances.attribute(iAttribute).numValues();
        if (m_bUseK2Prior == true) {
          // use Cooper and Herskovitz's metric
          for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) {
            w1 += Statistics.lnGamma(1 + ((DiscreteEstimatorBayes) EmptyNet.m_Distributions[iAttribute][0]).getCount(iAttValue)) - Statistics.lnGamma(1);
          }
          w1 += Statistics.lnGamma(nAttValues) - Statistics.lnGamma(nAttValues + instances.numInstances());

          for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute).getCardinalityOfParents(); iParent++) {
            int nTotal = 0;
            for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) {
              double nCount = ((DiscreteEstimatorBayes) NBNet.m_Distributions[iAttribute][iParent]).getCount(iAttValue);
              w2 += Statistics.lnGamma(1 + nCount) - Statistics.lnGamma(1);
              // note: int += double narrows here; fractional counts are truncated
              nTotal += nCount;
            }
            w2 += Statistics.lnGamma(nAttValues) - Statistics.lnGamma(nAttValues + nTotal);
          }
        } else {
          // use BDe metric
          for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) {
            w1 += Statistics.lnGamma(1.0/nAttValues + ((DiscreteEstimatorBayes) EmptyNet.m_Distributions[iAttribute][0]).getCount(iAttValue)) - Statistics.lnGamma(1.0/nAttValues);
          }
          w1 += Statistics.lnGamma(1) - Statistics.lnGamma(1 + instances.numInstances());

          int nParentValues = bayesNet.getParentSet(iAttribute).getCardinalityOfParents();
          for (int iParent = 0; iParent < nParentValues; iParent++) {
            int nTotal = 0;
            for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) {
              double nCount = ((DiscreteEstimatorBayes) NBNet.m_Distributions[iAttribute][iParent]).getCount(iAttValue);
              w2 += Statistics.lnGamma(1.0/(nAttValues * nParentValues) + nCount) - Statistics.lnGamma(1.0/(nAttValues * nParentValues));
              nTotal += nCount;
            }
            w2 += Statistics.lnGamma(1) - Statistics.lnGamma(1 + nTotal);
          }
        }

        // System.out.println(w1 + " " + w2 + " " + (w2 - w1));
        // Turn the two log-scores into normalized model weights (w1 + w2 = 1)
        // by shifting the smaller score to zero and exponentiating.
        // NOTE(review): Math.exp of the positive difference can overflow to
        // infinity for very large score gaps, producing NaN weights; the
        // numerically stable form would exponentiate the negative difference
        // -- confirm before relying on this with large data sets.
        if (w1 < w2) {
          w2 = w2 - w1;
          w1 = 0;
          w1 = 1 / (1 + Math.exp(w2));
          w2 = Math.exp(w2) / (1 + Math.exp(w2));
        } else {
          w1 = w1 - w2;
          w2 = 0;
          w2 = 1 / (1 + Math.exp(w1));
          w1 = Math.exp(w1) / (1 + Math.exp(w1));
        }

        // replace each CPT by the weighted mixture of the two reference models
        for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute).getCardinalityOfParents(); iParent++) {
          bayesNet.m_Distributions[iAttribute][iParent] =
            new DiscreteEstimatorFullBayes(
              instances.attribute(iAttribute).numValues(),
              w1, w2,
              (DiscreteEstimatorBayes) EmptyNet.m_Distributions[iAttribute][0],
              (DiscreteEstimatorBayes) NBNet.m_Distributions[iAttribute][iParent],
              m_fAlpha
            );
        }
      }
    }
    // the class attribute keeps the empty network's (marginal) distribution
    int iAttribute = instances.classIndex();
    bayesNet.m_Distributions[iAttribute][0] = EmptyNet.m_Distributions[iAttribute][0];
  } // estimateCPTs

  /**
   * Updates the classifier with the given instance. Incremental updating is
   * not supported by this estimator.
   *
   * @param bayesNet the bayes net to use
   * @param instance the new training instance to include in the model
   * @throws Exception always, since BMA estimation is batch-only
   */
  public void updateClassifier(BayesNet bayesNet, Instance instance) throws Exception {
    throw new Exception("updateClassifier does not apply to BMA estimator");
  } // updateClassifier

  /**
   * initCPTs reserves space for CPTs and set all counts to zero
   *
   * @param bayesNet the bayes net to use
   * @throws Exception if something goes wrong
   */
  public void initCPTs(BayesNet bayesNet) throws Exception {
    // Reserve space for CPTs: second dimension is the largest parent
    // cardinality over all nodes
    int nMaxParentCardinality = 1;
    for (int iAttribute = 0; iAttribute < bayesNet.m_Instances.numAttributes(); iAttribute++) {
      if (bayesNet.getParentSet(iAttribute).getCardinalityOfParents() > nMaxParentCardinality) {
        nMaxParentCardinality = bayesNet.getParentSet(iAttribute).getCardinalityOfParents();
      }
    }
    // Reserve plenty of memory
    bayesNet.m_Distributions = new Estimator[bayesNet.m_Instances.numAttributes()][nMaxParentCardinality];
  } // initCPTs

  /**
   * Returns whether K2 prior is used
   *
   * @return true if K2 prior is used
   */
  public boolean isUseK2Prior() {
    return m_bUseK2Prior;
  }

  /**
   * Sets the UseK2Prior.
   *
   * @param bUseK2Prior The bUseK2Prior to set
   */
  public void setUseK2Prior(boolean bUseK2Prior) {
    m_bUseK2Prior = bUseK2Prior;
  }

  /**
   * Returns an enumeration describing the available options
   *
   * @return an enumeration of all the available options
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(1);

    newVector.addElement(new Option(
      "\tWhether to use K2 prior.\n",
      "k2", 0, "-k2"));

    // append the superclass options (e.g. -A alpha)
    Enumeration enu = super.listOptions();
    while (enu.hasMoreElements()) {
      newVector.addElement(enu.nextElement());
    }
    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -k2
   *  Whether to use K2 prior.
   * </pre>
   *
   * <pre> -A &lt;alpha&gt;
   *  Initial count (alpha)
   * </pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    setUseK2Prior(Utils.getFlag("k2", options));

    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] superOptions = super.getOptions();
    String[] options = new String[1 + superOptions.length];
    int current = 0;

    if (isUseK2Prior())
      options[current++] = "-k2";

    // insert options from parent class
    for (int iOption = 0; iOption < superOptions.length; iOption++) {
      options[current++] = superOptions[iOption];
    }

    // Fill up rest with empty strings, not nulls!
    while (current < options.length) {
      options[current++] = "";
    }
    return options;
  } // getOptions

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
} // class BMAEstimator
11,497
35.617834
152
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/estimate/BayesNetEstimator.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * BayesNetEstimator.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.estimate;

import java.io.Serializable;
import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.core.Instance;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * BayesNetEstimator is the base class for estimating the conditional probability tables of a Bayes network once the structure has been learned.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -A &lt;alpha&gt;
 *  Initial count (alpha)
 * </pre>
 *
 <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision: 8034 $
 */
public class BayesNetEstimator implements OptionHandler, Serializable, RevisionHandler {

  /** for serialization */
  static final long serialVersionUID = 2184330197666253884L;

  /**
   * Holds prior on count (the initial pseudo-count added to every cell of a
   * probability table)
   */
  protected double m_fAlpha = 0.5;

  /**
   * estimateCPTs estimates the conditional probability tables for the Bayes
   * Net using the network structure.
   *
   * @param bayesNet the bayes net to use
   * @throws Exception always throws an exception, since subclass needs to be used
   */
  public void estimateCPTs(BayesNet bayesNet) throws Exception {
    throw new Exception("Incorrect BayesNetEstimator: use subclass instead.");
  }

  /**
   * Updates the classifier with the given instance.
   *
   * @param bayesNet the bayes net to use
   * @param instance the new training instance to include in the model
   * @throws Exception always throws an exception, since subclass needs to be used
   */
  public void updateClassifier(BayesNet bayesNet, Instance instance) throws Exception {
    throw new Exception("Incorrect BayesNetEstimator: use subclass instead.");
  }

  /**
   * Calculates the class membership probabilities for the given test
   * instance.
   *
   * @param bayesNet the bayes net to use
   * @param instance the instance to be classified
   * @return predicted class probability distribution
   * @throws Exception always throws an exception, since subclass needs to be used
   */
  public double[] distributionForInstance(BayesNet bayesNet, Instance instance) throws Exception {
    throw new Exception("Incorrect BayesNetEstimator: use subclass instead.");
  }

  /**
   * initCPTs reserves space for CPTs and set all counts to zero
   *
   * @param bayesNet the bayes net to use
   * @throws Exception always throws an exception, since subclass needs to be used
   */
  public void initCPTs(BayesNet bayesNet) throws Exception {
    throw new Exception("Incorrect BayesNetEstimator: use subclass instead.");
  } // initCPTs

  /**
   * Returns an enumeration describing the available options
   *
   * @return an enumeration of all the available options
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(1);

    newVector.addElement(new Option("\tInitial count (alpha)\n", "A", 1, "-A <alpha>"));

    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -A &lt;alpha&gt;
   *  Initial count (alpha)
   * </pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String sAlpha = Utils.getOption('A', options);

    if (sAlpha.length() != 0) {
      // parse at float precision (preserves the historical behaviour of
      // new Float(sAlpha).floatValue()) without the deprecated boxing
      // constructor
      m_fAlpha = Float.parseFloat(sAlpha);
    } else {
      m_fAlpha = 0.5f;
    }

    Utils.checkForRemainingOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] options = new String[2];
    int current = 0;

    options[current++] = "-A";
    options[current++] = "" + m_fAlpha;

    return options;
  } // getOptions

  /**
   * Set prior used in probability table estimation
   * @param fAlpha representing prior
   */
  public void setAlpha(double fAlpha) {
    m_fAlpha = fAlpha;
  }

  /**
   * Get prior used in probability table estimation
   * @return prior
   */
  public double getAlpha() {
    return m_fAlpha;
  }

  /**
   * @return a string to describe the Alpha option.
   */
  public String alphaTipText() {
    return "Alpha is used for estimating the probability tables and can be interpreted" + " as the initial count on each value.";
  }

  /**
   * This will return a string describing the class.
   * @return The string.
   */
  public String globalInfo() {
    return "BayesNetEstimator is the base class for estimating the " + "conditional probability tables of a Bayes network once the " + "structure has been learned.";
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
} // BayesNetEstimator
6,279
28.763033
144
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/estimate/DiscreteEstimatorBayes.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * DiscreteEstimatorBayes.java * Adapted from DiscreteEstimator.java * Copyright (C) 2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.estimate; import weka.classifiers.bayes.net.search.local.Scoreable; import weka.core.RevisionUtils; import weka.core.Statistics; import weka.core.Utils; import weka.estimators.DiscreteEstimator; import weka.estimators.Estimator; /** * Symbolic probability estimator based on symbol counts and a prior. 
* * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision: 8034 $ */ public class DiscreteEstimatorBayes extends Estimator implements Scoreable { /** for serialization */ static final long serialVersionUID = 4215400230843212684L; /** * Hold the counts */ protected double[] m_Counts; /** * Hold the sum of counts */ protected double m_SumOfCounts; /** * Holds number of symbols in distribution */ protected int m_nSymbols = 0; /** * Holds the prior probability */ protected double m_fPrior = 0.0; /** * Constructor * * @param nSymbols the number of possible symbols (remember to include 0) * @param fPrior */ public DiscreteEstimatorBayes(int nSymbols, double fPrior) { m_fPrior = fPrior; m_nSymbols = nSymbols; m_Counts = new double[m_nSymbols]; for (int iSymbol = 0; iSymbol < m_nSymbols; iSymbol++) { m_Counts[iSymbol] = m_fPrior; } m_SumOfCounts = m_fPrior * (double) m_nSymbols; } // DiscreteEstimatorBayes /** * Add a new data value to the current estimator. * * @param data the new data value * @param weight the weight assigned to the data value */ public void addValue(double data, double weight) { m_Counts[(int) data] += weight; m_SumOfCounts += weight; } /** * Get a probability estimate for a value * * @param data the value to estimate the probability of * @return the estimated probability of the supplied value */ public double getProbability(double data) { if (m_SumOfCounts == 0) { // this can only happen if numSymbols = 0 in constructor return 0; } return (double) m_Counts[(int) data] / m_SumOfCounts; } /** * Get a counts for a value * * @param data the value to get the counts for * @return the count of the supplied value */ public double getCount(double data) { if (m_SumOfCounts == 0) { // this can only happen if numSymbols = 0 in constructor return 0; } return m_Counts[(int) data]; } /** * Gets the number of symbols this estimator operates with * * @return the number of estimator symbols */ public int getNumSymbols() { return (m_Counts == null) ? 
0 : m_Counts.length; } /** * Gets the log score contribution of this distribution * @param nType score type * @return the score */ public double logScore(int nType, int nCardinality) { double fScore = 0.0; switch (nType) { case (Scoreable.BAYES): { for (int iSymbol = 0; iSymbol < m_nSymbols; iSymbol++) { fScore += Statistics.lnGamma(m_Counts[iSymbol]); } fScore -= Statistics.lnGamma(m_SumOfCounts); if (m_fPrior != 0.0) { fScore -= m_nSymbols * Statistics.lnGamma(m_fPrior); fScore += Statistics.lnGamma(m_nSymbols * m_fPrior); } } break; case (Scoreable.BDeu): { for (int iSymbol = 0; iSymbol < m_nSymbols; iSymbol++) { fScore += Statistics.lnGamma(m_Counts[iSymbol]); } fScore -= Statistics.lnGamma(m_SumOfCounts); //fScore -= m_nSymbols * Statistics.lnGamma(1.0); //fScore += Statistics.lnGamma(m_nSymbols * 1.0); fScore -= m_nSymbols * Statistics.lnGamma(1.0/(m_nSymbols * nCardinality)); fScore += Statistics.lnGamma(1.0/nCardinality); } break; case (Scoreable.MDL): case (Scoreable.AIC): case (Scoreable.ENTROPY): { for (int iSymbol = 0; iSymbol < m_nSymbols; iSymbol++) { double fP = getProbability(iSymbol); fScore += m_Counts[iSymbol] * Math.log(fP); } } break; default: {} } return fScore; } /** * Display a representation of this estimator * * @return a string representation of the estimator */ public String toString() { String result = "Discrete Estimator. Counts = "; if (m_SumOfCounts > 1) { for (int i = 0; i < m_Counts.length; i++) { result += " " + Utils.doubleToString(m_Counts[i], 2); } result += " (Total = " + Utils.doubleToString(m_SumOfCounts, 2) + ")\n"; } else { for (int i = 0; i < m_Counts.length; i++) { result += " " + m_Counts[i]; } result += " (Total = " + m_SumOfCounts + ")\n"; } return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method for testing this class. 
* * @param argv should contain a sequence of integers which * will be treated as symbolic. */ public static void main(String[] argv) { try { if (argv.length == 0) { System.out.println("Please specify a set of instances."); return; } int current = Integer.parseInt(argv[0]); int max = current; for (int i = 1; i < argv.length; i++) { current = Integer.parseInt(argv[i]); if (current > max) { max = current; } } DiscreteEstimator newEst = new DiscreteEstimator(max + 1, true); for (int i = 0; i < argv.length; i++) { current = Integer.parseInt(argv[i]); System.out.println(newEst); System.out.println("Prediction for " + current + " = " + newEst.getProbability(current)); newEst.addValue(current, 1); } } catch (Exception e) { System.out.println(e.getMessage()); } } // main } // class DiscreteEstimatorBayes
6,645
24.174242
82
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/estimate/DiscreteEstimatorFullBayes.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * DiscreteEstimatorFullBayes.java * Copyright (C) 2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.estimate; import weka.core.RevisionUtils; import weka.estimators.DiscreteEstimator; /** * Symbolic probability estimator based on symbol counts and a prior. * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision: 8034 $ */ public class DiscreteEstimatorFullBayes extends DiscreteEstimatorBayes { /** for serialization */ static final long serialVersionUID = 6774941981423312133L; /** * Constructor * * @param nSymbols the number of possible symbols (remember to include 0) * @param w1 * @param w2 * @param EmptyDist * @param ClassDist * @param fPrior */ public DiscreteEstimatorFullBayes(int nSymbols, double w1, double w2, DiscreteEstimatorBayes EmptyDist, DiscreteEstimatorBayes ClassDist, double fPrior) { super(nSymbols, fPrior); m_SumOfCounts = 0.0; for (int iSymbol = 0; iSymbol < m_nSymbols; iSymbol++) { double p1 = EmptyDist.getProbability(iSymbol); double p2 = ClassDist.getProbability(iSymbol); m_Counts[iSymbol] = w1 * p1 + w2 * p2; m_SumOfCounts += m_Counts[iSymbol]; } } // DiscreteEstimatorFullBayes /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method for testing this class. 
* * @param argv should contain a sequence of integers which * will be treated as symbolic. */ public static void main(String[] argv) { try { if (argv.length == 0) { System.out.println("Please specify a set of instances."); return; } int current = Integer.parseInt(argv[0]); int max = current; for (int i = 1; i < argv.length; i++) { current = Integer.parseInt(argv[i]); if (current > max) { max = current; } } DiscreteEstimator newEst = new DiscreteEstimator(max + 1, true); for (int i = 0; i < argv.length; i++) { current = Integer.parseInt(argv[i]); System.out.println(newEst); System.out.println("Prediction for " + current + " = " + newEst.getProbability(current)); newEst.addValue(current, 1); } } catch (Exception e) { System.out.println(e.getMessage()); } } // main } // class DiscreteEstimatorFullBayes
3,126
25.277311
75
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/estimate/MultiNomialBMAEstimator.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * MultiNomialBMAEstimator.java
 * Copyright (C) 2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.estimate;

import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.search.local.K2;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Statistics;
import weka.core.Utils;
import weka.estimators.Estimator;

/**
 <!-- globalinfo-start -->
 * Multinomial BMA Estimator.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -k2
 *  Whether to use K2 prior.
 * </pre>
 *
 * <pre> -A &lt;alpha&gt;
 *  Initial count (alpha)
 * </pre>
 *
 <!-- options-end -->
 *
 * @version $Revision: 8034 $
 * @author Remco Bouckaert (rrb@xm.co.nz)
 */
public class MultiNomialBMAEstimator extends BayesNetEstimator {

  /** for serialization */
  static final long serialVersionUID = 8330705772601586313L;

  /** whether to use K2 prior */
  protected boolean m_bUseK2Prior = true;

  /**
   * Returns a string describing this object
   * @return a description of the classifier suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "Multinomial BMA Estimator.";
  }

  /**
   * estimateCPTs estimates the conditional probability tables for the Bayes
   * Net using the network structure. The data is first binarized (non-class
   * attribute values become 0/1 presence indicators), then an empty network
   * and a naive-Bayes network are trained on the binarized data and each CPT
   * is replaced by a score-weighted mixture of the two.
   *
   * @param bayesNet the bayes net to use
   * @throws Exception if number of parents doesn't fit (more than 1)
   */
  public void estimateCPTs(BayesNet bayesNet) throws Exception {
    initCPTs(bayesNet);

    // sanity check to see if nodes have not more than one parent
    for (int iAttribute = 0; iAttribute < bayesNet.m_Instances.numAttributes(); iAttribute++) {
      if (bayesNet.getParentSet(iAttribute).getNrOfParents() > 1) {
        throw new Exception("Cannot handle networks with nodes with more than 1 parent (yet).");
      }
    }

    // filter data to binary: copy the header, drop all rows, and replace
    // every non-class attribute by a nominal {0,1} attribute
    Instances instances = new Instances(bayesNet.m_Instances);
    while (instances.numInstances() > 0) {
      instances.delete(0);
    }
    for (int iAttribute = instances.numAttributes() - 1; iAttribute >= 0; iAttribute--) {
      if (iAttribute != instances.classIndex()) {
        FastVector values = new FastVector();
        values.addElement("0");
        values.addElement("1");
        Attribute a = new Attribute(instances.attribute(iAttribute).name(), (FastVector) values);
        instances.deleteAttributeAt(iAttribute);
        instances.insertAttributeAt(a, iAttribute);
      }
    }

    for (int iInstance = 0; iInstance < bayesNet.m_Instances.numInstances(); iInstance++) {
      Instance instanceOrig = bayesNet.m_Instances.instance(iInstance);
      Instance instance = new DenseInstance(instances.numAttributes());
      for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
        if (iAttribute != instances.classIndex()) {
          if (instanceOrig.value(iAttribute) > 0) {
            instance.setValue(iAttribute, 1);
          } else {
            // BUG FIX: DenseInstance(int) initializes all values as missing,
            // so zero counts must be written explicitly to obtain a genuine
            // 0/1 value instead of a missing value.
            instance.setValue(iAttribute, 0);
          }
        } else {
          instance.setValue(iAttribute, instanceOrig.value(iAttribute));
        }
      }
      // BUG FIX: the binarized instance was constructed but never added,
      // which left 'instances' empty and trained both networks below on no
      // data at all.
      instances.add(instance);
    }

    // ok, now all data is binary, except the class attribute
    // now learn the empty and tree network

    BayesNet EmptyNet = new BayesNet();
    K2 oSearchAlgorithm = new K2();
    oSearchAlgorithm.setInitAsNaiveBayes(false);
    oSearchAlgorithm.setMaxNrOfParents(0);
    EmptyNet.setSearchAlgorithm(oSearchAlgorithm);
    EmptyNet.buildClassifier(instances);

    BayesNet NBNet = new BayesNet();
    oSearchAlgorithm.setInitAsNaiveBayes(true);
    oSearchAlgorithm.setMaxNrOfParents(1);
    NBNet.setSearchAlgorithm(oSearchAlgorithm);
    NBNet.buildClassifier(instances);

    // estimate CPTs: w1 accumulates the log-score of the empty network,
    // w2 that of the naive-Bayes network, per attribute
    for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
      if (iAttribute != instances.classIndex()) {
        double w1 = 0.0, w2 = 0.0;
        int nAttValues = instances.attribute(iAttribute).numValues();
        if (m_bUseK2Prior == true) {
          // use Cooper and Herskovitz's metric
          for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) {
            w1 += Statistics.lnGamma(1 + ((DiscreteEstimatorBayes) EmptyNet.m_Distributions[iAttribute][0]).getCount(iAttValue)) - Statistics.lnGamma(1);
          }
          w1 += Statistics.lnGamma(nAttValues) - Statistics.lnGamma(nAttValues + instances.numInstances());

          for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute).getCardinalityOfParents(); iParent++) {
            int nTotal = 0;
            for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) {
              double nCount = ((DiscreteEstimatorBayes) NBNet.m_Distributions[iAttribute][iParent]).getCount(iAttValue);
              w2 += Statistics.lnGamma(1 + nCount) - Statistics.lnGamma(1);
              nTotal += nCount;
            }
            w2 += Statistics.lnGamma(nAttValues) - Statistics.lnGamma(nAttValues + nTotal);
          }
        } else {
          // use BDe metric
          for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) {
            w1 += Statistics.lnGamma(1.0/nAttValues + ((DiscreteEstimatorBayes) EmptyNet.m_Distributions[iAttribute][0]).getCount(iAttValue)) - Statistics.lnGamma(1.0/nAttValues);
          }
          w1 += Statistics.lnGamma(1) - Statistics.lnGamma(1 + instances.numInstances());

          int nParentValues = bayesNet.getParentSet(iAttribute).getCardinalityOfParents();
          for (int iParent = 0; iParent < nParentValues; iParent++) {
            int nTotal = 0;
            for (int iAttValue = 0; iAttValue < nAttValues; iAttValue++) {
              double nCount = ((DiscreteEstimatorBayes) NBNet.m_Distributions[iAttribute][iParent]).getCount(iAttValue);
              w2 += Statistics.lnGamma(1.0/(nAttValues * nParentValues) + nCount) - Statistics.lnGamma(1.0/(nAttValues * nParentValues));
              nTotal += nCount;
            }
            w2 += Statistics.lnGamma(1) - Statistics.lnGamma(1 + nTotal);
          }
        }

        // System.out.println(w1 + " " + w2 + " " + (w2 - w1));
        // normalize weights: shift the smaller log-score to zero and
        // exponentiate so that w1 + w2 = 1
        if (w1 < w2) {
          w2 = w2 - w1;
          w1 = 0;
          w1 = 1 / (1 + Math.exp(w2));
          w2 = Math.exp(w2) / (1 + Math.exp(w2));
        } else {
          w1 = w1 - w2;
          w2 = 0;
          w2 = 1 / (1 + Math.exp(w1));
          w1 = Math.exp(w1) / (1 + Math.exp(w1));
        }

        for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute).getCardinalityOfParents(); iParent++) {
          bayesNet.m_Distributions[iAttribute][iParent] =
            new DiscreteEstimatorFullBayes(
              instances.attribute(iAttribute).numValues(),
              w1, w2,
              (DiscreteEstimatorBayes) EmptyNet.m_Distributions[iAttribute][0],
              (DiscreteEstimatorBayes) NBNet.m_Distributions[iAttribute][iParent],
              m_fAlpha
            );
        }
      }
    }
    // the class attribute keeps the empty network's (marginal) distribution
    int iAttribute = instances.classIndex();
    bayesNet.m_Distributions[iAttribute][0] = EmptyNet.m_Distributions[iAttribute][0];
  } // estimateCPTs

  /**
   * Updates the classifier with the given instance. Incremental updating is
   * not supported by this estimator.
   *
   * @param bayesNet the bayes net to use
   * @param instance the new training instance to include in the model
   * @throws Exception always, since BMA estimation is batch-only
   */
  public void updateClassifier(BayesNet bayesNet, Instance instance) throws Exception {
    throw new Exception("updateClassifier does not apply to BMA estimator");
  } // updateClassifier

  /**
   * initCPTs reserves space for CPTs and set all counts to zero
   *
   * @param bayesNet the bayes net to use
   * @throws Exception doesn't apply
   */
  public void initCPTs(BayesNet bayesNet) throws Exception {
    // Reserve sufficient memory: at most one (binary) parent per node, so a
    // parent cardinality of 2 suffices
    bayesNet.m_Distributions = new Estimator[bayesNet.m_Instances.numAttributes()][2];
  } // initCPTs

  /**
   * Returns whether the K2 prior is used.
   *
   * @return true if the K2 prior is used, false for the BDe prior
   */
  public boolean isUseK2Prior() {
    return m_bUseK2Prior;
  }

  /**
   * Sets the UseK2Prior.
   *
   * @param bUseK2Prior The bUseK2Prior to set
   */
  public void setUseK2Prior(boolean bUseK2Prior) {
    m_bUseK2Prior = bUseK2Prior;
  }

  /**
   * Calculates the class membership probabilities for the given test
   * instance.
   *
   * @param bayesNet the bayes net to use
   * @param instance the instance to be classified
   * @return predicted class probability distribution
   * @throws Exception if there is a problem generating the prediction
   */
  public double[] distributionForInstance(BayesNet bayesNet, Instance instance) throws Exception {
    Instances instances = bayesNet.m_Instances;
    int nNumClasses = instances.numClasses();
    double[] fProbs = new double[nNumClasses];

    // the constant 1.0 offset is shared by every class, so it cancels after
    // normalization below
    for (int iClass = 0; iClass < nNumClasses; iClass++) {
      fProbs[iClass] = 1.0;
    }

    for (int iClass = 0; iClass < nNumClasses; iClass++) {
      double logfP = 0;

      for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
        // locate the CPT column for this attribute given its parent values
        double iCPT = 0;

        for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute).getNrOfParents(); iParent++) {
          int nParent = bayesNet.getParentSet(iAttribute).getParent(iParent);

          if (nParent == instances.classIndex()) {
            iCPT = iCPT * nNumClasses + iClass;
          } else {
            iCPT = iCPT * instances.attribute(nParent).numValues() + instance.value(nParent);
          }
        }

        if (iAttribute == instances.classIndex()) {
          logfP += Math.log(bayesNet.m_Distributions[iAttribute][(int) iCPT].getProbability(iClass));
        } else {
          // NOTE(review): 'instance.value(1)' looks suspicious -- for a
          // multinomial model one would expect the probability of this
          // attribute's own value (or of value 1, "present"), not of
          // attribute index 1. Left unchanged; confirm against the intended
          // model before altering.
          logfP += instance.value(iAttribute) * Math.log(
            bayesNet.m_Distributions[iAttribute][(int) iCPT].getProbability(instance.value(1)));
        }
      }

      fProbs[iClass] += logfP;
    }

    // Find maximum
    double fMax = fProbs[0];
    for (int iClass = 0; iClass < nNumClasses; iClass++) {
      if (fProbs[iClass] > fMax) {
        fMax = fProbs[iClass];
      }
    }

    // transform from log-space to normal-space
    for (int iClass = 0; iClass < nNumClasses; iClass++) {
      fProbs[iClass] = Math.exp(fProbs[iClass] - fMax);
    }

    // normalize the probabilities so they sum to one
    Utils.normalize(fProbs);

    return fProbs;
  } // distributionForInstance

  /**
   * Returns an enumeration describing the available options
   *
   * @return an enumeration of all the available options
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(1);

    newVector.addElement(new Option(
      "\tWhether to use K2 prior.\n",
      "k2", 0, "-k2"));

    // append the superclass options (e.g. -A alpha)
    Enumeration enu = super.listOptions();
    while (enu.hasMoreElements()) {
      newVector.addElement(enu.nextElement());
    }
    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -k2
   *  Whether to use K2 prior.
   * </pre>
   *
   * <pre> -A &lt;alpha&gt;
   *  Initial count (alpha)
   * </pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    setUseK2Prior(Utils.getFlag("k2", options));

    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] superOptions = super.getOptions();
    String[] options = new String[1 + superOptions.length];
    int current = 0;

    if (isUseK2Prior())
      options[current++] = "-k2";

    // insert options from parent class
    for (int iOption = 0; iOption < superOptions.length; iOption++) {
      options[current++] = superOptions[iOption];
    }

    // Fill up rest with empty strings, not nulls!
    while (current < options.length) {
      options[current++] = "";
    }
    return options;
  } // getOptions

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
} // class MultiNomialBMAEstimator
14,911
36.28
152
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/estimate/SimpleEstimator.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * BayesNet.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.estimate; import java.util.Enumeration; import weka.classifiers.bayes.BayesNet; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; import weka.estimators.Estimator; /** <!-- globalinfo-start --> * SimpleEstimator is used for estimating the conditional probability tables of a Bayes network once the structure has been learned. Estimates probabilities directly from data. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -A &lt;alpha&gt; * Initial count (alpha) * </pre> * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision: 8034 $ */ public class SimpleEstimator extends BayesNetEstimator { /** for serialization */ static final long serialVersionUID = 5874941612331806172L; /** * Returns a string describing this object * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "SimpleEstimator is used for estimating the conditional probability " + "tables of a Bayes network once the structure has been learned. 
" + "Estimates probabilities directly from data."; } /** * estimateCPTs estimates the conditional probability tables for the Bayes * Net using the network structure. * * @param bayesNet the bayes net to use * @throws Exception if something goes wrong */ public void estimateCPTs(BayesNet bayesNet) throws Exception { initCPTs(bayesNet); // Compute counts Enumeration enumInsts = bayesNet.m_Instances.enumerateInstances(); while (enumInsts.hasMoreElements()) { Instance instance = (Instance) enumInsts.nextElement(); updateClassifier(bayesNet, instance); } } // estimateCPTs /** * Updates the classifier with the given instance. * * @param bayesNet the bayes net to use * @param instance the new training instance to include in the model * @throws Exception if the instance could not be incorporated in * the model. */ public void updateClassifier(BayesNet bayesNet, Instance instance) throws Exception { for (int iAttribute = 0; iAttribute < bayesNet.m_Instances.numAttributes(); iAttribute++) { double iCPT = 0; for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute).getNrOfParents(); iParent++) { int nParent = bayesNet.getParentSet(iAttribute).getParent(iParent); iCPT = iCPT * bayesNet.m_Instances.attribute(nParent).numValues() + instance.value(nParent); } bayesNet.m_Distributions[iAttribute][(int) iCPT].addValue(instance.value(iAttribute), instance.weight()); } } // updateClassifier /** * initCPTs reserves space for CPTs and set all counts to zero * * @param bayesNet the bayes net to use * @throws Exception if something goes wrong */ public void initCPTs(BayesNet bayesNet) throws Exception { Instances instances = bayesNet.m_Instances; // Reserve space for CPTs int nMaxParentCardinality = 1; for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { if (bayesNet.getParentSet(iAttribute).getCardinalityOfParents() > nMaxParentCardinality) { nMaxParentCardinality = bayesNet.getParentSet(iAttribute).getCardinalityOfParents(); } } // Reserve plenty of 
memory bayesNet.m_Distributions = new Estimator[instances.numAttributes()][nMaxParentCardinality]; // estimate CPTs for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute).getCardinalityOfParents(); iParent++) { bayesNet.m_Distributions[iAttribute][iParent] = new DiscreteEstimatorBayes(instances.attribute(iAttribute).numValues(), m_fAlpha); } } } // initCPTs /** * Calculates the class membership probabilities for the given test * instance. * * @param bayesNet the bayes net to use * @param instance the instance to be classified * @return predicted class probability distribution * @throws Exception if there is a problem generating the prediction */ public double[] distributionForInstance(BayesNet bayesNet, Instance instance) throws Exception { Instances instances = bayesNet.m_Instances; int nNumClasses = instances.numClasses(); double[] fProbs = new double[nNumClasses]; for (int iClass = 0; iClass < nNumClasses; iClass++) { fProbs[iClass] = 1.0; } for (int iClass = 0; iClass < nNumClasses; iClass++) { double logfP = 0; for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { double iCPT = 0; for (int iParent = 0; iParent < bayesNet.getParentSet(iAttribute).getNrOfParents(); iParent++) { int nParent = bayesNet.getParentSet(iAttribute).getParent(iParent); if (nParent == instances.classIndex()) { iCPT = iCPT * nNumClasses + iClass; } else { iCPT = iCPT * instances.attribute(nParent).numValues() + instance.value(nParent); } } if (iAttribute == instances.classIndex()) { // fP *= // m_Distributions[iAttribute][(int) iCPT].getProbability(iClass); logfP += Math.log(bayesNet.m_Distributions[iAttribute][(int) iCPT].getProbability(iClass)); } else { // fP *= // m_Distributions[iAttribute][(int) iCPT] // .getProbability(instance.value(iAttribute)); logfP += Math.log(bayesNet.m_Distributions[iAttribute][(int) iCPT].getProbability(instance.value(iAttribute))); } } // 
fProbs[iClass] *= fP; fProbs[iClass] += logfP; } // Find maximum double fMax = fProbs[0]; for (int iClass = 0; iClass < nNumClasses; iClass++) { if (fProbs[iClass] > fMax) { fMax = fProbs[iClass]; } } // transform from log-space to normal-space for (int iClass = 0; iClass < nNumClasses; iClass++) { fProbs[iClass] = Math.exp(fProbs[iClass] - fMax); } // Display probabilities Utils.normalize(fProbs); return fProbs; } // distributionForInstance /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // SimpleEstimator
7,968
35.388128
176
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/SearchAlgorithm.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * SearchAlgorithm.java
 * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search;

import java.io.Serializable;
import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.BIFReader;
import weka.classifiers.bayes.net.ParentSet;
import weka.core.Instances;
import weka.core.OptionHandler;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

/**
 * This is the base class for all search algorithms for learning Bayes
 * networks. It contains some common code, used by other network structure
 * search algorithms, and should not be used by itself.
 *
 * <!-- options-start --> <!-- options-end -->
 *
 * @author Remco Bouckaert
 * @version $Revision: 8034 $
 */
public class SearchAlgorithm implements OptionHandler, Serializable, RevisionHandler {

  /** for serialization */
  static final long serialVersionUID = 6164792240778525312L;

  /**
   * Holds upper bound on number of parents
   */
  protected int m_nMaxNrOfParents = 1;

  /**
   * determines whether initial structure is an empty graph or a Naive Bayes
   * network
   */
  protected boolean m_bInitAsNaiveBayes = true;

  /**
   * Determines whether after structure is found a MarkovBlanketClassifier
   * correction should be applied. If this is true, m_bInitAsNaiveBayes is
   * overridden and interpreted as false.
   */
  protected boolean m_bMarkovBlanketClassifier = false;

  /**
   * File name containing initial network structure. This can be used as
   * starting point for structure search. It will be ignored if not speficied.
   * When specified, it overrides the InitAsNaivBayes flag.
   */
  protected String m_sInitalBIFFile;

  /** c'tor **/
  public SearchAlgorithm() {
  } // SearchAlgorithm

  /**
   * AddArcMakesSense checks whether adding the arc from iAttributeTail to
   * iAttributeHead does not already exists and does not introduce a cycle
   *
   * @param bayesNet
   * @param instances
   * @param iAttributeHead index of the attribute that becomes head of the arrow
   * @param iAttributeTail index of the attribute that becomes tail of the arrow
   * @return true if adding arc is allowed, otherwise false
   */
  protected boolean addArcMakesSense(
    BayesNet bayesNet,
    Instances instances,
    int iAttributeHead,
    int iAttributeTail) {
    if (iAttributeHead == iAttributeTail) {
      return false;
    }

    // sanity check: arc should not be in parent set already
    if (isArc(bayesNet, iAttributeHead, iAttributeTail)) {
      return false;
    }

    // sanity check: arc should not introduce a cycle
    int nNodes = instances.numAttributes();
    boolean[] bDone = new boolean[nNodes];

    for (int iNode = 0; iNode < nNodes; iNode++) {
      bDone[iNode] = false;
    }

    // check for cycles: tentatively add the arc, then try to topologically
    // sort the graph; failure to make progress means a cycle exists.
    bayesNet.getParentSet(iAttributeHead).addParent(iAttributeTail, instances);

    for (int iNode = 0; iNode < nNodes; iNode++) {

      // find a node for which all parents are 'done'
      boolean bFound = false;

      for (int iNode2 = 0; !bFound && iNode2 < nNodes; iNode2++) {
        if (!bDone[iNode2]) {
          boolean bHasNoParents = true;

          for (int iParent = 0; iParent < bayesNet.getParentSet(iNode2).getNrOfParents(); iParent++) {
            if (!bDone[bayesNet.getParentSet(iNode2).getParent(iParent)]) {
              bHasNoParents = false;
            }
          }

          if (bHasNoParents) {
            bDone[iNode2] = true;
            bFound = true;
          }
        }
      }

      if (!bFound) {
        // no progress possible => cycle; undo the tentative arc
        bayesNet.getParentSet(iAttributeHead).deleteLastParent(instances);
        return false;
      }
    }

    bayesNet.getParentSet(iAttributeHead).deleteLastParent(instances);
    return true;
  } // AddArcMakesCycle

  /**
   * reverseArcMakesSense checks whether the arc from iAttributeTail to
   * iAttributeHead exists and reversing does not introduce a cycle
   *
   * @param bayesNet
   * @param instances
   * @param iAttributeHead index of the attribute that is head of the arrow
   * @param iAttributeTail index of the attribute that is tail of the arrow
   * @return true if the arc from iAttributeTail to iAttributeHead exists and
   *         reversing does not introduce a cycle
   */
  protected boolean reverseArcMakesSense(
    BayesNet bayesNet,
    Instances instances,
    int iAttributeHead,
    int iAttributeTail) {

    if (iAttributeHead == iAttributeTail) {
      return false;
    }

    // sanity check: arc should be in parent set already
    if (!isArc(bayesNet, iAttributeHead, iAttributeTail)) {
      return false;
    }

    // sanity check: arc should not introduce a cycle
    int nNodes = instances.numAttributes();
    boolean[] bDone = new boolean[nNodes];

    for (int iNode = 0; iNode < nNodes; iNode++) {
      bDone[iNode] = false;
    }

    // check for cycles: tentatively add the reversed arc; the existing
    // head->tail arc is skipped below as if already removed.
    bayesNet.getParentSet(iAttributeTail).addParent(iAttributeHead, instances);

    for (int iNode = 0; iNode < nNodes; iNode++) {

      // find a node for which all parents are 'done'
      boolean bFound = false;

      for (int iNode2 = 0; !bFound && iNode2 < nNodes; iNode2++) {
        if (!bDone[iNode2]) {
          ParentSet parentSet = bayesNet.getParentSet(iNode2);
          boolean bHasNoParents = true;
          for (int iParent = 0; iParent < parentSet.getNrOfParents(); iParent++) {
            if (!bDone[parentSet.getParent(iParent)]) {

              // this one has a parent which is not 'done' UNLESS it is the
              // arc to be reversed
              if (!(iNode2 == iAttributeHead && parentSet.getParent(iParent) == iAttributeTail)) {
                bHasNoParents = false;
              }
            }
          }

          if (bHasNoParents) {
            bDone[iNode2] = true;
            bFound = true;
          }
        }
      }

      if (!bFound) {
        bayesNet.getParentSet(iAttributeTail).deleteLastParent(instances);
        return false;
      }
    }

    bayesNet.getParentSet(iAttributeTail).deleteLastParent(instances);
    return true;
  } // ReverseArcMakesCycle

  /**
   * IsArc checks whether the arc from iAttributeTail to iAttributeHead
   * already exists
   *
   * @param bayesNet
   * @param iAttributeHead index of the attribute that becomes head of the arrow
   * @param iAttributeTail index of the attribute that becomes tail of the arrow
   * @return true if the arc from iAttributeTail to iAttributeHead already exists
   */
  protected boolean isArc(BayesNet bayesNet, int iAttributeHead, int iAttributeTail) {
    for (int iParent = 0; iParent < bayesNet.getParentSet(iAttributeHead).getNrOfParents(); iParent++) {
      if (bayesNet.getParentSet(iAttributeHead).getParent(iParent) == iAttributeTail) {
        return true;
      }
    }
    return false;
  } // IsArc

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    return new Vector(0).elements();
  } // listOption

  /**
   * Parses a given list of options. <p/>
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
  } // setOptions

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    return new String[0];
  } // getOptions

  /**
   * a string representation of the algorithm
   *
   * @return a string representation
   */
  public String toString() {
    return "SearchAlgorithm\n";
  } // toString

  /**
   * buildStructure determines the network structure/graph of the network.
   * The default behavior is creating a network where all nodes have the first
   * node as its parent (i.e., a BayesNet that behaves like a naive Bayes
   * classifier). This method can be overridden by derived classes to restrict
   * the class of network structures that are acceptable.
   *
   * @param bayesNet the network
   * @param instances the data to use
   * @throws Exception if something goes wrong
   */
  public void buildStructure(BayesNet bayesNet, Instances instances) throws Exception {
    if (m_sInitalBIFFile != null && !m_sInitalBIFFile.equals("")) {
      BIFReader initialNet = new BIFReader().processFile(m_sInitalBIFFile);
      for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
        int iNode = initialNet.getNode(bayesNet.getNodeName(iAttribute));
        // NOTE(review): getNrOfParents is queried with iAttribute while
        // getParent uses iNode — looks inconsistent if attribute order
        // differs from the BIF node order; confirm against BIFReader.
        for (int iParent = 0; iParent < initialNet.getNrOfParents(iAttribute); iParent++) {
          String sParent = initialNet.getNodeName(initialNet.getParent(iNode, iParent));
          int nParent = 0;
          while (nParent < bayesNet.getNrOfNodes() && !bayesNet.getNodeName(nParent).equals(sParent)) {
            nParent++;
          }
          if (nParent < bayesNet.getNrOfNodes()) {
            bayesNet.getParentSet(iAttribute).addParent(nParent, instances);
          } else {
            System.err.println("Warning: Node " + sParent + " is ignored. It is found in initial network but not in data set.");
          }
        }
      }
    } else if (m_bInitAsNaiveBayes) {
      int iClass = instances.classIndex();
      // initialize parent sets to have arrow from classifier node to
      // each of the other nodes
      for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
        if (iAttribute != iClass) {
          bayesNet.getParentSet(iAttribute).addParent(iClass, instances);
        }
      }
    }
    search(bayesNet, instances);
    if (m_bMarkovBlanketClassifier) {
      doMarkovBlanketCorrection(bayesNet, instances);
    }
  } // buildStructure

  /**
   * placeholder with implementation in derived classes
   *
   * @param bayesNet
   * @param instances
   */
  protected void search(BayesNet bayesNet, Instances instances) throws Exception {
    // placeholder with implementation in derived classes
  } // search

  /**
   * for each node in the network make sure it is in the
   * Markov blanket of the classifier node, and if not,
   * add arrows so that it is. If the node is an ancestor
   * of the classifier node, add arrow pointing to the classifier
   * node, otherwise, add arrow pointing to attribute node.
   *
   * @param bayesNet
   * @param instances
   */
  protected void doMarkovBlanketCorrection(BayesNet bayesNet, Instances instances) {
    // Add class node as parent if it is not in the Markov Boundary

    int iClass = instances.classIndex();
    // transitive closure: collect all ancestors of the class node
    ParentSet ancestors = new ParentSet();
    int nOldSize = 0;
    ancestors.addParent(iClass, instances);
    while (nOldSize != ancestors.getNrOfParents()) {
      nOldSize = ancestors.getNrOfParents();
      for (int iNode = 0; iNode < nOldSize; iNode++) {
        int iCurrent = ancestors.getParent(iNode);
        ParentSet p = bayesNet.getParentSet(iCurrent);
        for (int iParent = 0; iParent < p.getNrOfParents(); iParent++) {
          if (!ancestors.contains(p.getParent(iParent))) {
            ancestors.addParent(p.getParent(iParent), instances);
          }
        }
      }
    }

    for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
      // in the Markov boundary iff it is the class, a parent/child of the
      // class, or shares a child with the class
      boolean bIsInMarkovBoundary = (iAttribute == iClass)
        || bayesNet.getParentSet(iAttribute).contains(iClass)
        || bayesNet.getParentSet(iClass).contains(iAttribute);
      for (int iAttribute2 = 0; !bIsInMarkovBoundary && iAttribute2 < instances.numAttributes(); iAttribute2++) {
        bIsInMarkovBoundary = bayesNet.getParentSet(iAttribute2).contains(iAttribute)
          && bayesNet.getParentSet(iAttribute2).contains(iClass);
      }
      if (!bIsInMarkovBoundary) {
        if (ancestors.contains(iAttribute)) {
          // adding class->attribute would create a cycle, so point the arc
          // at the class node instead (bounded to keep the CPT tractable)
          if (bayesNet.getParentSet(iClass).getCardinalityOfParents() < 1024) {
            bayesNet.getParentSet(iClass).addParent(iAttribute, instances);
          } else {
            // too bad
          }
        } else {
          bayesNet.getParentSet(iAttribute).addParent(iClass, instances);
        }
      }
    }
  } // doMarkovBlanketCorrection

  /**
   * @param bMarkovBlanketClassifier
   */
  protected void setMarkovBlanketClassifier(boolean bMarkovBlanketClassifier) {
    m_bMarkovBlanketClassifier = bMarkovBlanketClassifier;
  }

  /**
   * @return whether the Markov Blanket correction is applied
   */
  protected boolean getMarkovBlanketClassifier() {
    return m_bMarkovBlanketClassifier;
  }

  /**
   * @return a string to describe the MaxNrOfParentsoption.
   */
  public String maxNrOfParentsTipText() {
    return "Set the maximum number of parents a node in the Bayes net can have."
      + " When initialized as Naive Bayes, setting this parameter to 1 results in"
      + " a Naive Bayes classifier. When set to 2, a Tree Augmented Bayes Network (TAN)"
      + " is learned, and when set >2, a Bayes Net Augmented Bayes Network (BAN)"
      + " is learned. By setting it to a value much larger than the number of nodes"
      + " in the network (the default of 100000 pretty much guarantees this), no"
      + " restriction on the number of parents is enforced";
  } // maxNrOfParentsTipText

  /**
   * @return a string to describe the InitAsNaiveBayes option.
   */
  public String initAsNaiveBayesTipText() {
    return "When set to true (default), the initial network used for structure learning"
      + " is a Naive Bayes Network, that is, a network with an arrow from the classifier"
      + " node to each other node. When set to false, an empty network is used as initial"
      + " network structure";
  } // initAsNaiveBayesTipText

  /**
   * @return a string to describe the MarkovBlanketClassifier option.
   */
  protected String markovBlanketClassifierTipText() {
    return "When set to true (default is false), after a network structure is learned"
      + " a Markov Blanket correction is applied to the network structure. This ensures"
      + " that all nodes in the network are part of the Markov blanket of the classifier"
      + " node.";
  } // markovBlanketClassifierTipText

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
} // class SearchAlgorithm
16,498
36.755149
131
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/ci/CISearchAlgorithm.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CISearchAlgorithm.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.ci; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.ParentSet; import weka.classifiers.bayes.net.search.local.LocalScoreSearchAlgorithm; import weka.core.Instances; import weka.core.RevisionUtils; /** <!-- globalinfo-start --> * The CISearchAlgorithm class supports Bayes net structure search algorithms that are based on conditional independence test (as opposed to for example score based of cross validation based search algorithms). * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. 
This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node.</pre> * * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre> * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision: 8034 $ */ public class CISearchAlgorithm extends LocalScoreSearchAlgorithm { /** for serialization */ static final long serialVersionUID = 3165802334119704560L; BayesNet m_BayesNet; Instances m_instances; /** * Returns a string describing this object * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "The CISearchAlgorithm class supports Bayes net structure " + "search algorithms that are based on conditional independence " + "test (as opposed to for example score based of cross validation " + "based search algorithms)."; } /** IsConditionalIndependent tests whether two nodes X and Y are independent * given a set of variables Z. The test compares the score of the Bayes network * with and without arrow Y->X where all nodes in Z are parents of X. 
* @param iAttributeX - index of attribute representing variable X * @param iAttributeY - index of attribute representing variable Y * @param iAttributesZ - array of integers representing indices of attributes in set Z * @param nAttributesZ - cardinality of Z * @return true if X and Y conditionally independent given Z */ protected boolean isConditionalIndependent( int iAttributeX, int iAttributeY, int [] iAttributesZ, int nAttributesZ) { ParentSet oParentSetX = m_BayesNet.getParentSet(iAttributeX); // clear parent set of AttributeX while (oParentSetX.getNrOfParents() > 0) { oParentSetX.deleteLastParent(m_instances); } // insert parents in iAttributeZ for (int iAttributeZ = 0; iAttributeZ < nAttributesZ; iAttributeZ++) { oParentSetX.addParent( iAttributesZ[iAttributeZ], m_instances); } double fScoreZ = calcNodeScore(iAttributeX); double fScoreZY = calcScoreWithExtraParent(iAttributeX, iAttributeY); if (fScoreZY <= fScoreZ) { // the score does not improve by adding Y to the parent set of X // so we conclude that nodes X and Y are conditionally independent // given the set of variables Z return true; } return false; } // IsConditionalIndependent /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // class CISearchAlgorithm
4,182
33.858333
210
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/ci/ICSSearchAlgorithm.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ICSSearchAlgorithm.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.ci; import java.io.FileReader; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.ParentSet; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** <!-- globalinfo-start --> * This Bayes Network learning algorithm uses conditional independence tests to find a skeleton, finds V-nodes and applies a set of rules to find the directions of the remaining arrows. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -cardinality &lt;num&gt; * When determining whether an edge exists a search is performed * for a set Z that separates the nodes. MaxCardinality determines * the maximum size of the set Z. This greatly influences the * length of the search. (default 2)</pre> * * <pre> -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. 
This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node.</pre> * * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre> * <!-- options-end --> * * @author Remco Bouckaert * @version $Revision: 8034 $ */ public class ICSSearchAlgorithm extends CISearchAlgorithm { /** for serialization */ static final long serialVersionUID = -2510985917284798576L; /** * returns the name of the attribute with the given index * * @param iAttribute the index of the attribute * @return the name of the attribute */ String name(int iAttribute) { return m_instances.attribute(iAttribute).name(); } /** * returns the number of attributes * * @return the number of attributes */ int maxn() { return m_instances.numAttributes(); } /** maximum size of separating set **/ private int m_nMaxCardinality = 2; /** * sets the cardinality * * @param nMaxCardinality the max cardinality */ public void setMaxCardinality(int nMaxCardinality) { m_nMaxCardinality = nMaxCardinality; } /** * returns the max cardinality * * @return the max cardinality */ public int getMaxCardinality() { return m_nMaxCardinality; } class SeparationSet implements RevisionHandler { public int [] m_set; /** * constructor */ public SeparationSet() { m_set= new int [getMaxCardinality() + 1]; } // c'tor public boolean contains(int nItem) { for (int iItem = 0; iItem < getMaxCardinality() && m_set[iItem] != -1; iItem++) { if (m_set[iItem] == nItem) { return true; } } return false; } // contains /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // class sepset /** * Search for Bayes network structure using ICS algorithm * @param bayesNet datastructure to build network structure for * @param instances data set to learn from * @throws Exception if something goes wrong */ protected void search(BayesNet bayesNet, Instances instances) throws Exception { // init m_BayesNet = bayesNet; m_instances = instances; boolean edges[][] = new boolean [maxn() + 1][]; boolean [] [] arrows = new boolean [maxn() + 1][]; SeparationSet [] [] sepsets = new SeparationSet [maxn() + 1][]; for (int iNode = 0 ; iNode < maxn() + 1; iNode++) { edges[iNode] = new boolean[maxn()]; arrows[iNode] = new boolean[maxn()]; sepsets[iNode] = new SeparationSet[maxn()]; } calcDependencyGraph(edges, sepsets); calcVeeNodes(edges, arrows, sepsets); calcArcDirections(edges, arrows); // transfrom into BayesNet datastructure for (int iNode = 0; iNode < maxn(); iNode++) { // clear parent set of AttributeX ParentSet oParentSet = m_BayesNet.getParentSet(iNode); while (oParentSet.getNrOfParents() > 0) { oParentSet.deleteLastParent(m_instances); } for (int iParent = 0; iParent < maxn(); iParent++) { if (arrows[iParent][iNode]) { oParentSet.addParent(iParent, m_instances); } } } } // search /** CalcDependencyGraph determines the skeleton of the BayesNetwork by * starting with a complete graph and removing edges (a--b) if it can * find a set Z such that a and b conditionally independent given Z. * The set Z is found by trying all possible subsets of nodes adjacent * to a and b, first of size 0, then of size 1, etc. 
up to size * m_nMaxCardinality * @param edges boolean matrix representing the edges * @param sepsets set of separating sets */ void calcDependencyGraph(boolean[][] edges, SeparationSet[][] sepsets) { /*calc undirected graph a-b iff D(a,S,b) for all S)*/ SeparationSet oSepSet; for (int iNode1 = 0; iNode1 < maxn(); iNode1++) { /*start with complete graph*/ for (int iNode2 = 0; iNode2 < maxn(); iNode2++) { edges[iNode1][iNode2] = true; } } for (int iNode1 = 0; iNode1 < maxn(); iNode1++) { edges[iNode1][iNode1] = false; } for (int iCardinality = 0; iCardinality <= getMaxCardinality(); iCardinality++) { for (int iNode1 = 0; iNode1 <= maxn() - 2; iNode1++) { for (int iNode2 = iNode1 + 1; iNode2 < maxn(); iNode2++) { if (edges[iNode1][iNode2]) { oSepSet = existsSepSet(iNode1, iNode2, iCardinality, edges); if (oSepSet != null) { edges[iNode1][iNode2] = false; edges[iNode2][iNode1] = false; sepsets[iNode1][iNode2] = oSepSet; sepsets[iNode2][iNode1] = oSepSet; // report separating set System.err.print("I(" + name(iNode1) + ", {"); for (int iNode3 = 0; iNode3 < iCardinality; iNode3++) { System.err.print(name(oSepSet.m_set[iNode3]) + " "); } System.err.print("} ," + name(iNode2) + ")\n"); } } } } // report current state of dependency graph System.err.print(iCardinality + " "); for (int iNode1 = 0; iNode1 < maxn(); iNode1++) { System.err.print(name(iNode1) + " "); } System.err.print('\n'); for (int iNode1 = 0; iNode1 < maxn(); iNode1++) { for (int iNode2 = 0; iNode2 < maxn(); iNode2++) { if (edges[iNode1][iNode2]) System.err.print("X "); else System.err.print(". "); } System.err.print(name(iNode1) + " "); System.err.print('\n'); } } } /*CalcDependencyGraph*/ /** ExistsSepSet tests if a separating set Z of node a and b exists of given * cardiniality exists. * The set Z is found by trying all possible subsets of nodes adjacent * to both a and b of the requested cardinality. 
* @param iNode1 index of first node a * @param iNode2 index of second node b * @param nCardinality size of the separating set Z * @param edges * @return SeparationSet containing set that separates iNode1 and iNode2 or null if no such set exists */ SeparationSet existsSepSet(int iNode1, int iNode2, int nCardinality, boolean [] [] edges) { /*Test if a separating set of node d and e exists of cardiniality k*/ // int iNode1_, iNode2_; int iNode3, iZ; SeparationSet Z = new SeparationSet(); Z.m_set[nCardinality] = -1; // iNode1_ = iNode1; // iNode2_ = iNode2; // find first candidate separating set Z if (nCardinality > 0) { Z.m_set[0] = next(-1, iNode1, iNode2, edges); iNode3 = 1; while (iNode3 < nCardinality) { Z.m_set[iNode3] = next(Z.m_set[iNode3 - 1], iNode1, iNode2, edges); iNode3++; } } if (nCardinality > 0) { iZ = maxn() - Z.m_set[nCardinality - 1] - 1; } else { iZ = 0; } while (iZ >= 0) { //check if candidate separating set makes iNode2_ and iNode1_ independent if (isConditionalIndependent(iNode2, iNode1, Z.m_set, nCardinality)) { return Z; } // calc next candidate separating set if (nCardinality > 0) { Z.m_set[nCardinality - 1] = next(Z.m_set[nCardinality - 1], iNode1, iNode2, edges); } iZ = nCardinality - 1; while (iZ >= 0 && Z.m_set[iZ] >= maxn()) { iZ = nCardinality - 1; while (iZ >= 0 && Z.m_set[iZ] >= maxn()) { iZ--; } if (iZ < 0) { break; } Z.m_set[iZ] = next(Z.m_set[iZ], iNode1, iNode2, edges); for (iNode3 = iZ + 1; iNode3 < nCardinality; iNode3++) { Z.m_set[iNode3] = next(Z.m_set[iNode3 - 1], iNode1, iNode2, edges); } iZ = nCardinality - 1; } } return null; } /*ExistsSepSet*/ /** * determine index of node that makes next candidate separating set * adjacent to iNode1 and iNode2, but not iNode2 itself * @param x index of current node * @param iNode1 first node * @param iNode2 second node (must be larger than iNode1) * @param edges skeleton so far * @return int index of next node adjacent to iNode1 after x */ int next(int x, int iNode1, int iNode2, boolean 
[] [] edges) { x++; while (x < maxn() && (!edges[iNode1][x] || !edges[iNode2][x] ||x == iNode2)) { x++; } return x; } /*next*/ /** CalcVeeNodes tries to find V-nodes, i.e. nodes a,b,c such that * a->c<-b and a-/-b. These nodes are identified by finding nodes * a,b,c in the skeleton such that a--c, c--b and a-/-b and furthermore * c is not in the set Z that separates a and b * @param edges skeleton * @param arrows resulting partially directed skeleton after all V-nodes * have been identified * @param sepsets separating sets */ void calcVeeNodes( boolean[][] edges, boolean[][] arrows, SeparationSet[][] sepsets) { // start with complete empty graph for (int iNode1 = 0; iNode1 < maxn(); iNode1++) { for (int iNode2 = 0; iNode2 < maxn(); iNode2++) { arrows[iNode1][iNode2] = false; } } for (int iNode1 = 0; iNode1 < maxn() - 1; iNode1++) { for (int iNode2 = iNode1 + 1; iNode2 < maxn(); iNode2++) { if (!edges[iNode1][iNode2]) { /*i nonadj j*/ for (int iNode3 = 0; iNode3 < maxn(); iNode3++) { if ((iNode3 != iNode1 && iNode3 != iNode2 && edges[iNode1][iNode3] && edges[iNode2][iNode3]) & (!sepsets[iNode1][iNode2].contains(iNode3))) { arrows[iNode1][iNode3] = true; /*add arc i->k*/ arrows[iNode2][iNode3] = true; /*add arc j->k*/ } } } } } } // CalcVeeNodes /** CalcArcDirections assigns directions to edges that remain after V-nodes have * been identified. 
The arcs are directed using the following rules:
   *  Rule 1: i->j--k & i-/-k => j->k
   *  Rule 2: i->j->k & i--k  => i->k
   *  Rule 3: m
   *         /|\
   *        i | k  => m->j   (i->j<-k, m adjacent to i, j and k)
   *         \|/
   *          j
   *  Rule 4: m
   *         / \
   *        i---k  => i->m & k->m   (i->j)
   *         \ /
   *          j
   *  Rule 5: if no edges are directed then take a random one (first we can find)
   * @param edges skeleton (undirected adjacency matrix)
   * @param arrows resulting fully directed DAG; arrows[a][b] == true means a->b
   */
  void calcArcDirections(boolean[][] edges, boolean[][] arrows) {
    /*give direction to remaining arcs*/
    int i, j, k, m;
    boolean bFound;
    // Repeat until a full pass adds no new direction (fixed point reached).
    do {
      bFound = false;
      /*Rule 1: i->j--k & i-/-k => j->k*/
      for (i = 0; i < maxn(); i++) {
        for (j = 0; j < maxn(); j++) {
          if (i != j && arrows[i][j]) {
            for (k = 0; k < maxn(); k++) {
              // j--k undirected, i and k non-adjacent: orient j->k
              if (i != k && j != k && edges[j][k] && !edges[i][k] && !arrows[j][k] && !arrows[k][j]) {
                arrows[j][k] = true;
                bFound = true;
              }
            }
          }
        }
      }
      /*Rule 2: i->j->k & i--k => i->k*/
      for (i = 0; i < maxn(); i++) {
        for (j = 0; j < maxn(); j++) {
          if (i != j && arrows[i][j]) {
            for (k = 0; k < maxn(); k++) {
              // directed path i->j->k plus undirected i--k: orient i->k to avoid a cycle
              if (i != k && j != k && edges[i][k] && arrows[j][k] && !arrows[i][k] && !arrows[k][i]) {
                arrows[i][k] = true;
                bFound = true;
              }
            }
          }
        }
      }
      /* Rule 3: m adjacent (undirected) to i, j and k, with i->j<-k: orient m->j */
      for (i = 0; i < maxn(); i++) {
        for (j = 0; j < maxn(); j++) {
          if (i != j && arrows[i][j]) {
            for (k = 0; k < maxn(); k++) {
              if (k != i && k != j && arrows[k][j] && !edges[k][i]) {
                for (m = 0; m < maxn(); m++) {
                  // m--i, m--j, m--k all undirected
                  if (m != i && m != j && m != k && edges[m][i] && !arrows[m][i] && !arrows[i][m] && edges[m][j] && !arrows[m][j] && !arrows[j][m] && edges[m][k] && !arrows[m][k] && !arrows[k][m]) {
                    arrows[m][j] = true;
                    bFound = true;
                  }
                }
              }
            }
          }
        }
      }
      /* Rule 4: j->i with i--k (and k--j undirected), m undirected to i and k: orient i->m & k->m */
      for (i = 0; i < maxn(); i++) {
        for (j = 0; j < maxn(); j++) {
          if (i != j && arrows[j][i]) {
            for (k = 0; k < maxn(); k++) {
              if (k != i && k != j && edges[k][j] && !arrows[k][j] && !arrows[j][k] && edges[k][i] && !arrows[k][i] && !arrows[i][k]) {
                for (m = 0; m < maxn(); m++) {
                  if (m != i && m != j && m != k && edges[m][i] && !arrows[m][i] && !arrows[i][m] && edges[m][k] && !arrows[m][k] && !arrows[k][m]) {
                    arrows[i][m] = true;
                    arrows[k][m] = true;
                    bFound = true;
                  }
                }
              }
            }
          }
        }
      }
      /*Rule 5: if no edges are directed then take a random one (first we can find)*/
      if (!bFound) {
        i = 0;
        while (!bFound && i < maxn()) {
          j = 0;
          while (!bFound && j < maxn()) {
            if (edges[i][j] && !arrows[i][j] && !arrows[j][i]) {
              arrows[i][j] = true;
              bFound = true;
            }
            j++;
          }
          i++;
        }
      }
    } while (bFound);
  } // CalcArcDirections

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector result = new Vector();

    result.addElement(new Option(
        "\tWhen determining whether an edge exists a search is performed \n"
        + "\tfor a set Z that separates the nodes. MaxCardinality determines \n"
        + "\tthe maximum size of the set Z. This greatly influences the \n"
        + "\tlength of the search. (default 2)",
        "cardinality", 1, "-cardinality <num>"));

    // append options of the superclass
    Enumeration en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement(en.nextElement());

    return result.elements();
  } // listOption

  /**
   * Parses a given list of options. <p/>
   *
   * <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -cardinality &lt;num&gt;
   *  When determining whether an edge exists a search is performed
   *  for a set Z that separates the nodes. MaxCardinality determines
   *  the maximum size of the set Z. This greatly influences the
   *  length of the search. (default 2)</pre>
   *
   * <pre> -mbc
   *  Applies a Markov Blanket correction to the network structure,
   *  after a network structure is learned. This ensures that all
   *  nodes in the network are part of the Markov blanket of the
   *  classifier node.</pre>
   *
   * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]
   *  Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre>
   *
   * <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String tmpStr;

    tmpStr = Utils.getOption("cardinality", options);
    if (tmpStr.length() != 0)
      setMaxCardinality(Integer.parseInt(tmpStr));
    else
      setMaxCardinality(2);  // default when the option is absent

    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector result;
    String[] options;
    int i;

    result = new Vector();

    // start with the superclass options
    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);

    result.add("-cardinality");
    result.add("" + getMaxCardinality());

    return (String[]) result.toArray(new String[result.size()]);
  } // getOptions

  /**
   * @return a string to describe the MaxCardinality option.
   */
  public String maxCardinalityTipText() {
    return "When determining whether an edge exists a search is performed for a set Z "+
    "that separates the nodes. MaxCardinality determines the maximum size of the set Z. " +
    "This greatly influences the length of the search. Default value is 2.";
  } // maxCardinalityTipText

  /**
   * This will return a string describing the search algorithm.
   * @return The string.
   */
  public String globalInfo() {
    return "This Bayes Network learning algorithm uses conditional independence tests " +
    "to find a skeleton, finds V-nodes and applies a set of rules to find the directions " +
    "of the remaining arrows.";
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * for testing the class
   *
   * @param argv the commandline parameters
   */
  static public void main(String [] argv) {
    try {
      BayesNet b = new BayesNet();
      b.setSearchAlgorithm( new ICSSearchAlgorithm());
      // NOTE(review): ad-hoc test harness — the FileReader is never closed and the
      // path is hard-coded to a developer machine; acceptable only for manual testing.
      Instances instances = new Instances(new FileReader("C:\\eclipse\\workspace\\weka\\data\\contact-lenses.arff"));
      instances.setClassIndex(instances.numAttributes() - 1);
      b.buildClassifier(instances);
      System.out.println(b.toString());
    } catch (Exception e) {
      e.printStackTrace();
    }
  } // main

} // class ICSSearchAlgorithm
19,387
27.894188
185
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/fixed/FromFile.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * FromFile.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.fixed;

import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.BIFReader;
import weka.classifiers.bayes.net.ParentSet;
import weka.classifiers.bayes.net.search.SearchAlgorithm;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * The FromFile reads the structure of a Bayes net from a file in BIFF format.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -B &lt;BIF File&gt;
 *  Name of file containing network structure in BIF format
 * </pre>
 *
 <!-- options-end -->
 *
 * @author Remco Bouckaert
 * @version $Revision: 8034 $
 */
public class FromFile extends SearchAlgorithm {

  /** for serialization */
  static final long serialVersionUID = 7334358169507619525L;

  /** name of file to read structure from **/
  String m_sBIFFile = "";

  /**
   * Returns a string describing this object
   * @return a description of the classifier suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "The FromFile reads the structure of a Bayes net from a file "
      + "in BIFF format.";
  }

  /**
   * Copies the network structure read from the BIF file onto the Bayes net.
   *
   * @param bayesNet the network whose parent sets are filled in
   * @param instances the instances to work with
   * @throws Exception if attribute from BIF file could not be found
   */
  public void buildStructure(BayesNet bayesNet, Instances instances) throws Exception {
    // read network structure in BIF format
    BIFReader reader = new BIFReader();
    reader.processFile(m_sBIFFile);

    // transfer the parent set of every attribute from the BIF network
    for (int iNode = 0; iNode < instances.numAttributes(); iNode++) {
      int iBIFNode = reader.getNode(bayesNet.getNodeName(iNode));
      ParentSet bifParents = reader.getParentSet(iBIFNode);
      for (int iBIFParent = 0; iBIFParent < bifParents.getNrOfParents(); iBIFParent++) {
        String sParentName = reader.getNodeName(bifParents.getParent(iBIFParent));
        int iParent = findNode(bayesNet, instances, sParentName);
        bayesNet.getParentSet(iNode).addParent(iParent, instances);
      }
    }
  } // buildStructure

  /**
   * Looks up the index of a node by name.
   *
   * @param bayesNet network providing the node names
   * @param instances the data set (bounds the search)
   * @param sName name of the node to locate
   * @return index of the node with the given name
   * @throws Exception if no node carries that name
   */
  private int findNode(BayesNet bayesNet, Instances instances, String sName) throws Exception {
    for (int iNode = 0; iNode < instances.numAttributes(); iNode++) {
      if (bayesNet.getNodeName(iNode).equals(sName)) {
        return iNode;
      }
    }
    throw new Exception("Could not find attribute " + sName + " from BIF file in data");
  } // findNode

  /**
   * Set name of network in BIF file to read structure from
   *
   * @param sBIFFile the name of the BIF file
   */
  public void setBIFFile(String sBIFFile) {
    m_sBIFFile = sBIFFile;
  }

  /**
   * Get name of network in BIF file to read structure from
   * @return BIF file name
   */
  public String getBIFFile() {
    return m_sBIFFile;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector options = new Vector();

    options.addElement(new Option("\tName of file containing network structure in BIF format\n",
        "B", 1, "-B <BIF File>"));

    // append superclass options
    for (Enumeration en = super.listOptions(); en.hasMoreElements(); ) {
      options.addElement(en.nextElement());
    }

    return options.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -B &lt;BIF File&gt;
   *  Name of file containing network structure in BIF format
   * </pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    setBIFFile(Utils.getOption('B', options));

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] inherited = super.getOptions();
    String[] result = new String[2 + inherited.length];

    result[0] = "-B";
    result[1] = "" + getBIFFile();
    // append options from the parent class
    System.arraycopy(inherited, 0, result, 2, inherited.length);

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
} // class FromFile
5,726
28.828125
108
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/fixed/NaiveBayes.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NaiveBayes.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.fixed; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.search.SearchAlgorithm; import weka.core.Instances; import weka.core.RevisionUtils; /** <!-- globalinfo-start --> * The NaiveBayes class generates a fixed Bayes network structure with arrows from the class variable to each of the attribute variables. 
* <p/> <!-- globalinfo-end --> * <!-- options-start --> <!-- options-end --> * * @author Remco Bouckaert * @version $Revision: 8034 $ */ public class NaiveBayes extends SearchAlgorithm { /** for serialization */ static final long serialVersionUID = -4808572519709755811L; /** * Returns a string describing this object * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "The NaiveBayes class generates a fixed Bayes network structure " + "with arrows from the class variable to each of the attribute " + "variables."; } /** * * @param bayesNet * @param instances the instances to work with * @throws Exception if something goes wrong */ public void buildStructure (BayesNet bayesNet, Instances instances) throws Exception { for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { if (iAttribute != instances.classIndex()) { bayesNet.getParentSet(iAttribute).addParent(instances.classIndex(), instances); } } } // buildStructure /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // class NaiveBayes
2,515
29.682927
137
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/global/GeneticSearch.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * GeneticSearch.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.global;

import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.ParentSet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * This Bayes Network learning algorithm uses genetic search for finding a well scoring Bayes network structure. Genetic search works by having a population of Bayes network structures and allow them to mutate and apply cross over to get offspring. The best network structure found during the process is returned.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -L &lt;integer&gt;
 *  Population size</pre>
 *
 * <pre> -A &lt;integer&gt;
 *  Descendant population size</pre>
 *
 * <pre> -U &lt;integer&gt;
 *  Number of runs</pre>
 *
 * <pre> -M
 *  Use mutation.
 *  (default true)</pre>
 *
 * <pre> -C
 *  Use cross-over.
 *  (default true)</pre>
 *
 * <pre> -O
 *  Use tournament selection (true) or maximum subpopulation (false).
 *  (default false)</pre>
 *
 * <pre> -R &lt;seed&gt;
 *  Random number seed</pre>
 *
 * <pre> -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.</pre>
 *
 * <pre> -S [LOO-CV|k-Fold-CV|Cumulative-CV]
 *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)</pre>
 *
 * <pre> -Q
 *  Use probabilistic or 0/1 scoring.
 *  (default probabilistic scoring)</pre>
 *
 <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision: 8034 $
 */
public class GeneticSearch extends GlobalScoreSearchAlgorithm {

  /** for serialization */
  static final long serialVersionUID = 4236165533882462203L;

  /** number of runs (generations) **/
  int m_nRuns = 10;

  /** size of population **/
  int m_nPopulationSize = 10;

  /** size of descendant population **/
  int m_nDescendantPopulationSize = 100;

  /** use cross-over? **/
  boolean m_bUseCrossOver = true;

  /** use mutation? **/
  boolean m_bUseMutation = true;

  /** use tournament selection or take best sub-population **/
  boolean m_bUseTournamentSelection = false;

  /** random number seed **/
  int m_nSeed = 1;

  /** random number generator **/
  Random m_random = null;

  /** used in BayesNetRepresentation for efficiently determining
   * whether a bit index lies on the diagonal (a would-be self arc).
   * Shared across instances, so it is rebuilt whenever the node count
   * of the current instance differs from the cached table's. */
  static boolean [] g_bIsSquare;

  /**
   * Bit-matrix representation of a candidate network structure:
   * m_bits[iTail + iHead * m_nNodes] represents the arc iTail-&gt;iHead.
   */
  class BayesNetRepresentation implements RevisionHandler {

    /** number of nodes in network **/
    int m_nNodes = 0;

    /** bit representation of parent sets
     * m_bits[iTail + iHead * m_nNodes] represents arc iTail->iHead */
    boolean [] m_bits;

    /** score of represented network structure **/
    double m_fScore = 0.0f;

    /**
     * return score of represented network structure
     *
     * @return the score
     */
    public double getScore() {
      return m_fScore;
    } // getScore

    /**
     * c'tor
     *
     * @param nNodes the number of nodes
     */
    BayesNetRepresentation(int nNodes) {
      m_nNodes = nNodes;
    } // c'tor

    /** initialize with a random structure by randomly placing
     * m_nNodes arcs, rejecting self arcs and cyclic structures.
     */
    public void randomInit() {
      do {
        m_bits = new boolean [m_nNodes * m_nNodes];
        for (int i = 0; i < m_nNodes; i++) {
          int iPos;
          do {
            iPos = m_random.nextInt(m_nNodes * m_nNodes);
          } while (isSquare(iPos));  // avoid self arcs (diagonal bits)
          m_bits[iPos] = true;
        }
      } while (hasCycles());
      calcGlobalScore();
    }

    /** calculate score of current network representation.
     * As a side effect, the parent sets of m_BayesNet are set
     * to reflect this representation's structure.
     */
    void calcGlobalScore() {
      // clear current network
      for (int iNode = 0; iNode < m_nNodes; iNode++) {
        ParentSet parentSet = m_BayesNet.getParentSet(iNode);
        while (parentSet.getNrOfParents() > 0) {
          parentSet.deleteLastParent(m_BayesNet.m_Instances);
        }
      }
      // insert arrows
      for (int iNode = 0; iNode < m_nNodes; iNode++) {
        ParentSet parentSet = m_BayesNet.getParentSet(iNode);
        for (int iNode2 = 0; iNode2 < m_nNodes; iNode2++) {
          if (m_bits[iNode2 + iNode * m_nNodes]) {
            parentSet.addParent(iNode2, m_BayesNet.m_Instances);
          }
        }
      }
      // calc score
      try {
        m_fScore = calcScore(m_BayesNet);
      } catch (Exception e) {
        // ignored deliberately: an unscorable structure simply keeps
        // its previous score and loses the selection competition
      }
    } // calcScore

    /** check whether there are cycles in the network
     *
     * @return true if a cycle is found, false otherwise
     */
    public boolean hasCycles() {
      // topological elimination: repeatedly mark a node whose remaining
      // parents are all marked; if no such node exists, there is a cycle
      boolean[] bDone = new boolean[m_nNodes];
      for (int iNode = 0; iNode < m_nNodes; iNode++) {
        boolean bFound = false;
        for (int iNode2 = 0; !bFound && iNode2 < m_nNodes; iNode2++) {
          if (!bDone[iNode2]) {
            boolean bHasNoParents = true;
            for (int iParent = 0; iParent < m_nNodes; iParent++) {
              if (m_bits[iParent + iNode2 * m_nNodes] && !bDone[iParent]) {
                bHasNoParents = false;
              }
            }
            if (bHasNoParents) {
              bDone[iNode2] = true;
              bFound = true;
            }
          }
        }
        if (!bFound) {
          return true;
        }
      }
      return false;
    } // hasCycles

    /** create clone of current object
     * @return cloned object
     */
    BayesNetRepresentation copy() {
      BayesNetRepresentation b = new BayesNetRepresentation(m_nNodes);
      b.m_bits = new boolean [m_bits.length];
      for (int i = 0; i < m_nNodes * m_nNodes; i++) {
        b.m_bits[i] = m_bits[i];
      }
      b.m_fScore = m_fScore;
      return b;
    } // copy

    /** Apply mutation operation to BayesNet: flip one random non-diagonal
     * bit, retrying until the result is acyclic.
     * Calculates score and as a side effect sets BayesNet parent sets.
     */
    void mutate() {
      // flip a bit
      do {
        int iBit;
        do {
          iBit = m_random.nextInt(m_nNodes * m_nNodes);
        } while (isSquare(iBit));
        m_bits[iBit] = !m_bits[iBit];
      } while (hasCycles());
      calcGlobalScore();
    } // mutate

    /** Apply cross-over operation to BayesNet: replace the tail of this
     * bit string (from a random cross-over point) with the other parent's
     * bits, retrying with a new point until the result is acyclic.
     * Calculates score and as a side effect sets BayesNet parent sets.
     * @param other BayesNetRepresentation to cross over with
     */
    void crossOver(BayesNetRepresentation other) {
      boolean [] bits = new boolean [m_bits.length];
      for (int i = 0; i < m_bits.length; i++) {
        bits[i] = m_bits[i];
      }
      int iCrossOverPoint = m_bits.length;
      do {
        // restore to original state
        for (int i = iCrossOverPoint; i < m_bits.length; i++) {
          m_bits[i] = bits[i];
        }
        // take all bits from cross-over point onwards
        iCrossOverPoint = m_random.nextInt(m_bits.length);
        for (int i = iCrossOverPoint; i < m_bits.length; i++) {
          m_bits[i] = other.m_bits[i];
        }
      } while (hasCycles());
      calcGlobalScore();
    } // crossOver

    /** check if a bit index lies on the diagonal (i.e. would encode a
     * self arc), (re)building the shared g_bIsSquare lookup table when
     * necessary.
     *
     * FIX: the previous guard (g_bIsSquare.length &lt; nNum) was off by one
     * (nNum == length indexed out of bounds) and, worse, reused a diagonal
     * table built for a *different* node count when a larger table was
     * already cached — marking wrong cells as diagonal. The table is now
     * rebuilt whenever its size does not match this instance's node count.
     *
     * @param nNum bit index to check (should be below m_nNodes * m_nNodes)
     * @return true if the index lies on the diagonal
     */
    boolean isSquare(int nNum) {
      if (g_bIsSquare == null || g_bIsSquare.length != m_nNodes * m_nNodes) {
        g_bIsSquare = new boolean [m_nNodes * m_nNodes];
        for (int i = 0; i < m_nNodes; i++) {
          g_bIsSquare[i * m_nNodes + i] = true;
        }
      }
      return g_bIsSquare[nNum];
    } // isSquare

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 8034 $");
    }
  } // class BayesNetRepresentation

  /**
   * search determines the network structure/graph of the network
   * with a genetic search algorithm.
   *
   * @param bayesNet the network to search
   * @param instances the instances to use
   * @throws Exception if population size doesn't fit or neither cross-over nor mutation was chosen
   */
  protected void search(BayesNet bayesNet, Instances instances) throws Exception {
    // sanity check
    if (getDescendantPopulationSize() < getPopulationSize()) {
      throw new Exception("Descendant PopulationSize should be at least Population Size");
    }
    if (!getUseCrossOver() && !getUseMutation()) {
      throw new Exception("At least one of mutation or cross-over should be used");
    }

    m_random = new Random(m_nSeed);

    // keeps track of best structure found so far
    BayesNet bestBayesNet;
    // keeps track of score of best structure found so far
    double fBestScore = calcScore(bayesNet);

    // initialize bestBayesNet
    bestBayesNet = new BayesNet();
    bestBayesNet.m_Instances = instances;
    bestBayesNet.initStructure();
    copyParentSets(bestBayesNet, bayesNet);

    // initialize population
    BayesNetRepresentation [] population = new BayesNetRepresentation [getPopulationSize()];
    for (int i = 0; i < getPopulationSize(); i++) {
      population[i] = new BayesNetRepresentation(instances.numAttributes());
      population[i].randomInit();
      // NOTE(review): this copy relies on calcGlobalScore() having left
      // bayesNet's parent sets reflecting population[i]'s structure — verify
      // against GlobalScoreSearchAlgorithm before refactoring.
      if (population[i].getScore() > fBestScore) {
        copyParentSets(bestBayesNet, bayesNet);
        fBestScore = population[i].getScore();
      }
    }

    // go do the search
    for (int iRun = 0; iRun < m_nRuns; iRun++) {
      // create descendants
      BayesNetRepresentation [] descendantPopulation = new BayesNetRepresentation [getDescendantPopulationSize()];
      for (int i = 0; i < getDescendantPopulationSize(); i++) {
        descendantPopulation[i] = population[m_random.nextInt(getPopulationSize())].copy();
        if (getUseMutation()) {
          if (getUseCrossOver() && m_random.nextBoolean()) {
            descendantPopulation[i].crossOver(population[m_random.nextInt(getPopulationSize())]);
          } else {
            descendantPopulation[i].mutate();
          }
        } else {
          // use crossover
          descendantPopulation[i].crossOver(population[m_random.nextInt(getPopulationSize())]);
        }
        if (descendantPopulation[i].getScore() > fBestScore) {
          copyParentSets(bestBayesNet, bayesNet);
          fBestScore = descendantPopulation[i].getScore();
        }
      }
      // select new population
      boolean [] bSelected = new boolean [getDescendantPopulationSize()];
      for (int i = 0; i < getPopulationSize(); i++) {
        int iSelected = 0;
        if (m_bUseTournamentSelection) {
          // use tournament selection: pick two unselected descendants at
          // random, keep the better one
          iSelected = m_random.nextInt(getDescendantPopulationSize());
          while (bSelected[iSelected]) {
            iSelected = (iSelected + 1) % getDescendantPopulationSize();
          }
          int iSelected2 = m_random.nextInt(getDescendantPopulationSize());
          while (bSelected[iSelected2]) {
            iSelected2 = (iSelected2 + 1) % getDescendantPopulationSize();
          }
          if (descendantPopulation[iSelected2].getScore() > descendantPopulation[iSelected].getScore()) {
            iSelected = iSelected2;
          }
        } else {
          // find best scoring unselected network in descendant population
          while (bSelected[iSelected]) {
            iSelected++;
          }
          double fScore = descendantPopulation[iSelected].getScore();
          for (int j = 0; j < getDescendantPopulationSize(); j++) {
            if (!bSelected[j] && descendantPopulation[j].getScore() > fScore) {
              fScore = descendantPopulation[j].getScore();
              iSelected = j;
            }
          }
        }
        population[i] = descendantPopulation[iSelected];
        bSelected[iSelected] = true;
      }
    }

    // restore current network to best network
    copyParentSets(bayesNet, bestBayesNet);

    // free up memory
    bestBayesNet = null;
  } // search

  /** copyParentSets copies parent sets of source to dest BayesNet
   * (each destination parent set is overwritten via ParentSet.copy)
   * @param dest destination network
   * @param source source network
   */
  void copyParentSets(BayesNet dest, BayesNet source) {
    int nNodes = source.getNrOfNodes();
    for (int iNode = 0; iNode < nNodes; iNode++) {
      dest.getParentSet(iNode).copy(source.getParentSet(iNode));
    }
  } // CopyParentSets

  /**
   * @return number of runs
   */
  public int getRuns() {
    return m_nRuns;
  } // getRuns

  /**
   * Sets the number of runs
   * @param nRuns The number of runs to set
   */
  public void setRuns(int nRuns) {
    m_nRuns = nRuns;
  } // setRuns

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(7);

    newVector.addElement(new Option("\tPopulation size", "L", 1, "-L <integer>"));
    newVector.addElement(new Option("\tDescendant population size", "A", 1, "-A <integer>"));
    newVector.addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>"));
    newVector.addElement(new Option("\tUse mutation.\n\t(default true)", "M", 0, "-M"));
    newVector.addElement(new Option("\tUse cross-over.\n\t(default true)", "C", 0, "-C"));
    newVector.addElement(new Option("\tUse tournament selection (true) or maximum subpopulation (false).\n\t(default false)", "O", 0, "-O"));
    newVector.addElement(new Option("\tRandom number seed", "R", 1, "-R <seed>"));

    Enumeration enu = super.listOptions();
    while (enu.hasMoreElements()) {
      newVector.addElement(enu.nextElement());
    }
    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -L &lt;integer&gt;
   *  Population size</pre>
   *
   * <pre> -A &lt;integer&gt;
   *  Descendant population size</pre>
   *
   * <pre> -U &lt;integer&gt;
   *  Number of runs</pre>
   *
   * <pre> -M
   *  Use mutation.
   *  (default true)</pre>
   *
   * <pre> -C
   *  Use cross-over.
   *  (default true)</pre>
   *
   * <pre> -O
   *  Use tournament selection (true) or maximum subpopulation (false).
   *  (default false)</pre>
   *
   * <pre> -R &lt;seed&gt;
   *  Random number seed</pre>
   *
   * <pre> -mbc
   *  Applies a Markov Blanket correction to the network structure,
   *  after a network structure is learned. This ensures that all
   *  nodes in the network are part of the Markov blanket of the
   *  classifier node.</pre>
   *
   * <pre> -S [LOO-CV|k-Fold-CV|Cumulative-CV]
   *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)</pre>
   *
   * <pre> -Q
   *  Use probabilistic or 0/1 scoring.
   *  (default probabilistic scoring)</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String sPopulationSize = Utils.getOption('L', options);
    if (sPopulationSize.length() != 0) {
      setPopulationSize(Integer.parseInt(sPopulationSize));
    }
    String sDescendantPopulationSize = Utils.getOption('A', options);
    if (sDescendantPopulationSize.length() != 0) {
      setDescendantPopulationSize(Integer.parseInt(sDescendantPopulationSize));
    }
    String sRuns = Utils.getOption('U', options);
    if (sRuns.length() != 0) {
      setRuns(Integer.parseInt(sRuns));
    }
    String sSeed = Utils.getOption('R', options);
    if (sSeed.length() != 0) {
      setSeed(Integer.parseInt(sSeed));
    }
    setUseMutation(Utils.getFlag('M', options));
    setUseCrossOver(Utils.getFlag('C', options));
    setUseTournamentSelection(Utils.getFlag('O', options));

    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] superOptions = super.getOptions();
    String[] options = new String[11 + superOptions.length];
    int current = 0;

    options[current++] = "-L";
    options[current++] = "" + getPopulationSize();

    options[current++] = "-A";
    options[current++] = "" + getDescendantPopulationSize();

    options[current++] = "-U";
    options[current++] = "" + getRuns();

    options[current++] = "-R";
    options[current++] = "" + getSeed();

    if (getUseMutation()) {
      options[current++] = "-M";
    }
    if (getUseCrossOver()) {
      options[current++] = "-C";
    }
    if (getUseTournamentSelection()) {
      options[current++] = "-O";
    }

    // insert options from parent class
    for (int iOption = 0; iOption < superOptions.length; iOption++) {
      options[current++] = superOptions[iOption];
    }

    // Fill up rest with empty strings, not nulls!
    while (current < options.length) {
      options[current++] = "";
    }
    return options;
  } // getOptions

  /**
   * @return whether cross-over is used
   */
  public boolean getUseCrossOver() {
    return m_bUseCrossOver;
  }

  /**
   * @return whether mutation is used
   */
  public boolean getUseMutation() {
    return m_bUseMutation;
  }

  /**
   * @return descendant population size
   */
  public int getDescendantPopulationSize() {
    return m_nDescendantPopulationSize;
  }

  /**
   * @return population size
   */
  public int getPopulationSize() {
    return m_nPopulationSize;
  }

  /**
   * @param bUseCrossOver sets whether cross-over is used
   */
  public void setUseCrossOver(boolean bUseCrossOver) {
    m_bUseCrossOver = bUseCrossOver;
  }

  /**
   * @param bUseMutation sets whether mutation is used
   */
  public void setUseMutation(boolean bUseMutation) {
    m_bUseMutation = bUseMutation;
  }

  /**
   * @return whether Tournament Selection (true) or Maximum Sub-Population (false) should be used
   */
  public boolean getUseTournamentSelection() {
    return m_bUseTournamentSelection;
  }

  /**
   * @param bUseTournamentSelection sets whether Tournament Selection or Maximum Sub-Population should be used
   */
  public void setUseTournamentSelection(boolean bUseTournamentSelection) {
    m_bUseTournamentSelection = bUseTournamentSelection;
  }

  /**
   * @param iDescendantPopulationSize sets descendant population size
   */
  public void setDescendantPopulationSize(int iDescendantPopulationSize) {
    m_nDescendantPopulationSize = iDescendantPopulationSize;
  }

  /**
   * @param iPopulationSize sets population size
   */
  public void setPopulationSize(int iPopulationSize) {
    m_nPopulationSize = iPopulationSize;
  }

  /**
   * @return random number seed
   */
  public int getSeed() {
    return m_nSeed;
  } // getSeed

  /**
   * Sets the random number seed
   * @param nSeed The number of the seed to set
   */
  public void setSeed(int nSeed) {
    m_nSeed = nSeed;
  } // setSeed

  /**
   * This will return a string describing the classifier.
   * @return The string.
   */
  public String globalInfo() {
    return "This Bayes Network learning algorithm uses genetic search for finding a well scoring " +
    "Bayes network structure. Genetic search works by having a population of Bayes network structures " +
    "and allow them to mutate and apply cross over to get offspring. The best network structure " +
    "found during the process is returned.";
  } // globalInfo

  /**
   * @return a string to describe the Runs option.
   */
  public String runsTipText() {
    return "Sets the number of generations of Bayes network structure populations.";
  } // runsTipText

  /**
   * @return a string to describe the Seed option.
   */
  public String seedTipText() {
    return "Initialization value for random number generator." +
    " Setting the seed allows replicability of experiments.";
  } // seedTipText

  /**
   * @return a string to describe the Population Size option.
   */
  public String populationSizeTipText() {
    return "Sets the size of the population of network structures that is selected each generation.";
  } // populationSizeTipText

  /**
   * @return a string to describe the Descendant Population Size option.
   */
  public String descendantPopulationSizeTipText() {
    return "Sets the size of the population of descendants that is created each generation.";
  } // descendantPopulationSizeTipText

  /**
   * @return a string to describe the Use Mutation option.
   */
  public String useMutationTipText() {
    return "Determines whether mutation is allowed. Mutation flips a bit in the bit " +
    "representation of the network structure. At least one of mutation or cross-over " +
    "should be used.";
  } // useMutationTipText

  /**
   * @return a string to describe the Use Cross-Over option.
   */
  public String useCrossOverTipText() {
    return "Determines whether cross-over is allowed. Cross over combined the bit " +
    "representations of network structure by taking a random first k bits of one " +
    "and adding the remainder of the other. At least one of mutation or cross-over " +
    "should be used.";
  } // useCrossOverTipText

  /**
   * @return a string to describe the Use Tournament Selection option.
   */
  public String useTournamentSelectionTipText() {
    return "Determines the method of selecting a population. When set to true, tournament " +
    "selection is used (pick two at random and the highest is allowed to continue). " +
    "When set to false, the top scoring network structures are selected.";
  } // useTournamentSelectionTipText

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
} // GeneticSearch
21,777
28.193029
313
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/global/GlobalScoreSearchAlgorithm.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * GlobalScoreSearchAlgorithm.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.global;

import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.ParentSet;
import weka.classifiers.bayes.net.search.SearchAlgorithm;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.SelectedTag;
import weka.core.Tag;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * This Bayes Network learning algorithm uses cross validation to estimate classification accuracy.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.</pre>
 *
 * <pre> -S [LOO-CV|k-Fold-CV|Cumulative-CV]
 *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)</pre>
 *
 * <pre> -Q
 *  Use probabilistic or 0/1 scoring.
 *  (default probabilistic scoring)</pre>
 *
 <!-- options-end -->
 *
 * @author Remco Bouckaert
 * @version $Revision: 8034 $
 */
public class GlobalScoreSearchAlgorithm extends SearchAlgorithm {

  /** for serialization */
  static final long serialVersionUID = 7341389867906199781L;

  /** points to Bayes network for which a structure is searched for **/
  BayesNet m_BayesNet;

  /**
   * toggle between scoring using accuracy = 0-1 loss (when false) or class
   * probabilities (when true)
   **/
  boolean m_bUseProb = true;

  /** number of folds for k-fold cross validation **/
  int m_nNrOfFolds = 10;

  /** constant for score type: LOO-CV */
  final static int LOOCV = 0;
  /** constant for score type: k-fold-CV */
  final static int KFOLDCV = 1;
  /** constant for score type: Cumulative-CV */
  final static int CUMCV = 2;

  /** the score types **/
  public static final Tag[] TAGS_CV_TYPE = {
    new Tag(LOOCV, "LOO-CV"),
    new Tag(KFOLDCV, "k-Fold-CV"),
    new Tag(CUMCV, "Cumulative-CV")
  };

  /**
   * Holds the cross validation strategy used to measure quality of network
   */
  int m_nCVType = LOOCV;

  /**
   * performCV returns the accuracy calculated using cross validation. The
   * dataset used is m_Instances associated with the Bayes Network.
   *
   * @param bayesNet : Bayes Network containing structure to evaluate
   * @return accuracy (in interval 0..1) measured using cv.
   * @throws Exception when m_nCVType is invalid, plus exceptions passed on by
   *           updateClassifier
   */
  public double calcScore(BayesNet bayesNet) throws Exception {
    // dispatch on the configured cross validation strategy
    switch (m_nCVType) {
      case LOOCV:
        return leaveOneOutCV(bayesNet);
      case CUMCV:
        return cumulativeCV(bayesNet);
      case KFOLDCV:
        return kFoldCV(bayesNet, m_nNrOfFolds);
      default:
        throw new Exception("Unrecognized cross validation type encountered: " + m_nCVType);
    }
  } // calcScore

  /**
   * Calc Node Score With Added Parent. The candidate parent is added
   * temporarily, the network is scored, and the parent is removed again, so
   * the network structure is unchanged on return.
   *
   * @param nNode node for which the score is calculated
   * @param nCandidateParent candidate parent to add to the existing parent set
   * @return score of the network with the extra arc, or -1e100 if the
   *         candidate is already a parent (so the operation is never selected)
   * @throws Exception if something goes wrong
   */
  public double calcScoreWithExtraParent(int nNode, int nCandidateParent) throws Exception {
    ParentSet oParentSet = m_BayesNet.getParentSet(nNode);
    Instances instances = m_BayesNet.m_Instances;

    // sanity check: nCandidateParent should not be in parent set already
    for (int iParent = 0; iParent < oParentSet.getNrOfParents(); iParent++) {
      if (oParentSet.getParent(iParent) == nCandidateParent) {
        return -1e100;
      }
    }

    // set up candidate parent
    oParentSet.addParent(nCandidateParent, instances);

    // calculate the score
    double fAccuracy = calcScore(m_BayesNet);

    // delete temporarily added parent
    oParentSet.deleteLastParent(instances);

    return fAccuracy;
  } // calcScoreWithExtraParent

  /**
   * Calc Node Score With Parent Deleted. The parent is removed temporarily,
   * the network is scored, and the parent is re-inserted at its old position,
   * so the network structure is unchanged on return.
   *
   * @param nNode node for which the score is calculated
   * @param nCandidateParent candidate parent to delete from the existing
   *          parent set
   * @return score of the network without the arc, or -1e100 if the candidate
   *         is not currently a parent (so the operation is never selected)
   * @throws Exception if something goes wrong
   */
  public double calcScoreWithMissingParent(int nNode, int nCandidateParent) throws Exception {
    ParentSet oParentSet = m_BayesNet.getParentSet(nNode);
    Instances instances = m_BayesNet.m_Instances;

    // sanity check: nCandidateParent should be in parent set already
    if (!oParentSet.contains(nCandidateParent)) {
      return -1e100;
    }

    // set up candidate parent
    int iParent = oParentSet.deleteParent(nCandidateParent, instances);

    // calculate the score
    double fAccuracy = calcScore(m_BayesNet);

    // reinsert temporarily deleted parent
    oParentSet.addParent(nCandidateParent, iParent, instances);

    return fAccuracy;
  } // calcScoreWithMissingParent

  /**
   * Calc Node Score With Arrow reversed. The arc nCandidateParent -> nNode is
   * temporarily replaced by nNode -> nCandidateParent, the network is scored,
   * and the original arc is restored, so the network structure is unchanged
   * on return.
   *
   * @param nNode node for which the score is calculated
   * @param nCandidateParent candidate parent whose arc is reversed
   * @return score of the network with the reversed arc, or -1e100 if the
   *         candidate is not currently a parent (so the operation is never
   *         selected)
   * @throws Exception if something goes wrong
   */
  public double calcScoreWithReversedParent(int nNode, int nCandidateParent) throws Exception {
    ParentSet oParentSet = m_BayesNet.getParentSet(nNode);
    ParentSet oParentSet2 = m_BayesNet.getParentSet(nCandidateParent);
    Instances instances = m_BayesNet.m_Instances;

    // sanity check: nCandidateParent should be in parent set already
    if (!oParentSet.contains(nCandidateParent)) {
      return -1e100;
    }

    // set up candidate parent
    int iParent = oParentSet.deleteParent(nCandidateParent, instances);
    oParentSet2.addParent(nNode, instances);

    // calculate the score
    double fAccuracy = calcScore(m_BayesNet);

    // restate temporarily reversed arrow
    oParentSet2.deleteLastParent(instances);
    oParentSet.addParent(nCandidateParent, iParent, instances);

    return fAccuracy;
  } // calcScoreWithReversedParent

  /**
   * LeaveOneOutCV returns the accuracy calculated using Leave One Out cross
   * validation. The dataset used is m_Instances associated with the Bayes
   * Network.
   * <p>
   * Implementation note: an instance is "held out" by negating its weight and
   * calling updateClassifier, which subtracts its (now negative) contribution
   * from the CPT counts. Both fAccuracy and fWeight accumulate
   * negative-weighted terms, so the signs cancel in the returned ratio.
   *
   * @param bayesNet : Bayes Network containing structure to evaluate
   * @return accuracy (in interval 0..1) measured using leave one out cv.
   * @throws Exception passed on by updateClassifier
   */
  public double leaveOneOutCV(BayesNet bayesNet) throws Exception {
    m_BayesNet = bayesNet;

    double fAccuracy = 0.0;
    double fWeight = 0.0;
    Instances instances = bayesNet.m_Instances;
    bayesNet.estimateCPTs();
    for (int iInstance = 0; iInstance < instances.numInstances(); iInstance++) {
      Instance instance = instances.instance(iInstance);
      // remove the instance from the CPT counts by giving it negative weight
      instance.setWeight(-instance.weight());
      bayesNet.updateClassifier(instance);
      fAccuracy += accuracyIncrease(instance);
      fWeight += instance.weight();
      // restore the original weight and put the instance back into the counts
      instance.setWeight(-instance.weight());
      bayesNet.updateClassifier(instance);
    }
    return fAccuracy / fWeight;
  } // LeaveOneOutCV

  /**
   * CumulativeCV returns the accuracy calculated using cumulative cross
   * validation. The idea is to run through the data set and try to classify
   * each of the instances based on the previously seen data. The data set
   * used is m_Instances associated with the Bayes Network.
   *
   * @param bayesNet : Bayes Network containing structure to evaluate
   * @return accuracy (in interval 0..1) measured using cumulative cv.
   * @throws Exception passed on by updateClassifier
   */
  public double cumulativeCV(BayesNet bayesNet) throws Exception {
    m_BayesNet = bayesNet;

    double fAccuracy = 0.0;
    double fWeight = 0.0;
    Instances instances = bayesNet.m_Instances;
    // start from empty counts: each instance is scored before it is added
    bayesNet.initCPTs();
    for (int iInstance = 0; iInstance < instances.numInstances(); iInstance++) {
      Instance instance = instances.instance(iInstance);
      fAccuracy += accuracyIncrease(instance);
      bayesNet.updateClassifier(instance);
      fWeight += instance.weight();
    }
    return fAccuracy / fWeight;
  } // cumulativeCV

  /**
   * kFoldCV uses k-fold cross validation to measure the accuracy of a Bayes
   * network classifier.
   * <p>
   * Implementation note: a fold is "held out" by negating the weights of its
   * instances and calling updateClassifier, which subtracts their
   * contribution from the CPT counts; a second pass with re-negated weights
   * restores them afterwards.
   *
   * @param bayesNet : Bayes Network containing structure to evaluate
   * @param nNrOfFolds : the number of folds k to perform k-fold cv
   * @return accuracy (in interval 0..1) measured using k-fold cv.
   * @throws Exception passed on by updateClassifier
   */
  public double kFoldCV(BayesNet bayesNet, int nNrOfFolds) throws Exception {
    m_BayesNet = bayesNet;

    double fAccuracy = 0.0;
    double fWeight = 0.0;
    Instances instances = bayesNet.m_Instances;
    // estimate CPTs based on complete data set
    bayesNet.estimateCPTs();
    int nFoldStart = 0;
    int nFoldEnd = instances.numInstances() / nNrOfFolds;
    int iFold = 1;
    while (nFoldStart < instances.numInstances()) {
      // remove influence of fold iFold from the probability distribution
      for (int iInstance = nFoldStart; iInstance < nFoldEnd; iInstance++) {
        Instance instance = instances.instance(iInstance);
        instance.setWeight(-instance.weight());
        bayesNet.updateClassifier(instance);
      }

      // measure accuracy on fold iFold
      for (int iInstance = nFoldStart; iInstance < nFoldEnd; iInstance++) {
        Instance instance = instances.instance(iInstance);
        // flip the weight back to positive while scoring this instance
        instance.setWeight(-instance.weight());
        fAccuracy += accuracyIncrease(instance);
        // BUG FIX: accumulate the weight while it is still positive.
        // Previously the weight was re-negated first, so fWeight summed
        // negative values while fAccuracy summed positive ones, negating the
        // returned score and inverting the "higher is better" ordering that
        // the search algorithms rely on (and disagreeing with
        // leaveOneOutCV/cumulativeCV, which use consistent signs).
        fWeight += instance.weight();
        // re-negate so the restore pass below sees the expected negative sign
        instance.setWeight(-instance.weight());
      }

      // restore influence of fold iFold from the probability distribution
      for (int iInstance = nFoldStart; iInstance < nFoldEnd; iInstance++) {
        Instance instance = instances.instance(iInstance);
        instance.setWeight(-instance.weight());
        bayesNet.updateClassifier(instance);
      }

      // go to next fold
      nFoldStart = nFoldEnd;
      iFold++;
      nFoldEnd = iFold * instances.numInstances() / nNrOfFolds;
    }
    return fAccuracy / fWeight;
  } // kFoldCV

  /**
   * accuracyIncrease determines how much the accuracy estimate should be
   * increased due to the contribution of a single given instance.
   *
   * @param instance : instance for which to calculate the accuracy increase.
   * @return increase in accuracy due to given instance: the (weighted)
   *         probability of the true class when m_bUseProb is set, otherwise
   *         the instance weight for a correct 0/1 classification and 0 for an
   *         incorrect one.
   * @throws Exception passed on by distributionForInstance and
   *           classifyInstance
   */
  double accuracyIncrease(Instance instance) throws Exception {
    if (m_bUseProb) {
      double[] fProb = m_BayesNet.distributionForInstance(instance);
      return fProb[(int) instance.classValue()] * instance.weight();
    } else {
      if (m_BayesNet.classifyInstance(instance) == instance.classValue()) {
        return instance.weight();
      }
    }
    return 0;
  } // accuracyIncrease

  /**
   * @return use probabilities or not in accuracy estimate
   */
  public boolean getUseProb() {
    return m_bUseProb;
  } // getUseProb

  /**
   * @param useProb : use probabilities or not in accuracy estimate
   */
  public void setUseProb(boolean useProb) {
    m_bUseProb = useProb;
  } // setUseProb

  /**
   * set cross validation strategy to be used in searching for networks.
   * Silently ignored if the tag does not belong to TAGS_CV_TYPE.
   *
   * @param newCVType : cross validation strategy
   */
  public void setCVType(SelectedTag newCVType) {
    if (newCVType.getTags() == TAGS_CV_TYPE) {
      m_nCVType = newCVType.getSelectedTag().getID();
    }
  } // setCVType

  /**
   * get cross validation strategy to be used in searching for networks.
   *
   * @return cross validation strategy
   */
  public SelectedTag getCVType() {
    return new SelectedTag(m_nCVType, TAGS_CV_TYPE);
  } // getCVType

  /**
   * Sets whether to apply a Markov Blanket correction (delegates to the
   * superclass; re-declared here to make the option publicly visible).
   *
   * @param bMarkovBlanketClassifier whether to apply the correction
   */
  public void setMarkovBlanketClassifier(boolean bMarkovBlanketClassifier) {
    super.setMarkovBlanketClassifier(bMarkovBlanketClassifier);
  }

  /**
   * Gets whether a Markov Blanket correction is applied (delegates to the
   * superclass; re-declared here to make the option publicly visible).
   *
   * @return whether the correction is applied
   */
  public boolean getMarkovBlanketClassifier() {
    return super.getMarkovBlanketClassifier();
  }

  /**
   * Returns an enumeration describing the available options
   *
   * @return an enumeration of all the available options
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector();

    newVector.addElement(new Option(
      "\tApplies a Markov Blanket correction to the network structure, \n"
        + "\tafter a network structure is learned. This ensures that all \n"
        + "\tnodes in the network are part of the Markov blanket of the \n"
        + "\tclassifier node.",
      "mbc", 0, "-mbc"));
    newVector.addElement(new Option(
      "\tScore type (LOO-CV,k-Fold-CV,Cumulative-CV)",
      "S", 1, "-S [LOO-CV|k-Fold-CV|Cumulative-CV]"));
    newVector.addElement(new Option(
      "\tUse probabilistic or 0/1 scoring.\n\t(default probabilistic scoring)",
      "Q", 0, "-Q"));

    // append options of the superclass
    Enumeration enu = super.listOptions();
    while (enu.hasMoreElements()) {
      newVector.addElement(enu.nextElement());
    }
    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -mbc
   *  Applies a Markov Blanket correction to the network structure,
   *  after a network structure is learned. This ensures that all
   *  nodes in the network are part of the Markov blanket of the
   *  classifier node.</pre>
   *
   * <pre> -S [LOO-CV|k-Fold-CV|Cumulative-CV]
   *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)</pre>
   *
   * <pre> -Q
   *  Use probabilistic or 0/1 scoring.
   *  (default probabilistic scoring)</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    setMarkovBlanketClassifier(Utils.getFlag("mbc", options));

    String sScore = Utils.getOption('S', options);
    if (sScore.compareTo("LOO-CV") == 0) {
      setCVType(new SelectedTag(LOOCV, TAGS_CV_TYPE));
    }
    if (sScore.compareTo("k-Fold-CV") == 0) {
      setCVType(new SelectedTag(KFOLDCV, TAGS_CV_TYPE));
    }
    if (sScore.compareTo("Cumulative-CV") == 0) {
      setCVType(new SelectedTag(CUMCV, TAGS_CV_TYPE));
    }

    // note the inversion: -Q selects 0/1 scoring, absence selects probabilistic
    setUseProb(!Utils.getFlag('Q', options));
    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] superOptions = super.getOptions();
    String[] options = new String[4 + superOptions.length];
    int current = 0;

    if (getMarkovBlanketClassifier())
      options[current++] = "-mbc";

    options[current++] = "-S";

    switch (m_nCVType) {
      case (LOOCV):
        options[current++] = "LOO-CV";
        break;
      case (KFOLDCV):
        options[current++] = "k-Fold-CV";
        break;
      case (CUMCV):
        options[current++] = "Cumulative-CV";
        break;
    }

    if (!getUseProb()) {
      options[current++] = "-Q";
    }

    // insert options from parent class
    for (int iOption = 0; iOption < superOptions.length; iOption++) {
      options[current++] = superOptions[iOption];
    }

    // Fill up rest with empty strings, not nulls!
    while (current < options.length) {
      options[current++] = "";
    }
    return options;
  } // getOptions

  /**
   * @return a string to describe the CVType option.
   */
  public String CVTypeTipText() {
    return "Select cross validation strategy to be used in searching for networks.\n"
      + "LOO-CV = Leave one out cross validation\n"
      + "k-Fold-CV = k fold cross validation\n"
      + "Cumulative-CV = cumulative cross validation.";
  } // CVTypeTipText

  /**
   * @return a string to describe the UseProb option.
   */
  public String useProbTipText() {
    return "If set to true, the probability of the class is returned in the estimate of the "
      + "accuracy. If set to false, the accuracy estimate is only increased if the classifier returns "
      + "exactly the correct class.";
  } // useProbTipText

  /**
   * This will return a string describing the search algorithm.
   *
   * @return The string.
   */
  public String globalInfo() {
    return "This Bayes Network learning algorithm uses cross validation to estimate "
      + "classification accuracy.";
  } // globalInfo

  /**
   * @return a string to describe the MarkovBlanketClassifier option.
   */
  public String markovBlanketClassifierTipText() {
    return super.markovBlanketClassifierTipText();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
17,102
30.497238
123
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/global/HillClimber.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * HillClimber.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.global;

import java.io.Serializable;
import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.ParentSet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * This Bayes Network learning algorithm uses a hill climbing algorithm adding, deleting and reversing arcs. The search is not restricted by an order on the variables (unlike K2). The difference with B and B2 is that this hill climber also considers arrows part of the naive Bayes structure for deletion.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -P &lt;nr of parents&gt;
 *  Maximum number of parents</pre>
 *
 * <pre> -R
 *  Use arc reversal operation.
 *  (default false)</pre>
 *
 * <pre> -N
 *  Initial structure is empty (instead of Naive Bayes)</pre>
 *
 * <pre> -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.</pre>
 *
 * <pre> -S [LOO-CV|k-Fold-CV|Cumulative-CV]
 *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)</pre>
 *
 * <pre> -Q
 *  Use probabilistic or 0/1 scoring.
 *  (default probabilistic scoring)</pre>
 *
 <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision: 8034 $
 */
public class HillClimber extends GlobalScoreSearchAlgorithm {

  /** for serialization */
  static final long serialVersionUID = -3885042888195820149L;

  /**
   * the Operation class contains info on operations performed on the current
   * Bayesian network: the arc involved (tail -> head), the kind of operation
   * (add/delete/reverse) and the score of the network after applying it.
   */
  class Operation implements Serializable, RevisionHandler {

    /** for serialization */
    static final long serialVersionUID = -2934970456587374967L;

    // constants indicating the type of an operation
    final static int OPERATION_ADD = 0;
    final static int OPERATION_DEL = 1;
    final static int OPERATION_REVERSE = 2;

    /** c'tor **/
    public Operation() {
    }

    /**
     * c'tor + initializers
     *
     * @param nTail number of the tail node of the arc
     * @param nHead number of the head node of the arc
     * @param nOperation type of operation (OPERATION_ADD, OPERATION_DEL or
     *          OPERATION_REVERSE)
     */
    public Operation(int nTail, int nHead, int nOperation) {
      m_nHead = nHead;
      m_nTail = nTail;
      m_nOperation = nOperation;
    }

    /**
     * compare this operation with another.
     * NOTE(review): this overloads equals(Operation) rather than overriding
     * Object.equals(Object) (and no hashCode is defined); callers in this
     * hierarchy invoke it with a statically-typed Operation, so the overload
     * is what gets resolved — do not rely on it through an Object reference.
     *
     * @param other operation to compare with
     * @return true if operation is the same (same arc and same operation type)
     */
    public boolean equals(Operation other) {
      if (other == null) {
        return false;
      }
      return ((m_nOperation == other.m_nOperation)
        && (m_nHead == other.m_nHead)
        && (m_nTail == other.m_nTail));
    } // equals

    /** number of the tail node **/
    public int m_nTail;

    /** number of the head node **/
    public int m_nHead;

    /** type of operation (ADD, DEL, REVERSE) **/
    public int m_nOperation;

    /** change of score due to this operation **/
    public double m_fScore = -1E100;

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 8034 $");
    }
  } // class Operation

  /** use the arc reversal operator **/
  boolean m_bUseArcReversal = false;

  /**
   * search determines the network structure/graph of the network with a
   * greedy hill climb: repeatedly apply the best-scoring add/delete/reverse
   * operation until no operation improves the score any further.
   *
   * @param bayesNet the network to search
   * @param instances the instances to work with
   * @throws Exception if something goes wrong
   */
  protected void search(BayesNet bayesNet, Instances instances) throws Exception {
    m_BayesNet = bayesNet;
    double fScore = calcScore(bayesNet);

    // go do the search
    Operation oOperation = getOptimalOperation(bayesNet, instances);
    // keep applying operations as long as they strictly improve the score
    while ((oOperation != null) && (oOperation.m_fScore > fScore)) {
      performOperation(bayesNet, instances, oOperation);
      fScore = oOperation.m_fScore;
      oOperation = getOptimalOperation(bayesNet, instances);
    }
  } // search

  /**
   * check whether the operation is not in the forbidden. For base hill
   * climber, there are no restrictions on operations, so we always return
   * true. Subclasses (e.g. tabu search) override this to veto operations.
   *
   * @param oOperation operation to be checked
   * @return true if operation is not in the tabu list
   */
  boolean isNotTabu(Operation oOperation) {
    return true;
  } // isNotTabu

  /**
   * getOptimalOperation finds the optimal operation that can be performed on
   * the Bayes network that is not in the tabu list.
   *
   * @param bayesNet Bayes network to apply operation on
   * @param instances data set to learn from
   * @return optimal operation found, or null if no candidate operation exists
   * @throws Exception if something goes wrong
   */
  Operation getOptimalOperation(BayesNet bayesNet, Instances instances) throws Exception {
    Operation oBestOperation = new Operation();

    // Add???
    oBestOperation = findBestArcToAdd(bayesNet, instances, oBestOperation);
    // Delete???
    oBestOperation = findBestArcToDelete(bayesNet, instances, oBestOperation);
    // Reverse???
    if (getUseArcReversal()) {
      oBestOperation = findBestArcToReverse(bayesNet, instances, oBestOperation);
    }

    // did we find something?
    // m_fScore still at its -1E100 sentinel means no candidate was found
    if (oBestOperation.m_fScore == -1E100) {
      return null;
    }

    return oBestOperation;
  } // getOptimalOperation

  /**
   * performOperation applies an operation on the Bayes network and update the
   * cache.
   *
   * @param bayesNet Bayes network to apply operation on
   * @param instances data set to learn from
   * @param oOperation operation to perform
   * @throws Exception if something goes wrong
   */
  void performOperation(BayesNet bayesNet, Instances instances, Operation oOperation) throws Exception {
    // perform operation
    switch (oOperation.m_nOperation) {
      case Operation.OPERATION_ADD:
        applyArcAddition(bayesNet, oOperation.m_nHead, oOperation.m_nTail, instances);
        if (bayesNet.getDebug()) {
          System.out.print("Add " + oOperation.m_nHead + " -> " + oOperation.m_nTail);
        }
        break;
      case Operation.OPERATION_DEL:
        applyArcDeletion(bayesNet, oOperation.m_nHead, oOperation.m_nTail, instances);
        if (bayesNet.getDebug()) {
          System.out.print("Del " + oOperation.m_nHead + " -> " + oOperation.m_nTail);
        }
        break;
      case Operation.OPERATION_REVERSE:
        // a reversal is a deletion followed by an addition in the other direction
        applyArcDeletion(bayesNet, oOperation.m_nHead, oOperation.m_nTail, instances);
        applyArcAddition(bayesNet, oOperation.m_nTail, oOperation.m_nHead, instances);
        if (bayesNet.getDebug()) {
          System.out.print("Rev " + oOperation.m_nHead + " -> " + oOperation.m_nTail);
        }
        break;
    }
  } // performOperation

  /**
   * applyArcAddition adds the arc iTail -> iHead to the network.
   *
   * @param bayesNet network to modify
   * @param iHead head node of the arc (the node receiving a new parent)
   * @param iTail tail node of the arc (the new parent)
   * @param instances data set (needed to update the parent set)
   */
  void applyArcAddition(BayesNet bayesNet, int iHead, int iTail, Instances instances) {
    ParentSet bestParentSet = bayesNet.getParentSet(iHead);
    bestParentSet.addParent(iTail, instances);
  } // applyArcAddition

  /**
   * applyArcDeletion removes the arc iTail -> iHead from the network.
   *
   * @param bayesNet network to modify
   * @param iHead head node of the arc (the node losing a parent)
   * @param iTail tail node of the arc (the parent being removed)
   * @param instances data set (needed to update the parent set)
   */
  void applyArcDeletion(BayesNet bayesNet, int iHead, int iTail, Instances instances) {
    ParentSet bestParentSet = bayesNet.getParentSet(iHead);
    bestParentSet.deleteParent(iTail, instances);
  } // applyArcDeletion

  /**
   * find best (or least bad) arc addition operation
   *
   * @param bayesNet Bayes network to add arc to
   * @param instances data set
   * @param oBestOperation best operation found so far; candidates must beat
   *          its score
   * @return Operation containing best arc to add, or null if no arc addition
   *         is allowed (this can happen if any arc addition introduces a
   *         cycle, or all parent sets are filled up to the maximum nr of
   *         parents).
   * @throws Exception if something goes wrong
   */
  Operation findBestArcToAdd(BayesNet bayesNet, Instances instances, Operation oBestOperation) throws Exception {
    int nNrOfAtts = instances.numAttributes();
    // find best arc to add
    for (int iAttributeHead = 0; iAttributeHead < nNrOfAtts; iAttributeHead++) {
      // only consider heads whose parent set is not yet full
      if (bayesNet.getParentSet(iAttributeHead).getNrOfParents() < m_nMaxNrOfParents) {
        for (int iAttributeTail = 0; iAttributeTail < nNrOfAtts; iAttributeTail++) {
          // addArcMakesSense rules out self-arcs, duplicates and cycles
          if (addArcMakesSense(bayesNet, instances, iAttributeHead, iAttributeTail)) {
            Operation oOperation = new Operation(iAttributeTail, iAttributeHead, Operation.OPERATION_ADD);
            double fScore = calcScoreWithExtraParent(oOperation.m_nHead, oOperation.m_nTail);
            if (fScore > oBestOperation.m_fScore) {
              if (isNotTabu(oOperation)) {
                oBestOperation = oOperation;
                oBestOperation.m_fScore = fScore;
              }
            }
          }
        }
      }
    }
    return oBestOperation;
  } // findBestArcToAdd

  /**
   * find best (or least bad) arc deletion operation
   *
   * @param bayesNet Bayes network to delete arc from
   * @param instances data set
   * @param oBestOperation best operation found so far; candidates must beat
   *          its score
   * @return Operation containing best arc to delete, or null if no deletion
   *         can be made (happens when there is no arc in the network yet).
   * @throws Exception of something goes wrong
   */
  Operation findBestArcToDelete(BayesNet bayesNet, Instances instances, Operation oBestOperation) throws Exception {
    int nNrOfAtts = instances.numAttributes();
    // find best arc to delete
    for (int iNode = 0; iNode < nNrOfAtts; iNode++) {
      ParentSet parentSet = bayesNet.getParentSet(iNode);
      for (int iParent = 0; iParent < parentSet.getNrOfParents(); iParent++) {
        Operation oOperation = new Operation(parentSet.getParent(iParent), iNode, Operation.OPERATION_DEL);
        double fScore = calcScoreWithMissingParent(oOperation.m_nHead, oOperation.m_nTail);
        if (fScore > oBestOperation.m_fScore) {
          if (isNotTabu(oOperation)) {
            oBestOperation = oOperation;
            oBestOperation.m_fScore = fScore;
          }
        }
      }
    }
    return oBestOperation;
  } // findBestArcToDelete

  /**
   * find best (or least bad) arc reversal operation
   *
   * @param bayesNet Bayes network to reverse arc in
   * @param instances data set
   * @param oBestOperation best operation found so far; candidates must beat
   *          its score
   * @return Operation containing best arc to reverse, or null if no reversal
   *         is allowed (happens if there is no arc in the network yet, or
   *         when any such reversal introduces a cycle).
   * @throws Exception if something goes wrong
   */
  Operation findBestArcToReverse(BayesNet bayesNet, Instances instances, Operation oBestOperation) throws Exception {
    int nNrOfAtts = instances.numAttributes();
    // find best arc to reverse
    for (int iNode = 0; iNode < nNrOfAtts; iNode++) {
      ParentSet parentSet = bayesNet.getParentSet(iNode);
      for (int iParent = 0; iParent < parentSet.getNrOfParents(); iParent++) {
        int iTail = parentSet.getParent(iParent);
        // is reversal allowed?
        // the former head becomes a parent of iTail, so iTail's parent set
        // must also have room
        if (reverseArcMakesSense(bayesNet, instances, iNode, iTail)
          && bayesNet.getParentSet(iTail).getNrOfParents() < m_nMaxNrOfParents) {
          // go check if reversal results in the best step forward
          Operation oOperation = new Operation(parentSet.getParent(iParent), iNode, Operation.OPERATION_REVERSE);
          double fScore = calcScoreWithReversedParent(oOperation.m_nHead, oOperation.m_nTail);
          if (fScore > oBestOperation.m_fScore) {
            if (isNotTabu(oOperation)) {
              oBestOperation = oOperation;
              oBestOperation.m_fScore = fScore;
            }
          }
        }
      }
    }
    return oBestOperation;
  } // findBestArcToReverse

  /**
   * Sets the max number of parents
   *
   * @param nMaxNrOfParents the max number of parents
   */
  public void setMaxNrOfParents(int nMaxNrOfParents) {
    m_nMaxNrOfParents = nMaxNrOfParents;
  }

  /**
   * Gets the max number of parents.
   *
   * @return the max number of parents
   */
  public int getMaxNrOfParents() {
    return m_nMaxNrOfParents;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(2);

    newVector.addElement(new Option("\tMaximum number of parents", "P", 1, "-P <nr of parents>"));
    newVector.addElement(new Option("\tUse arc reversal operation.\n\t(default false)", "R", 0, "-R"));
    newVector.addElement(new Option("\tInitial structure is empty (instead of Naive Bayes)", "N", 0, "-N"));

    // append options of the superclass
    Enumeration enu = super.listOptions();
    while (enu.hasMoreElements()) {
      newVector.addElement(enu.nextElement());
    }
    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -P &lt;nr of parents&gt;
   *  Maximum number of parents</pre>
   *
   * <pre> -R
   *  Use arc reversal operation.
   *  (default false)</pre>
   *
   * <pre> -N
   *  Initial structure is empty (instead of Naive Bayes)</pre>
   *
   * <pre> -mbc
   *  Applies a Markov Blanket correction to the network structure,
   *  after a network structure is learned. This ensures that all
   *  nodes in the network are part of the Markov blanket of the
   *  classifier node.</pre>
   *
   * <pre> -S [LOO-CV|k-Fold-CV|Cumulative-CV]
   *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)</pre>
   *
   * <pre> -Q
   *  Use probabilistic or 0/1 scoring.
   *  (default probabilistic scoring)</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    setUseArcReversal(Utils.getFlag('R', options));

    // note the inversion: -N means "do NOT start from Naive Bayes"
    setInitAsNaiveBayes(!(Utils.getFlag('N', options)));

    String sMaxNrOfParents = Utils.getOption('P', options);
    if (sMaxNrOfParents.length() != 0) {
      setMaxNrOfParents(Integer.parseInt(sMaxNrOfParents));
    } else {
      // effectively unbounded when -P is not given
      setMaxNrOfParents(100000);
    }

    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] superOptions = super.getOptions();
    String[] options = new String[7 + superOptions.length];
    int current = 0;

    if (getUseArcReversal()) {
      options[current++] = "-R";
    }

    if (!getInitAsNaiveBayes()) {
      options[current++] = "-N";
    }

    options[current++] = "-P";
    options[current++] = "" + m_nMaxNrOfParents;

    // insert options from parent class
    for (int iOption = 0; iOption < superOptions.length; iOption++) {
      options[current++] = superOptions[iOption];
    }

    // Fill up rest with empty strings, not nulls!
    while (current < options.length) {
      options[current++] = "";
    }

    return options;
  } // getOptions

  /**
   * Sets whether to init as naive bayes
   *
   * @param bInitAsNaiveBayes whether to init as naive bayes
   */
  public void setInitAsNaiveBayes(boolean bInitAsNaiveBayes) {
    m_bInitAsNaiveBayes = bInitAsNaiveBayes;
  }

  /**
   * Gets whether to init as naive bayes
   *
   * @return whether to init as naive bayes
   */
  public boolean getInitAsNaiveBayes() {
    return m_bInitAsNaiveBayes;
  }

  /**
   * get use the arc reversal operation
   *
   * @return whether the arc reversal operation should be used
   */
  public boolean getUseArcReversal() {
    return m_bUseArcReversal;
  } // getUseArcReversal

  /**
   * set use the arc reversal operation
   *
   * @param bUseArcReversal whether the arc reversal operation should be used
   */
  public void setUseArcReversal(boolean bUseArcReversal) {
    m_bUseArcReversal = bUseArcReversal;
  } // setUseArcReversal

  /**
   * This will return a string describing the search algorithm.
   *
   * @return The string.
   */
  public String globalInfo() {
    return "This Bayes Network learning algorithm uses a hill climbing algorithm "
      + "adding, deleting and reversing arcs. The search is not restricted by an order "
      + "on the variables (unlike K2). The difference with B and B2 is that this hill "
      + "climber also considers arrows part of the naive Bayes structure for deletion.";
  } // globalInfo

  /**
   * @return a string to describe the Use Arc Reversal option.
   */
  public String useArcReversalTipText() {
    return "When set to true, the arc reversal operation is used in the search.";
  } // useArcReversalTipText

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
} // HillClimber
17,250
31.065056
304
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/global/K2.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * K2.java
 * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.global;

import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * This Bayes Network learning algorithm uses a hill climbing algorithm restricted by an order on the variables.<br/>
 * <br/>
 * For more information see:<br/>
 * <br/>
 * G.F. Cooper, E. Herskovits (1990). A Bayesian method for constructing Bayesian belief networks from databases.<br/>
 * <br/>
 * G. Cooper, E. Herskovits (1992). A Bayesian method for the induction of probabilistic networks from data. Machine Learning. 9(4):309-347.<br/>
 * <br/>
 * Works with nominal variables and no missing values only.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;proceedings{Cooper1990,
 *    author = {G.F. Cooper and E. Herskovits},
 *    booktitle = {Proceedings of the Conference on Uncertainty in AI},
 *    pages = {86-94},
 *    title = {A Bayesian method for constructing Bayesian belief networks from databases},
 *    year = {1990}
 * }
 *
 * &#64;article{Cooper1992,
 *    author = {G. Cooper and E. Herskovits},
 *    journal = {Machine Learning},
 *    number = {4},
 *    pages = {309-347},
 *    title = {A Bayesian method for the induction of probabilistic networks from data},
 *    volume = {9},
 *    year = {1992}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -N
 *  Initial structure is empty (instead of Naive Bayes)</pre>
 *
 * <pre> -P &lt;nr of parents&gt;
 *  Maximum number of parents</pre>
 *
 * <pre> -R
 *  Random order.
 *  (default false)</pre>
 *
 * <pre> -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.</pre>
 *
 * <pre> -S [LOO-CV|k-Fold-CV|Cumulative-CV]
 *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)</pre>
 *
 * <pre> -Q
 *  Use probabilistic or 0/1 scoring.
 *  (default probabilistic scoring)</pre>
 *
 <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision: 8034 $
 */
public class K2
    extends GlobalScoreSearchAlgorithm
    implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = -6626871067466338256L;

  /** Holds flag to indicate ordering should be random **/
  boolean m_bRandomOrder = false;

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;
    TechnicalInformation additional;

    result = new TechnicalInformation(Type.PROCEEDINGS);
    result.setValue(Field.AUTHOR, "G.F. Cooper and E. Herskovits");
    result.setValue(Field.YEAR, "1990");
    result.setValue(Field.TITLE, "A Bayesian method for constructing Bayesian belief networks from databases");
    result.setValue(Field.BOOKTITLE, "Proceedings of the Conference on Uncertainty in AI");
    result.setValue(Field.PAGES, "86-94");

    // the 1992 journal version of the paper
    additional = result.add(Type.ARTICLE);
    additional.setValue(Field.AUTHOR, "G. Cooper and E. Herskovits");
    additional.setValue(Field.YEAR, "1992");
    additional.setValue(Field.TITLE, "A Bayesian method for the induction of probabilistic networks from data");
    additional.setValue(Field.JOURNAL, "Machine Learning");
    additional.setValue(Field.VOLUME, "9");
    additional.setValue(Field.NUMBER, "4");
    additional.setValue(Field.PAGES, "309-347");

    return result;
  }

  /**
   * search determines the network structure/graph of the network
   * with the K2 algorithm, restricted by its initial structure (which can
   * be an empty graph, or a Naive Bayes graph.
   *
   * @param bayesNet the network
   * @param instances the data to work with
   * @throws Exception if something goes wrong
   */
  public void search (BayesNet bayesNet, Instances instances) throws Exception {
    // nOrder is the node ordering K2 is restricted by; the class node always
    // comes first, the remaining attributes follow in dataset order.
    int nOrder[] = new int [instances.numAttributes()];
    nOrder[0] = instances.classIndex();

    int nAttribute = 0;

    for (int iOrder = 1; iOrder < instances.numAttributes(); iOrder++) {
      if (nAttribute == instances.classIndex()) {
        nAttribute++; // skip the class attribute; it is already at position 0
      }
      nOrder[iOrder] = nAttribute++;
    }

    if (m_bRandomOrder) {
      // generate random ordering (if required)
      // NOTE(review): Random is unseeded here, so random orderings are not
      // reproducible between runs.
      Random random = new Random();
      int iClass;
      if (getInitAsNaiveBayes()) {
        iClass = 0; // keep the class node pinned at the front of the ordering
      } else {
        iClass = -1; // no position is pinned
      }
      for (int iOrder = 0; iOrder < instances.numAttributes(); iOrder++) {
        int iOrder2 = Math.abs(random.nextInt()) % instances.numAttributes();
        if (iOrder != iClass && iOrder2 != iClass) {
          int nTmp = nOrder[iOrder];
          nOrder[iOrder] = nOrder[iOrder2];
          nOrder[iOrder2] = nTmp;
        }
      }
    }

    // determine base scores
    double fBaseScore = calcScore(bayesNet);

    // K2 algorithm: greedy search restricted by ordering
    // For each node (in order), repeatedly add the single parent (drawn from
    // earlier nodes in the ordering) that most improves the score, until no
    // parent improves the score or the parent limit is reached.
    for (int iOrder = 1; iOrder < instances.numAttributes(); iOrder++) {
      int iAttribute = nOrder[iOrder];
      double fBestScore = fBaseScore;

      boolean bProgress = (bayesNet.getParentSet(iAttribute).getNrOfParents() < getMaxNrOfParents());
      while (bProgress && (bayesNet.getParentSet(iAttribute).getNrOfParents() < getMaxNrOfParents())) {
        int nBestAttribute = -1;
        for (int iOrder2 = 0; iOrder2 < iOrder; iOrder2++) {
          int iAttribute2 = nOrder[iOrder2];
          double fScore = calcScoreWithExtraParent(iAttribute, iAttribute2);
          if (fScore > fBestScore) {
            fBestScore = fScore;
            nBestAttribute = iAttribute2;
          }
        }
        if (nBestAttribute != -1) {
          bayesNet.getParentSet(iAttribute).addParent(nBestAttribute, instances);
          fBaseScore = fBestScore; // carry the improved score forward
          bProgress = true;
        } else {
          bProgress = false; // no parent improves the score; move to next node
        }
      }
    }
  } // search

  /**
   * Sets the max number of parents
   *
   * @param nMaxNrOfParents the max number of parents
   */
  public void setMaxNrOfParents(int nMaxNrOfParents) {
    m_nMaxNrOfParents = nMaxNrOfParents;
  }

  /**
   * Gets the max number of parents.
   *
   * @return the max number of parents
   */
  public int getMaxNrOfParents() {
    return m_nMaxNrOfParents;
  }

  /**
   * Sets whether to init as naive bayes
   *
   * @param bInitAsNaiveBayes whether to init as naive bayes
   */
  public void setInitAsNaiveBayes(boolean bInitAsNaiveBayes) {
    m_bInitAsNaiveBayes = bInitAsNaiveBayes;
  }

  /**
   * Gets whether to init as naive bayes
   *
   * @return whether to init as naive bayes
   */
  public boolean getInitAsNaiveBayes() {
    return m_bInitAsNaiveBayes;
  }

  /**
   * Set random order flag
   *
   * @param bRandomOrder the random order flag
   */
  public void setRandomOrder(boolean bRandomOrder) {
    m_bRandomOrder = bRandomOrder;
  } // SetRandomOrder

  /**
   * Get random order flag
   *
   * @return the random order flag
   */
  public boolean getRandomOrder() {
    return m_bRandomOrder;
  } // getRandomOrder

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(0);

    newVector.addElement(new Option("\tInitial structure is empty (instead of Naive Bayes)", "N", 0, "-N"));

    newVector.addElement(new Option("\tMaximum number of parents", "P", 1, "-P <nr of parents>"));

    newVector.addElement(new Option(
        "\tRandom order.\n"
        + "\t(default false)",
        "R", 0, "-R"));

    // append options of the superclass
    Enumeration enu = super.listOptions();
    while (enu.hasMoreElements()) {
      newVector.addElement(enu.nextElement());
    }
    return newVector.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   * Recognized options: -N (start from empty structure instead of Naive
   * Bayes), -P &lt;nr of parents&gt; (maximum number of parents, default
   * effectively unbounded), -R (random node ordering), plus the options of
   * the superclass (-mbc, -S, -Q).
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    setRandomOrder(Utils.getFlag('R', options));

    m_bInitAsNaiveBayes = !(Utils.getFlag('N', options));

    String sMaxNrOfParents = Utils.getOption('P', options);

    if (sMaxNrOfParents.length() != 0) {
      setMaxNrOfParents(Integer.parseInt(sMaxNrOfParents));
    } else {
      setMaxNrOfParents(100000); // effectively unbounded
    }

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {
    String[] superOptions = super.getOptions();
    String[] options = new String[4 + superOptions.length];
    int current = 0;

    options[current++] = "-P";
    options[current++] = "" + m_nMaxNrOfParents;

    if (!m_bInitAsNaiveBayes) {
      options[current++] = "-N";
    }

    if (getRandomOrder()) {
      options[current++] = "-R";
    }

    // insert options from parent class
    for (int iOption = 0; iOption < superOptions.length; iOption++) {
      options[current++] = superOptions[iOption];
    }

    // Fill up rest with empty strings, not nulls!
    while (current < options.length) {
      options[current++] = "";
    }

    return options;
  }

  /**
   * @return a string to describe the RandomOrder option.
   */
  public String randomOrderTipText() {
    return "When set to true, the order of the nodes in the network is random." +
    " Default random order is false and the order" +
    " of the nodes in the dataset is used." +
    " In any case, when the network was initialized as Naive Bayes Network, the" +
    " class variable is first in the ordering though.";
  } // randomOrderTipText

  /**
   * This will return a string describing the search algorithm.
   * @return The string.
   */
  public String globalInfo() {
    return "This Bayes Network learning algorithm uses a hill climbing algorithm " +
    "restricted by an order on the variables.\n\n" +
    "For more information see:\n\n" +
    getTechnicalInformation().toString() + "\n\n" +
    "Works with nominal variables and no missing values only.";
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
12,198
28.826406
145
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/global/RepeatedHillClimber.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * RepeatedHillClimber.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.global;

import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.ParentSet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * This Bayes Network learning algorithm repeatedly uses hill climbing starting with a randomly generated network structure and return the best structure of the various runs.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -U &lt;integer&gt;
 *  Number of runs</pre>
 *
 * <pre> -A &lt;seed&gt;
 *  Random number seed</pre>
 *
 * <pre> -P &lt;nr of parents&gt;
 *  Maximum number of parents</pre>
 *
 * <pre> -R
 *  Use arc reversal operation.
 *  (default false)</pre>
 *
 * <pre> -N
 *  Initial structure is empty (instead of Naive Bayes)</pre>
 *
 * <pre> -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.</pre>
 *
 * <pre> -S [LOO-CV|k-Fold-CV|Cumulative-CV]
 *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)</pre>
 *
 * <pre> -Q
 *  Use probabilistic or 0/1 scoring.
 *  (default probabilistic scoring)</pre>
 *
 <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision: 8034 $
 */
public class RepeatedHillClimber
    extends HillClimber {

  /** for serialization */
  static final long serialVersionUID = -7359197180460703069L;

  /** number of runs **/
  int m_nRuns = 10;

  /** random number seed **/
  int m_nSeed = 1;

  /** random number generator **/
  Random m_random;

  /**
   * search determines the network structure/graph of the network
   * with the repeated hill climbing. Each run starts from a freshly
   * randomized structure, and the best-scoring structure over all runs is
   * kept.
   *
   * @param bayesNet the network to use
   * @param instances the data to use
   * @throws Exception if something goes wrong
   **/
  protected void search(BayesNet bayesNet, Instances instances) throws Exception {
    m_random = new Random(getSeed());

    // keeps track of score of best structure found so far
    double fBestScore;
    double fCurrentScore = calcScore(bayesNet);

    // keeps track of best structure found so far
    BayesNet bestBayesNet;

    // initialize bestBayesNet
    fBestScore = fCurrentScore;
    bestBayesNet = new BayesNet();
    bestBayesNet.m_Instances = instances;
    bestBayesNet.initStructure();
    copyParentSets(bestBayesNet, bayesNet);

    // go do the search
    for (int iRun = 0; iRun < m_nRuns; iRun++) {
      // generate random network as a starting point for this run
      generateRandomNet(bayesNet, instances);

      // search (plain hill climbing of the superclass)
      super.search(bayesNet, instances);

      // calculate score
      fCurrentScore = calcScore(bayesNet);

      // keep track of best network seen so far
      if (fCurrentScore > fBestScore) {
        fBestScore = fCurrentScore;
        copyParentSets(bestBayesNet, bayesNet);
      }
    }

    // restore current network to best network
    copyParentSets(bayesNet, bestBayesNet);

    // free up memory
    bestBayesNet = null;
  } // search

  /**
   * Randomizes the structure of the given network: clears all arcs,
   * optionally re-initializes as Naive Bayes, then inserts a random number
   * of random arcs (respecting the maximum-parents limit and acyclicity).
   *
   * @param bayesNet the network whose structure is randomized in place
   * @param instances the data the network is built for
   */
  void generateRandomNet(BayesNet bayesNet, Instances instances) {
    int nNodes = instances.numAttributes();

    // clear network
    for (int iNode = 0; iNode < nNodes; iNode++) {
      ParentSet parentSet = bayesNet.getParentSet(iNode);
      while (parentSet.getNrOfParents() > 0) {
        parentSet.deleteLastParent(instances);
      }
    }

    // initialize as naive Bayes?
    if (getInitAsNaiveBayes()) {
      int iClass = instances.classIndex();
      // initialize parent sets to have arrow from classifier node to
      // each of the other nodes
      for (int iNode = 0; iNode < nNodes; iNode++) {
        if (iNode != iClass) {
          bayesNet.getParentSet(iNode).addParent(iClass, instances);
        }
      }
    }

    // insert random arcs; attempts that would exceed the parent limit or
    // create a cycle are simply skipped
    int nNrOfAttempts = m_random.nextInt(nNodes * nNodes);
    for (int iAttempt = 0; iAttempt < nNrOfAttempts; iAttempt++) {
      int iTail = m_random.nextInt(nNodes);
      int iHead = m_random.nextInt(nNodes);
      if (bayesNet.getParentSet(iHead).getNrOfParents() < getMaxNrOfParents()
          && addArcMakesSense(bayesNet, instances, iHead, iTail)) {
        bayesNet.getParentSet(iHead).addParent(iTail, instances);
      }
    }
  } // generateRandomNet

  /**
   * copyParentSets copies parent sets of source to dest BayesNet
   *
   * @param dest destination network
   * @param source source network
   */
  void copyParentSets(BayesNet dest, BayesNet source) {
    int nNodes = source.getNrOfNodes();
    // clear parent set first
    for (int iNode = 0; iNode < nNodes; iNode++) {
      dest.getParentSet(iNode).copy(source.getParentSet(iNode));
    }
  } // CopyParentSets

  /**
   * Returns the number of runs
   *
   * @return number of runs
   */
  public int getRuns() {
    return m_nRuns;
  } // getRuns

  /**
   * Sets the number of runs
   *
   * @param nRuns The number of runs to set
   */
  public void setRuns(int nRuns) {
    m_nRuns = nRuns;
  } // setRuns

  /**
   * Returns the random seed
   *
   * @return random number seed
   */
  public int getSeed() {
    return m_nSeed;
  } // getSeed

  /**
   * Sets the random number seed
   *
   * @param nSeed The number of the seed to set
   */
  public void setSeed(int nSeed) {
    m_nSeed = nSeed;
  } // setSeed

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(4);

    newVector.addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>"));
    newVector.addElement(new Option("\tRandom number seed", "A", 1, "-A <seed>"));

    // append options of the superclass
    Enumeration enu = super.listOptions();
    while (enu.hasMoreElements()) {
      newVector.addElement(enu.nextElement());
    }
    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options. <p/>
   *
   * Recognized options: -U &lt;integer&gt; (number of runs), -A &lt;seed&gt;
   * (random number seed), plus the options of the superclass (-P, -R, -N,
   * -mbc, -S, -Q).
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String sRuns = Utils.getOption('U', options);
    if (sRuns.length() != 0) {
      setRuns(Integer.parseInt(sRuns));
    }

    String sSeed = Utils.getOption('A', options);
    if (sSeed.length() != 0) {
      setSeed(Integer.parseInt(sSeed));
    }

    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] superOptions = super.getOptions();
    String[] options = new String[7 + superOptions.length];
    int current = 0;

    options[current++] = "-U";
    options[current++] = "" + getRuns();

    options[current++] = "-A";
    options[current++] = "" + getSeed();

    // insert options from parent class
    for (int iOption = 0; iOption < superOptions.length; iOption++) {
      options[current++] = superOptions[iOption];
    }

    // Fill up rest with empty strings, not nulls!
    while (current < options.length) {
      options[current++] = "";
    }

    return options;
  } // getOptions

  /**
   * This will return a string describing the classifier.
   *
   * @return The string.
   */
  public String globalInfo() {
    return "This Bayes Network learning algorithm repeatedly uses hill climbing starting " +
    "with a randomly generated network structure and return the best structure of the " +
    "various runs.";
  } // globalInfo

  /**
   * @return a string to describe the Runs option.
   */
  public String runsTipText() {
    return "Sets the number of times hill climbing is performed.";
  } // runsTipText

  /**
   * @return a string to describe the Seed option.
   */
  public String seedTipText() {
    return "Initialization value for random number generator." +
    " Setting the seed allows replicability of experiments.";
  } // seedTipText

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
9,891
26.027322
174
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/global/SimulatedAnnealing.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * SimulatedAnnealing.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.global;

import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * This Bayes Network learning algorithm uses the general purpose search method of simulated annealing to find a well scoring network structure.<br/>
 * <br/>
 * For more information see:<br/>
 * <br/>
 * R.R. Bouckaert (1995). Bayesian Belief Networks: from Construction to Inference. Utrecht, Netherlands.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;phdthesis{Bouckaert1995,
 *    address = {Utrecht, Netherlands},
 *    author = {R.R. Bouckaert},
 *    institution = {University of Utrecht},
 *    title = {Bayesian Belief Networks: from Construction to Inference},
 *    year = {1995}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -A &lt;float&gt;
 *  Start temperature</pre>
 *
 * <pre> -U &lt;integer&gt;
 *  Number of runs</pre>
 *
 * <pre> -D &lt;float&gt;
 *  Delta temperature</pre>
 *
 * <pre> -R &lt;seed&gt;
 *  Random number seed</pre>
 *
 * <pre> -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.</pre>
 *
 * <pre> -S [LOO-CV|k-Fold-CV|Cumulative-CV]
 *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)</pre>
 *
 * <pre> -Q
 *  Use probabilistic or 0/1 scoring.
 *  (default probabilistic scoring)</pre>
 *
 <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision: 8034 $
 */
public class SimulatedAnnealing
    extends GlobalScoreSearchAlgorithm
    implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = -5482721887881010916L;

  /** start temperature **/
  double m_fTStart = 10;

  /** change in temperature at every run **/
  double m_fDelta = 0.999;

  /** number of runs **/
  int m_nRuns = 10000;

  /** use the arc reversal operator **/
  boolean m_bUseArcReversal = false;

  /** random number seed **/
  int m_nSeed = 1;

  /** random number generator **/
  Random m_random;

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.PHDTHESIS);
    result.setValue(Field.AUTHOR, "R.R. Bouckaert");
    result.setValue(Field.YEAR, "1995");
    result.setValue(Field.TITLE, "Bayesian Belief Networks: from Construction to Inference");
    result.setValue(Field.INSTITUTION, "University of Utrecht");
    result.setValue(Field.ADDRESS, "Utrecht, Netherlands");

    return result;
  }

  /**
   * search determines the network structure/graph of the network using
   * simulated annealing: in each of m_nRuns iterations a random arc
   * addition or deletion is proposed and accepted if it improves the score,
   * or with a temperature-dependent probability if it worsens it. The best
   * structure seen over all iterations is restored at the end.
   *
   * @param bayesNet the bayes net to use
   * @param instances the data to use
   * @throws Exception if something goes wrong
   */
  public void search (BayesNet bayesNet, Instances instances) throws Exception {
    m_random = new Random(m_nSeed);

    // determine base scores
    double fCurrentScore = calcScore(bayesNet);

    // keep track of best scoring network
    double fBestScore = fCurrentScore;
    BayesNet bestBayesNet = new BayesNet();
    bestBayesNet.m_Instances = instances;
    bestBayesNet.initStructure();
    copyParentSets(bestBayesNet, bayesNet);

    double fTemp = m_fTStart;
    for (int iRun = 0; iRun < m_nRuns; iRun++) {
      boolean bRunSucces = false;
      double fDeltaScore = 0.0;
      while (!bRunSucces) {
        // pick two distinct nodes at random
        int iTailNode = Math.abs(m_random.nextInt()) % instances.numAttributes();
        int iHeadNode = Math.abs(m_random.nextInt()) % instances.numAttributes();
        while (iTailNode == iHeadNode) {
          iHeadNode = Math.abs(m_random.nextInt()) % instances.numAttributes();
        }
        if (isArc(bayesNet, iHeadNode, iTailNode)) {
          bRunSucces = true;
          // arc exists: try a delete
          bayesNet.getParentSet(iHeadNode).deleteParent(iTailNode, instances);
          double fScore = calcScore(bayesNet);
          fDeltaScore = fScore - fCurrentScore;
          // Metropolis-style acceptance: always accept improvements; accept
          // deteriorations with probability decreasing with temperature.
          if (fTemp * Math.log((Math.abs(m_random.nextInt()) % 10000)/10000.0 + 1e-100) < fDeltaScore) {
            fCurrentScore = fScore;
          } else {
            // roll back
            bayesNet.getParentSet(iHeadNode).addParent(iTailNode, instances);
          }
        } else {
          // no arc yet: try to add one (only if it keeps the graph acyclic)
          if (addArcMakesSense(bayesNet, instances, iHeadNode, iTailNode)) {
            bRunSucces = true;
            double fScore = calcScoreWithExtraParent(iHeadNode, iTailNode);
            fDeltaScore = fScore - fCurrentScore;
            if (fTemp * Math.log((Math.abs(m_random.nextInt()) % 10000)/10000.0 + 1e-100) < fDeltaScore) {
              bayesNet.getParentSet(iHeadNode).addParent(iTailNode, instances);
              fCurrentScore = fScore;
            }
          }
        }
      }
      if (fCurrentScore > fBestScore) {
        // FIX: fBestScore was never updated before, so bestBayesNet was
        // overwritten by any network beating the *initial* score instead of
        // the best score seen so far (compare RepeatedHillClimber.search,
        // which updates the best score here).
        fBestScore = fCurrentScore;
        copyParentSets(bestBayesNet, bayesNet);
      }
      fTemp = fTemp * m_fDelta;
    }

    // restore current network to best network
    copyParentSets(bayesNet, bestBayesNet);
  } // buildStructure

  /** CopyParentSets copies parent sets of source to dest BayesNet
   * @param dest destination network
   * @param source source network
   */
  void copyParentSets(BayesNet dest, BayesNet source) {
    int nNodes = source.getNrOfNodes();
    // clear parent set first
    for (int iNode = 0; iNode < nNodes; iNode++) {
      dest.getParentSet(iNode).copy(source.getParentSet(iNode));
    }
  } // CopyParentSets

  /**
   * Gets the temperature decay factor.
   *
   * @return double
   */
  public double getDelta() {
    return m_fDelta;
  }

  /**
   * Gets the start temperature.
   *
   * @return double
   */
  public double getTStart() {
    return m_fTStart;
  }

  /**
   * Gets the number of annealing iterations.
   *
   * @return int
   */
  public int getRuns() {
    return m_nRuns;
  }

  /**
   * Sets the m_fDelta.
   * @param fDelta The m_fDelta to set
   */
  public void setDelta(double fDelta) {
    m_fDelta = fDelta;
  }

  /**
   * Sets the m_fTStart.
   * @param fTStart The m_fTStart to set
   */
  public void setTStart(double fTStart) {
    m_fTStart = fTStart;
  }

  /**
   * Sets the m_nRuns.
   * @param nRuns The m_nRuns to set
   */
  public void setRuns(int nRuns) {
    m_nRuns = nRuns;
  }

  /**
   * @return random number seed
   */
  public int getSeed() {
    return m_nSeed;
  } // getSeed

  /**
   * Sets the random number seed
   * @param nSeed The number of the seed to set
   */
  public void setSeed(int nSeed) {
    m_nSeed = nSeed;
  } // setSeed

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(3);

    newVector.addElement(new Option("\tStart temperature", "A", 1, "-A <float>"));
    newVector.addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>"));
    newVector.addElement(new Option("\tDelta temperature", "D", 1, "-D <float>"));
    newVector.addElement(new Option("\tRandom number seed", "R", 1, "-R <seed>"));

    // append options of the superclass
    Enumeration enu = super.listOptions();
    while (enu.hasMoreElements()) {
      newVector.addElement(enu.nextElement());
    }
    return newVector.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   * Recognized options: -A &lt;float&gt; (start temperature), -U
   * &lt;integer&gt; (number of runs), -D &lt;float&gt; (delta temperature),
   * -R &lt;seed&gt; (random number seed), plus the options of the
   * superclass (-mbc, -S, -Q).
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String sTStart = Utils.getOption('A', options);
    if (sTStart.length() != 0) {
      setTStart(Double.parseDouble(sTStart));
    }

    String sRuns = Utils.getOption('U', options);
    if (sRuns.length() != 0) {
      setRuns(Integer.parseInt(sRuns));
    }

    String sDelta = Utils.getOption('D', options);
    if (sDelta.length() != 0) {
      setDelta(Double.parseDouble(sDelta));
    }

    String sSeed = Utils.getOption('R', options);
    if (sSeed.length() != 0) {
      setSeed(Integer.parseInt(sSeed));
    }

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] superOptions = super.getOptions();
    String[] options = new String[8 + superOptions.length];
    int current = 0;

    options[current++] = "-A";
    options[current++] = "" + getTStart();

    options[current++] = "-U";
    options[current++] = "" + getRuns();

    options[current++] = "-D";
    options[current++] = "" + getDelta();

    options[current++] = "-R";
    options[current++] = "" + getSeed();

    // insert options from parent class
    for (int iOption = 0; iOption < superOptions.length; iOption++) {
      options[current++] = superOptions[iOption];
    }

    // Fill up rest with empty strings, not nulls!
    while (current < options.length) {
      options[current++] = "";
    }

    return options;
  }

  /**
   * This will return a string describing the classifier.
   * @return The string.
   */
  public String globalInfo() {
    return "This Bayes Network learning algorithm uses the general purpose search method " +
    "of simulated annealing to find a well scoring network structure.\n\n" +
    "For more information see:\n\n" +
    getTechnicalInformation().toString();
  } // globalInfo

  /**
   * @return a string to describe the TStart option.
   */
  public String TStartTipText() {
    return "Sets the start temperature of the simulated annealing search. "+
    "The start temperature determines the probability that a step in the 'wrong' direction in the " +
    "search space is accepted. The higher the temperature, the higher the probability of acceptance.";
  } // TStartTipText

  /**
   * @return a string to describe the Runs option.
   */
  public String runsTipText() {
    return "Sets the number of iterations to be performed by the simulated annealing search.";
  } // runsTipText

  /**
   * @return a string to describe the Delta option.
   */
  public String deltaTipText() {
    return "Sets the factor with which the temperature (and thus the acceptance probability of " +
    "steps in the wrong direction in the search space) is decreased in each iteration.";
  } // deltaTipText

  /**
   * @return a string to describe the Seed option.
   */
  public String seedTipText() {
    return "Initialization value for random number generator." +
    " Setting the seed allows replicability of experiments.";
  } // seedTipText

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
} // SimulatedAnnealing
13,357
29.221719
149
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/global/TAN.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * TAN.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.global;

import java.util.Enumeration;

import weka.classifiers.bayes.BayesNet;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;

/**
 <!-- globalinfo-start -->
 * This Bayes Network learning algorithm determines the maximum weight spanning
 * tree and returns a Naive Bayes network augmented with a tree.<br/>
 * <br/>
 * For more information see:<br/>
 * <br/>
 * N. Friedman, D. Geiger, M. Goldszmidt (1997). Bayesian network classifiers.
 * Machine Learning. 29(2-3):131-163.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;article{Friedman1997,
 *    author = {N. Friedman and D. Geiger and M. Goldszmidt},
 *    journal = {Machine Learning},
 *    number = {2-3},
 *    pages = {131-163},
 *    title = {Bayesian network classifiers},
 *    volume = {29},
 *    year = {1997}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.</pre>
 *
 * <pre> -S [LOO-CV|k-Fold-CV|Cumulative-CV]
 *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)</pre>
 *
 * <pre> -Q
 *  Use probabilistic or 0/1 scoring.
 *  (default probabilistic scoring)</pre>
 *
 <!-- options-end -->
 *
 * @author Remco Bouckaert
 * @version $Revision: 8034 $
 */
public class TAN
    extends GlobalScoreSearchAlgorithm
    implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 1715277053980895298L;

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.ARTICLE);
    result.setValue(Field.AUTHOR, "N. Friedman and D. Geiger and M. Goldszmidt");
    result.setValue(Field.YEAR, "1997");
    result.setValue(Field.TITLE, "Bayesian network classifiers");
    result.setValue(Field.JOURNAL, "Machine Learning");
    result.setValue(Field.VOLUME, "29");
    result.setValue(Field.NUMBER, "2-3");
    result.setValue(Field.PAGES, "131-163");

    return result;
  }

  /**
   * buildStructure determines the network structure/graph of the network
   * using the maximimum weight spanning tree algorithm of Chow and Liu.
   * Starting from a Naive Bayes structure (every attribute has the class
   * as parent), it greedily adds the tree arcs among the non-class
   * attributes, then orients them so every attribute gets at most one
   * extra (non-class) parent.
   *
   * @param bayesNet the network to build the structure for
   * @param instances the training data
   * @throws Exception if something goes wrong
   */
  public void buildStructure(BayesNet bayesNet, Instances instances) throws Exception {
    m_BayesNet = bayesNet;
    // force the Naive Bayes starting point and limit parents to 2
    // (the class node plus at most one tree parent)
    m_bInitAsNaiveBayes = true;
    m_nMaxNrOfParents = 2;
    super.buildStructure(bayesNet, instances);
    int nNrOfAtts = instances.numAttributes();

    // TAN greedy search (not restricted by ordering like K2)
    // 1. find strongest link
    // 2. find remaining links by adding strongest link to already
    //    connected nodes
    // 3. assign direction to links

    int nClassNode = instances.classIndex();
    // candidate tree links; a spanning tree over the (nNrOfAtts - 1)
    // non-class attributes has (nNrOfAtts - 2) edges
    int [] link1 = new int [nNrOfAtts - 1];
    int [] link2 = new int [nNrOfAtts - 1];
    // linked[i] == true once attribute i is connected to the tree
    boolean [] linked = new boolean [nNrOfAtts];

    // 1. find strongest link over all ordered pairs of non-class nodes
    int nBestLinkNode1 = -1;
    int nBestLinkNode2 = -1;
    double fBestDeltaScore = 0.0;
    int iLinkNode1;
    for (iLinkNode1 = 0; iLinkNode1 < nNrOfAtts; iLinkNode1++) {
      if (iLinkNode1 != nClassNode) {
        for (int iLinkNode2 = 0; iLinkNode2 < nNrOfAtts; iLinkNode2++) {
          // score the effect of adding iLinkNode2 as extra parent of iLinkNode1
          if ((iLinkNode1 != iLinkNode2) && (iLinkNode2 != nClassNode)) {
            double fScore = calcScoreWithExtraParent(iLinkNode1, iLinkNode2);
            if ((nBestLinkNode1 == -1) || (fScore > fBestDeltaScore)) {
              fBestDeltaScore = fScore;
              nBestLinkNode1 = iLinkNode2;
              nBestLinkNode2 = iLinkNode1;
            }
          }
        }
      }
    }
    link1[0] = nBestLinkNode1;
    link2[0] = nBestLinkNode2;
    linked[nBestLinkNode1] = true;
    linked[nBestLinkNode2] = true;

    // 2. find remaining links by adding strongest link to already
    //    connected nodes: one endpoint must already be in the tree,
    //    the other must not (XOR via the two boolean clauses below)
    for (int iLink = 1; iLink < nNrOfAtts - 2; iLink++) {
      nBestLinkNode1 = -1;
      for (iLinkNode1 = 0; iLinkNode1 < nNrOfAtts; iLinkNode1++) {
        if (iLinkNode1 != nClassNode) {
          for (int iLinkNode2 = 0; iLinkNode2 < nNrOfAtts; iLinkNode2++) {
            if ((iLinkNode1 != iLinkNode2)
                && (iLinkNode2 != nClassNode)
                && (linked[iLinkNode1] || linked[iLinkNode2])
                && (!linked[iLinkNode1] || !linked[iLinkNode2])) {
              double fScore = calcScoreWithExtraParent(iLinkNode1, iLinkNode2);
              // nBestLinkNode1 == -1 guarantees the first admissible pair is
              // accepted even though fBestDeltaScore carries over from before
              if ((nBestLinkNode1 == -1) || (fScore > fBestDeltaScore)) {
                fBestDeltaScore = fScore;
                nBestLinkNode1 = iLinkNode2;
                nBestLinkNode2 = iLinkNode1;
              }
            }
          }
        }
      }
      link1[iLink] = nBestLinkNode1;
      link2[iLink] = nBestLinkNode2;
      linked[nBestLinkNode1] = true;
      linked[nBestLinkNode2] = true;
    }

    // 3. assign direction to links so each node gets at most one tree parent
    boolean [] hasParent = new boolean [nNrOfAtts];
    for (int iLink = 0; iLink < nNrOfAtts - 2; iLink++) {
      if (!hasParent[link1[iLink]]) {
        bayesNet.getParentSet(link1[iLink]).addParent(link2[iLink], instances);
        hasParent[link1[iLink]] = true;
      } else {
        // link1 already has a parent, so orient the arc the other way;
        // both ends having parents would violate the tree invariant
        if (hasParent[link2[iLink]]) {
          throw new Exception("Bug condition found: too many arrows");
        }
        bayesNet.getParentSet(link2[iLink]).addParent(link1[iLink], instances);
        hasParent[link2[iLink]] = true;
      }
    }
  } // buildStructure

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    return super.listOptions();
  } // listOption

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -mbc
   *  Applies a Markov Blanket correction to the network structure,
   *  after a network structure is learned. This ensures that all
   *  nodes in the network are part of the Markov blanket of the
   *  classifier node.</pre>
   *
   * <pre> -S [LOO-CV|k-Fold-CV|Cumulative-CV]
   *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)</pre>
   *
   * <pre> -Q
   *  Use probabilistic or 0/1 scoring.
   *  (default probabilistic scoring)</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the Classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String [] getOptions() {
    return super.getOptions();
  } // getOptions

  /**
   * This will return a string describing the classifier.
   * @return The string.
   */
  public String globalInfo() {
    return "This Bayes Network learning algorithm determines the maximum weight spanning tree "
      + "and returns a Naive Bayes network augmented with a tree.\n\n"
      + "For more information see:\n\n"
      + getTechnicalInformation().toString();
  } // globalInfo

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
} // TAN
8,625
29.807143
146
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/global/TabuSearch.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * TabuSearch.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.global;

import java.util.Enumeration;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * This Bayes Network learning algorithm uses tabu search for finding a well
 * scoring Bayes network structure. Tabu search is hill climbing till an
 * optimum is reached. The following step is the least worst possible step.
 * The last X steps are kept in a list and none of the steps in this so called
 * tabu list is considered in taking the next step. The best network found in
 * this traversal is returned.<br/>
 * <br/>
 * For more information see:<br/>
 * <br/>
 * R.R. Bouckaert (1995). Bayesian Belief Networks: from Construction to
 * Inference. Utrecht, Netherlands.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;phdthesis{Bouckaert1995,
 *    address = {Utrecht, Netherlands},
 *    author = {R.R. Bouckaert},
 *    institution = {University of Utrecht},
 *    title = {Bayesian Belief Networks: from Construction to Inference},
 *    year = {1995}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -L &lt;integer&gt;
 *  Tabu list length</pre>
 *
 * <pre> -U &lt;integer&gt;
 *  Number of runs</pre>
 *
 * <pre> -P &lt;nr of parents&gt;
 *  Maximum number of parents</pre>
 *
 * <pre> -R
 *  Use arc reversal operation.
 *  (default false)</pre>
 *
 * <pre> -N
 *  Initial structure is empty (instead of Naive Bayes)</pre>
 *
 * <pre> -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.</pre>
 *
 * <pre> -S [LOO-CV|k-Fold-CV|Cumulative-CV]
 *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)</pre>
 *
 * <pre> -Q
 *  Use probabilistic or 0/1 scoring.
 *  (default probabilistic scoring)</pre>
 *
 <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision: 8034 $
 */
public class TabuSearch
    extends HillClimber
    implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 1176705618756672292L;

  /** number of runs (steps) to perform **/
  int m_nRuns = 10;

  /** size of tabu list **/
  int m_nTabuList = 5;

  /** the actual tabu list, used as a circular buffer of the last
   * m_nTabuList operations performed **/
  Operation[] m_oTabuList = null;

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.PHDTHESIS);
    result.setValue(Field.AUTHOR, "R.R. Bouckaert");
    result.setValue(Field.YEAR, "1995");
    result.setValue(Field.TITLE, "Bayesian Belief Networks: from Construction to Inference");
    result.setValue(Field.INSTITUTION, "University of Utrecht");
    result.setValue(Field.ADDRESS, "Utrecht, Netherlands");

    return result;
  }

  /**
   * search determines the network structure/graph of the network
   * with the Tabu search algorithm.
   *
   * FIX: the null sanity check on the operation returned by
   * getOptimalOperation() is now performed BEFORE the operation is
   * applied. Previously performOperation() was called first, so when no
   * admissible step existed the caller got a NullPointerException from
   * inside performOperation() instead of the intended, informative
   * "Panic: ..." exception.
   *
   * @param bayesNet the network to use
   * @param instances the instances to use
   * @throws Exception if no admissible step can be found (e.g. the tabu
   * list is too long) or something else goes wrong
   */
  protected void search(BayesNet bayesNet, Instances instances) throws Exception {
    m_oTabuList = new Operation[m_nTabuList];
    int iCurrentTabuList = 0;

    // keeps track of score of best structure found so far
    double fBestScore;
    double fCurrentScore = calcScore(bayesNet);

    // keeps track of best structure found so far
    BayesNet bestBayesNet;

    // initialize bestBayesNet
    fBestScore = fCurrentScore;
    bestBayesNet = new BayesNet();
    bestBayesNet.m_Instances = instances;
    bestBayesNet.initStructure();
    copyParentSets(bestBayesNet, bayesNet);

    // go do the search
    for (int iRun = 0; iRun < m_nRuns; iRun++) {
      Operation oOperation = getOptimalOperation(bayesNet, instances);
      // sanity check before applying the step: a null operation means
      // every candidate step was tabu
      if (oOperation == null) {
        throw new Exception("Panic: could not find any step to make. Tabu list too long?");
      }
      performOperation(bayesNet, instances, oOperation);
      // update tabu list (circular buffer)
      m_oTabuList[iCurrentTabuList] = oOperation;
      iCurrentTabuList = (iCurrentTabuList + 1) % m_nTabuList;

      fCurrentScore += oOperation.m_fScore;
      // keep track of best network seen so far
      if (fCurrentScore > fBestScore) {
        fBestScore = fCurrentScore;
        copyParentSets(bestBayesNet, bayesNet);
      }

      if (bayesNet.getDebug()) {
        printTabuList();
      }
    }

    // restore current network to best network
    copyParentSets(bayesNet, bestBayesNet);

    // free up memory
    bestBayesNet = null;
  } // search

  /** copyParentSets copies parent sets of source to dest BayesNet
   * @param dest destination network
   * @param source source network
   */
  void copyParentSets(BayesNet dest, BayesNet source) {
    int nNodes = source.getNrOfNodes();
    // clear parent set first
    for (int iNode = 0; iNode < nNodes; iNode++) {
      dest.getParentSet(iNode).copy(source.getParentSet(iNode));
    }
  } // CopyParentSets

  /** check whether the operation is not in the tabu list
   * @param oOperation operation to be checked
   * @return true if operation is not in the tabu list
   */
  boolean isNotTabu(Operation oOperation) {
    for (int iTabu = 0; iTabu < m_nTabuList; iTabu++) {
      if (oOperation.equals(m_oTabuList[iTabu])) {
        return false;
      }
    }
    return true;
  } // isNotTabu

  /** print tabu list for debugging purposes. */
  void printTabuList() {
    for (int i = 0; i < m_nTabuList; i++) {
      Operation o = m_oTabuList[i];
      if (o != null) {
        if (o.m_nOperation == 0) {System.out.print(" +(");} else {System.out.print(" -(");}
        System.out.print(o.m_nTail + "->" + o.m_nHead + ")");
      }
    }
    System.out.println();
  } // printTabuList

  /**
   * @return number of runs
   */
  public int getRuns() {
    return m_nRuns;
  } // getRuns

  /**
   * Sets the number of runs
   * @param nRuns The number of runs to set
   */
  public void setRuns(int nRuns) {
    m_nRuns = nRuns;
  } // setRuns

  /**
   * @return the Tabu List length
   */
  public int getTabuList() {
    return m_nTabuList;
  } // getTabuList

  /**
   * Sets the Tabu List length.
   * @param nTabuList The nTabuList to set
   */
  public void setTabuList(int nTabuList) {
    m_nTabuList = nTabuList;
  } // setTabuList

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(4);

    newVector.addElement(new Option("\tTabu list length", "L", 1, "-L <integer>"));
    newVector.addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>"));
    newVector.addElement(new Option("\tMaximum number of parents", "P", 1, "-P <nr of parents>"));
    newVector.addElement(new Option("\tUse arc reversal operation.\n\t(default false)", "R", 0, "-R"));

    Enumeration enu = super.listOptions();
    while (enu.hasMoreElements()) {
      newVector.addElement(enu.nextElement());
    }
    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -L &lt;integer&gt;
   *  Tabu list length</pre>
   *
   * <pre> -U &lt;integer&gt;
   *  Number of runs</pre>
   *
   * <pre> -P &lt;nr of parents&gt;
   *  Maximum number of parents</pre>
   *
   * <pre> -R
   *  Use arc reversal operation.
   *  (default false)</pre>
   *
   * <pre> -N
   *  Initial structure is empty (instead of Naive Bayes)</pre>
   *
   * <pre> -mbc
   *  Applies a Markov Blanket correction to the network structure,
   *  after a network structure is learned. This ensures that all
   *  nodes in the network are part of the Markov blanket of the
   *  classifier node.</pre>
   *
   * <pre> -S [LOO-CV|k-Fold-CV|Cumulative-CV]
   *  Score type (LOO-CV,k-Fold-CV,Cumulative-CV)</pre>
   *
   * <pre> -Q
   *  Use probabilistic or 0/1 scoring.
   *  (default probabilistic scoring)</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String sTabuList = Utils.getOption('L', options);
    if (sTabuList.length() != 0) {
      setTabuList(Integer.parseInt(sTabuList));
    }
    String sRuns = Utils.getOption('U', options);
    if (sRuns.length() != 0) {
      setRuns(Integer.parseInt(sRuns));
    }

    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] superOptions = super.getOptions();
    String[] options = new String[7 + superOptions.length];
    int current = 0;

    options[current++] = "-L";
    options[current++] = "" + getTabuList();

    options[current++] = "-U";
    options[current++] = "" + getRuns();

    // insert options from parent class
    for (int iOption = 0; iOption < superOptions.length; iOption++) {
      options[current++] = superOptions[iOption];
    }

    // Fill up rest with empty strings, not nulls!
    while (current < options.length) {
      options[current++] = "";
    }
    return options;
  } // getOptions

  /**
   * This will return a string describing the classifier.
   * @return The string.
   */
  public String globalInfo() {
    return "This Bayes Network learning algorithm uses tabu search for finding a well scoring "
      + "Bayes network structure. Tabu search is hill climbing till an optimum is reached. The "
      + "following step is the least worst possible step. The last X steps are kept in a list and "
      + "none of the steps in this so called tabu list is considered in taking the next step. "
      + "The best network found in this traversal is returned.\n\n"
      + "For more information see:\n\n"
      + getTechnicalInformation().toString();
  } // globalInfo

  /**
   * @return a string to describe the Runs option.
   */
  public String runsTipText() {
    return "Sets the number of steps to be performed.";
  } // runsTipText

  /**
   * @return a string to describe the TabuList option.
   */
  public String tabuListTipText() {
    return "Sets the length of the tabu list.";
  } // tabuListTipText

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
} // TabuSearch
12,207
28.487923
403
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/local/GeneticSearch.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * GeneticSearch.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.local; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.ParentSet; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** <!-- globalinfo-start --> * This Bayes Network learning algorithm uses genetic search for finding a well scoring Bayes network structure. Genetic search works by having a population of Bayes network structures and allow them to mutate and apply cross over to get offspring. The best network structure found during the process is returned. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -L &lt;integer&gt; * Population size</pre> * * <pre> -A &lt;integer&gt; * Descendant population size</pre> * * <pre> -U &lt;integer&gt; * Number of runs</pre> * * <pre> -M * Use mutation. * (default true)</pre> * * <pre> -C * Use cross-over. * (default true)</pre> * * <pre> -O * Use tournament selection (true) or maximum subpopulatin (false). 
* (default false)</pre> * * <pre> -R &lt;seed&gt; * Random number seed</pre> * * <pre> -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node.</pre> * * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre> * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision: 8034 $ */ public class GeneticSearch extends LocalScoreSearchAlgorithm { /** for serialization */ static final long serialVersionUID = -7037070678911459757L; /** number of runs **/ int m_nRuns = 10; /** size of population **/ int m_nPopulationSize = 10; /** size of descendant population **/ int m_nDescendantPopulationSize = 100; /** use cross-over? **/ boolean m_bUseCrossOver = true; /** use mutation? **/ boolean m_bUseMutation = true; /** use tournament selection or take best sub-population **/ boolean m_bUseTournamentSelection = false; /** random number seed **/ int m_nSeed = 1; /** random number generator **/ Random m_random = null; /** used in BayesNetRepresentation for efficiently determining * whether a number is square */ static boolean [] g_bIsSquare; class BayesNetRepresentation implements RevisionHandler { /** number of nodes in network **/ int m_nNodes = 0; /** bit representation of parent sets * m_bits[iTail + iHead * m_nNodes] represents arc iTail->iHead */ boolean [] m_bits; /** score of represented network structure **/ double m_fScore = 0.0f; /** * return score of represented network structure * * @return the score */ public double getScore() { return m_fScore; } // getScore /** * c'tor * * @param nNodes the number of nodes */ BayesNetRepresentation (int nNodes) { m_nNodes = nNodes; } // c'tor /** initialize with a random structure by randomly placing * m_nNodes arcs. 
*/ public void randomInit() { do { m_bits = new boolean [m_nNodes * m_nNodes]; for (int i = 0; i < m_nNodes; i++) { int iPos; do { iPos = m_random.nextInt(m_nNodes * m_nNodes); } while (isSquare(iPos)); m_bits[iPos] = true; } } while (hasCycles()); calcScore(); } /** calculate score of current network representation * As a side effect, the parent sets are set */ void calcScore() { // clear current network for (int iNode = 0; iNode < m_nNodes; iNode++) { ParentSet parentSet = m_BayesNet.getParentSet(iNode); while (parentSet.getNrOfParents() > 0) { parentSet.deleteLastParent(m_BayesNet.m_Instances); } } // insert arrows for (int iNode = 0; iNode < m_nNodes; iNode++) { ParentSet parentSet = m_BayesNet.getParentSet(iNode); for (int iNode2 = 0; iNode2 < m_nNodes; iNode2++) { if (m_bits[iNode2 + iNode * m_nNodes]) { parentSet.addParent(iNode2, m_BayesNet.m_Instances); } } } // calc score m_fScore = 0.0; for (int iNode = 0; iNode < m_nNodes; iNode++) { m_fScore += calcNodeScore(iNode); } } // calcScore /** check whether there are cycles in the network * * @return true if a cycle is found, false otherwise */ public boolean hasCycles() { // check for cycles boolean[] bDone = new boolean[m_nNodes]; for (int iNode = 0; iNode < m_nNodes; iNode++) { // find a node for which all parents are 'done' boolean bFound = false; for (int iNode2 = 0; !bFound && iNode2 < m_nNodes; iNode2++) { if (!bDone[iNode2]) { boolean bHasNoParents = true; for (int iParent = 0; iParent < m_nNodes; iParent++) { if (m_bits[iParent + iNode2 * m_nNodes] && !bDone[iParent]) { bHasNoParents = false; } } if (bHasNoParents) { bDone[iNode2] = true; bFound = true; } } } if (!bFound) { return true; } } return false; } // hasCycles /** create clone of current object * @return cloned object */ BayesNetRepresentation copy() { BayesNetRepresentation b = new BayesNetRepresentation(m_nNodes); b.m_bits = new boolean [m_bits.length]; for (int i = 0; i < m_nNodes * m_nNodes; i++) { b.m_bits[i] = m_bits[i]; } b.m_fScore = 
m_fScore; return b; } // copy /** Apply mutation operation to BayesNet * Calculate score and as a side effect sets BayesNet parent sets. */ void mutate() { // flip a bit do { int iBit; do { iBit = m_random.nextInt(m_nNodes * m_nNodes); } while (isSquare(iBit)); m_bits[iBit] = !m_bits[iBit]; } while (hasCycles()); calcScore(); } // mutate /** Apply cross-over operation to BayesNet * Calculate score and as a side effect sets BayesNet parent sets. * @param other BayesNetRepresentation to cross over with */ void crossOver(BayesNetRepresentation other) { boolean [] bits = new boolean [m_bits.length]; for (int i = 0; i < m_bits.length; i++) { bits[i] = m_bits[i]; } int iCrossOverPoint = m_bits.length; do { // restore to original state for (int i = iCrossOverPoint; i < m_bits.length; i++) { m_bits[i] = bits[i]; } // take all bits from cross-over points onwards iCrossOverPoint = m_random.nextInt(m_bits.length); for (int i = iCrossOverPoint; i < m_bits.length; i++) { m_bits[i] = other.m_bits[i]; } } while (hasCycles()); calcScore(); } // crossOver /** check if number is square and initialize g_bIsSquare structure * if necessary * @param nNum number to check (should be below m_nNodes * m_nNodes) * @return true if number is square */ boolean isSquare(int nNum) { if (g_bIsSquare == null || g_bIsSquare.length < nNum) { g_bIsSquare = new boolean [m_nNodes * m_nNodes]; for (int i = 0; i < m_nNodes; i++) { g_bIsSquare[i * m_nNodes + i] = true; } } return g_bIsSquare[nNum]; } // isSquare /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // class BayesNetRepresentation /** * search determines the network structure/graph of the network * with a genetic search algorithm. 
* * @param bayesNet the network to use * @param instances the data to use * @throws Exception if population size doesn fit or neither cross-over or mutation was chosen */ protected void search(BayesNet bayesNet, Instances instances) throws Exception { // sanity check if (getDescendantPopulationSize() < getPopulationSize()) { throw new Exception ("Descendant PopulationSize should be at least Population Size"); } if (!getUseCrossOver() && !getUseMutation()) { throw new Exception ("At least one of mutation or cross-over should be used"); } m_random = new Random(m_nSeed); // keeps track of best structure found so far BayesNet bestBayesNet; // keeps track of score pf best structure found so far double fBestScore = 0.0; for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { fBestScore += calcNodeScore(iAttribute); } // initialize bestBayesNet bestBayesNet = new BayesNet(); bestBayesNet.m_Instances = instances; bestBayesNet.initStructure(); copyParentSets(bestBayesNet, bayesNet); // initialize population BayesNetRepresentation [] population = new BayesNetRepresentation [getPopulationSize()]; for (int i = 0; i < getPopulationSize(); i++) { population[i] = new BayesNetRepresentation (instances.numAttributes()); population[i].randomInit(); if (population[i].getScore() > fBestScore) { copyParentSets(bestBayesNet, bayesNet); fBestScore = population[i].getScore(); } } // go do the search for (int iRun = 0; iRun < m_nRuns; iRun++) { // create descendants BayesNetRepresentation [] descendantPopulation = new BayesNetRepresentation [getDescendantPopulationSize()]; for (int i = 0; i < getDescendantPopulationSize(); i++) { descendantPopulation[i] = population[m_random.nextInt(getPopulationSize())].copy(); if (getUseMutation()) { if (getUseCrossOver() && m_random.nextBoolean()) { descendantPopulation[i].crossOver(population[m_random.nextInt(getPopulationSize())]); } else { descendantPopulation[i].mutate(); } } else { // use crossover 
descendantPopulation[i].crossOver(population[m_random.nextInt(getPopulationSize())]); } if (descendantPopulation[i].getScore() > fBestScore) { copyParentSets(bestBayesNet, bayesNet); fBestScore = descendantPopulation[i].getScore(); } } // select new population boolean [] bSelected = new boolean [getDescendantPopulationSize()]; for (int i = 0; i < getPopulationSize(); i++) { int iSelected = 0; if (m_bUseTournamentSelection) { // use tournament selection iSelected = m_random.nextInt(getDescendantPopulationSize()); while (bSelected[iSelected]) { iSelected = (iSelected + 1) % getDescendantPopulationSize(); } int iSelected2 = m_random.nextInt(getDescendantPopulationSize()); while (bSelected[iSelected2]) { iSelected2 = (iSelected2 + 1) % getDescendantPopulationSize(); } if (descendantPopulation[iSelected2].getScore() > descendantPopulation[iSelected].getScore()) { iSelected = iSelected2; } } else { // find best scoring network in population while (bSelected[iSelected]) { iSelected++; } double fScore = descendantPopulation[iSelected].getScore(); for (int j = 0; j < getDescendantPopulationSize(); j++) { if (!bSelected[j] && descendantPopulation[j].getScore() > fScore) { fScore = descendantPopulation[j].getScore(); iSelected = j; } } } population[i] = descendantPopulation[iSelected]; bSelected[iSelected] = true; } } // restore current network to best network copyParentSets(bayesNet, bestBayesNet); // free up memory bestBayesNet = null; } // search /** copyParentSets copies parent sets of source to dest BayesNet * @param dest destination network * @param source source network */ void copyParentSets(BayesNet dest, BayesNet source) { int nNodes = source.getNrOfNodes(); // clear parent set first for (int iNode = 0; iNode < nNodes; iNode++) { dest.getParentSet(iNode).copy(source.getParentSet(iNode)); } } // CopyParentSets /** * @return number of runs */ public int getRuns() { return m_nRuns; } // getRuns /** * Sets the number of runs * @param nRuns The number of runs to set */ 
public void setRuns(int nRuns) {
  m_nRuns = nRuns;
} // setRuns

/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
public Enumeration listOptions() {
  Vector newVector = new Vector(7);

  newVector.addElement(new Option("\tPopulation size", "L", 1, "-L <integer>"));
  newVector.addElement(new Option("\tDescendant population size", "A", 1, "-A <integer>"));
  newVector.addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>"));
  newVector.addElement(new Option("\tUse mutation.\n\t(default true)", "M", 0, "-M"));
  newVector.addElement(new Option("\tUse cross-over.\n\t(default true)", "C", 0, "-C"));
  // fixed typo in the user-visible help text: "subpopulatin" -> "subpopulation"
  newVector.addElement(new Option("\tUse tournament selection (true) or maximum subpopulation (false).\n\t(default false)", "O", 0, "-O"));
  newVector.addElement(new Option("\tRandom number seed", "R", 1, "-R <seed>"));

  // append the options of the score-based superclass
  Enumeration enu = super.listOptions();
  while (enu.hasMoreElements()) {
    newVector.addElement(enu.nextElement());
  }
  return newVector.elements();
} // listOptions

/**
 * Parses a given list of options. <p/>
 *
<!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -L &lt;integer&gt;
 *  Population size</pre>
 *
 * <pre> -A &lt;integer&gt;
 *  Descendant population size</pre>
 *
 * <pre> -U &lt;integer&gt;
 *  Number of runs</pre>
 *
 * <pre> -M
 *  Use mutation.
 *  (default true)</pre>
 *
 * <pre> -C
 *  Use cross-over.
 *  (default true)</pre>
 *
 * <pre> -O
 *  Use tournament selection (true) or maximum subpopulation (false).
 *  (default false)</pre>
 *
 * <pre> -R &lt;seed&gt;
 *  Random number seed</pre>
 *
 * <pre> -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.</pre>
 *
 * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]
 *  Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre>
 *
<!-- options-end -->
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
public void setOptions(String[] options) throws Exception {
  String sPopulationSize = Utils.getOption('L', options);
  if (sPopulationSize.length() != 0) {
    setPopulationSize(Integer.parseInt(sPopulationSize));
  }
  String sDescendantPopulationSize = Utils.getOption('A', options);
  if (sDescendantPopulationSize.length() != 0) {
    setDescendantPopulationSize(Integer.parseInt(sDescendantPopulationSize));
  }
  String sRuns = Utils.getOption('U', options);
  if (sRuns.length() != 0) {
    setRuns(Integer.parseInt(sRuns));
  }
  String sSeed = Utils.getOption('R', options);
  if (sSeed.length() != 0) {
    setSeed(Integer.parseInt(sSeed));
  }
  setUseMutation(Utils.getFlag('M', options));
  setUseCrossOver(Utils.getFlag('C', options));
  setUseTournamentSelection(Utils.getFlag('O', options));

  super.setOptions(options);
} // setOptions

/**
 * Gets the current settings of the search algorithm.
 *
 * @return an array of strings suitable for passing to setOptions
 */
public String[] getOptions() {
  String[] superOptions = super.getOptions();
  // 11 = four value options (2 slots each) + three possible flags
  String[] options = new String[11 + superOptions.length];
  int current = 0;

  options[current++] = "-L";
  options[current++] = "" + getPopulationSize();

  options[current++] = "-A";
  options[current++] = "" + getDescendantPopulationSize();

  options[current++] = "-U";
  options[current++] = "" + getRuns();

  options[current++] = "-R";
  options[current++] = "" + getSeed();

  if (getUseMutation()) {
    options[current++] = "-M";
  }
  if (getUseCrossOver()) {
    options[current++] = "-C";
  }
  if (getUseTournamentSelection()) {
    options[current++] = "-O";
  }

  // insert options from parent class
  for (int iOption = 0; iOption < superOptions.length; iOption++) {
    options[current++] = superOptions[iOption];
  }

  // Fill up rest with empty strings, not nulls!
  while (current < options.length) {
    options[current++] = "";
  }
  return options;
} // getOptions

/**
 * @return whether cross-over is used
 */
public boolean getUseCrossOver() {
  return m_bUseCrossOver;
}

/**
 * @return whether mutation is used
 */
public boolean getUseMutation() {
  return m_bUseMutation;
}

/**
 * @return descendant population size
 */
public int getDescendantPopulationSize() {
  return m_nDescendantPopulationSize;
}

/**
 * @return population size
 */
public int getPopulationSize() {
  return m_nPopulationSize;
}

/**
 * @param bUseCrossOver sets whether cross-over is used
 */
public void setUseCrossOver(boolean bUseCrossOver) {
  m_bUseCrossOver = bUseCrossOver;
}

/**
 * @param bUseMutation sets whether mutation is used
 */
public void setUseMutation(boolean bUseMutation) {
  m_bUseMutation = bUseMutation;
}

/**
 * @return whether Tournament Selection (true) or Maximum Sub-Population (false) should be used
 */
public boolean getUseTournamentSelection() {
  return m_bUseTournamentSelection;
}

/**
 * @param bUseTournamentSelection sets whether Tournament Selection or Maximum Sub-Population should be used
 */
public void setUseTournamentSelection(boolean bUseTournamentSelection) {
  m_bUseTournamentSelection = bUseTournamentSelection;
}

/**
 * @param iDescendantPopulationSize sets descendant population size
 */
public void setDescendantPopulationSize(int iDescendantPopulationSize) {
  m_nDescendantPopulationSize = iDescendantPopulationSize;
}

/**
 * @param iPopulationSize sets population size
 */
public void setPopulationSize(int iPopulationSize) {
  m_nPopulationSize = iPopulationSize;
}

/**
 * @return random number seed
 */
public int getSeed() {
  return m_nSeed;
} // getSeed

/**
 * Sets the random number seed
 * @param nSeed The number of the seed to set
 */
public void setSeed(int nSeed) {
  m_nSeed = nSeed;
} // setSeed

/**
 * This will return a string describing the classifier.
 * @return The string.
 */
public String globalInfo() {
  return "This Bayes Network learning algorithm uses genetic search for finding a well scoring "
    + "Bayes network structure. Genetic search works by having a population of Bayes network structures "
    + "and allow them to mutate and apply cross over to get offspring. The best network structure "
    + "found during the process is returned.";
} // globalInfo

/**
 * @return a string to describe the Runs option.
 */
public String runsTipText() {
  return "Sets the number of generations of Bayes network structure populations.";
} // runsTipText

/**
 * @return a string to describe the Seed option.
 */
public String seedTipText() {
  return "Initialization value for random number generator."
    + " Setting the seed allows replicability of experiments.";
} // seedTipText

/**
 * @return a string to describe the Population Size option.
 */
public String populationSizeTipText() {
  return "Sets the size of the population of network structures that is selected each generation.";
} // populationSizeTipText

/**
 * @return a string to describe the Descendant Population Size option.
 */
public String descendantPopulationSizeTipText() {
  return "Sets the size of the population of descendants that is created each generation.";
} // descendantPopulationSizeTipText

/**
 * @return a string to describe the Use Mutation option.
 */
public String useMutationTipText() {
  return "Determines whether mutation is allowed. Mutation flips a bit in the bit "
    + "representation of the network structure. At least one of mutation or cross-over "
    + "should be used.";
} // useMutationTipText

/**
 * @return a string to describe the Use Cross-Over option.
 */
public String useCrossOverTipText() {
  // fixed missing space between concatenated fragments ("of oneand adding")
  return "Determines whether cross-over is allowed. Cross over combined the bit "
    + "representations of network structure by taking a random first k bits of one "
    + "and adding the remainder of the other. At least one of mutation or cross-over "
    + "should be used.";
} // useCrossOverTipText

/**
 * @return a string to describe the Use Tournament Selection option.
 */
public String useTournamentSelectionTipText() {
  return "Determines the method of selecting a population. When set to true, tournament "
    + "selection is used (pick two at random and the highest is allowed to continue). "
    + "When set to false, the top scoring network structures are selected.";
} // useTournamentSelectionTipText

/**
 * Returns the revision string.
 *
 * @return the revision
 */
public String getRevision() {
  return RevisionUtils.extract("$Revision: 8034 $");
}
} // GeneticSearch
21,727
28.322537
313
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/local/HillClimber.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * HillClimber.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.local; import java.io.Serializable; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.ParentSet; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** <!-- globalinfo-start --> * This Bayes Network learning algorithm uses a hill climbing algorithm adding, deleting and reversing arcs. The search is not restricted by an order on the variables (unlike K2). The difference with B and B2 is that this hill climber also considers arrows part of the naive Bayes structure for deletion. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;nr of parents&gt; * Maximum number of parents</pre> * * <pre> -R * Use arc reversal operation. * (default false)</pre> * * <pre> -N * Initial structure is empty (instead of Naive Bayes)</pre> * * <pre> -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. 
This ensures that all
 * nodes in the network are part of the Markov blanket of the
 * classifier node.</pre>
 *
 * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]
 * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre>
 *
 <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision: 8034 $
 */
public class HillClimber extends LocalScoreSearchAlgorithm {

  /** for serialization */
  static final long serialVersionUID = 4322783593818122403L;

  /** the Operation class contains info on operations performed
   * on the current Bayesian network.
   */
  class Operation implements Serializable, RevisionHandler {

    /** for serialization */
    static final long serialVersionUID = -4880888790432547895L;

    // constants indicating the type of an operation
    final static int OPERATION_ADD = 0;
    final static int OPERATION_DEL = 1;
    final static int OPERATION_REVERSE = 2;

    /**
     * c'tor
     */
    public Operation() {
    }

    /** c'tor + initializers
     *
     * @param nTail number of the tail node of the arc operated on
     * @param nHead number of the head node of the arc operated on
     * @param nOperation one of OPERATION_ADD, OPERATION_DEL, OPERATION_REVERSE
     */
    public Operation(int nTail, int nHead, int nOperation) {
      m_nHead = nHead;
      m_nTail = nTail;
      m_nOperation = nOperation;
    }

    /** compare this operation with another
     * NOTE(review): this overloads equals(Operation) rather than overriding
     * Object.equals(Object); comparisons through an Object reference fall
     * back to identity — confirm this is intended.
     * @param other operation to compare with
     * @return true if operation is the same
     */
    public boolean equals(Operation other) {
      if (other == null) {
        return false;
      }
      return ((m_nOperation == other.m_nOperation)
        && (m_nHead == other.m_nHead)
        && (m_nTail == other.m_nTail));
    } // equals

    /** number of the tail node **/
    public int m_nTail;

    /** number of the head node **/
    public int m_nHead;

    /** type of operation (ADD, DEL, REVERSE) **/
    public int m_nOperation;

    /** change of score due to this operation **/
    public double m_fDeltaScore = -1E100;

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 8034 $");
    }
  } // class Operation

  /** cache for remembering the change in score for steps in the search space */
  class Cache implements RevisionHandler {

    /** change in score due to adding an arc, indexed [tail][head] **/
    double[][] m_fDeltaScoreAdd;
    /** change in score due to deleting an arc, indexed [tail][head] **/
    double[][] m_fDeltaScoreDel;

    /** c'tor
     * @param nNrOfNodes number of nodes in network, used to determine memory size to reserve
     */
    Cache(int nNrOfNodes) {
      m_fDeltaScoreAdd = new double[nNrOfNodes][nNrOfNodes];
      m_fDeltaScoreDel = new double[nNrOfNodes][nNrOfNodes];
    }

    /** set cache entry
     * @param oOperation operation to perform
     * @param fValue value to put in cache
     */
    public void put(Operation oOperation, double fValue) {
      if (oOperation.m_nOperation == Operation.OPERATION_ADD) {
        m_fDeltaScoreAdd[oOperation.m_nTail][oOperation.m_nHead] = fValue;
      } else {
        m_fDeltaScoreDel[oOperation.m_nTail][oOperation.m_nHead] = fValue;
      }
    } // put

    /** get cache entry
     * @param oOperation operation to perform
     * @return cache value
     */
    public double get(Operation oOperation) {
      switch (oOperation.m_nOperation) {
        case Operation.OPERATION_ADD :
          return m_fDeltaScoreAdd[oOperation.m_nTail][oOperation.m_nHead];
        case Operation.OPERATION_DEL :
          return m_fDeltaScoreDel[oOperation.m_nTail][oOperation.m_nHead];
        case Operation.OPERATION_REVERSE :
          // reversal = delete tail->head plus add head->tail
          return m_fDeltaScoreDel[oOperation.m_nTail][oOperation.m_nHead]
            + m_fDeltaScoreAdd[oOperation.m_nHead][oOperation.m_nTail];
      }
      // should never get here
      return 0;
    } // get

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 8034 $");
    }
  } // class Cache

  /** cache for storing score differences **/
  Cache m_Cache = null;

  /** use the arc reversal operator **/
  boolean m_bUseArcReversal = false;

  /**
   * search determines the network structure/graph of the network
   * using a hill climbing algorithm.
   * @param bayesNet the network to use
   * @param instances the data to use
   * @throws Exception if something goes wrong
   */
  protected void search(BayesNet bayesNet, Instances instances) throws Exception {
    initCache(bayesNet, instances);

    // go do the search: greedily apply the best scoring operation until
    // no operation improves the score any further
    Operation oOperation = getOptimalOperation(bayesNet, instances);
    while ((oOperation != null) && (oOperation.m_fDeltaScore > 0)) {
      performOperation(bayesNet, instances, oOperation);
      oOperation = getOptimalOperation(bayesNet, instances);
    }

    // free up memory
    m_Cache = null;
  } // search

  /**
   * initCache initializes the cache
   *
   * @param bayesNet Bayes network to be learned
   * @param instances data set to learn from
   * @throws Exception if something goes wrong
   */
  void initCache(BayesNet bayesNet, Instances instances) throws Exception {
    // determine base scores
    double[] fBaseScores = new double[instances.numAttributes()];
    int nNrOfAtts = instances.numAttributes();

    m_Cache = new Cache(nNrOfAtts);

    // fill add/delete entries based on the current parent sets
    for (int iAttribute = 0; iAttribute < nNrOfAtts; iAttribute++) {
      updateCache(iAttribute, nNrOfAtts, bayesNet.getParentSet(iAttribute));
    }

    for (int iAttribute = 0; iAttribute < nNrOfAtts; iAttribute++) {
      fBaseScores[iAttribute] = calcNodeScore(iAttribute);
    }

    // seed the cache with the score deltas of all possible arc additions
    for (int iAttributeHead = 0; iAttributeHead < nNrOfAtts; iAttributeHead++) {
      for (int iAttributeTail = 0; iAttributeTail < nNrOfAtts; iAttributeTail++) {
        if (iAttributeHead != iAttributeTail) {
          Operation oOperation = new Operation(iAttributeTail, iAttributeHead, Operation.OPERATION_ADD);
          m_Cache.put(oOperation, calcScoreWithExtraParent(iAttributeHead, iAttributeTail) - fBaseScores[iAttributeHead]);
        }
      }
    }
  } // initCache

  /** check whether the operation is not in the forbidden.
   * For base hill climber, there are no restrictions on operations,
   * so we always return true.
   * @param oOperation operation to be checked
   * @return true if operation is not in the tabu list
   */
  boolean isNotTabu(Operation oOperation) {
    return true;
  } // isNotTabu

  /**
   * getOptimalOperation finds the optimal operation that can be performed
   * on the Bayes network that is not in the tabu list.
   *
   * @param bayesNet Bayes network to apply operation on
   * @param instances data set to learn from
   * @return optimal operation found
   * @throws Exception if something goes wrong
   */
  Operation getOptimalOperation(BayesNet bayesNet, Instances instances) throws Exception {
    Operation oBestOperation = new Operation();

    // Add???
    oBestOperation = findBestArcToAdd(bayesNet, instances, oBestOperation);
    // Delete???
    oBestOperation = findBestArcToDelete(bayesNet, instances, oBestOperation);
    // Reverse???
    if (getUseArcReversal()) {
      oBestOperation = findBestArcToReverse(bayesNet, instances, oBestOperation);
    }

    // did we find something? (m_fDeltaScore still holds its -1E100
    // sentinel when no candidate operation was found)
    if (oBestOperation.m_fDeltaScore == -1E100) {
      return null;
    }

    return oBestOperation;
  } // getOptimalOperation

  /**
   * performOperation applies an operation
   * on the Bayes network and update the cache.
   *
   * @param bayesNet Bayes network to apply operation on
   * @param instances data set to learn from
   * @param oOperation operation to perform
   * @throws Exception if something goes wrong
   */
  void performOperation(BayesNet bayesNet, Instances instances, Operation oOperation) throws Exception {
    // perform operation
    switch (oOperation.m_nOperation) {
      case Operation.OPERATION_ADD :
        applyArcAddition(bayesNet, oOperation.m_nHead, oOperation.m_nTail, instances);
        if (bayesNet.getDebug()) {
          // NOTE(review): the arc added runs from m_nTail to m_nHead, but the
          // debug output prints head first — the printed direction looks
          // reversed; confirm intended
          System.out.print("Add " + oOperation.m_nHead + " -> " + oOperation.m_nTail);
        }
        break;
      case Operation.OPERATION_DEL :
        applyArcDeletion(bayesNet, oOperation.m_nHead, oOperation.m_nTail, instances);
        if (bayesNet.getDebug()) {
          System.out.print("Del " + oOperation.m_nHead + " -> " + oOperation.m_nTail);
        }
        break;
      case Operation.OPERATION_REVERSE :
        // reversal = delete tail->head, then add the arc in the other direction
        applyArcDeletion(bayesNet, oOperation.m_nHead, oOperation.m_nTail, instances);
        applyArcAddition(bayesNet, oOperation.m_nTail, oOperation.m_nHead, instances);
        if (bayesNet.getDebug()) {
          System.out.print("Rev " + oOperation.m_nHead + " -> " + oOperation.m_nTail);
        }
        break;
    }
  } // performOperation

  /**
   * Adds iTail as parent of iHead and refreshes the cached score deltas
   * for arcs into iHead.
   *
   * @param bayesNet network to modify
   * @param iHead node the arc points to
   * @param iTail node the arc leaves from
   * @param instances data set to learn from
   */
  void applyArcAddition(BayesNet bayesNet, int iHead, int iTail, Instances instances) {
    ParentSet bestParentSet = bayesNet.getParentSet(iHead);
    bestParentSet.addParent(iTail, instances);
    updateCache(iHead, instances.numAttributes(), bestParentSet);
  } // applyArcAddition

  /**
   * Removes iTail from the parents of iHead and refreshes the cached score
   * deltas for arcs into iHead.
   *
   * @param bayesNet network to modify
   * @param iHead node the arc points to
   * @param iTail node the arc leaves from
   * @param instances data set to learn from
   */
  void applyArcDeletion(BayesNet bayesNet, int iHead, int iTail, Instances instances) {
    ParentSet bestParentSet = bayesNet.getParentSet(iHead);
    bestParentSet.deleteParent(iTail, instances);
    updateCache(iHead, instances.numAttributes(), bestParentSet);
  } // applyArcDeletion

  /**
   * find best (or least bad) arc addition operation
   *
   * @param bayesNet Bayes network to add arc to
   * @param instances data set
   * @param oBestOperation best operation found so far
   * @return Operation containing best
arc to add, or null if no arc addition is allowed
   * (this can happen if any arc addition introduces a cycle, or all parent sets are filled
   * up to the maximum nr of parents).
   */
  Operation findBestArcToAdd(BayesNet bayesNet, Instances instances, Operation oBestOperation) {
    int nNrOfAtts = instances.numAttributes();

    // find best arc to add
    for (int iAttributeHead = 0; iAttributeHead < nNrOfAtts; iAttributeHead++) {
      // only consider heads that still have room for another parent
      if (bayesNet.getParentSet(iAttributeHead).getNrOfParents() < m_nMaxNrOfParents) {
        for (int iAttributeTail = 0; iAttributeTail < nNrOfAtts; iAttributeTail++) {
          if (addArcMakesSense(bayesNet, instances, iAttributeHead, iAttributeTail)) {
            Operation oOperation = new Operation(iAttributeTail, iAttributeHead, Operation.OPERATION_ADD);
            if (m_Cache.get(oOperation) > oBestOperation.m_fDeltaScore) {
              if (isNotTabu(oOperation)) {
                oBestOperation = oOperation;
                oBestOperation.m_fDeltaScore = m_Cache.get(oOperation);
              }
            }
          }
        }
      }
    }
    return oBestOperation;
  } // findBestArcToAdd

  /**
   * find best (or least bad) arc deletion operation
   *
   * @param bayesNet Bayes network to delete arc from
   * @param instances data set
   * @param oBestOperation best operation found so far
   * @return Operation containing best arc to delete, or null if no deletion can be made
   * (happens when there is no arc in the network yet).
   */
  Operation findBestArcToDelete(BayesNet bayesNet, Instances instances, Operation oBestOperation) {
    int nNrOfAtts = instances.numAttributes();

    // find best arc to delete: consider every existing parent arc
    for (int iNode = 0; iNode < nNrOfAtts; iNode++) {
      ParentSet parentSet = bayesNet.getParentSet(iNode);
      for (int iParent = 0; iParent < parentSet.getNrOfParents(); iParent++) {
        Operation oOperation = new Operation(parentSet.getParent(iParent), iNode, Operation.OPERATION_DEL);
        if (m_Cache.get(oOperation) > oBestOperation.m_fDeltaScore) {
          if (isNotTabu(oOperation)) {
            oBestOperation = oOperation;
            oBestOperation.m_fDeltaScore = m_Cache.get(oOperation);
          }
        }
      }
    }
    return oBestOperation;
  } // findBestArcToDelete

  /**
   * find best (or least bad) arc reversal operation
   *
   * @param bayesNet Bayes network to reverse arc in
   * @param instances data set
   * @param oBestOperation best operation found so far
   * @return Operation containing best arc to reverse, or null if no reversal is allowed
   * (happens if there is no arc in the network yet, or when any such reversal introduces
   * a cycle).
   */
  Operation findBestArcToReverse(BayesNet bayesNet, Instances instances, Operation oBestOperation) {
    int nNrOfAtts = instances.numAttributes();

    // find best arc to reverse
    for (int iNode = 0; iNode < nNrOfAtts; iNode++) {
      ParentSet parentSet = bayesNet.getParentSet(iNode);
      for (int iParent = 0; iParent < parentSet.getNrOfParents(); iParent++) {
        int iTail = parentSet.getParent(iParent);
        // is reversal allowed? the former tail must also still have room
        // for an extra parent after the reversal
        if (reverseArcMakesSense(bayesNet, instances, iNode, iTail)
          && bayesNet.getParentSet(iTail).getNrOfParents() < m_nMaxNrOfParents) {
          // go check if reversal results in the best step forward
          Operation oOperation = new Operation(parentSet.getParent(iParent), iNode, Operation.OPERATION_REVERSE);
          if (m_Cache.get(oOperation) > oBestOperation.m_fDeltaScore) {
            if (isNotTabu(oOperation)) {
              oBestOperation = oOperation;
              oBestOperation.m_fDeltaScore = m_Cache.get(oOperation);
            }
          }
        }
      }
    }
    return oBestOperation;
  } // findBestArcToReverse

  /**
   * update the cache due to change of parent set of a node
   *
   * @param iAttributeHead node that has its parent set changed
   * @param nNrOfAtts number of nodes/attributes in data set
   * @param parentSet new parents set of node iAttributeHead
   */
  void updateCache(int iAttributeHead, int nNrOfAtts, ParentSet parentSet) {
    // update cache entries for arrows heading towards iAttributeHead
    double fBaseScore = calcNodeScore(iAttributeHead);
    int nNrOfParents = parentSet.getNrOfParents();
    for (int iAttributeTail = 0; iAttributeTail < nNrOfAtts; iAttributeTail++) {
      if (iAttributeTail != iAttributeHead) {
        if (!parentSet.contains(iAttributeTail)) {
          // add entries to cache for adding arcs, but only while a parent
          // can still be added
          if (nNrOfParents < m_nMaxNrOfParents) {
            Operation oOperation = new Operation(iAttributeTail, iAttributeHead, Operation.OPERATION_ADD);
            m_Cache.put(oOperation, calcScoreWithExtraParent(iAttributeHead, iAttributeTail) - fBaseScore);
          }
        } else {
          // add entries to cache for deleting arcs
          Operation oOperation = new Operation(iAttributeTail, iAttributeHead, Operation.OPERATION_DEL);
          m_Cache.put(oOperation, calcScoreWithMissingParent(iAttributeHead, iAttributeTail) - fBaseScore);
        }
      }
    }
  } // updateCache

  /**
   * Sets the max number of parents
   *
   * @param nMaxNrOfParents the max number of parents
   */
  public void setMaxNrOfParents(int nMaxNrOfParents) {
    m_nMaxNrOfParents = nMaxNrOfParents;
  }

  /**
   * Gets the max number of parents.
* * @return the max number of parents */ public int getMaxNrOfParents() { return m_nMaxNrOfParents; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(2); newVector.addElement(new Option("\tMaximum number of parents", "P", 1, "-P <nr of parents>")); newVector.addElement(new Option("\tUse arc reversal operation.\n\t(default false)", "R", 0, "-R")); newVector.addElement(new Option("\tInitial structure is empty (instead of Naive Bayes)", "N", 0, "-N")); newVector.addElement(new Option("\tInitial structure specified in XML BIF file", "X", 1, "-X")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } // listOptions /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -P &lt;nr of parents&gt; * Maximum number of parents</pre> * * <pre> -R * Use arc reversal operation. * (default false)</pre> * * <pre> -N * Initial structure is empty (instead of Naive Bayes)</pre> * * <pre> -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. 
This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node.</pre> * * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setUseArcReversal(Utils.getFlag('R', options)); setInitAsNaiveBayes (!(Utils.getFlag('N', options))); m_sInitalBIFFile = Utils.getOption('X', options); String sMaxNrOfParents = Utils.getOption('P', options); if (sMaxNrOfParents.length() != 0) { setMaxNrOfParents(Integer.parseInt(sMaxNrOfParents)); } else { setMaxNrOfParents(100000); } super.setOptions(options); } // setOptions /** * Gets the current settings of the search algorithm. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { String[] superOptions = super.getOptions(); String[] options = new String[9 + superOptions.length]; int current = 0; if (getUseArcReversal()) { options[current++] = "-R"; } if (!getInitAsNaiveBayes()) { options[current++] = "-N"; } if (m_sInitalBIFFile!=null && !m_sInitalBIFFile.equals("")) { options[current++] = "-X"; options[current++] = m_sInitalBIFFile; } options[current++] = "-P"; options[current++] = "" + m_nMaxNrOfParents; // insert options from parent class for (int iOption = 0; iOption < superOptions.length; iOption++) { options[current++] = superOptions[iOption]; } // Fill up rest with empty strings, not nulls! 
while (current < options.length) { options[current++] = ""; } return options; } // getOptions /** * Sets whether to init as naive bayes * * @param bInitAsNaiveBayes whether to init as naive bayes */ public void setInitAsNaiveBayes(boolean bInitAsNaiveBayes) { m_bInitAsNaiveBayes = bInitAsNaiveBayes; } /** * Gets whether to init as naive bayes * * @return whether to init as naive bayes */ public boolean getInitAsNaiveBayes() { return m_bInitAsNaiveBayes; } /** get use the arc reversal operation * @return whether the arc reversal operation should be used */ public boolean getUseArcReversal() { return m_bUseArcReversal; } // getUseArcReversal /** set use the arc reversal operation * @param bUseArcReversal whether the arc reversal operation should be used */ public void setUseArcReversal(boolean bUseArcReversal) { m_bUseArcReversal = bUseArcReversal; } // setUseArcReversal /** * This will return a string describing the search algorithm. * @return The string. */ public String globalInfo() { return "This Bayes Network learning algorithm uses a hill climbing algorithm " + "adding, deleting and reversing arcs. The search is not restricted by an order " + "on the variables (unlike K2). The difference with B and B2 is that this hill " + "climber also considers arrows part of the naive Bayes structure for deletion."; } // globalInfo /** * @return a string to describe the Use Arc Reversal option. */ public String useArcReversalTipText() { return "When set to true, the arc reversal operation is used in the search."; } // useArcReversalTipText /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // HillClimber
21,611
31.499248
304
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/local/K2.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * K2.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.local; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** <!-- globalinfo-start --> * This Bayes Network learning algorithm uses a hill climbing algorithm restricted by an order on the variables.<br/> * <br/> * For more information see:<br/> * <br/> * G.F. Cooper, E. Herskovits (1990). A Bayesian method for constructing Bayesian belief networks from databases.<br/> * <br/> * G. Cooper, E. Herskovits (1992). A Bayesian method for the induction of probabilistic networks from data. Machine Learning. 9(4):309-347.<br/> * <br/> * Works with nominal variables and no missing values only. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;proceedings{Cooper1990, * author = {G.F. Cooper and E. 
Herskovits}, * booktitle = {Proceedings of the Conference on Uncertainty in AI}, * pages = {86-94}, * title = {A Bayesian method for constructing Bayesian belief networks from databases}, * year = {1990} * } * * &#64;article{Cooper1992, * author = {G. Cooper and E. Herskovits}, * journal = {Machine Learning}, * number = {4}, * pages = {309-347}, * title = {A Bayesian method for the induction of probabilistic networks from data}, * volume = {9}, * year = {1992} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N * Initial structure is empty (instead of Naive Bayes)</pre> * * <pre> -P &lt;nr of parents&gt; * Maximum number of parents</pre> * * <pre> -R * Random order. * (default false)</pre> * * <pre> -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node.</pre> * * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre> * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision: 8034 $ */ public class K2 extends LocalScoreSearchAlgorithm implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 6176545934752116631L; /** Holds flag to indicate ordering should be random **/ boolean m_bRandomOrder = false; /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.PROCEEDINGS); result.setValue(Field.AUTHOR, "G.F. Cooper and E. 
Herskovits"); result.setValue(Field.YEAR, "1990"); result.setValue(Field.TITLE, "A Bayesian method for constructing Bayesian belief networks from databases"); result.setValue(Field.BOOKTITLE, "Proceedings of the Conference on Uncertainty in AI"); result.setValue(Field.PAGES, "86-94"); additional = result.add(Type.ARTICLE); additional.setValue(Field.AUTHOR, "G. Cooper and E. Herskovits"); additional.setValue(Field.YEAR, "1992"); additional.setValue(Field.TITLE, "A Bayesian method for the induction of probabilistic networks from data"); additional.setValue(Field.JOURNAL, "Machine Learning"); additional.setValue(Field.VOLUME, "9"); additional.setValue(Field.NUMBER, "4"); additional.setValue(Field.PAGES, "309-347"); return result; } /** * search determines the network structure/graph of the network * with the K2 algorithm, restricted by its initial structure (which can * be an empty graph, or a Naive Bayes graph. * * @param bayesNet the network * @param instances the data to work with * @throws Exception if something goes wrong */ public void search (BayesNet bayesNet, Instances instances) throws Exception { int nOrder[] = new int [instances.numAttributes()]; nOrder[0] = instances.classIndex(); int nAttribute = 0; for (int iOrder = 1; iOrder < instances.numAttributes(); iOrder++) { if (nAttribute == instances.classIndex()) { nAttribute++; } nOrder[iOrder] = nAttribute++; } if (m_bRandomOrder) { // generate random ordering (if required) Random random = new Random(); int iClass; if (getInitAsNaiveBayes()) { iClass = 0; } else { iClass = -1; } for (int iOrder = 0; iOrder < instances.numAttributes(); iOrder++) { int iOrder2 = Math.abs(random.nextInt()) % instances.numAttributes(); if (iOrder != iClass && iOrder2 != iClass) { int nTmp = nOrder[iOrder]; nOrder[iOrder] = nOrder[iOrder2]; nOrder[iOrder2] = nTmp; } } } // determine base scores double [] fBaseScores = new double [instances.numAttributes()]; for (int iOrder = 0; iOrder < instances.numAttributes(); iOrder++) { int 
iAttribute = nOrder[iOrder]; fBaseScores[iAttribute] = calcNodeScore(iAttribute); } // K2 algorithm: greedy search restricted by ordering for (int iOrder = 1; iOrder < instances.numAttributes(); iOrder++) { int iAttribute = nOrder[iOrder]; double fBestScore = fBaseScores[iAttribute]; boolean bProgress = (bayesNet.getParentSet(iAttribute).getNrOfParents() < getMaxNrOfParents()); while (bProgress) { int nBestAttribute = -1; for (int iOrder2 = 0; iOrder2 < iOrder; iOrder2++) { int iAttribute2 = nOrder[iOrder2]; double fScore = calcScoreWithExtraParent(iAttribute, iAttribute2); if (fScore > fBestScore) { fBestScore = fScore; nBestAttribute = iAttribute2; } } if (nBestAttribute != -1) { bayesNet.getParentSet(iAttribute).addParent(nBestAttribute, instances); fBaseScores[iAttribute] = fBestScore; bProgress = (bayesNet.getParentSet(iAttribute).getNrOfParents() < getMaxNrOfParents()); } else { bProgress = false; } } } } // buildStructure /** * Sets the max number of parents * * @param nMaxNrOfParents the max number of parents */ public void setMaxNrOfParents(int nMaxNrOfParents) { m_nMaxNrOfParents = nMaxNrOfParents; } /** * Gets the max number of parents. 
* * @return the max number of parents */ public int getMaxNrOfParents() { return m_nMaxNrOfParents; } /** * Sets whether to init as naive bayes * * @param bInitAsNaiveBayes whether to init as naive bayes */ public void setInitAsNaiveBayes(boolean bInitAsNaiveBayes) { m_bInitAsNaiveBayes = bInitAsNaiveBayes; } /** * Gets whether to init as naive bayes * * @return whether to init as naive bayes */ public boolean getInitAsNaiveBayes() { return m_bInitAsNaiveBayes; } /** * Set random order flag * * @param bRandomOrder the random order flag */ public void setRandomOrder(boolean bRandomOrder) { m_bRandomOrder = bRandomOrder; } // SetRandomOrder /** * Get random order flag * * @return the random order flag */ public boolean getRandomOrder() { return m_bRandomOrder; } // getRandomOrder /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(0); newVector.addElement(new Option("\tInitial structure is empty (instead of Naive Bayes)", "N", 0, "-N")); newVector.addElement(new Option("\tMaximum number of parents", "P", 1, "-P <nr of parents>")); newVector.addElement(new Option( "\tRandom order.\n" + "\t(default false)", "R", 0, "-R")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N * Initial structure is empty (instead of Naive Bayes)</pre> * * <pre> -P &lt;nr of parents&gt; * Maximum number of parents</pre> * * <pre> -R * Random order. * (default false)</pre> * * <pre> -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. 
This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node.</pre> * * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setRandomOrder(Utils.getFlag('R', options)); m_bInitAsNaiveBayes = !(Utils.getFlag('N', options)); String sMaxNrOfParents = Utils.getOption('P', options); if (sMaxNrOfParents.length() != 0) { setMaxNrOfParents(Integer.parseInt(sMaxNrOfParents)); } else { setMaxNrOfParents(100000); } super.setOptions(options); } /** * Gets the current settings of the search algorithm. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String[] superOptions = super.getOptions(); String [] options = new String [4 + superOptions.length]; int current = 0; options[current++] = "-P"; options[current++] = "" + m_nMaxNrOfParents; if (!m_bInitAsNaiveBayes) { options[current++] = "-N"; } if (getRandomOrder()) { options[current++] = "-R"; } // insert options from parent class for (int iOption = 0; iOption < superOptions.length; iOption++) { options[current++] = superOptions[iOption]; } while (current < options.length) { options[current++] = ""; } // Fill up rest with empty strings, not nulls! return options; } /** * This will return a string describing the search algorithm. * @return The string. */ public String globalInfo() { return "This Bayes Network learning algorithm uses a hill climbing algorithm " + "restricted by an order on the variables.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString() + "\n\n" + "Works with nominal variables and no missing values only."; } /** * @return a string to describe the RandomOrder option. 
*/ public String randomOrderTipText() { return "When set to true, the order of the nodes in the network is random." + " Default random order is false and the order" + " of the nodes in the dataset is used." + " In any case, when the network was initialized as Naive Bayes Network, the" + " class variable is first in the ordering though."; } // randomOrderTipText /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
12,221
29.103448
145
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/local/LAGDHillClimber.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LAGDHillClimber.java * Copyright (C) 2005-2012 Manuel Neubach * */ package weka.classifiers.bayes.net.search.local; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.Utils; /** <!-- globalinfo-start --> * This Bayes Network learning algorithm uses a Look Ahead Hill Climbing algorithm called LAGD Hill Climbing. Unlike Greedy Hill Climbing it doesn't calculate a best greedy operation (adding, deleting or reversing an arc) but a sequence of nrOfLookAheadSteps operations, which leads to a network structure whose score is most likely higher in comparison to the network obtained by performing a sequence of nrOfLookAheadSteps greedy operations. The search is not restricted by an order on the variables (unlike K2). The difference with B and B2 is that this hill climber also considers arrows part of the naive Bayes structure for deletion. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -L &lt;nr of look ahead steps&gt; * Look Ahead Depth</pre> * * <pre> -G &lt;nr of good operations&gt; * Nr of Good Operations</pre> * * <pre> -P &lt;nr of parents&gt; * Maximum number of parents</pre> * * <pre> -R * Use arc reversal operation. 
* (default false)</pre> * * <pre> -N * Initial structure is empty (instead of Naive Bayes)</pre> * * <pre> -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node.</pre> * * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre> * <!-- options-end --> * * @author Manuel Neubach * @version $Revision: 8034 $ */ public class LAGDHillClimber extends HillClimber { /** for serialization */ static final long serialVersionUID = 7217437499439184344L; /** Number of Look Ahead Steps **/ int m_nNrOfLookAheadSteps = 2; /** Number of Good Operations per Step **/ int m_nNrOfGoodOperations = 5; /** * search determines the network structure/graph of the network * * @param bayesNet the network * @param instances the data to use * @throws Exception if something goes wrong */ protected void search(BayesNet bayesNet, Instances instances) throws Exception { int k=m_nNrOfLookAheadSteps; // Number of Look Ahead Steps int l=m_nNrOfGoodOperations; // Number of Good Operations per step lookAheadInGoodDirectionsSearch(bayesNet, instances, k, l); } // search /** * lookAheadInGoodDirectionsSearch determines the network structure/graph of the network * with best score according to LAGD Hill Climbing * * @param bayesNet the network * @param instances the data to use * @param nrOfLookAheadSteps * @param nrOfGoodOperations * @throws Exception if something goes wrong */ protected void lookAheadInGoodDirectionsSearch(BayesNet bayesNet, Instances instances, int nrOfLookAheadSteps, int nrOfGoodOperations) throws Exception { System.out.println("Initializing Cache"); initCache(bayesNet, instances); while (nrOfLookAheadSteps>1) { System.out.println("Look Ahead Depth: "+nrOfLookAheadSteps); boolean legalSequence = true; double sequenceDeltaScore = 0; Operation [] bestOperation=new Operation 
[nrOfLookAheadSteps]; bestOperation = getOptimalOperations(bayesNet, instances, nrOfLookAheadSteps, nrOfGoodOperations); for (int i = 0; i < nrOfLookAheadSteps; i++) { if (bestOperation [i] == null) { legalSequence=false; } else { sequenceDeltaScore += bestOperation [i].m_fDeltaScore; } } while (legalSequence && sequenceDeltaScore > 0) { System.out.println("Next Iteration.........................."); for (int i = 0; i < nrOfLookAheadSteps; i++) { performOperation(bayesNet, instances,bestOperation [i]); } bestOperation = getOptimalOperations(bayesNet, instances, nrOfLookAheadSteps, nrOfGoodOperations); sequenceDeltaScore = 0; for (int i = 0; i < nrOfLookAheadSteps; i++) { if (bestOperation [i] != null) { System.out.println(bestOperation [i].m_nOperation + " " + bestOperation [i].m_nHead + " " + bestOperation [i].m_nTail); sequenceDeltaScore += bestOperation [i].m_fDeltaScore; } else { legalSequence = false; } System.out.println("DeltaScore: "+sequenceDeltaScore); } } --nrOfLookAheadSteps; } /** last steps with greedy HC **/ Operation oOperation = getOptimalOperation(bayesNet, instances); while ((oOperation != null) && (oOperation.m_fDeltaScore > 0)) { performOperation(bayesNet, instances, oOperation); System.out.println("Performing last greedy steps"); oOperation = getOptimalOperation(bayesNet, instances); } // free up memory m_Cache = null; } // lookAheadInGoodDirectionsSearch /** * getAntiOperation determines the Operation, which is needed to cancel oOperation * * @param oOperation Operation to cancel * @return antiOperation to oOperation * @throws Exception if something goes wrong */ protected Operation getAntiOperation(Operation oOperation) throws Exception { if (oOperation.m_nOperation == Operation.OPERATION_ADD) return (new Operation (oOperation.m_nTail, oOperation.m_nHead, Operation.OPERATION_DEL)); else { if (oOperation.m_nOperation == Operation.OPERATION_DEL) return (new Operation (oOperation.m_nTail, oOperation.m_nHead, Operation.OPERATION_ADD)); else { 
return (new Operation (oOperation.m_nHead, oOperation.m_nTail, Operation.OPERATION_REVERSE)); } } } // getAntiOperation /** * getGoodOperations determines the nrOfGoodOperations best Operations, which are considered for * the calculation of an optimal operationsequence * @param bayesNet Bayes network to apply operation on * @param instances data set to learn from * @param nrOfGoodOperations number of good operations to consider * @return good operations to consider * @throws Exception if something goes wrong **/ protected Operation [] getGoodOperations(BayesNet bayesNet, Instances instances, int nrOfGoodOperations) throws Exception { Operation [] goodOperations=new Operation [nrOfGoodOperations]; for (int i = 0; i < nrOfGoodOperations; i++) { goodOperations [i] = getOptimalOperation(bayesNet, instances); if (goodOperations[i] != null) { m_Cache.put(goodOperations [i], -1E100); } else i=nrOfGoodOperations; } for (int i = 0; i < nrOfGoodOperations; i++) { if (goodOperations[i] != null) { if (goodOperations [i].m_nOperation!=Operation.OPERATION_REVERSE) { m_Cache.put(goodOperations [i], goodOperations [i].m_fDeltaScore); } else { m_Cache.put(goodOperations [i], goodOperations [i].m_fDeltaScore - m_Cache.m_fDeltaScoreAdd[goodOperations[i].m_nHead] [goodOperations [i].m_nTail]); } } else i=nrOfGoodOperations; } return goodOperations; } // getGoodOperations /** * getOptimalOperations determines an optimal operationsequence in respect of the parameters * nrOfLookAheadSteps and nrOfGoodOperations * @param bayesNet Bayes network to apply operation on * @param instances data set to learn from * @param nrOfLookAheadSteps number of lood ahead steps to use * @param nrOfGoodOperations number of good operations to consider * @return optimal sequence of operations in respect to nrOfLookAheadSteps and nrOfGoodOperations * @throws Exception if something goes wrong **/ protected Operation [] getOptimalOperations(BayesNet bayesNet, Instances instances, int nrOfLookAheadSteps, int 
nrOfGoodOperations) throws Exception { if (nrOfLookAheadSteps == 1) { // Abbruch der Rekursion Operation [] bestOperation = new Operation [1]; bestOperation [0] = getOptimalOperation(bayesNet, instances); return(bestOperation); // Abbruch der Rekursion } else { double bestDeltaScore = 0; double currentDeltaScore = 0; Operation [] bestOperation = new Operation [nrOfLookAheadSteps]; Operation [] goodOperations = new Operation [nrOfGoodOperations]; Operation [] tempOperation = new Operation [nrOfLookAheadSteps-1]; goodOperations = getGoodOperations(bayesNet, instances, nrOfGoodOperations); for (int i = 0; i < nrOfGoodOperations; i++) { if (goodOperations[i] != null) { performOperation(bayesNet, instances, goodOperations [i]); tempOperation = getOptimalOperations(bayesNet, instances, nrOfLookAheadSteps-1, nrOfGoodOperations); // rekursiver Abstieg currentDeltaScore = goodOperations [i].m_fDeltaScore; for (int j = 0; j < nrOfLookAheadSteps-1; j++) { if (tempOperation [j] != null) { currentDeltaScore += tempOperation [j].m_fDeltaScore; } } performOperation(bayesNet, instances, getAntiOperation(goodOperations [i])); if (currentDeltaScore > bestDeltaScore) { bestDeltaScore = currentDeltaScore; bestOperation [0] = goodOperations [i]; for (int j = 1; j < nrOfLookAheadSteps; j++) { bestOperation [j] = tempOperation [j-1]; } } } else i=nrOfGoodOperations; } return(bestOperation); } } // getOptimalOperations /** * Sets the max number of parents * * @param nMaxNrOfParents the max number of parents */ public void setMaxNrOfParents(int nMaxNrOfParents) { m_nMaxNrOfParents = nMaxNrOfParents; } /** * Gets the max number of parents. 
* * @return the max number of parents */ public int getMaxNrOfParents() { return m_nMaxNrOfParents; } /** * Sets the number of look-ahead steps * * @param nNrOfLookAheadSteps the number of look-ahead steps */ public void setNrOfLookAheadSteps(int nNrOfLookAheadSteps) { m_nNrOfLookAheadSteps = nNrOfLookAheadSteps; } /** * Gets the number of look-ahead steps * * @return the number of look-ahead step */ public int getNrOfLookAheadSteps() { return m_nNrOfLookAheadSteps; } /** * Sets the number of "good operations" * * @param nNrOfGoodOperations the number of "good operations" */ public void setNrOfGoodOperations(int nNrOfGoodOperations) { m_nNrOfGoodOperations = nNrOfGoodOperations; } /** * Gets the number of "good operations" * * @return the number of "good operations" */ public int getNrOfGoodOperations() { return m_nNrOfGoodOperations; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(); newVector.addElement(new Option("\tLook Ahead Depth", "L", 2, "-L <nr of look ahead steps>")); newVector.addElement(new Option("\tNr of Good Operations", "G", 5, "-G <nr of good operations>")); Enumeration enm = super.listOptions(); while (enm.hasMoreElements()) { newVector.addElement(enm.nextElement()); } return newVector.elements(); } // listOptions /** * Parses a given list of options. Valid options are:<p> * <!-- options-start --> * Valid options are: <p/> * * <pre> -L &lt;nr of look ahead steps&gt; * Look Ahead Depth</pre> * * <pre> -G &lt;nr of good operations&gt; * Nr of Good Operations</pre> * * <pre> -P &lt;nr of parents&gt; * Maximum number of parents</pre> * * <pre> -R * Use arc reversal operation. * (default false)</pre> * * <pre> -N * Initial structure is empty (instead of Naive Bayes)</pre> * * <pre> -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. 
This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node.</pre> * * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String sNrOfLookAheadSteps = Utils.getOption('L', options); if (sNrOfLookAheadSteps.length() != 0) { setNrOfLookAheadSteps(Integer.parseInt(sNrOfLookAheadSteps)); } else { setNrOfLookAheadSteps(2); } String sNrOfGoodOperations = Utils.getOption('G', options); if (sNrOfGoodOperations.length() != 0) { setNrOfGoodOperations(Integer.parseInt(sNrOfGoodOperations)); } else { setNrOfGoodOperations(5); } super.setOptions(options); } // setOptions /** * Gets the current settings of the search algorithm. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { String[] superOptions = super.getOptions(); String[] options = new String[9 + superOptions.length]; int current = 0; options[current++] = "-L"; options[current++] = "" + m_nNrOfLookAheadSteps; options[current++] = "-G"; options[current++] = "" + m_nNrOfGoodOperations; // insert options from parent class for (int iOption = 0; iOption < superOptions.length; iOption++) { options[current++] = superOptions[iOption]; } // Fill up rest with empty strings, not nulls! while (current < options.length) { options[current++] = ""; } return options; } // getOptions /** * This will return a string describing the search algorithm. * @return The string. */ public String globalInfo() { return "This Bayes Network learning algorithm uses a Look Ahead Hill Climbing algorithm called LAGD Hill Climbing." 
+ " Unlike Greedy Hill Climbing it doesn't calculate a best greedy operation (adding, deleting or reversing an arc) " + "but a sequence of nrOfLookAheadSteps operations, which leads to a network structure whose score is most likely " + "higher in comparison to the network obtained by performing a sequence of nrOfLookAheadSteps greedy operations. " + "The search is not restricted by an order " + "on the variables (unlike K2). The difference with B and B2 is that this hill " + "climber also considers arrows part of the naive Bayes structure for deletion."; } // globalInfo /** * @return a string to describe the Number of Look Ahead Steps option. */ public String nrOfLookAheadStepsTipText() { return "Sets the Number of Look Ahead Steps. 'nrOfLookAheadSteps = 2' means that all network structures in a " + "distance of 2 (from the current network structure) are taken into account for the decision which arcs to add, " + "remove or reverse. 'nrOfLookAheadSteps = 1' results in Greedy Hill Climbing." ; } // nrOfLookAheadStepsTipText /** * @return a string to describe the Number of Good Operations option. */ public String nrOfGoodOperationsTipText() { return "Sets the Number of Good Operations per Look Ahead Step. 'nrOfGoodOperations = 5' means that for the next " + "Look Ahead Step only the 5 best Operations (adding, deleting or reversing an arc) are taken into account for the " + "calculation of the best sequence consisting of nrOfLookAheadSteps operations." ; } // nrOfGoodOperationsTipText /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // LAGDHillClimber
17,397
37.49115
639
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/local/LocalScoreSearchAlgorithm.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LocalScoreSearchAlgorithm.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.local; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.classifiers.bayes.net.ParentSet; import weka.classifiers.bayes.net.search.SearchAlgorithm; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Statistics; import weka.core.Tag; import weka.core.Utils; /** <!-- globalinfo-start --> * The ScoreBasedSearchAlgorithm class supports Bayes net structure search algorithms that are based on maximizing scores (as opposed to for example conditional independence based search algorithms). * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. 
This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node.</pre> * * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre> * <!-- options-end --> * * @author Remco Bouckaert * @version $Revision: 8034 $ */ public class LocalScoreSearchAlgorithm extends SearchAlgorithm { /** for serialization */ static final long serialVersionUID = 3325995552474190374L; /** points to Bayes network for which a structure is searched for **/ BayesNet m_BayesNet; /** * default constructor */ public LocalScoreSearchAlgorithm() { } // c'tor /** * constructor * * @param bayesNet the network * @param instances the data */ public LocalScoreSearchAlgorithm(BayesNet bayesNet, Instances instances) { m_BayesNet = bayesNet; // m_Instances = instances; } // c'tor /** * Holds prior on count */ double m_fAlpha = 0.5; /** the score types */ public static final Tag[] TAGS_SCORE_TYPE = { new Tag(Scoreable.BAYES, "BAYES"), new Tag(Scoreable.BDeu, "BDeu"), new Tag(Scoreable.MDL, "MDL"), new Tag(Scoreable.ENTROPY, "ENTROPY"), new Tag(Scoreable.AIC, "AIC") }; /** * Holds the score type used to measure quality of network */ int m_nScoreType = Scoreable.BAYES; /** * logScore returns the log of the quality of a network * (e.g. the posterior probability of the network, or the MDL * value). * @param nType score type (Bayes, MDL, etc) to calculate score with * @return log score. 
*/ public double logScore(int nType) { if (m_BayesNet.m_Distributions == null) {return 0;} if (nType < 0) { nType = m_nScoreType; } double fLogScore = 0.0; Instances instances = m_BayesNet.m_Instances; for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { int nCardinality = m_BayesNet.getParentSet(iAttribute).getCardinalityOfParents(); for (int iParent = 0; iParent < nCardinality; iParent++) { fLogScore += ((Scoreable) m_BayesNet.m_Distributions[iAttribute][iParent]).logScore(nType, nCardinality); } switch (nType) { case (Scoreable.MDL) : { fLogScore -= 0.5 * m_BayesNet.getParentSet(iAttribute).getCardinalityOfParents() * (instances.attribute(iAttribute).numValues() - 1) * Math.log(instances.numInstances()); } break; case (Scoreable.AIC) : { fLogScore -= m_BayesNet.getParentSet(iAttribute).getCardinalityOfParents() * (instances.attribute(iAttribute).numValues() - 1); } break; } } return fLogScore; } // logScore /** * buildStructure determines the network structure/graph of the network * with the K2 algorithm, restricted by its initial structure (which can * be an empty graph, or a Naive Bayes graph. 
* * @param bayesNet the network * @param instances the data to use * @throws Exception if something goes wrong */ public void buildStructure (BayesNet bayesNet, Instances instances) throws Exception { m_BayesNet = bayesNet; super.buildStructure(bayesNet, instances); } // buildStructure /** * Calc Node Score for given parent set * * @param nNode node for which the score is calculate * @return log score */ public double calcNodeScore(int nNode) { if (m_BayesNet.getUseADTree() && m_BayesNet.getADTree() != null) { return calcNodeScoreADTree(nNode); } else { return calcNodeScorePlain(nNode); } } /** * helper function for CalcNodeScore above using the ADTree data structure * * @param nNode node for which the score is calculate * @return log score */ private double calcNodeScoreADTree(int nNode) { Instances instances = m_BayesNet.m_Instances; ParentSet oParentSet = m_BayesNet.getParentSet(nNode); // get set of parents, insert iNode int nNrOfParents = oParentSet.getNrOfParents(); int[] nNodes = new int[nNrOfParents + 1]; for (int iParent = 0; iParent < nNrOfParents; iParent++) { nNodes[iParent] = oParentSet.getParent(iParent); } nNodes[nNrOfParents] = nNode; // calculate offsets int[] nOffsets = new int[nNrOfParents + 1]; int nOffset = 1; nOffsets[nNrOfParents] = 1; nOffset *= instances.attribute(nNode).numValues(); for (int iNode = nNrOfParents - 1; iNode >= 0; iNode--) { nOffsets[iNode] = nOffset; nOffset *= instances.attribute(nNodes[iNode]).numValues(); } // sort nNodes & offsets for (int iNode = 1; iNode < nNodes.length; iNode++) { int iNode2 = iNode; while (iNode2 > 0 && nNodes[iNode2] < nNodes[iNode2 - 1]) { int h = nNodes[iNode2]; nNodes[iNode2] = nNodes[iNode2 - 1]; nNodes[iNode2 - 1] = h; h = nOffsets[iNode2]; nOffsets[iNode2] = nOffsets[iNode2 - 1]; nOffsets[iNode2 - 1] = h; iNode2--; } } // get counts from ADTree int nCardinality = oParentSet.getCardinalityOfParents(); int numValues = instances.attribute(nNode).numValues(); int[] nCounts = new int[nCardinality 
* numValues]; //if (nNrOfParents > 1) { m_BayesNet.getADTree().getCounts(nCounts, nNodes, nOffsets, 0, 0, false); return calcScoreOfCounts(nCounts, nCardinality, numValues, instances); } // CalcNodeScore private double calcNodeScorePlain(int nNode) { Instances instances = m_BayesNet.m_Instances; ParentSet oParentSet = m_BayesNet.getParentSet(nNode); // determine cardinality of parent set & reserve space for frequency counts int nCardinality = oParentSet.getCardinalityOfParents(); int numValues = instances.attribute(nNode).numValues(); int[] nCounts = new int[nCardinality * numValues]; // initialize (don't need this?) for (int iParent = 0; iParent < nCardinality * numValues; iParent++) { nCounts[iParent] = 0; } // estimate distributions Enumeration enumInsts = instances.enumerateInstances(); while (enumInsts.hasMoreElements()) { Instance instance = (Instance) enumInsts.nextElement(); // updateClassifier; double iCPT = 0; for (int iParent = 0; iParent < oParentSet.getNrOfParents(); iParent++) { int nParent = oParentSet.getParent(iParent); iCPT = iCPT * instances.attribute(nParent).numValues() + instance.value(nParent); } nCounts[numValues * ((int) iCPT) + (int) instance.value(nNode)]++; } return calcScoreOfCounts(nCounts, nCardinality, numValues, instances); } // CalcNodeScore /** * utility function used by CalcScore and CalcNodeScore to determine the score * based on observed frequencies. 
* * @param nCounts array with observed frequencies * @param nCardinality ardinality of parent set * @param numValues number of values a node can take * @param instances to calc score with * @return log score */ protected double calcScoreOfCounts(int[] nCounts, int nCardinality, int numValues, Instances instances) { // calculate scores using the distributions double fLogScore = 0.0; for (int iParent = 0; iParent < nCardinality; iParent++) { switch (m_nScoreType) { case (Scoreable.BAYES) : { double nSumOfCounts = 0; for (int iSymbol = 0; iSymbol < numValues; iSymbol++) { if (m_fAlpha + nCounts[iParent * numValues + iSymbol] != 0) { fLogScore += Statistics.lnGamma(m_fAlpha + nCounts[iParent * numValues + iSymbol]); nSumOfCounts += m_fAlpha + nCounts[iParent * numValues + iSymbol]; } } if (nSumOfCounts != 0) { fLogScore -= Statistics.lnGamma(nSumOfCounts); } if (m_fAlpha != 0) { fLogScore -= numValues * Statistics.lnGamma(m_fAlpha); fLogScore += Statistics.lnGamma(numValues * m_fAlpha); } } break; case (Scoreable.BDeu) : { double nSumOfCounts = 0; for (int iSymbol = 0; iSymbol < numValues; iSymbol++) { if (m_fAlpha + nCounts[iParent * numValues + iSymbol] != 0) { fLogScore += Statistics.lnGamma(1.0/(numValues * nCardinality) + nCounts[iParent * numValues + iSymbol]); nSumOfCounts += 1.0/(numValues * nCardinality) + nCounts[iParent * numValues + iSymbol]; } } fLogScore -= Statistics.lnGamma(nSumOfCounts); fLogScore -= numValues * Statistics.lnGamma(1.0/(numValues * nCardinality)); fLogScore += Statistics.lnGamma(1.0/nCardinality); } break; case (Scoreable.MDL) : case (Scoreable.AIC) : case (Scoreable.ENTROPY) : { double nSumOfCounts = 0; for (int iSymbol = 0; iSymbol < numValues; iSymbol++) { nSumOfCounts += nCounts[iParent * numValues + iSymbol]; } for (int iSymbol = 0; iSymbol < numValues; iSymbol++) { if (nCounts[iParent * numValues + iSymbol] > 0) { fLogScore += nCounts[iParent * numValues + iSymbol] * Math.log(nCounts[iParent * numValues + iSymbol] / nSumOfCounts); 
} } } break; default : { } } } switch (m_nScoreType) { case (Scoreable.MDL) : { fLogScore -= 0.5 * nCardinality * (numValues - 1) * Math.log(instances.numInstances()); // it seems safe to assume that numInstances>0 here } break; case (Scoreable.AIC) : { fLogScore -= nCardinality * (numValues - 1); } break; } return fLogScore; } // CalcNodeScore protected double calcScoreOfCounts2(int[][] nCounts, int nCardinality, int numValues, Instances instances) { // calculate scores using the distributions double fLogScore = 0.0; for (int iParent = 0; iParent < nCardinality; iParent++) { switch (m_nScoreType) { case (Scoreable.BAYES) : { double nSumOfCounts = 0; for (int iSymbol = 0; iSymbol < numValues; iSymbol++) { if (m_fAlpha + nCounts[iParent][iSymbol] != 0) { fLogScore += Statistics.lnGamma(m_fAlpha + nCounts[iParent][iSymbol]); nSumOfCounts += m_fAlpha + nCounts[iParent][iSymbol]; } } if (nSumOfCounts != 0) { fLogScore -= Statistics.lnGamma(nSumOfCounts); } if (m_fAlpha != 0) { fLogScore -= numValues * Statistics.lnGamma(m_fAlpha); fLogScore += Statistics.lnGamma(numValues * m_fAlpha); } } break; case (Scoreable.BDeu) : { double nSumOfCounts = 0; for (int iSymbol = 0; iSymbol < numValues; iSymbol++) { if (m_fAlpha + nCounts[iParent][iSymbol] != 0) { fLogScore += Statistics.lnGamma(1.0/(numValues * nCardinality) + nCounts[iParent][iSymbol]); nSumOfCounts += 1.0/(numValues * nCardinality) + nCounts[iParent][iSymbol]; } } fLogScore -= Statistics.lnGamma(nSumOfCounts); fLogScore -= numValues * Statistics.lnGamma(1.0/(nCardinality * numValues)); fLogScore += Statistics.lnGamma(1.0/ nCardinality); } break; case (Scoreable.MDL) : case (Scoreable.AIC) : case (Scoreable.ENTROPY) : { double nSumOfCounts = 0; for (int iSymbol = 0; iSymbol < numValues; iSymbol++) { nSumOfCounts += nCounts[iParent][iSymbol]; } for (int iSymbol = 0; iSymbol < numValues; iSymbol++) { if (nCounts[iParent][iSymbol] > 0) { fLogScore += nCounts[iParent][iSymbol] * Math.log(nCounts[iParent][iSymbol] / 
nSumOfCounts); } } } break; default : { } } } switch (m_nScoreType) { case (Scoreable.MDL) : { fLogScore -= 0.5 * nCardinality * (numValues - 1) * Math.log(instances.numInstances()); // it seems safe to assume that numInstances>0 here } break; case (Scoreable.AIC) : { fLogScore -= nCardinality * (numValues - 1); } break; } return fLogScore; } // CalcNodeScore /** * Calc Node Score With AddedParent * * @param nNode node for which the score is calculate * @param nCandidateParent candidate parent to add to the existing parent set * @return log score */ public double calcScoreWithExtraParent(int nNode, int nCandidateParent) { ParentSet oParentSet = m_BayesNet.getParentSet(nNode); // sanity check: nCandidateParent should not be in parent set already if (oParentSet.contains(nCandidateParent)) { return -1e100; } // set up candidate parent oParentSet.addParent(nCandidateParent, m_BayesNet.m_Instances); // calculate the score double logScore = calcNodeScore(nNode); // delete temporarily added parent oParentSet.deleteLastParent(m_BayesNet.m_Instances); return logScore; } // CalcScoreWithExtraParent /** * Calc Node Score With Parent Deleted * * @param nNode node for which the score is calculate * @param nCandidateParent candidate parent to delete from the existing parent set * @return log score */ public double calcScoreWithMissingParent(int nNode, int nCandidateParent) { ParentSet oParentSet = m_BayesNet.getParentSet(nNode); // sanity check: nCandidateParent should be in parent set already if (!oParentSet.contains( nCandidateParent)) { return -1e100; } // set up candidate parent int iParent = oParentSet.deleteParent(nCandidateParent, m_BayesNet.m_Instances); // calculate the score double logScore = calcNodeScore(nNode); // restore temporarily deleted parent oParentSet.addParent(nCandidateParent, iParent, m_BayesNet.m_Instances); return logScore; } // CalcScoreWithMissingParent /** * set quality measure to be used in searching for networks. 
* * @param newScoreType the new score type */ public void setScoreType(SelectedTag newScoreType) { if (newScoreType.getTags() == TAGS_SCORE_TYPE) { m_nScoreType = newScoreType.getSelectedTag().getID(); } } /** * get quality measure to be used in searching for networks. * @return quality measure */ public SelectedTag getScoreType() { return new SelectedTag(m_nScoreType, TAGS_SCORE_TYPE); } /** * * @param bMarkovBlanketClassifier */ public void setMarkovBlanketClassifier(boolean bMarkovBlanketClassifier) { super.setMarkovBlanketClassifier(bMarkovBlanketClassifier); } /** * * @return */ public boolean getMarkovBlanketClassifier() { return super.getMarkovBlanketClassifier(); } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Vector newVector = new Vector(); newVector.addElement(new Option( "\tApplies a Markov Blanket correction to the network structure, \n" + "\tafter a network structure is learned. This ensures that all \n" + "\tnodes in the network are part of the Markov blanket of the \n" + "\tclassifier node.", "mbc", 0, "-mbc")); newVector.addElement( new Option( "\tScore type (BAYES, BDeu, MDL, ENTROPY and AIC)", "S", 1, "-S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]")); return newVector.elements(); } // listOptions /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. 
This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node.</pre> * * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setMarkovBlanketClassifier(Utils.getFlag("mbc", options)); String sScore = Utils.getOption('S', options); if (sScore.compareTo("BAYES") == 0) { setScoreType(new SelectedTag(Scoreable.BAYES, TAGS_SCORE_TYPE)); } if (sScore.compareTo("BDeu") == 0) { setScoreType(new SelectedTag(Scoreable.BDeu, TAGS_SCORE_TYPE)); } if (sScore.compareTo("MDL") == 0) { setScoreType(new SelectedTag(Scoreable.MDL, TAGS_SCORE_TYPE)); } if (sScore.compareTo("ENTROPY") == 0) { setScoreType(new SelectedTag(Scoreable.ENTROPY, TAGS_SCORE_TYPE)); } if (sScore.compareTo("AIC") == 0) { setScoreType(new SelectedTag(Scoreable.AIC, TAGS_SCORE_TYPE)); } } // setOptions /** * Gets the current settings of the search algorithm. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { String[] superOptions = super.getOptions(); String[] options = new String[3 + superOptions.length]; int current = 0; if (getMarkovBlanketClassifier()) options[current++] = "-mbc"; options[current++] = "-S"; switch (m_nScoreType) { case (Scoreable.BAYES) : options[current++] = "BAYES"; break; case (Scoreable.BDeu) : options[current++] = "BDeu"; break; case (Scoreable.MDL) : options[current++] = "MDL"; break; case (Scoreable.ENTROPY) : options[current++] = "ENTROPY"; break; case (Scoreable.AIC) : options[current++] = "AIC"; break; } // insert options from parent class for (int iOption = 0; iOption < superOptions.length; iOption++) { options[current++] = superOptions[iOption]; } // Fill up rest with empty strings, not nulls! 
while (current < options.length) { options[current++] = ""; } return options; } // getOptions /** * @return a string to describe the ScoreType option. */ public String scoreTypeTipText() { return "The score type determines the measure used to judge the quality of a" + " network structure. It can be one of Bayes, BDeu, Minimum Description Length (MDL)," + " Akaike Information Criterion (AIC), and Entropy."; } /** * @return a string to describe the MarkovBlanketClassifier option. */ public String markovBlanketClassifierTipText() { return super.markovBlanketClassifierTipText(); } /** * This will return a string describing the search algorithm. * @return The string. */ public String globalInfo() { return "The ScoreBasedSearchAlgorithm class supports Bayes net " + "structure search algorithms that are based on maximizing " + "scores (as opposed to for example conditional independence " + "based search algorithms)."; } // globalInfo /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
20,713
27.769444
199
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/local/RepeatedHillClimber.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * RepeatedHillClimber.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.local;

import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.classifiers.bayes.net.ParentSet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * This Bayes Network learning algorithm repeatedly uses hill climbing starting with a randomly generated network structure and return the best structure of the various runs.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -U &lt;integer&gt;
 *  Number of runs</pre>
 *
 * <pre> -A &lt;seed&gt;
 *  Random number seed</pre>
 *
 * <pre> -P &lt;nr of parents&gt;
 *  Maximum number of parents</pre>
 *
 * <pre> -R
 *  Use arc reversal operation.
 *  (default false)</pre>
 *
 * <pre> -N
 *  Initial structure is empty (instead of Naive Bayes)</pre>
 *
 * <pre> -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.</pre>
 *
 * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]
 *  Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre>
 *
 <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision: 8034 $
 */
public class RepeatedHillClimber extends HillClimber {

  /** for serialization */
  static final long serialVersionUID = -6574084564213041174L;

  /** number of restarts of the hill climber */
  int m_nRuns = 10;

  /** random number seed */
  int m_nSeed = 1;

  /** random number generator, re-seeded from m_nSeed on every search() call */
  Random m_random;

  /**
   * search determines the network structure/graph of the network
   * with the repeated hill climbing: each run starts from a freshly
   * randomized structure, runs the superclass hill climber, and the
   * best-scoring structure over all runs is kept.
   *
   * @param bayesNet the network
   * @param instances the data to use
   * @throws Exception if something goes wrong
   */
  protected void search(BayesNet bayesNet, Instances instances) throws Exception {
    m_random = new Random(getSeed());
    // keeps track of score of best structure found so far
    double fBestScore;
    double fCurrentScore = 0.0;
    for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
      fCurrentScore += calcNodeScore(iAttribute);
    }

    // keeps track of best structure found so far
    BayesNet bestBayesNet;

    // initialize bestBayesNet with the incoming structure
    fBestScore = fCurrentScore;
    bestBayesNet = new BayesNet();
    bestBayesNet.m_Instances = instances;
    bestBayesNet.initStructure();
    copyParentSets(bestBayesNet, bayesNet);

    // go do the search
    for (int iRun = 0; iRun < m_nRuns; iRun++) {
      // generate random network as the new starting point
      generateRandomNet(bayesNet, instances);

      // search
      super.search(bayesNet, instances);

      // calculate score of the structure the hill climber ended in
      fCurrentScore = 0.0;
      for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
        fCurrentScore += calcNodeScore(iAttribute);
      }

      // keep track of best network seen so far
      if (fCurrentScore > fBestScore) {
        fBestScore = fCurrentScore;
        copyParentSets(bestBayesNet, bayesNet);
      }
    }

    // restore current network to best network
    copyParentSets(bayesNet, bestBayesNet);

    // free up memory
    bestBayesNet = null;
    m_Cache = null;
  } // search

  /**
   * Replaces the structure of bayesNet with a random one: all arcs are
   * removed, optionally a Naive Bayes skeleton is added, then a random
   * number of random (acyclic, parent-limited) arcs are inserted.
   *
   * @param bayesNet network whose structure is randomized in place
   * @param instances the data (used to size parent sets)
   */
  void generateRandomNet(BayesNet bayesNet, Instances instances) {
    int nNodes = instances.numAttributes();
    // clear network
    for (int iNode = 0; iNode < nNodes; iNode++) {
      ParentSet parentSet = bayesNet.getParentSet(iNode);
      while (parentSet.getNrOfParents() > 0) {
        parentSet.deleteLastParent(instances);
      }
    }

    // initialize as naive Bayes?
    if (getInitAsNaiveBayes()) {
      int iClass = instances.classIndex();
      // initialize parent sets to have arrow from classifier node to
      // each of the other nodes
      for (int iNode = 0; iNode < nNodes; iNode++) {
        if (iNode != iClass) {
          bayesNet.getParentSet(iNode).addParent(iClass, instances);
        }
      }
    }

    // insert random arcs; attempts that would create a cycle or exceed
    // the parent limit are silently skipped
    int nNrOfAttempts = m_random.nextInt(nNodes * nNodes);
    for (int iAttempt = 0; iAttempt < nNrOfAttempts; iAttempt++) {
      int iTail = m_random.nextInt(nNodes);
      int iHead = m_random.nextInt(nNodes);
      if (bayesNet.getParentSet(iHead).getNrOfParents() < getMaxNrOfParents()
        && addArcMakesSense(bayesNet, instances, iHead, iTail)) {
        bayesNet.getParentSet(iHead).addParent(iTail, instances);
      }
    }
  } // generateRandomNet

  /**
   * copyParentSets copies parent sets of source to dest BayesNet
   *
   * @param dest destination network
   * @param source source network
   */
  void copyParentSets(BayesNet dest, BayesNet source) {
    int nNodes = source.getNrOfNodes();
    // clear parent set first
    for (int iNode = 0; iNode < nNodes; iNode++) {
      dest.getParentSet(iNode).copy(source.getParentSet(iNode));
    }
  } // CopyParentSets

  /**
   * @return number of runs
   */
  public int getRuns() {
    return m_nRuns;
  } // getRuns

  /**
   * Sets the number of runs
   * @param nRuns The number of runs to set
   */
  public void setRuns(int nRuns) {
    m_nRuns = nRuns;
  } // setRuns

  /**
   * @return random number seed
   */
  public int getSeed() {
    return m_nSeed;
  } // getSeed

  /**
   * Sets the random number seed
   * @param nSeed The number of the seed to set
   */
  public void setSeed(int nSeed) {
    m_nSeed = nSeed;
  } // setSeed

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(4);

    newVector.addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>"));
    newVector.addElement(new Option("\tRandom number seed", "A", 1, "-A <seed>"));

    Enumeration enu = super.listOptions();
    while (enu.hasMoreElements()) {
      newVector.addElement(enu.nextElement());
    }
    return newVector.elements();
  } // listOptions

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -U &lt;integer&gt;
   *  Number of runs</pre>
   *
   * <pre> -A &lt;seed&gt;
   *  Random number seed</pre>
   *
   * <pre> -P &lt;nr of parents&gt;
   *  Maximum number of parents</pre>
   *
   * <pre> -R
   *  Use arc reversal operation.
   *  (default false)</pre>
   *
   * <pre> -N
   *  Initial structure is empty (instead of Naive Bayes)</pre>
   *
   * <pre> -mbc
   *  Applies a Markov Blanket correction to the network structure,
   *  after a network structure is learned. This ensures that all
   *  nodes in the network are part of the Markov blanket of the
   *  classifier node.</pre>
   *
   * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]
   *  Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String sRuns = Utils.getOption('U', options);
    if (sRuns.length() != 0) {
      setRuns(Integer.parseInt(sRuns));
    }

    String sSeed = Utils.getOption('A', options);
    if (sSeed.length() != 0) {
      setSeed(Integer.parseInt(sSeed));
    }

    super.setOptions(options);
  } // setOptions

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] superOptions = super.getOptions();
    String[] options = new String[7 + superOptions.length];
    int current = 0;

    options[current++] = "-U";
    options[current++] = "" + getRuns();

    options[current++] = "-A";
    options[current++] = "" + getSeed();

    // insert options from parent class
    for (int iOption = 0; iOption < superOptions.length; iOption++) {
      options[current++] = superOptions[iOption];
    }

    // Fill up rest with empty strings, not nulls!
    while (current < options.length) {
      options[current++] = "";
    }
    return options;
  } // getOptions

  /**
   * This will return a string describing the classifier.
   * @return The string.
   */
  public String globalInfo() {
    return "This Bayes Network learning algorithm repeatedly uses hill climbing starting "
      + "with a randomly generated network structure and return the best structure of the "
      + "various runs.";
  } // globalInfo

  /**
   * @return a string to describe the Runs option.
   */
  public String runsTipText() {
    return "Sets the number of times hill climbing is performed.";
  } // runsTipText

  /**
   * @return a string to describe the Seed option.
   */
  public String seedTipText() {
    return "Initialization value for random number generator."
      + " Setting the seed allows replicability of experiments.";
  } // seedTipText

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
9,838
26.793785
174
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/local/Scoreable.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Scoreable.java
 * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.local;

/**
 * Interface for allowing to score a classifier
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision: 8034 $
 */
public interface Scoreable {

  /** score type: Bayesian metric */
  int BAYES = 0;

  /** score type: Bayesian Dirichlet equivalent (uniform) metric */
  int BDeu = 1;

  /** score type: Minimum Description Length */
  int MDL = 2;

  /** score type: entropy */
  int ENTROPY = 3;

  /** score type: Akaike Information Criterion */
  int AIC = 4;

  /**
   * Returns log-score
   *
   * @param nType the score type (one of the constants above)
   * @param nCardinality presumably the cardinality of the parent set --
   *                     confirm against implementing classes
   * @return the log-score
   */
  double logScore(int nType, int nCardinality);
} // interface Scoreable
1,280
23.634615
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/local/SimulatedAnnealing.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * SimulatedAnnealing.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.bayes.net.search.local;

import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.classifiers.bayes.BayesNet;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;

/**
 <!-- globalinfo-start -->
 * This Bayes Network learning algorithm uses the general purpose search method of simulated annealing to find a well scoring network structure.<br/>
 * <br/>
 * For more information see:<br/>
 * <br/>
 * R.R. Bouckaert (1995). Bayesian Belief Networks: from Construction to Inference. Utrecht, Netherlands.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;phdthesis{Bouckaert1995,
 *    address = {Utrecht, Netherlands},
 *    author = {R.R. Bouckaert},
 *    institution = {University of Utrecht},
 *    title = {Bayesian Belief Networks: from Construction to Inference},
 *    year = {1995}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -A &lt;float&gt;
 *  Start temperature</pre>
 *
 * <pre> -U &lt;integer&gt;
 *  Number of runs</pre>
 *
 * <pre> -D &lt;float&gt;
 *  Delta temperature</pre>
 *
 * <pre> -R &lt;seed&gt;
 *  Random number seed</pre>
 *
 * <pre> -mbc
 *  Applies a Markov Blanket correction to the network structure,
 *  after a network structure is learned. This ensures that all
 *  nodes in the network are part of the Markov blanket of the
 *  classifier node.</pre>
 *
 * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]
 *  Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre>
 *
 <!-- options-end -->
 *
 * @author Remco Bouckaert (rrb@xm.co.nz)
 * @version $Revision: 8034 $
 */
public class SimulatedAnnealing
    extends LocalScoreSearchAlgorithm
    implements TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 6951955606060513191L;

  /** start temperature */
  double m_fTStart = 10;

  /** multiplicative change in temperature at every run */
  double m_fDelta = 0.999;

  /** number of runs */
  int m_nRuns = 10000;

  /** use the arc reversal operator (declared for interface compatibility;
   *  not consulted by this implementation) */
  boolean m_bUseArcReversal = false;

  /** random number seed */
  int m_nSeed = 1;

  /** random number generator, re-seeded from m_nSeed on every search() call */
  Random m_random;

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.PHDTHESIS);
    result.setValue(Field.AUTHOR, "R.R. Bouckaert");
    result.setValue(Field.YEAR, "1995");
    result.setValue(Field.TITLE, "Bayesian Belief Networks: from Construction to Inference");
    result.setValue(Field.INSTITUTION, "University of Utrecht");
    result.setValue(Field.ADDRESS, "Utrecht, Netherlands");

    return result;
  }

  /**
   * Runs simulated annealing over single-arc additions and deletions.
   * In each iteration one random candidate arc is tried; improvements are
   * always accepted, deteriorations are accepted with a probability that
   * shrinks with the temperature. The best structure seen over all
   * iterations is restored into bayesNet before returning.
   *
   * @param bayesNet the network
   * @param instances the data to use
   * @throws Exception if something goes wrong
   */
  public void search(BayesNet bayesNet, Instances instances) throws Exception {
    m_random = new Random(m_nSeed);

    // determine base scores
    double[] fBaseScores = new double[instances.numAttributes()];
    double fCurrentScore = 0;
    for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) {
      fBaseScores[iAttribute] = calcNodeScore(iAttribute);
      fCurrentScore += fBaseScores[iAttribute];
    }

    // keep track of best scoring network
    double fBestScore = fCurrentScore;
    BayesNet bestBayesNet = new BayesNet();
    bestBayesNet.m_Instances = instances;
    bestBayesNet.initStructure();
    copyParentSets(bestBayesNet, bayesNet);

    double fTemp = m_fTStart;
    for (int iRun = 0; iRun < m_nRuns; iRun++) {
      boolean bRunSucces = false;
      double fDeltaScore = 0.0;
      while (!bRunSucces) {
        // pick two distinct nodes at random
        // NOTE(review): Math.abs(Integer.MIN_VALUE) is negative, so with
        // probability 2^-32 per draw this yields a negative index; left
        // unchanged here to preserve the seeded random stream -- confirm
        // whether this should be replaced by nextInt(bound).
        int iTailNode = Math.abs(m_random.nextInt()) % instances.numAttributes();
        int iHeadNode = Math.abs(m_random.nextInt()) % instances.numAttributes();
        while (iTailNode == iHeadNode) {
          iHeadNode = Math.abs(m_random.nextInt()) % instances.numAttributes();
        }
        if (isArc(bayesNet, iHeadNode, iTailNode)) {
          bRunSucces = true;
          // arc exists: try a delete
          bayesNet.getParentSet(iHeadNode).deleteParent(iTailNode, instances);
          double fScore = calcNodeScore(iHeadNode);
          fDeltaScore = fScore - fBaseScores[iHeadNode];
          // Metropolis-style acceptance: the log of a uniform draw scaled by
          // the temperature must fall below the score change
          if (fTemp * Math.log((Math.abs(m_random.nextInt()) % 10000) / 10000.0 + 1e-100) < fDeltaScore) {
            fCurrentScore += fDeltaScore;
            fBaseScores[iHeadNode] = fScore;
          } else {
            // not accepted: roll back the deletion
            bayesNet.getParentSet(iHeadNode).addParent(iTailNode, instances);
          }
        } else {
          // no arc yet: try to add one (only counts as a move if acyclic)
          if (addArcMakesSense(bayesNet, instances, iHeadNode, iTailNode)) {
            bRunSucces = true;
            double fScore = calcScoreWithExtraParent(iHeadNode, iTailNode);
            fDeltaScore = fScore - fBaseScores[iHeadNode];
            if (fTemp * Math.log((Math.abs(m_random.nextInt()) % 10000) / 10000.0 + 1e-100) < fDeltaScore) {
              bayesNet.getParentSet(iHeadNode).addParent(iTailNode, instances);
              fBaseScores[iHeadNode] = fScore;
              fCurrentScore += fDeltaScore;
            }
          }
        }
      }

      if (fCurrentScore > fBestScore) {
        // FIX: fBestScore was previously never updated here, so bestBayesNet
        // got overwritten by ANY network better than the *initial* score and
        // the final result could be worse than an earlier candidate. Record
        // the new best score so only genuine improvements are kept.
        fBestScore = fCurrentScore;
        copyParentSets(bestBayesNet, bayesNet);
      }

      fTemp = fTemp * m_fDelta;
    }

    copyParentSets(bayesNet, bestBayesNet);
  } // search

  /**
   * CopyParentSets copies parent sets of source to dest BayesNet
   *
   * @param dest destination network
   * @param source source network
   */
  void copyParentSets(BayesNet dest, BayesNet source) {
    int nNodes = source.getNrOfNodes();
    // clear parent set first
    for (int iNode = 0; iNode < nNodes; iNode++) {
      dest.getParentSet(iNode).copy(source.getParentSet(iNode));
    }
  } // CopyParentSets

  /**
   * @return the temperature decay factor
   */
  public double getDelta() {
    return m_fDelta;
  }

  /**
   * @return the start temperature
   */
  public double getTStart() {
    return m_fTStart;
  }

  /**
   * @return the number of annealing iterations
   */
  public int getRuns() {
    return m_nRuns;
  }

  /**
   * Sets the temperature decay factor.
   * @param fDelta The m_fDelta to set
   */
  public void setDelta(double fDelta) {
    m_fDelta = fDelta;
  }

  /**
   * Sets the start temperature.
   * @param fTStart The m_fTStart to set
   */
  public void setTStart(double fTStart) {
    m_fTStart = fTStart;
  }

  /**
   * Sets the number of annealing iterations.
   * @param nRuns The m_nRuns to set
   */
  public void setRuns(int nRuns) {
    m_nRuns = nRuns;
  }

  /**
   * @return random number seed
   */
  public int getSeed() {
    return m_nSeed;
  } // getSeed

  /**
   * Sets the random number seed
   * @param nSeed The number of the seed to set
   */
  public void setSeed(int nSeed) {
    m_nSeed = nSeed;
  } // setSeed

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    // four local options plus whatever the superclass contributes
    Vector newVector = new Vector(4);

    newVector.addElement(new Option("\tStart temperature", "A", 1, "-A <float>"));
    newVector.addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>"));
    newVector.addElement(new Option("\tDelta temperature", "D", 1, "-D <float>"));
    newVector.addElement(new Option("\tRandom number seed", "R", 1, "-R <seed>"));

    Enumeration enu = super.listOptions();
    while (enu.hasMoreElements()) {
      newVector.addElement(enu.nextElement());
    }
    return newVector.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -A &lt;float&gt;
   *  Start temperature</pre>
   *
   * <pre> -U &lt;integer&gt;
   *  Number of runs</pre>
   *
   * <pre> -D &lt;float&gt;
   *  Delta temperature</pre>
   *
   * <pre> -R &lt;seed&gt;
   *  Random number seed</pre>
   *
   * <pre> -mbc
   *  Applies a Markov Blanket correction to the network structure,
   *  after a network structure is learned. This ensures that all
   *  nodes in the network are part of the Markov blanket of the
   *  classifier node.</pre>
   *
   * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES]
   *  Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String sTStart = Utils.getOption('A', options);
    if (sTStart.length() != 0) {
      setTStart(Double.parseDouble(sTStart));
    }
    String sRuns = Utils.getOption('U', options);
    if (sRuns.length() != 0) {
      setRuns(Integer.parseInt(sRuns));
    }
    String sDelta = Utils.getOption('D', options);
    if (sDelta.length() != 0) {
      setDelta(Double.parseDouble(sDelta));
    }
    String sSeed = Utils.getOption('R', options);
    if (sSeed.length() != 0) {
      setSeed(Integer.parseInt(sSeed));
    }
    super.setOptions(options);
  }

  /**
   * Gets the current settings of the search algorithm.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] superOptions = super.getOptions();
    String[] options = new String[8 + superOptions.length];
    int current = 0;

    options[current++] = "-A";
    options[current++] = "" + getTStart();

    options[current++] = "-U";
    options[current++] = "" + getRuns();

    options[current++] = "-D";
    options[current++] = "" + getDelta();

    options[current++] = "-R";
    options[current++] = "" + getSeed();

    // insert options from parent class
    for (int iOption = 0; iOption < superOptions.length; iOption++) {
      options[current++] = superOptions[iOption];
    }

    // Fill up rest with empty strings, not nulls!
    while (current < options.length) {
      options[current++] = "";
    }
    return options;
  }

  /**
   * This will return a string describing the classifier.
   * @return The string.
   */
  public String globalInfo() {
    return "This Bayes Network learning algorithm uses the general purpose search method "
      + "of simulated annealing to find a well scoring network structure.\n\n"
      + "For more information see:\n\n"
      + getTechnicalInformation().toString();
  } // globalInfo

  /**
   * @return a string to describe the TStart option.
   */
  public String TStartTipText() {
    return "Sets the start temperature of the simulated annealing search. "
      + "The start temperature determines the probability that a step in the 'wrong' direction in the "
      + "search space is accepted. The higher the temperature, the higher the probability of acceptance.";
  } // TStartTipText

  /**
   * @return a string to describe the Runs option.
   */
  public String runsTipText() {
    return "Sets the number of iterations to be performed by the simulated annealing search.";
  } // runsTipText

  /**
   * @return a string to describe the Delta option.
   */
  public String deltaTipText() {
    return "Sets the factor with which the temperature (and thus the acceptance probability of "
      + "steps in the wrong direction in the search space) is decreased in each iteration.";
  } // deltaTipText

  /**
   * @return a string to describe the Seed option.
   */
  public String seedTipText() {
    return "Initialization value for random number generator."
      + " Setting the seed allows replicability of experiments.";
  } // seedTipText

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
} // SimulatedAnnealing
13,606
29.785068
149
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/local/TAN.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * TAN.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.local; import java.util.Enumeration; import weka.classifiers.bayes.BayesNet; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; /** <!-- globalinfo-start --> * This Bayes Network learning algorithm determines the maximum weight spanning tree and returns a Naive Bayes network augmented with a tree.<br/> * <br/> * For more information see:<br/> * <br/> * N. Friedman, D. Geiger, M. Goldszmidt (1997). Bayesian network classifiers. Machine Learning. 29(2-3):131-163. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;article{Friedman1997, * author = {N. Friedman and D. Geiger and M. Goldszmidt}, * journal = {Machine Learning}, * number = {2-3}, * pages = {131-163}, * title = {Bayesian network classifiers}, * volume = {29}, * year = {1997} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. 
This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node.</pre> * * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre> * <!-- options-end --> * * @author Remco Bouckaert * @version $Revision: 8034 $ */ public class TAN extends LocalScoreSearchAlgorithm implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 965182127977228690L; /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "N. Friedman and D. Geiger and M. Goldszmidt"); result.setValue(Field.YEAR, "1997"); result.setValue(Field.TITLE, "Bayesian network classifiers"); result.setValue(Field.JOURNAL, "Machine Learning"); result.setValue(Field.VOLUME, "29"); result.setValue(Field.NUMBER, "2-3"); result.setValue(Field.PAGES, "131-163"); return result; } /** * buildStructure determines the network structure/graph of the network * using the maximimum weight spanning tree algorithm of Chow and Liu * * @param bayesNet the network * @param instances the data to use * @throws Exception if something goes wrong */ public void buildStructure(BayesNet bayesNet, Instances instances) throws Exception { m_bInitAsNaiveBayes = true; m_nMaxNrOfParents = 2; super.buildStructure(bayesNet, instances); int nNrOfAtts = instances.numAttributes(); if (nNrOfAtts <= 2) { return; } // determine base scores double[] fBaseScores = new double[instances.numAttributes()]; for (int iAttribute = 0; iAttribute < nNrOfAtts; iAttribute++) { fBaseScores[iAttribute] = calcNodeScore(iAttribute); } // // cache scores & whether adding an arc 
makes sense double[][] fScore = new double[nNrOfAtts][nNrOfAtts]; for (int iAttributeHead = 0; iAttributeHead < nNrOfAtts; iAttributeHead++) { for (int iAttributeTail = 0; iAttributeTail < nNrOfAtts; iAttributeTail++) { if (iAttributeHead != iAttributeTail) { fScore[iAttributeHead][iAttributeTail] = calcScoreWithExtraParent(iAttributeHead, iAttributeTail); } } } // TAN greedy search (not restricted by ordering like K2) // 1. find strongest link // 2. find remaining links by adding strongest link to already // connected nodes // 3. assign direction to links int nClassNode = instances.classIndex(); int [] link1 = new int [nNrOfAtts - 1]; int [] link2 = new int [nNrOfAtts - 1]; boolean [] linked = new boolean [nNrOfAtts]; // 1. find strongest link int nBestLinkNode1 = -1; int nBestLinkNode2 = -1; double fBestDeltaScore = 0.0; int iLinkNode1; for (iLinkNode1 = 0; iLinkNode1 < nNrOfAtts; iLinkNode1++) { if (iLinkNode1 != nClassNode) { for (int iLinkNode2 = 0; iLinkNode2 < nNrOfAtts; iLinkNode2++) { if ((iLinkNode1 != iLinkNode2) && (iLinkNode2 != nClassNode) && ( (nBestLinkNode1 == -1) || (fScore[iLinkNode1][iLinkNode2] - fBaseScores[iLinkNode1] > fBestDeltaScore) )) { fBestDeltaScore = fScore[iLinkNode1][iLinkNode2] - fBaseScores[iLinkNode1]; nBestLinkNode1 = iLinkNode2; nBestLinkNode2 = iLinkNode1; } } } } link1[0] = nBestLinkNode1; link2[0] = nBestLinkNode2; linked[nBestLinkNode1] = true; linked[nBestLinkNode2] = true; // 2. 
find remaining links by adding strongest link to already // connected nodes for (int iLink = 1; iLink < nNrOfAtts - 2; iLink++) { nBestLinkNode1 = -1; for (iLinkNode1 = 0; iLinkNode1 < nNrOfAtts; iLinkNode1++) { if (iLinkNode1 != nClassNode) { for (int iLinkNode2 = 0; iLinkNode2 < nNrOfAtts; iLinkNode2++) { if ((iLinkNode1 != iLinkNode2) && (iLinkNode2 != nClassNode) && (linked[iLinkNode1] || linked[iLinkNode2]) && (!linked[iLinkNode1] || !linked[iLinkNode2]) && ( (nBestLinkNode1 == -1) || (fScore[iLinkNode1][iLinkNode2] - fBaseScores[iLinkNode1] > fBestDeltaScore) )) { fBestDeltaScore = fScore[iLinkNode1][iLinkNode2] - fBaseScores[iLinkNode1]; nBestLinkNode1 = iLinkNode2; nBestLinkNode2 = iLinkNode1; } } } } link1[iLink] = nBestLinkNode1; link2[iLink] = nBestLinkNode2; linked[nBestLinkNode1] = true; linked[nBestLinkNode2] = true; } // 3. assign direction to links boolean [] hasParent = new boolean [nNrOfAtts]; for (int iLink = 0; iLink < nNrOfAtts - 2; iLink++) { if (!hasParent[link1[iLink]]) { bayesNet.getParentSet(link1[iLink]).addParent(link2[iLink], instances); hasParent[link1[iLink]] = true; } else { if (hasParent[link2[iLink]]) { throw new Exception("Bug condition found: too many arrows"); } bayesNet.getParentSet(link2[iLink]).addParent(link1[iLink], instances); hasParent[link2[iLink]] = true; } } } // buildStructure /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { return super.listOptions(); } // listOption /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. 
This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node.</pre> * * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { super.setOptions(options); } // setOptions /** * Gets the current settings of the Classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { return super.getOptions(); } // getOptions /** * This will return a string describing the classifier. * @return The string. */ public String globalInfo() { return "This Bayes Network learning algorithm determines the maximum weight spanning tree " + " and returns a Naive Bayes network augmented with a tree.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } // globalInfo /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // TAN
9,104
30.614583
147
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/bayes/net/search/local/TabuSearch.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * TabuSearch.java * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.bayes.net.search.local; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.bayes.BayesNet; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; /** <!-- globalinfo-start --> * This Bayes Network learning algorithm uses tabu search for finding a well scoring Bayes network structure. Tabu search is hill climbing till an optimum is reached. The following step is the least worst possible step. The last X steps are kept in a list and none of the steps in this so called tabu list is considered in taking the next step. The best network found in this traversal is returned.<br/> * <br/> * For more information see:<br/> * <br/> * R.R. Bouckaert (1995). Bayesian Belief Networks: from Construction to Inference. Utrecht, Netherlands. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;phdthesis{Bouckaert1995, * address = {Utrecht, Netherlands}, * author = {R.R. 
Bouckaert}, * institution = {University of Utrecht}, * title = {Bayesian Belief Networks: from Construction to Inference}, * year = {1995} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -L &lt;integer&gt; * Tabu list length</pre> * * <pre> -U &lt;integer&gt; * Number of runs</pre> * * <pre> -P &lt;nr of parents&gt; * Maximum number of parents</pre> * * <pre> -R * Use arc reversal operation. * (default false)</pre> * * <pre> -P &lt;nr of parents&gt; * Maximum number of parents</pre> * * <pre> -R * Use arc reversal operation. * (default false)</pre> * * <pre> -N * Initial structure is empty (instead of Naive Bayes)</pre> * * <pre> -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node.</pre> * * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre> * <!-- options-end --> * * @author Remco Bouckaert (rrb@xm.co.nz) * @version $Revision: 8034 $ */ public class TabuSearch extends HillClimber implements TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 1457344073228786447L; /** number of runs **/ int m_nRuns = 10; /** size of tabu list **/ int m_nTabuList = 5; /** the actual tabu list **/ Operation[] m_oTabuList = null; /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.PHDTHESIS); result.setValue(Field.AUTHOR, "R.R. 
Bouckaert"); result.setValue(Field.YEAR, "1995"); result.setValue(Field.TITLE, "Bayesian Belief Networks: from Construction to Inference"); result.setValue(Field.INSTITUTION, "University of Utrecht"); result.setValue(Field.ADDRESS, "Utrecht, Netherlands"); return result; } /** * search determines the network structure/graph of the network * with the Tabu search algorithm. * * @param bayesNet the network * @param instances the data to use * @throws Exception if something goes wrong */ protected void search(BayesNet bayesNet, Instances instances) throws Exception { m_oTabuList = new Operation[m_nTabuList]; int iCurrentTabuList = 0; initCache(bayesNet, instances); // keeps track of score pf best structure found so far double fBestScore; double fCurrentScore = 0.0; for (int iAttribute = 0; iAttribute < instances.numAttributes(); iAttribute++) { fCurrentScore += calcNodeScore(iAttribute); } // keeps track of best structure found so far BayesNet bestBayesNet; // initialize bestBayesNet fBestScore = fCurrentScore; bestBayesNet = new BayesNet(); bestBayesNet.m_Instances = instances; bestBayesNet.initStructure(); copyParentSets(bestBayesNet, bayesNet); // go do the search for (int iRun = 0; iRun < m_nRuns; iRun++) { Operation oOperation = getOptimalOperation(bayesNet, instances); performOperation(bayesNet, instances, oOperation); // sanity check if (oOperation == null) { throw new Exception("Panic: could not find any step to make. 
Tabu list too long?"); } // update tabu list m_oTabuList[iCurrentTabuList] = oOperation; iCurrentTabuList = (iCurrentTabuList + 1) % m_nTabuList; fCurrentScore += oOperation.m_fDeltaScore; // keep track of best network seen so far if (fCurrentScore > fBestScore) { fBestScore = fCurrentScore; copyParentSets(bestBayesNet, bayesNet); } if (bayesNet.getDebug()) { printTabuList(); } } // restore current network to best network copyParentSets(bayesNet, bestBayesNet); // free up memory bestBayesNet = null; m_Cache = null; } // search /** * copyParentSets copies parent sets of source to dest BayesNet * * @param dest destination network * @param source source network */ void copyParentSets(BayesNet dest, BayesNet source) { int nNodes = source.getNrOfNodes(); // clear parent set first for (int iNode = 0; iNode < nNodes; iNode++) { dest.getParentSet(iNode).copy(source.getParentSet(iNode)); } } // CopyParentSets /** * check whether the operation is not in the tabu list * * @param oOperation operation to be checked * @return true if operation is not in the tabu list */ boolean isNotTabu(Operation oOperation) { for (int iTabu = 0; iTabu < m_nTabuList; iTabu++) { if (oOperation.equals(m_oTabuList[iTabu])) { return false; } } return true; } // isNotTabu /** print tabu list for debugging purposes. */ void printTabuList() { for (int i = 0; i < m_nTabuList; i++) { Operation o = m_oTabuList[i]; if (o != null) { if (o.m_nOperation == 0) {System.out.print(" +(");} else {System.out.print(" -(");} System.out.print(o.m_nTail + "->" + o.m_nHead + ")"); } } System.out.println(); } // printTabuList /** * @return number of runs */ public int getRuns() { return m_nRuns; } // getRuns /** * Sets the number of runs * @param nRuns The number of runs to set */ public void setRuns(int nRuns) { m_nRuns = nRuns; } // setRuns /** * @return the Tabu List length */ public int getTabuList() { return m_nTabuList; } // getTabuList /** * Sets the Tabu List length. 
* @param nTabuList The nTabuList to set */ public void setTabuList(int nTabuList) { m_nTabuList = nTabuList; } // setTabuList /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(4); newVector.addElement(new Option("\tTabu list length", "L", 1, "-L <integer>")); newVector.addElement(new Option("\tNumber of runs", "U", 1, "-U <integer>")); newVector.addElement(new Option("\tMaximum number of parents", "P", 1, "-P <nr of parents>")); newVector.addElement(new Option("\tUse arc reversal operation.\n\t(default false)", "R", 0, "-R")); Enumeration enu = super.listOptions(); while (enu.hasMoreElements()) { newVector.addElement(enu.nextElement()); } return newVector.elements(); } // listOptions /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -L &lt;integer&gt; * Tabu list length</pre> * * <pre> -U &lt;integer&gt; * Number of runs</pre> * * <pre> -P &lt;nr of parents&gt; * Maximum number of parents</pre> * * <pre> -R * Use arc reversal operation. * (default false)</pre> * * <pre> -P &lt;nr of parents&gt; * Maximum number of parents</pre> * * <pre> -R * Use arc reversal operation. * (default false)</pre> * * <pre> -N * Initial structure is empty (instead of Naive Bayes)</pre> * * <pre> -mbc * Applies a Markov Blanket correction to the network structure, * after a network structure is learned. 
This ensures that all * nodes in the network are part of the Markov blanket of the * classifier node.</pre> * * <pre> -S [BAYES|MDL|ENTROPY|AIC|CROSS_CLASSIC|CROSS_BAYES] * Score type (BAYES, BDeu, MDL, ENTROPY and AIC)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String sTabuList = Utils.getOption('L', options); if (sTabuList.length() != 0) { setTabuList(Integer.parseInt(sTabuList)); } String sRuns = Utils.getOption('U', options); if (sRuns.length() != 0) { setRuns(Integer.parseInt(sRuns)); } super.setOptions(options); } // setOptions /** * Gets the current settings of the search algorithm. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { String[] superOptions = super.getOptions(); String[] options = new String[7 + superOptions.length]; int current = 0; options[current++] = "-L"; options[current++] = "" + getTabuList(); options[current++] = "-U"; options[current++] = "" + getRuns(); // insert options from parent class for (int iOption = 0; iOption < superOptions.length; iOption++) { options[current++] = superOptions[iOption]; } // Fill up rest with empty strings, not nulls! while (current < options.length) { options[current++] = ""; } return options; } // getOptions /** * This will return a string describing the classifier. * @return The string. */ public String globalInfo() { return "This Bayes Network learning algorithm uses tabu search for finding a well scoring " + "Bayes network structure. Tabu search is hill climbing till an optimum is reached. The " + "following step is the least worst possible step. The last X steps are kept in a list and " + "none of the steps in this so called tabu list is considered in taking the next step. 
" + "The best network found in this traversal is returned.\n\n" + "For more information see:\n\n" + getTechnicalInformation().toString(); } // globalInfo /** * @return a string to describe the Runs option. */ public String runsTipText() { return "Sets the number of steps to be performed."; } // runsTipText /** * @return a string to describe the TabuList option. */ public String tabuListTipText() { return "Sets the length of the tabu list."; } // tabuListTipText /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } // TabuSearch
12,246
28.510843
403
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/AbstractEvaluationMetric.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * AbstractEvaluationMetric.java * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.evaluation; import java.io.Serializable; import java.util.ArrayList; import java.util.List; import java.util.Set; //import weka.gui.beans.PluginManager; /** * Abstract base class for pluggable classification/regression evaluation * metrics. 
* * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: 9320 $ */ public abstract class AbstractEvaluationMetric implements Serializable { /** For serialization */ private static final long serialVersionUID = -924507718482386887L; /** * Gets a list of freshly instantiated concrete implementations of available * plugin metrics or null if there are no plugin metrics available * * @return a list of plugin metrics or null if there are no plugin metrics */ public static ArrayList<AbstractEvaluationMetric> getPluginMetrics() { ArrayList<AbstractEvaluationMetric> pluginMetricsList = null; Set<String> pluginMetrics = PluginManager .getPluginNamesOfType(AbstractEvaluationMetric.class.getName()); if (pluginMetrics != null) { pluginMetricsList = new ArrayList<AbstractEvaluationMetric>(); for (String metric : pluginMetrics) { try { Object impl = PluginManager.getPluginInstance( AbstractEvaluationMetric.class.getName(), metric); if (impl instanceof AbstractEvaluationMetric) { pluginMetricsList.add((AbstractEvaluationMetric) impl); } } catch (Exception ex) { ex.printStackTrace(); } } } return pluginMetricsList; } /** * Exception for subclasses to throw if asked for a statistic that is not part * of their implementation * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: 9320 $ */ public class UnknownStatisticException extends IllegalArgumentException { /** For serialization */ private static final long serialVersionUID = -8787045492227999839L; /** * Constructs a new UnknownStatisticsException * * @param message the exception's message */ public UnknownStatisticException(String message) { super(message); } } /** * Base evaluation object for subclasses to access for statistics. IMPORTANT: * subclasses should treat this object as read-only */ protected Evaluation m_baseEvaluation; /** * Set the base evaluation object to use. IMPORTANT: subclasses should treat * this object as read-only. 
* * @param eval */ public void setBaseEvaluation(Evaluation eval) { m_baseEvaluation = eval; } /** * Return true if this evaluation metric can be computed when the class is * nominal * * @return true if this evaluation metric can be computed when the class is * nominal */ public abstract boolean appliesToNominalClass(); /** * Return true if this evaluation metric can be computed when the class is * numeric * * @return true if this evaluation metric can be computed when the class is * numeric */ public abstract boolean appliesToNumericClass(); /** * Get the name of this metric * * @return the name of this metric */ public abstract String getMetricName(); /** * Get a short description of this metric (algorithm, forumulas etc.). * * @return a short description of this metric */ public abstract String getMetricDescription(); /** * Get a list of the names of the statistics that this metrics computes. E.g. * an information theoretic evaluation measure might compute total number of * bits as well as avergate bits/instance * * @return the names of the statistics that this metric computes */ public abstract List<String> getStatisticNames(); /** * Get the value of the named statistic * * @param statName the name of the statistic to compute the value for * @return the computed statistic or Utils.missingValue() if the statistic * can't be computed for some reason */ public abstract double getStatistic(String statName); }
4,887
29.742138
80
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/ConfusionMatrix.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NominalPrediction.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.evaluation; import weka.classifiers.CostMatrix; import weka.core.FastVector; import weka.core.Matrix; import weka.core.RevisionUtils; import weka.core.Utils; /** * Cells of this matrix correspond to counts of the number (or weight) * of predictions for each actual value / predicted value combination. * * @author Len Trigg (len@reeltwo.com) * @version $Revision: 8034 $ */ public class ConfusionMatrix extends Matrix { /** for serialization */ private static final long serialVersionUID = -181789981401504090L; /** Stores the names of the classes */ protected String [] m_ClassNames; /** * Creates the confusion matrix with the given class names. * * @param classNames an array containing the names the classes. */ public ConfusionMatrix(String [] classNames) { super(classNames.length, classNames.length); m_ClassNames = (String [])classNames.clone(); } /** * Makes a copy of this ConfusionMatrix after applying the * supplied CostMatrix to the cells. The resulting ConfusionMatrix * can be used to get cost-weighted statistics. * * @param costs the CostMatrix. * @return a ConfusionMatrix that has had costs applied. * @exception Exception if the CostMatrix is not of the same size * as this ConfusionMatrix. 
*/ public ConfusionMatrix makeWeighted(CostMatrix costs) throws Exception { if (costs.size() != size()) { throw new Exception("Cost and confusion matrices must be the same size"); } ConfusionMatrix weighted = new ConfusionMatrix(m_ClassNames); for (int row = 0; row < size(); row++) { for (int col = 0; col < size(); col++) { weighted.setElement(row, col, getElement(row, col) * costs.getElement(row, col)); } } return weighted; } /** * Creates and returns a clone of this object. * * @return a clone of this instance. */ public Object clone() { ConfusionMatrix m = (ConfusionMatrix)super.clone(); m.m_ClassNames = (String [])m_ClassNames.clone(); return m; } /** * Gets the number of classes. * * @return the number of classes */ public int size() { return m_ClassNames.length; } /** * Gets the name of one of the classes. * * @param index the index of the class. * @return the class name. */ public String className(int index) { return m_ClassNames[index]; } /** * Includes a prediction in the confusion matrix. * * @param pred the NominalPrediction to include * @exception Exception if no valid prediction was made (i.e. * unclassified). */ public void addPrediction(NominalPrediction pred) throws Exception { if (pred.predicted() == NominalPrediction.MISSING_VALUE) { throw new Exception("No predicted value given."); } if (pred.actual() == NominalPrediction.MISSING_VALUE) { throw new Exception("No actual value given."); } addElement((int)pred.actual(), (int)pred.predicted(), pred.weight()); } /** * Includes a whole bunch of predictions in the confusion matrix. * * @param predictions a FastVector containing the NominalPredictions * to include * @exception Exception if no valid prediction was made (i.e. * unclassified). 
*/ public void addPredictions(FastVector predictions) throws Exception { for (int i = 0; i < predictions.size(); i++) { addPrediction((NominalPrediction)predictions.elementAt(i)); } } /** * Gets the performance with respect to one of the classes * as a TwoClassStats object. * * @param classIndex the index of the class of interest. * @return the generated TwoClassStats object. */ public TwoClassStats getTwoClassStats(int classIndex) { double fp = 0, tp = 0, fn = 0, tn = 0; for (int row = 0; row < size(); row++) { for (int col = 0; col < size(); col++) { if (row == classIndex) { if (col == classIndex) { tp += getElement(row, col); } else { fn += getElement(row, col); } } else { if (col == classIndex) { fp += getElement(row, col); } else { tn += getElement(row, col); } } } } return new TwoClassStats(tp, fp, tn, fn); } /** * Gets the number of correct classifications (that is, for which a * correct prediction was made). (Actually the sum of the weights of * these classifications) * * @return the number of correct classifications */ public double correct() { double correct = 0; for (int i = 0; i < size(); i++) { correct += getElement(i, i); } return correct; } /** * Gets the number of incorrect classifications (that is, for which an * incorrect prediction was made). (Actually the sum of the weights of * these classifications) * * @return the number of incorrect classifications */ public double incorrect() { double incorrect = 0; for (int row = 0; row < size(); row++) { for (int col = 0; col < size(); col++) { if (row != col) { incorrect += getElement(row, col); } } } return incorrect; } /** * Gets the number of predictions that were made * (actually the sum of the weights of predictions where the * class value was known). 
* * @return the number of predictions with known class */ public double total() { double total = 0; for (int row = 0; row < size(); row++) { for (int col = 0; col < size(); col++) { total += getElement(row, col); } } return total; } /** * Returns the estimated error rate. * * @return the estimated error rate (between 0 and 1). */ public double errorRate() { return incorrect() / total(); } /** * Calls toString() with a default title. * * @return the confusion matrix as a string */ public String toString() { return toString("=== Confusion Matrix ===\n"); } /** * Outputs the performance statistics as a classification confusion * matrix. For each class value, shows the distribution of * predicted class values. * * @param title the title for the confusion matrix * @return the confusion matrix as a String */ public String toString(String title) { StringBuffer text = new StringBuffer(); char [] IDChars = {'a','b','c','d','e','f','g','h','i','j', 'k','l','m','n','o','p','q','r','s','t', 'u','v','w','x','y','z'}; int IDWidth; boolean fractional = false; // Find the maximum value in the matrix // and check for fractional display requirement double maxval = 0; for (int i = 0; i < size(); i++) { for (int j = 0; j < size(); j++) { double current = getElement(i, j); if (current < 0) { current *= -10; } if (current > maxval) { maxval = current; } double fract = current - Math.rint(current); if (!fractional && ((Math.log(fract) / Math.log(10)) >= -2)) { fractional = true; } } } IDWidth = 1 + Math.max((int)(Math.log(maxval) / Math.log(10) + (fractional ? 
3 : 0)), (int)(Math.log(size()) / Math.log(IDChars.length))); text.append(title).append("\n"); for (int i = 0; i < size(); i++) { if (fractional) { text.append(" ").append(num2ShortID(i,IDChars,IDWidth - 3)) .append(" "); } else { text.append(" ").append(num2ShortID(i,IDChars,IDWidth)); } } text.append(" actual class\n"); for (int i = 0; i< size(); i++) { for (int j = 0; j < size(); j++) { text.append(" ").append( Utils.doubleToString(getElement(i, j), IDWidth, (fractional ? 2 : 0))); } text.append(" | ").append(num2ShortID(i,IDChars,IDWidth)) .append(" = ").append(m_ClassNames[i]).append("\n"); } return text.toString(); } /** * Method for generating indices for the confusion matrix. * * @param num integer to format * @return the formatted integer as a string */ private static String num2ShortID(int num, char [] IDChars, int IDWidth) { char ID [] = new char [IDWidth]; int i; for(i = IDWidth - 1; i >=0; i--) { ID[i] = IDChars[num % IDChars.length]; num = num / IDChars.length - 1; if (num < 0) { break; } } for(i--; i >= 0; i--) { ID[i] = ' '; } return new String(ID); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
9,493
26.048433
79
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/CostCurve.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CostCurve.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.evaluation; import weka.classifiers.Classifier; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.FastVector; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * Generates points illustrating probablity cost tradeoffs that can be * obtained by varying the threshold value between classes. For example, * the typical threshold value of 0.5 means the predicted probability of * "positive" must be higher than 0.5 for the instance to be predicted as * "positive". * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class CostCurve implements RevisionHandler { /** The name of the relation used in cost curve datasets */ public static final String RELATION_NAME = "CostCurve"; /** attribute name: Probability Cost Function */ public static final String PROB_COST_FUNC_NAME = "Probability Cost Function"; /** attribute name: Normalized Expected Cost */ public static final String NORM_EXPECTED_COST_NAME = "Normalized Expected Cost"; /** attribute name: Threshold */ public static final String THRESHOLD_NAME = "Threshold"; /** * Calculates the performance stats for the default class and return * results as a set of Instances. 
The * structure of these Instances is as follows:<p> <ul> * <li> <b>Probability Cost Function </b> * <li> <b>Normalized Expected Cost</b> * <li> <b>Threshold</b> contains the probability threshold that gives * rise to the previous performance values. * </ul> <p> * * @see TwoClassStats * @param predictions the predictions to base the curve on * @return datapoints as a set of instances, null if no predictions * have been made. */ public Instances getCurve(FastVector predictions) { if (predictions.size() == 0) { return null; } return getCurve(predictions, ((NominalPrediction)predictions.elementAt(0)) .distribution().length - 1); } /** * Calculates the performance stats for the desired class and return * results as a set of Instances. * * @param predictions the predictions to base the curve on * @param classIndex index of the class of interest. * @return datapoints as a set of instances. */ public Instances getCurve(FastVector predictions, int classIndex) { if ((predictions.size() == 0) || (((NominalPrediction)predictions.elementAt(0)) .distribution().length <= classIndex)) { return null; } ThresholdCurve tc = new ThresholdCurve(); Instances threshInst = tc.getCurve(predictions, classIndex); Instances insts = makeHeader(); int fpind = threshInst.attribute(ThresholdCurve.FP_RATE_NAME).index(); int tpind = threshInst.attribute(ThresholdCurve.TP_RATE_NAME).index(); int threshind = threshInst.attribute(ThresholdCurve.THRESHOLD_NAME).index(); double [] vals; double fpval, tpval, thresh; for (int i = 0; i< threshInst.numInstances(); i++) { fpval = threshInst.instance(i).value(fpind); tpval = threshInst.instance(i).value(tpind); thresh = threshInst.instance(i).value(threshind); vals = new double [3]; vals[0] = 0; vals[1] = fpval; vals[2] = thresh; insts.add(new DenseInstance(1.0, vals)); vals = new double [3]; vals[0] = 1; vals[1] = 1.0 - tpval; vals[2] = thresh; insts.add(new DenseInstance(1.0, vals)); } return insts; } /** * generates the header * * @return the header */ 
private Instances makeHeader() { FastVector fv = new FastVector(); fv.addElement(new Attribute(PROB_COST_FUNC_NAME)); fv.addElement(new Attribute(NORM_EXPECTED_COST_NAME)); fv.addElement(new Attribute(THRESHOLD_NAME)); return new Instances(RELATION_NAME, fv, 100); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Tests the CostCurve generation from the command line. * The classifier is currently hardcoded. Pipe in an arff file. * * @param args currently ignored */ public static void main(String [] args) { try { Instances inst = new Instances(new java.io.InputStreamReader(System.in)); inst.setClassIndex(inst.numAttributes() - 1); CostCurve cc = new CostCurve(); EvaluationUtils eu = new EvaluationUtils(); Classifier classifier = new weka.classifiers.functions.Logistic(); FastVector predictions = new FastVector(); for (int i = 0; i < 2; i++) { // Do two runs. eu.setSeed(i); predictions.appendElements(eu.getCVPredictions(classifier, inst, 10)); //System.out.println("\n\n\n"); } Instances result = cc.getCurve(predictions); System.out.println(result); } catch (Exception ex) { ex.printStackTrace(); } } }
5,773
31.994286
82
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/Evaluation.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Evaluation.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.evaluation; import java.beans.BeanInfo; import java.beans.Introspector; import java.beans.MethodDescriptor; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.FileReader; import java.io.InputStream; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.OutputStream; import java.io.Reader; import java.io.Serializable; import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.zip.GZIPInputStream; import java.util.zip.GZIPOutputStream; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.ConditionalDensityEstimator; import weka.classifiers.CostMatrix; import weka.classifiers.IntervalEstimator; import weka.classifiers.Sourcable; import weka.classifiers.UpdateableClassifier; import weka.classifiers.evaluation.output.prediction.AbstractOutput; import weka.classifiers.evaluation.output.prediction.PlainText; import weka.classifiers.xml.XMLClassifier; import 
weka.core.BatchPredictor; import weka.core.Drawable; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Summarizable; import weka.core.Utils; import weka.core.Version; import weka.core.converters.ConverterUtils.DataSink; import weka.core.converters.ConverterUtils.DataSource; import weka.core.xml.KOML; import weka.core.xml.XMLOptions; import weka.core.xml.XMLSerialization; import weka.estimators.UnivariateKernelEstimator; /** * Class for evaluating machine learning models. * <p/> * * ------------------------------------------------------------------- * <p/> * * General options when evaluating a learning scheme from the command-line: * <p/> * * -t filename <br/> * Name of the file with the training data. (required) * <p/> * * -T filename <br/> * Name of the file with the test data. If missing a cross-validation is * performed. * <p/> * * -c index <br/> * Index of the class attribute (1, 2, ...; default: last). * <p/> * * -x number <br/> * The number of folds for the cross-validation (default: 10). * <p/> * * -no-cv <br/> * No cross validation. If no test file is provided, no evaluation is done. * <p/> * * -split-percentage percentage <br/> * Sets the percentage for the train/test set split, e.g., 66. * <p/> * * -preserve-order <br/> * Preserves the order in the percentage split instead of randomizing the data * first with the seed value ('-s'). * <p/> * * -s seed <br/> * Random number seed for the cross-validation and percentage split (default: * 1). * <p/> * * -m filename <br/> * The name of a file containing a cost matrix. * <p/> * * -disable list <br/> * A comma separated list of metric names not to include in the output. * <p/> * * -l filename <br/> * Loads classifier from the given file. 
In case the filename ends with ".xml", * a PMML file is loaded or, if that fails, options are loaded from XML. * <p/> * * -d filename <br/> * Saves classifier built from the training data into the given file. In case * the filename ends with ".xml" the options are saved XML, not the model. * <p/> * * -v <br/> * Outputs no statistics for the training data. * <p/> * * -o <br/> * Outputs statistics only, not the classifier. * <p/> * * -i <br/> * Outputs information-retrieval statistics per class. * <p/> * * -k <br/> * Outputs information-theoretic statistics. * <p/> * * -classifications * "weka.classifiers.evaluation.output.prediction.AbstractOutput + options" <br/> * Uses the specified class for generating the classification output. E.g.: * weka.classifiers.evaluation.output.prediction.PlainText or : * weka.classifiers.evaluation.output.prediction.CSV * * -p range <br/> * Outputs predictions for test instances (or the train instances if no test * instances provided and -no-cv is used), along with the attributes in the * specified range (and nothing else). Use '-p 0' if no attributes are desired. * <p/> * Deprecated: use "-classifications ..." instead. * <p/> * * -distribution <br/> * Outputs the distribution instead of only the prediction in conjunction with * the '-p' option (only nominal classes). * <p/> * Deprecated: use "-classifications ..." instead. * <p/> * * -no-predictions <br/> * Turns off the collection of predictions in order to conserve memory. * <p/> * * -r <br/> * Outputs cumulative margin distribution (and nothing else). * <p/> * * -g <br/> * Only for classifiers that implement "Graphable." Outputs the graph * representation of the classifier (and nothing else). * <p/> * * -xml filename | xml-string <br/> * Retrieves the options from the XML-data instead of the command line. * <p/> * * -threshold-file file <br/> * The file to save the threshold data to. The format is determined by the * extensions, e.g., '.arff' for ARFF format or '.csv' for CSV. 
* <p/> * * -threshold-label label <br/> * The class label to determine the threshold data for (default is the first * label) * <p/> * * ------------------------------------------------------------------- * <p/> * * Example usage as the main of a classifier (called FunkyClassifier): * <code> <pre> * public static void main(String [] args) { * runClassifier(new FunkyClassifier(), args); * } * </pre> </code> * <p/> * * ------------------------------------------------------------------ * <p/> * * Example usage from within an application: <code> <pre> * Instances trainInstances = ... instances got from somewhere * Instances testInstances = ... instances got from somewhere * Classifier scheme = ... scheme got from somewhere * * Evaluation evaluation = new Evaluation(trainInstances); * evaluation.evaluateModel(scheme, testInstances); * System.out.println(evaluation.toSummaryString()); * </pre> </code> * * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision: 9788 $ */ public class Evaluation implements Summarizable, RevisionHandler, Serializable { /** For serialization */ private static final long serialVersionUID = -7010314486866816271L; /** The number of classes. */ protected int m_NumClasses; /** The number of folds for a cross-validation. */ protected int m_NumFolds; /** The weight of all incorrectly classified instances. */ protected double m_Incorrect; /** The weight of all correctly classified instances. */ protected double m_Correct; /** The weight of all unclassified instances. */ protected double m_Unclassified; /*** The weight of all instances that had no class assigned to them. */ protected double m_MissingClass; /** The weight of all instances that had a class assigned to them. */ protected double m_WithClass; /** Array for storing the confusion matrix. */ protected double[][] m_ConfusionMatrix; /** The names of the classes. */ protected String[] m_ClassNames; /** Is the class nominal or numeric? 
*/ protected boolean m_ClassIsNominal; /** The prior probabilities of the classes. */ protected double[] m_ClassPriors; /** The sum of counts for priors. */ protected double m_ClassPriorsSum; /** The cost matrix (if given). */ protected CostMatrix m_CostMatrix; /** The total cost of predictions (includes instance weights). */ protected double m_TotalCost; /** Sum of errors. */ protected double m_SumErr; /** Sum of absolute errors. */ protected double m_SumAbsErr; /** Sum of squared errors. */ protected double m_SumSqrErr; /** Sum of class values. */ protected double m_SumClass; /** Sum of squared class values. */ protected double m_SumSqrClass; /*** Sum of predicted values. */ protected double m_SumPredicted; /** Sum of squared predicted values. */ protected double m_SumSqrPredicted; /** Sum of predicted * class values. */ protected double m_SumClassPredicted; /** Sum of absolute errors of the prior. */ protected double m_SumPriorAbsErr; /** Sum of absolute errors of the prior. */ protected double m_SumPriorSqrErr; /** Total Kononenko & Bratko Information. */ protected double m_SumKBInfo; /*** Resolution of the margin histogram. */ protected static int k_MarginResolution = 500; /** Cumulative margin distribution. */ protected double m_MarginCounts[]; /** Number of non-missing class training instances seen. */ protected int m_NumTrainClassVals; /** Array containing all numeric training class values seen. */ protected double[] m_TrainClassVals; /** Array containing all numeric training class weights. */ protected double[] m_TrainClassWeights; /** Numeric class estimator for prior. */ protected UnivariateKernelEstimator m_PriorEstimator; /** Whether complexity statistics are available. */ protected boolean m_ComplexityStatisticsAvailable = true; /** * The minimum probablility accepted from an estimator to avoid taking log(0) * in Sf calculations. */ protected static final double MIN_SF_PROB = Double.MIN_VALUE; /** Total entropy of prior predictions. 
*/ protected double m_SumPriorEntropy; /** Total entropy of scheme predictions. */ protected double m_SumSchemeEntropy; /** Whether coverage statistics are available. */ protected boolean m_CoverageStatisticsAvailable = true; /** The confidence level used for coverage statistics. */ protected double m_ConfLevel = 0.95; /** Total size of predicted regions at the given confidence level. */ protected double m_TotalSizeOfRegions; /** Total coverage of test cases at the given confidence level. */ protected double m_TotalCoverage; /** Minimum target value. */ protected double m_MinTarget; /** Maximum target value. */ protected double m_MaxTarget; /** The list of predictions that have been generated (for computing AUC). */ protected FastVector m_Predictions; /** * enables/disables the use of priors, e.g., if no training set is present in * case of de-serialized schemes. */ protected boolean m_NoPriors = false; /** The header of the training set. */ protected Instances m_Header; /** whether to discard predictions (and save memory). 
*/ protected boolean m_DiscardPredictions; /** Holds plugin evaluation metrics */ protected List<AbstractEvaluationMetric> m_pluginMetrics; /** The list of metrics to display in the output */ protected List<String> m_metricsToDisplay = new ArrayList<String>(); public static final String[] BUILT_IN_EVAL_METRICS = { "Correct", "Incorrect", "Kappa", "Total cost", "Average cost", "KB relative", "KB information", "Correlation", "Complexity 0", "Complexity scheme", "Complexity improvement", "MAE", "RMSE", "RAE", "RRSE", "Coverage", "Region size", "TP rate", "FP rate", "Precision", "Recall", "F-measure", "MCC", "ROC area", "PRC area" }; /** * Utility method to get a list of the names of all built-in and plugin * evaluation metrics * * @return the complete list of available evaluation metrics */ public static List<String> getAllEvaluationMetricNames() { List<String> allEvals = new ArrayList<String>(); for (String s : Evaluation.BUILT_IN_EVAL_METRICS) { allEvals.add(s); } final List<AbstractEvaluationMetric> pluginMetrics = AbstractEvaluationMetric .getPluginMetrics(); if (pluginMetrics != null) { for (AbstractEvaluationMetric m : pluginMetrics) { if (m instanceof InformationRetrievalEvaluationMetric) { List<String> statNames = m.getStatisticNames(); for (String s : statNames) { allEvals.add(s); } } else { allEvals.add(m.getMetricName()); } } } return allEvals; } /** * Initializes all the counters for the evaluation. Use * <code>useNoPriors()</code> if the dataset is the test set and you can't * initialize with the priors from the training set via * <code>setPriors(Instances)</code>. * * @param data set of training instances, to get some header information and * prior class distribution information * @throws Exception if the class is not defined * @see #useNoPriors() * @see #setPriors(Instances) */ public Evaluation(Instances data) throws Exception { this(data, null); } /** * Initializes all the counters for the evaluation and also takes a cost * matrix as parameter. 
Use <code>useNoPriors()</code> if the dataset is the * test set and you can't initialize with the priors from the training set via * <code>setPriors(Instances)</code>. * * @param data set of training instances, to get some header information and * prior class distribution information * @param costMatrix the cost matrix---if null, default costs will be used * @throws Exception if cost matrix is not compatible with data, the class is * not defined or the class is numeric * @see #useNoPriors() * @see #setPriors(Instances) */ public Evaluation(Instances data, CostMatrix costMatrix) throws Exception { m_Header = new Instances(data, 0); m_NumClasses = data.numClasses(); m_NumFolds = 1; m_ClassIsNominal = data.classAttribute().isNominal(); if (m_ClassIsNominal) { m_ConfusionMatrix = new double[m_NumClasses][m_NumClasses]; m_ClassNames = new String[m_NumClasses]; for (int i = 0; i < m_NumClasses; i++) { m_ClassNames[i] = data.classAttribute().value(i); } } m_CostMatrix = costMatrix; if (m_CostMatrix != null) { if (!m_ClassIsNominal) { throw new Exception("Class has to be nominal if cost matrix given!"); } if (m_CostMatrix.size() != m_NumClasses) { throw new Exception("Cost matrix not compatible with data!"); } } m_ClassPriors = new double[m_NumClasses]; setPriors(data); m_MarginCounts = new double[k_MarginResolution + 1]; for (String s : BUILT_IN_EVAL_METRICS) { m_metricsToDisplay.add(s.toLowerCase()); } m_pluginMetrics = AbstractEvaluationMetric.getPluginMetrics(); if (m_pluginMetrics != null) { for (AbstractEvaluationMetric m : m_pluginMetrics) { m.setBaseEvaluation(this); if (m instanceof InformationRetrievalEvaluationMetric) { List<String> statNames = m.getStatisticNames(); for (String s : statNames) { m_metricsToDisplay.add(s.toLowerCase()); } } else { m_metricsToDisplay.add(m.getMetricName().toLowerCase()); } } } } /** * Returns the header of the underlying dataset. 
* * @return the header information */ public Instances getHeader() { return m_Header; } /** * Sets whether to discard predictions, ie, not storing them for future * reference via predictions() method in order to conserve memory. * * @param value true if to discard the predictions * @see #predictions() */ public void setDiscardPredictions(boolean value) { m_DiscardPredictions = value; if (m_DiscardPredictions) m_Predictions = null; } /** * Returns whether predictions are not recorded at all, in order to conserve * memory. * * @return true if predictions are not recorded * @see #predictions() */ public boolean getDiscardPredictions() { return m_DiscardPredictions; } /** * Returns the list of plugin metrics in use (or null if there are none) * * @return the list of plugin metrics */ public List<AbstractEvaluationMetric> getPluginMetrics() { return m_pluginMetrics; } /** * Set a list of the names of metrics to have appear in the output. The * default is to display all built in metrics and plugin metrics that haven't * been globally disabled. * * @param display a list of metric names to have appear in the output */ public void setMetricsToDisplay(List<String> display) { // make sure all metric names are lower case for matching m_metricsToDisplay.clear(); for (String s : display) { m_metricsToDisplay.add(s.trim().toLowerCase()); } } /** * Get a list of the names of metrics to have appear in the output The default * is to display all built in metrics and plugin metrics that haven't been * globally disabled. * * @param display a list of metric names to have appear in the output */ public List<String> getMetricsToDisplay() { return m_metricsToDisplay; } /** * Remove the supplied list of metrics from the list of those to display. 
* * @param metricsNotToDisplay */ public void dontDisplayMetrics(List<String> metricsNotToDisplay) { for (String s : metricsNotToDisplay) { m_metricsToDisplay.remove(s.toLowerCase()); } } /** * Get the named plugin evaluation metric * * @param name the name of the metric (as returned by * AbstractEvaluationMetric.getName()) or the fully qualified class * name of the metric to find * * @return the metric or null if the metric is not in the list of plugin * metrics */ public AbstractEvaluationMetric getPluginMetric(String name) { AbstractEvaluationMetric match = null; if (m_pluginMetrics != null) { for (AbstractEvaluationMetric m : m_pluginMetrics) { if (m.getMetricName().equals(name) || m.getClass().getName().equals(name)) { match = m; break; } } } return match; } /** * Returns the area under ROC for those predictions that have been collected * in the evaluateClassifier(Classifier, Instances) method. Returns * Utils.missingValue() if the area is not available. * * @param classIndex the index of the class to consider as "positive" * @return the area under the ROC curve or not a number */ public double areaUnderROC(int classIndex) { // Check if any predictions have been collected if (m_Predictions == null) { return Utils.missingValue(); } else { ThresholdCurve tc = new ThresholdCurve(); Instances result = tc.getCurve(m_Predictions, classIndex); return ThresholdCurve.getROCArea(result); } } /** * Calculates the weighted (by class size) AUC. * * @return the weighted AUC. 
*/ public double weightedAreaUnderROC() { double[] classCounts = new double[m_NumClasses]; double classCountSum = 0; for (int i = 0; i < m_NumClasses; i++) { for (int j = 0; j < m_NumClasses; j++) { classCounts[i] += m_ConfusionMatrix[i][j]; } classCountSum += classCounts[i]; } double aucTotal = 0; for (int i = 0; i < m_NumClasses; i++) { double temp = areaUnderROC(i); if (!Utils.isMissingValue(temp)) { aucTotal += (temp * classCounts[i]); } } return aucTotal / classCountSum; } /** * Returns the area under precision-recall curve (AUPRC) for those predictions * that have been collected in the evaluateClassifier(Classifier, Instances) * method. Returns Utils.missingValue() if the area is not available. * * @param classIndex the index of the class to consider as "positive" * @return the area under the precision-recall curve or not a number */ public double areaUnderPRC(int classIndex) { // Check if any predictions have been collected if (m_Predictions == null) { return Utils.missingValue(); } else { ThresholdCurve tc = new ThresholdCurve(); Instances result = tc.getCurve(m_Predictions, classIndex); return ThresholdCurve.getPRCArea(result); } } /** * Calculates the weighted (by class size) AUPRC. * * @return the weighted AUPRC. */ public double weightedAreaUnderPRC() { double[] classCounts = new double[m_NumClasses]; double classCountSum = 0; for (int i = 0; i < m_NumClasses; i++) { for (int j = 0; j < m_NumClasses; j++) { classCounts[i] += m_ConfusionMatrix[i][j]; } classCountSum += classCounts[i]; } double auprcTotal = 0; for (int i = 0; i < m_NumClasses; i++) { double temp = areaUnderPRC(i); if (!Utils.isMissingValue(temp)) { auprcTotal += (temp * classCounts[i]); } } return auprcTotal / classCountSum; } /** * Returns a copy of the confusion matrix. 
* * @return a copy of the confusion matrix as a two-dimensional array */ public double[][] confusionMatrix() { double[][] newMatrix = new double[m_ConfusionMatrix.length][0]; for (int i = 0; i < m_ConfusionMatrix.length; i++) { newMatrix[i] = new double[m_ConfusionMatrix[i].length]; System.arraycopy(m_ConfusionMatrix[i], 0, newMatrix[i], 0, m_ConfusionMatrix[i].length); } return newMatrix; } /** * Performs a (stratified if class is nominal) cross-validation for a * classifier on a set of instances. Now performs a deep copy of the * classifier before each call to buildClassifier() (just in case the * classifier is not initialized properly). * * @param classifier the classifier with any options set. * @param data the data on which the cross-validation is to be performed * @param numFolds the number of folds for the cross-validation * @param random random number generator for randomization * @param forPredictionsPrinting varargs parameter that, if supplied, is * expected to hold a * weka.classifiers.evaluation.output.prediction.AbstractOutput * object * @throws Exception if a classifier could not be generated successfully or * the class is not defined */ public void crossValidateModel(Classifier classifier, Instances data, int numFolds, Random random, Object... 
forPredictionsPrinting) throws Exception { // Make a copy of the data we can reorder data = new Instances(data); data.randomize(random); if (data.classAttribute().isNominal()) { data.stratify(numFolds); } // We assume that the first element is a // weka.classifiers.evaluation.output.prediction.AbstractOutput object AbstractOutput classificationOutput = null; if (forPredictionsPrinting.length > 0) { // print the header first classificationOutput = (AbstractOutput) forPredictionsPrinting[0]; classificationOutput.setHeader(data); classificationOutput.printHeader(); } // Do the folds for (int i = 0; i < numFolds; i++) { Instances train = data.trainCV(numFolds, i, random); setPriors(train); Classifier copiedClassifier = AbstractClassifier.makeCopy(classifier); copiedClassifier.buildClassifier(train); Instances test = data.testCV(numFolds, i); evaluateModel(copiedClassifier, test, forPredictionsPrinting); } m_NumFolds = numFolds; if (classificationOutput != null) classificationOutput.printFooter(); } /** * Performs a (stratified if class is nominal) cross-validation for a * classifier on a set of instances. * * @param classifierString a string naming the class of the classifier * @param data the data on which the cross-validation is to be performed * @param numFolds the number of folds for the cross-validation * @param options the options to the classifier. Any options * @param random the random number generator for randomizing the data accepted * by the classifier will be removed from this array. * @throws Exception if a classifier could not be generated successfully or * the class is not defined */ public void crossValidateModel(String classifierString, Instances data, int numFolds, String[] options, Random random) throws Exception { crossValidateModel(AbstractClassifier.forName(classifierString, options), data, numFolds, random); } /** * Evaluates a classifier with the options given in an array of strings. 
* <p/> * * Valid options are: * <p/> * * -t filename <br/> * Name of the file with the training data. (required) * <p/> * * -T filename <br/> * Name of the file with the test data. If missing a cross-validation is * performed. * <p/> * * -c index <br/> * Index of the class attribute (1, 2, ...; default: last). * <p/> * * -x number <br/> * The number of folds for the cross-validation (default: 10). * <p/> * * -no-cv <br/> * No cross validation. If no test file is provided, no evaluation is done. * <p/> * * -split-percentage percentage <br/> * Sets the percentage for the train/test set split, e.g., 66. * <p/> * * -preserve-order <br/> * Preserves the order in the percentage split instead of randomizing the data * first with the seed value ('-s'). * <p/> * * -s seed <br/> * Random number seed for the cross-validation and percentage split (default: * 1). * <p/> * * -m filename <br/> * The name of a file containing a cost matrix. * <p/> * * -l filename <br/> * Loads classifier from the given file. In case the filename ends with * ".xml",a PMML file is loaded or, if that fails, options are loaded from * XML. * <p/> * * -d filename <br/> * Saves classifier built from the training data into the given file. In case * the filename ends with ".xml" the options are saved XML, not the model. * <p/> * * -v <br/> * Outputs no statistics for the training data. * <p/> * * -o <br/> * Outputs statistics only, not the classifier. * <p/> * * -i <br/> * Outputs detailed information-retrieval statistics per class. * <p/> * * -k <br/> * Outputs information-theoretic statistics. * <p/> * * -classifications * "weka.classifiers.evaluation.output.prediction.AbstractOutput + options" <br/> * Uses the specified class for generating the classification output. 
E.g.: * weka.classifiers.evaluation.output.prediction.PlainText or : * weka.classifiers.evaluation.output.prediction.CSV * * -p range <br/> * Outputs predictions for test instances (or the train instances if no test * instances provided and -no-cv is used), along with the attributes in the * specified range (and nothing else). Use '-p 0' if no attributes are * desired. * <p/> * Deprecated: use "-classifications ..." instead. * <p/> * * -distribution <br/> * Outputs the distribution instead of only the prediction in conjunction with * the '-p' option (only nominal classes). * <p/> * Deprecated: use "-classifications ..." instead. * <p/> * * -no-predictions <br/> * Turns off the collection of predictions in order to conserve memory. * <p/> * * -r <br/> * Outputs cumulative margin distribution (and nothing else). * <p/> * * -g <br/> * Only for classifiers that implement "Graphable." Outputs the graph * representation of the classifier (and nothing else). * <p/> * * -xml filename | xml-string <br/> * Retrieves the options from the XML-data instead of the command line. * <p/> * * -threshold-file file <br/> * The file to save the threshold data to. The format is determined by the * extensions, e.g., '.arff' for ARFF format or '.csv' for CSV. 
* <p/> * * -threshold-label label <br/> * The class label to determine the threshold data for (default is the first * label) * <p/> * * @param classifierString class of machine learning classifier as a string * @param options the array of string containing the options * @throws Exception if model could not be evaluated successfully * @return a string describing the results */ public static String evaluateModel(String classifierString, String[] options) throws Exception { Classifier classifier; // Create classifier try { classifier = // (Classifier)Class.forName(classifierString).newInstance(); AbstractClassifier.forName(classifierString, null); } catch (Exception e) { throw new Exception("Can't find class with name " + classifierString + '.'); } return evaluateModel(classifier, options); } /** * A test method for this class. Just extracts the first command line argument * as a classifier class name and calls evaluateModel. * * @param args an array of command line arguments, the first of which must be * the class name of a classifier. */ public static void main(String[] args) { try { if (args.length == 0) { throw new Exception("The first argument must be the class name" + " of a classifier"); } String classifier = args[0]; args[0] = ""; System.out.println(evaluateModel(classifier, args)); } catch (Exception ex) { ex.printStackTrace(); System.err.println(ex.getMessage()); } } /** * Evaluates a classifier with the options given in an array of strings. * <p/> * * Valid options are: * <p/> * * -t name of training file <br/> * Name of the file with the training data. (required) * <p/> * * -T name of test file <br/> * Name of the file with the test data. If missing a cross-validation is * performed. * <p/> * * -c class index <br/> * Index of the class attribute (1, 2, ...; default: last). * <p/> * * -x number of folds <br/> * The number of folds for the cross-validation (default: 10). * <p/> * * -no-cv <br/> * No cross validation. 
If no test file is provided, no evaluation is done. * <p/> * * -split-percentage percentage <br/> * Sets the percentage for the train/test set split, e.g., 66. * <p/> * * -preserve-order <br/> * Preserves the order in the percentage split instead of randomizing the data * first with the seed value ('-s'). * <p/> * * -s seed <br/> * Random number seed for the cross-validation and percentage split (default: * 1). * <p/> * * -m file with cost matrix <br/> * The name of a file containing a cost matrix. * <p/> * * -l filename <br/> * Loads classifier from the given file. In case the filename ends with * ".xml",a PMML file is loaded or, if that fails, options are loaded from * XML. * <p/> * * -d filename <br/> * Saves classifier built from the training data into the given file. In case * the filename ends with ".xml" the options are saved XML, not the model. * <p/> * * -v <br/> * Outputs no statistics for the training data. * <p/> * * -o <br/> * Outputs statistics only, not the classifier. * <p/> * * -i <br/> * Outputs detailed information-retrieval statistics per class. * <p/> * * -k <br/> * Outputs information-theoretic statistics. * <p/> * * -classifications * "weka.classifiers.evaluation.output.prediction.AbstractOutput + options" <br/> * Uses the specified class for generating the classification output. E.g.: * weka.classifiers.evaluation.output.prediction.PlainText or : * weka.classifiers.evaluation.output.prediction.CSV * * -p range <br/> * Outputs predictions for test instances (or the train instances if no test * instances provided and -no-cv is used), along with the attributes in the * specified range (and nothing else). Use '-p 0' if no attributes are * desired. * <p/> * Deprecated: use "-classifications ..." instead. * <p/> * * -distribution <br/> * Outputs the distribution instead of only the prediction in conjunction with * the '-p' option (only nominal classes). * <p/> * Deprecated: use "-classifications ..." instead. 
* <p/> * * -no-predictions <br/> * Turns off the collection of predictions in order to conserve memory. * <p/> * * -r <br/> * Outputs cumulative margin distribution (and nothing else). * <p/> * * -g <br/> * Only for classifiers that implement "Graphable." Outputs the graph * representation of the classifier (and nothing else). * <p/> * * -xml filename | xml-string <br/> * Retrieves the options from the XML-data instead of the command line. * <p/> * * @param classifier machine learning classifier * @param options the array of string containing the options * @throws Exception if model could not be evaluated successfully * @return a string describing the results */ public static String evaluateModel(Classifier classifier, String[] options) throws Exception { Instances train = null, tempTrain, test = null, template = null; int seed = 1, folds = 10, classIndex = -1; boolean noCrossValidation = false; String trainFileName, testFileName, sourceClass, classIndexString, seedString, foldsString, objectInputFileName, objectOutputFileName; boolean noOutput = false, trainStatistics = true, printMargins = false, printComplexityStatistics = false, printGraph = false, classStatistics = false, printSource = false; StringBuffer text = new StringBuffer(); DataSource trainSource = null, testSource = null; ObjectInputStream objectInputStream = null; BufferedInputStream xmlInputStream = null; CostMatrix costMatrix = null; StringBuffer schemeOptionsText = null; long trainTimeStart = 0, trainTimeElapsed = 0, testTimeStart = 0, testTimeElapsed = 0; String xml = ""; String[] optionsTmp = null; Classifier classifierBackup; int actualClassIndex = -1; // 0-based class index String splitPercentageString = ""; double splitPercentage = -1; boolean preserveOrder = false; boolean forceBatchTraining = false; // set to true if updateable classifier // should not be trained using // updateClassifier() boolean trainSetPresent = false; boolean testSetPresent = false; boolean discardPredictions = false; 
String thresholdFile; String thresholdLabel; StringBuffer predsBuff = null; // predictions from cross-validation AbstractOutput classificationOutput = null; // help requested? if (Utils.getFlag("h", options) || Utils.getFlag("help", options)) { // global info requested as well? boolean globalInfo = Utils.getFlag("synopsis", options) || Utils.getFlag("info", options); throw new Exception("\nHelp requested." + makeOptionString(classifier, globalInfo)); } String metricsToDisable = Utils.getOption("disable", options); List<String> disableList = new ArrayList<String>(); if (metricsToDisable.length() > 0) { String[] parts = metricsToDisable.split(","); for (String p : parts) { disableList.add(p.trim().toLowerCase()); } } try { // do we get the input from XML instead of normal parameters? xml = Utils.getOption("xml", options); if (!xml.equals("")) options = new XMLOptions(xml).toArray(); // is the input model only the XML-Options, i.e. w/o built model? optionsTmp = new String[options.length]; for (int i = 0; i < options.length; i++) optionsTmp[i] = options[i]; String tmpO = Utils.getOption('l', optionsTmp); // if (Utils.getOption('l', optionsTmp).toLowerCase().endsWith(".xml")) { if (tmpO.endsWith(".xml")) { // try to load file as PMML first boolean success = false; if (!success) { // load options from serialized data ('-l' is automatically erased!) 
XMLClassifier xmlserial = new XMLClassifier(); OptionHandler cl = (OptionHandler) xmlserial.read(Utils.getOption( 'l', options)); // merge options optionsTmp = new String[options.length + cl.getOptions().length]; System.arraycopy(cl.getOptions(), 0, optionsTmp, 0, cl.getOptions().length); System.arraycopy(options, 0, optionsTmp, cl.getOptions().length, options.length); options = optionsTmp; } } noCrossValidation = Utils.getFlag("no-cv", options); // Get basic options (options the same for all schemes) classIndexString = Utils.getOption('c', options); if (classIndexString.length() != 0) { if (classIndexString.equals("first")) classIndex = 1; else if (classIndexString.equals("last")) classIndex = -1; else classIndex = Integer.parseInt(classIndexString); } trainFileName = Utils.getOption('t', options); objectInputFileName = Utils.getOption('l', options); objectOutputFileName = Utils.getOption('d', options); testFileName = Utils.getOption('T', options); foldsString = Utils.getOption('x', options); if (foldsString.length() != 0) { folds = Integer.parseInt(foldsString); } seedString = Utils.getOption('s', options); if (seedString.length() != 0) { seed = Integer.parseInt(seedString); } if (trainFileName.length() == 0) { if (objectInputFileName.length() == 0) { throw new Exception( "No training file and no object input file given."); } if (testFileName.length() == 0) { throw new Exception("No training file and no test file given."); } } else if ((objectInputFileName.length() != 0) && ((!(classifier instanceof UpdateableClassifier)) || (testFileName .length() == 0))) { throw new Exception("Classifier not incremental, or no " + "test file provided: can't " + "use both train and model file."); } try { if (trainFileName.length() != 0) { trainSetPresent = true; trainSource = new DataSource(trainFileName); } if (testFileName.length() != 0) { testSetPresent = true; testSource = new DataSource(testFileName); } if (objectInputFileName.length() != 0) { if 
(objectInputFileName.endsWith(".xml")) { // if this is the case then it means that a PMML classifier was // successfully loaded earlier in the code objectInputStream = null; xmlInputStream = null; } else { InputStream is = new FileInputStream(objectInputFileName); if (objectInputFileName.endsWith(".gz")) { is = new GZIPInputStream(is); } // load from KOML? if (!(objectInputFileName.endsWith(".koml") && KOML.isPresent())) { objectInputStream = new ObjectInputStream(is); xmlInputStream = null; } else { objectInputStream = null; xmlInputStream = new BufferedInputStream(is); } } } } catch (Exception e) { throw new Exception("Can't open file " + e.getMessage() + '.'); } if (testSetPresent) { template = test = testSource.getStructure(); if (classIndex != -1) { test.setClassIndex(classIndex - 1); } else { if ((test.classIndex() == -1) || (classIndexString.length() != 0)) test.setClassIndex(test.numAttributes() - 1); } actualClassIndex = test.classIndex(); } else { // percentage split splitPercentageString = Utils.getOption("split-percentage", options); if (splitPercentageString.length() != 0) { if (foldsString.length() != 0) throw new Exception( "Percentage split cannot be used in conjunction with " + "cross-validation ('-x')."); splitPercentage = Double.parseDouble(splitPercentageString); if ((splitPercentage <= 0) || (splitPercentage >= 100)) throw new Exception("Percentage split value needs be >0 and <100."); } else { splitPercentage = -1; } preserveOrder = Utils.getFlag("preserve-order", options); if (preserveOrder) { if (splitPercentage == -1) throw new Exception( "Percentage split ('-percentage-split') is missing."); } // create new train/test sources if (splitPercentage > 0) { testSetPresent = true; Instances tmpInst = trainSource.getDataSet(actualClassIndex); if (!preserveOrder) tmpInst.randomize(new Random(seed)); int trainSize = (int) Math.round(tmpInst.numInstances() * splitPercentage / 100); int testSize = tmpInst.numInstances() - trainSize; Instances 
trainInst = new Instances(tmpInst, 0, trainSize); Instances testInst = new Instances(tmpInst, trainSize, testSize); trainSource = new DataSource(trainInst); testSource = new DataSource(testInst); template = test = testSource.getStructure(); if (classIndex != -1) { test.setClassIndex(classIndex - 1); } else { if ((test.classIndex() == -1) || (classIndexString.length() != 0)) test.setClassIndex(test.numAttributes() - 1); } actualClassIndex = test.classIndex(); } } if (trainSetPresent) { template = train = trainSource.getStructure(); if (classIndex != -1) { train.setClassIndex(classIndex - 1); } else { if ((train.classIndex() == -1) || (classIndexString.length() != 0)) train.setClassIndex(train.numAttributes() - 1); } actualClassIndex = train.classIndex(); if (!(classifier instanceof weka.classifiers.misc.InputMappedClassifier)) { if ((testSetPresent) && !test.equalHeaders(train)) { throw new IllegalArgumentException( "Train and test file not compatible!\n" + test.equalHeadersMsg(train)); } } } if (template == null) { throw new Exception("No actual dataset provided to use as template"); } costMatrix = handleCostOption(Utils.getOption('m', options), template.numClasses()); classStatistics = Utils.getFlag('i', options); noOutput = Utils.getFlag('o', options); trainStatistics = !Utils.getFlag('v', options); printComplexityStatistics = Utils.getFlag('k', options); printMargins = Utils.getFlag('r', options); printGraph = Utils.getFlag('g', options); sourceClass = Utils.getOption('z', options); printSource = (sourceClass.length() != 0); thresholdFile = Utils.getOption("threshold-file", options); thresholdLabel = Utils.getOption("threshold-label", options); forceBatchTraining = Utils.getFlag("force-batch-training", options); String classifications = Utils.getOption("classifications", options); String classificationsOld = Utils.getOption("p", options); if (classifications.length() > 0) { noOutput = true; classificationOutput = AbstractOutput.fromCommandline(classifications); 
if (classificationOutput == null) throw new Exception( "Failed to instantiate class for classification output: " + classifications); classificationOutput.setHeader(template); } // backwards compatible with old "-p range" and "-distribution" options else if (classificationsOld.length() > 0) { noOutput = true; classificationOutput = new PlainText(); classificationOutput.setHeader(template); if (!classificationsOld.equals("0")) classificationOutput.setAttributes(classificationsOld); classificationOutput.setOutputDistribution(Utils.getFlag( "distribution", options)); } // -distribution flag needs -p option else { if (Utils.getFlag("distribution", options)) throw new Exception("Cannot print distribution without '-p' option!"); } discardPredictions = Utils.getFlag("no-predictions", options); if (discardPredictions && (classificationOutput != null)) throw new Exception( "Cannot discard predictions ('-no-predictions') and output predictions at the same time ('-classifications/-p')!"); // if no training file given, we don't have any priors if ((!trainSetPresent) && (printComplexityStatistics)) throw new Exception( "Cannot print complexity statistics ('-k') without training file ('-t')!"); // If a model file is given, we can't process // scheme-specific options if (objectInputFileName.length() != 0) { Utils.checkForRemainingOptions(options); } else { // Set options for classifier if (classifier instanceof OptionHandler) { for (int i = 0; i < options.length; i++) { if (options[i].length() != 0) { if (schemeOptionsText == null) { schemeOptionsText = new StringBuffer(); } if (options[i].indexOf(' ') != -1) { schemeOptionsText.append('"' + options[i] + "\" "); } else { schemeOptionsText.append(options[i] + " "); } } } ((OptionHandler) classifier).setOptions(options); } } Utils.checkForRemainingOptions(options); } catch (Exception e) { throw new Exception("\nWeka exception: " + e.getMessage() + makeOptionString(classifier, false)); } if (objectInputFileName.length() != 0) { // 
Load classifier from file if (objectInputStream != null) { classifier = (Classifier) objectInputStream.readObject(); // try and read a header (if present) Instances savedStructure = null; try { savedStructure = (Instances) objectInputStream.readObject(); } catch (Exception ex) { // don't make a fuss } if (savedStructure != null) { // test for compatibility with template if (!template.equalHeaders(savedStructure)) { throw new Exception("training and test set are not compatible\n" + template.equalHeadersMsg(savedStructure)); } } objectInputStream.close(); } else if (xmlInputStream != null) { // whether KOML is available has already been checked (objectInputStream // would null otherwise)! classifier = (Classifier) KOML.read(xmlInputStream); xmlInputStream.close(); } } // Setup up evaluation objects Evaluation trainingEvaluation = new Evaluation(new Instances(template, 0), costMatrix); Evaluation testingEvaluation = new Evaluation(new Instances(template, 0), costMatrix); if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) { Instances mappedClassifierHeader = ((weka.classifiers.misc.InputMappedClassifier) classifier) .getModelHeader(new Instances(template, 0)); trainingEvaluation = new Evaluation(new Instances(mappedClassifierHeader, 0), costMatrix); testingEvaluation = new Evaluation(new Instances(mappedClassifierHeader, 0), costMatrix); } trainingEvaluation.setDiscardPredictions(discardPredictions); trainingEvaluation.dontDisplayMetrics(disableList); testingEvaluation.setDiscardPredictions(discardPredictions); testingEvaluation.dontDisplayMetrics(disableList); // disable use of priors if no training file given if (!trainSetPresent) testingEvaluation.useNoPriors(); // backup of fully setup classifier for cross-validation classifierBackup = AbstractClassifier.makeCopy(classifier); // Build the classifier if no object file provided if ((classifier instanceof UpdateableClassifier) && (testSetPresent || noCrossValidation) && (costMatrix == null) && 
(trainSetPresent) && !forceBatchTraining) { // Build classifier incrementally trainingEvaluation.setPriors(train); testingEvaluation.setPriors(train); trainTimeStart = System.currentTimeMillis(); if (objectInputFileName.length() == 0) { classifier.buildClassifier(train); } Instance trainInst; while (trainSource.hasMoreElements(train)) { trainInst = trainSource.nextElement(train); trainingEvaluation.updatePriors(trainInst); testingEvaluation.updatePriors(trainInst); ((UpdateableClassifier) classifier).updateClassifier(trainInst); } trainTimeElapsed = System.currentTimeMillis() - trainTimeStart; } else if (objectInputFileName.length() == 0) { // Build classifier in one go tempTrain = trainSource.getDataSet(actualClassIndex); if (classifier instanceof weka.classifiers.misc.InputMappedClassifier && !trainingEvaluation.getHeader().equalHeaders(tempTrain)) { // we need to make a new dataset that maps the training instances to // the structure expected by the mapped classifier - this is only // to ensure that the structure and priors computed by the *testing* // evaluation object is correct with respect to the mapped classifier Instances mappedClassifierDataset = ((weka.classifiers.misc.InputMappedClassifier) classifier) .getModelHeader(new Instances(template, 0)); for (int zz = 0; zz < tempTrain.numInstances(); zz++) { Instance mapped = ((weka.classifiers.misc.InputMappedClassifier) classifier) .constructMappedInstance(tempTrain.instance(zz)); mappedClassifierDataset.add(mapped); } tempTrain = mappedClassifierDataset; } trainingEvaluation.setPriors(tempTrain); testingEvaluation.setPriors(tempTrain); trainTimeStart = System.currentTimeMillis(); classifier.buildClassifier(tempTrain); trainTimeElapsed = System.currentTimeMillis() - trainTimeStart; } // backup of fully trained classifier for printing the classifications if (classificationOutput != null) { if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) { 
classificationOutput.setHeader(trainingEvaluation.getHeader()); } } // Save the classifier if an object output file is provided if (objectOutputFileName.length() != 0) { OutputStream os = new FileOutputStream(objectOutputFileName); // binary if (!(objectOutputFileName.endsWith(".xml") || (objectOutputFileName .endsWith(".koml") && KOML.isPresent()))) { if (objectOutputFileName.endsWith(".gz")) { os = new GZIPOutputStream(os); } ObjectOutputStream objectOutputStream = new ObjectOutputStream(os); objectOutputStream.writeObject(classifier); if (template != null) { objectOutputStream.writeObject(template); } objectOutputStream.flush(); objectOutputStream.close(); } // KOML/XML else { BufferedOutputStream xmlOutputStream = new BufferedOutputStream(os); if (objectOutputFileName.endsWith(".xml")) { XMLSerialization xmlSerial = new XMLClassifier(); xmlSerial.write(xmlOutputStream, classifier); } else // whether KOML is present has already been checked // if not present -> ".koml" is interpreted as binary - see above if (objectOutputFileName.endsWith(".koml")) { KOML.write(xmlOutputStream, classifier); } xmlOutputStream.close(); } } // If classifier is drawable output string describing graph if ((classifier instanceof Drawable) && (printGraph)) { return ((Drawable) classifier).graph(); } // Output the classifier as equivalent source if ((classifier instanceof Sourcable) && (printSource)) { return wekaStaticWrapper((Sourcable) classifier, sourceClass); } // Output model if (!(noOutput || printMargins)) { if (classifier instanceof OptionHandler) { if (schemeOptionsText != null) { text.append("\nOptions: " + schemeOptionsText); text.append("\n"); } } text.append("\n" + classifier.toString() + "\n"); } if (!printMargins && (costMatrix != null)) { text.append("\n=== Evaluation Cost Matrix ===\n\n"); text.append(costMatrix.toString()); } // Output test instance predictions only if (classificationOutput != null) { DataSource source = testSource; predsBuff = new StringBuffer(); 
classificationOutput.setBuffer(predsBuff); // no test set -> use train set if (source == null && noCrossValidation) { source = trainSource; predsBuff.append("\n=== Predictions on training data ===\n\n"); } else { predsBuff.append("\n=== Predictions on test data ===\n\n"); } if (source != null) classificationOutput.print(classifier, source); } // Compute error estimate from training data if ((trainStatistics) && (trainSetPresent)) { if ((classifier instanceof UpdateableClassifier) && (testSetPresent) && (costMatrix == null)) { // Classifier was trained incrementally, so we have to // reset the source. trainSource.reset(); // Incremental testing train = trainSource.getStructure(actualClassIndex); testTimeStart = System.currentTimeMillis(); Instance trainInst; while (trainSource.hasMoreElements(train)) { trainInst = trainSource.nextElement(train); trainingEvaluation.evaluateModelOnce(classifier, trainInst); } testTimeElapsed = System.currentTimeMillis() - testTimeStart; } else { testTimeStart = System.currentTimeMillis(); trainingEvaluation.evaluateModel(classifier, trainSource.getDataSet(actualClassIndex)); testTimeElapsed = System.currentTimeMillis() - testTimeStart; } // Print the results of the training evaluation if (printMargins) { return trainingEvaluation.toCumulativeMarginDistributionString(); } else { if (classificationOutput == null) { text.append("\nTime taken to build model: " + Utils.doubleToString(trainTimeElapsed / 1000.0, 2) + " seconds"); if (splitPercentage > 0) text.append("\nTime taken to test model on training split: "); else text.append("\nTime taken to test model on training data: "); text.append(Utils.doubleToString(testTimeElapsed / 1000.0, 2) + " seconds"); if (splitPercentage > 0) text.append(trainingEvaluation.toSummaryString( "\n\n=== Error on training" + " split ===\n", printComplexityStatistics)); else text.append(trainingEvaluation.toSummaryString( "\n\n=== Error on training" + " data ===\n", printComplexityStatistics)); if 
(template.classAttribute().isNominal()) { if (classStatistics) { text.append("\n\n" + trainingEvaluation.toClassDetailsString()); } if (!noCrossValidation) text.append("\n\n" + trainingEvaluation.toMatrixString()); } } } } // Compute proper error estimates if (testSource != null) { // Testing is on the supplied test data testSource.reset(); if (classifier instanceof BatchPredictor) { testingEvaluation.evaluateModel(classifier, testSource.getDataSet(test.classIndex())); } else { test = testSource.getStructure(test.classIndex()); Instance testInst; while (testSource.hasMoreElements(test)) { testInst = testSource.nextElement(test); testingEvaluation.evaluateModelOnceAndRecordPrediction(classifier, testInst); } } if (splitPercentage > 0) { if (classificationOutput == null) { text.append("\n\n" + testingEvaluation.toSummaryString( "=== Error on test split ===\n", printComplexityStatistics)); } } else { if (classificationOutput == null) { text.append("\n\n" + testingEvaluation.toSummaryString( "=== Error on test data ===\n", printComplexityStatistics)); } } } else if (trainSource != null) { if (!noCrossValidation) { // Testing is via cross-validation on training data Random random = new Random(seed); // use untrained (!) 
classifier for cross-validation classifier = AbstractClassifier.makeCopy(classifierBackup); if (classificationOutput == null) { testingEvaluation.crossValidateModel(classifier, trainSource.getDataSet(actualClassIndex), folds, random); if (template.classAttribute().isNumeric()) { text.append("\n\n\n" + testingEvaluation.toSummaryString( "=== Cross-validation ===\n", printComplexityStatistics)); } else { text.append("\n\n\n" + testingEvaluation.toSummaryString("=== Stratified " + "cross-validation ===\n", printComplexityStatistics)); } } else { predsBuff = new StringBuffer(); classificationOutput.setBuffer(predsBuff); predsBuff.append("\n=== Predictions under cross-validation ===\n\n"); testingEvaluation.crossValidateModel(classifier, trainSource.getDataSet(actualClassIndex), folds, random, classificationOutput); } } } if (template.classAttribute().isNominal()) { if (classStatistics && !noCrossValidation && (classificationOutput == null)) { text.append("\n\n" + testingEvaluation.toClassDetailsString()); } if (!noCrossValidation && (classificationOutput == null)) text.append("\n\n" + testingEvaluation.toMatrixString()); } // predictions from cross-validation? if (predsBuff != null) { text.append("\n" + predsBuff); } if ((thresholdFile.length() != 0) && template.classAttribute().isNominal()) { int labelIndex = 0; if (thresholdLabel.length() != 0) labelIndex = template.classAttribute().indexOfValue(thresholdLabel); if (labelIndex == -1) throw new IllegalArgumentException("Class label '" + thresholdLabel + "' is unknown!"); ThresholdCurve tc = new ThresholdCurve(); Instances result = tc.getCurve(testingEvaluation.predictions(), labelIndex); DataSink.write(thresholdFile, result); } return text.toString(); } /** * Attempts to load a cost matrix. * * @param costFileName the filename of the cost matrix * @param numClasses the number of classes that should be in the cost matrix * (only used if the cost file is in old format). 
* @return a <code>CostMatrix</code> value, or null if costFileName is empty * @throws Exception if an error occurs. */ protected static CostMatrix handleCostOption(String costFileName, int numClasses) throws Exception { if ((costFileName != null) && (costFileName.length() != 0)) { System.out .println("NOTE: The behaviour of the -m option has changed between WEKA 3.0" + " and WEKA 3.1. -m now carries out cost-sensitive *evaluation*" + " only. For cost-sensitive *prediction*, use one of the" + " cost-sensitive metaschemes such as" + " weka.classifiers.meta.CostSensitiveClassifier or" + " weka.classifiers.meta.MetaCost"); Reader costReader = null; try { costReader = new BufferedReader(new FileReader(costFileName)); } catch (Exception e) { throw new Exception("Can't open file " + e.getMessage() + '.'); } try { // First try as a proper cost matrix format return new CostMatrix(costReader); } catch (Exception ex) { try { // Now try as the poxy old format :-) // System.err.println("Attempting to read old format cost file"); try { costReader.close(); // Close the old one costReader = new BufferedReader(new FileReader(costFileName)); } catch (Exception e) { throw new Exception("Can't open file " + e.getMessage() + '.'); } CostMatrix costMatrix = new CostMatrix(numClasses); // System.err.println("Created default cost matrix"); costMatrix.readOldFormat(costReader); return costMatrix; // System.err.println("Read old format"); } catch (Exception e2) { // re-throw the original exception // System.err.println("Re-throwing original exception"); throw ex; } } } else { return null; } } /** * Evaluates the classifier on a given set of instances. Note that the data * must have exactly the same format (e.g. order of attributes) as the data * used to train the classifier! Otherwise the results will generally be * meaningless. 
* * @param classifier machine learning classifier * @param data set of test instances for evaluation * @param forPredictionsPrinting varargs parameter that, if supplied, is * expected to hold a * weka.classifiers.evaluation.output.prediction.AbstractOutput * object * @return the predictions * @throws Exception if model could not be evaluated successfully */ public double[] evaluateModel(Classifier classifier, Instances data, Object... forPredictionsPrinting) throws Exception { // for predictions printing AbstractOutput classificationOutput = null; double predictions[] = new double[data.numInstances()]; if (forPredictionsPrinting.length > 0) { classificationOutput = (AbstractOutput) forPredictionsPrinting[0]; } if (classifier instanceof BatchPredictor) { // make a copy and set the class to missing Instances dataPred = new Instances(data); for (int i = 0; i < data.numInstances(); i++) { dataPred.instance(i).setClassMissing(); } double[][] preds = ((BatchPredictor) classifier) .distributionsForInstances(dataPred); for (int i = 0; i < data.numInstances(); i++) { double[] p = preds[i]; predictions[i] = evaluationForSingleInstance(p, data.instance(i), true); if (classificationOutput != null) classificationOutput.printClassification(p, data.instance(i), i); } } else { // Need to be able to collect predictions if appropriate (for AUC) for (int i = 0; i < data.numInstances(); i++) { predictions[i] = evaluateModelOnceAndRecordPrediction(classifier, data.instance(i)); if (classificationOutput != null) classificationOutput.printClassification(classifier, data.instance(i), i); } } return predictions; } /** * Evaluates the supplied distribution on a single instance. 
* * @param dist the supplied distribution * @param instance the test instance to be classified * @param storePredictions whether to store predictions for nominal classifier * @return the prediction * @throws Exception if model could not be evaluated successfully */ public double evaluationForSingleInstance(double[] dist, Instance instance, boolean storePredictions) throws Exception { double pred; if (m_ClassIsNominal) { pred = Utils.maxIndex(dist); if (dist[(int) pred] <= 0) { pred = Utils.missingValue(); } updateStatsForClassifier(dist, instance); if (storePredictions && !m_DiscardPredictions) { if (m_Predictions == null) m_Predictions = new FastVector(); m_Predictions.addElement(new NominalPrediction(instance.classValue(), dist, instance.weight())); } } else { pred = dist[0]; updateStatsForPredictor(pred, instance); if (storePredictions && !m_DiscardPredictions) { if (m_Predictions == null) m_Predictions = new FastVector(); m_Predictions.addElement(new NumericPrediction(instance.classValue(), pred, instance.weight())); } } return pred; } /** * Evaluates the classifier on a single instance and records the prediction. 
   * 
   * @param classifier machine learning classifier
   * @param instance the test instance to be classified
   * @param storePredictions whether to store predictions for nominal classifier
   * @return the prediction made by the classifier
   * @throws Exception if model could not be evaluated successfully or the data
   *           contains string attributes
   */
  protected double evaluationForSingleInstance(Classifier classifier,
    Instance instance, boolean storePredictions) throws Exception {

    // Copy the instance and blank out its class value so the classifier
    // cannot see the true label when predicting.
    Instance classMissing = (Instance) instance.copy();
    classMissing.setDataset(instance.dataset());
    if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) {
      // Map the instance into the structure the wrapped model expects; the
      // class attribute may sit at a different index after mapping, so the
      // class is blanked at the mapped index rather than the original one.
      instance = (Instance) instance.copy();
      instance = ((weka.classifiers.misc.InputMappedClassifier) classifier)
        .constructMappedInstance(instance);
      // System.out.println("Mapped instance " + instance);
      int mappedClass = ((weka.classifiers.misc.InputMappedClassifier) classifier)
        .getMappedClassIndex();
      classMissing.setMissing(mappedClass);
    } else {
      classMissing.setClassMissing();
    }
    // System.out.println("instance (to predict)" + classMissing);
    // Score the class-blanked copy, but update statistics against the
    // (possibly mapped) labelled instance.
    double pred = evaluationForSingleInstance(
      classifier.distributionForInstance(classMissing), instance,
      storePredictions);
    // We don't need to do the following if the class is nominal because in that
    // case
    // entropy and coverage statistics are always computed.
    if (!m_ClassIsNominal) {
      if (!instance.classIsMissing() && !Utils.isMissingValue(pred)) {
        if (classifier instanceof IntervalEstimator) {
          updateStatsForIntervalEstimator((IntervalEstimator) classifier,
            classMissing, instance.classValue());
        } else {
          // Model cannot produce prediction intervals, so coverage
          // statistics become unavailable for this evaluation.
          m_CoverageStatisticsAvailable = false;
        }
        if (classifier instanceof ConditionalDensityEstimator) {
          updateStatsForConditionalDensityEstimator(
            (ConditionalDensityEstimator) classifier, classMissing,
            instance.classValue());
        } else {
          // No conditional density estimate, so complexity (entropy)
          // statistics become unavailable for this evaluation.
          m_ComplexityStatisticsAvailable = false;
        }
      }
    }
    return pred;
  }

  /**
   * Evaluates the classifier on a single instance and records the prediction.
   * 
   * @param classifier machine learning classifier
   * @param instance the test instance to be classified
   * @return the prediction made by the classifier
   * @throws Exception if model could not be evaluated successfully or the data
   *           contains string attributes
   */
  public double evaluateModelOnceAndRecordPrediction(Classifier classifier,
    Instance instance) throws Exception {
    // storePredictions == true: the prediction is kept (needed e.g. for
    // AUC / threshold curves).
    return evaluationForSingleInstance(classifier, instance, true);
  }

  /**
   * Evaluates the classifier on a single instance.
   * 
   * @param classifier machine learning classifier
   * @param instance the test instance to be classified
   * @return the prediction made by the classifier
   * @throws Exception if model could not be evaluated successfully or the data
   *           contains string attributes
   */
  public double evaluateModelOnce(Classifier classifier, Instance instance)
    throws Exception {
    // storePredictions == false: statistics are updated but no prediction
    // object is retained.
    return evaluationForSingleInstance(classifier, instance, false);
  }

  /**
   * Evaluates the supplied distribution on a single instance.
   * 
   * @param dist the supplied distribution
   * @param instance the test instance to be classified
   * @return the prediction
   * @throws Exception if model could not be evaluated successfully
   */
  public double evaluateModelOnce(double[] dist, Instance instance)
    throws Exception {
    // storePredictions == false: statistics are updated but no prediction
    // object is retained.
    return evaluationForSingleInstance(dist, instance, false);
  }

  /**
   * Evaluates the supplied distribution on a single instance.
   * 
   * @param dist the supplied distribution
   * @param instance the test instance to be classified
   * @return the prediction
   * @throws Exception if model could not be evaluated successfully
   */
  public double evaluateModelOnceAndRecordPrediction(double[] dist,
    Instance instance) throws Exception {
    // storePredictions == true: the prediction is recorded for later use.
    return evaluationForSingleInstance(dist, instance, true);
  }

  /**
   * Evaluates the supplied prediction on a single instance.
   * 
   * @param prediction the supplied prediction
   * @param instance the test instance to be classified
   * @throws Exception if model could not be evaluated successfully
   */
  public void evaluateModelOnce(double prediction, Instance instance)
    throws Exception {
    // Expand the point prediction into a distribution before evaluating.
    evaluateModelOnce(makeDistribution(prediction), instance);
  }

  /**
   * Returns the predictions that have been collected.
   * 
   * @return a reference to the FastVector containing the predictions that have
   *         been collected. This should be null if no predictions have been
   *         collected.
   */
  public FastVector predictions() {
    if (m_DiscardPredictions)
      return null;
    else
      return m_Predictions;
  }

  /**
   * Wraps a static classifier in enough source to test using the weka class
   * libraries.
   * 
   * @param classifier a Sourcable Classifier
   * @param className the name to give to the source code class
   * @return the source for a static classifier that can be tested with weka
   *         libraries.
   * @throws Exception if code-generation fails
   */
  public static String wekaStaticWrapper(Sourcable classifier, String className)
    throws Exception {

    StringBuffer result = new StringBuffer();
    // The classifier emits its own decision logic as a static "classify"
    // method; everything below wraps that in a runnable WekaWrapper class.
    String staticClassifier = classifier.toSource(className);

    // File header comment plus package/import preamble of the generated file.
    result.append("// Generated with Weka " + Version.VERSION + "\n");
    result.append("//\n");
    result
      .append("// This code is public domain and comes with no warranty.\n");
    result.append("//\n");
    result.append("// Timestamp: " + new Date() + "\n");
    result.append("\n");
    result.append("package weka.classifiers;\n");
    result.append("\n");
    result.append("import weka.core.Attribute;\n");
    result.append("import weka.core.Capabilities;\n");
    result.append("import weka.core.Capabilities.Capability;\n");
    result.append("import weka.core.Instance;\n");
    result.append("import weka.core.Instances;\n");
    result.append("import weka.core.RevisionUtils;\n");
    result
      .append("import weka.classifiers.Classifier;\nimport weka.classifiers.AbstractClassifier;\n");
    result.append("\n");
    result.append("public class WekaWrapper\n");
    result.append("  extends AbstractClassifier {\n");

    // globalInfo
    result.append("\n");
    result.append("  /**\n");
    result.append("   * Returns only the toString() method.\n");
    result.append("   *\n");
    result.append("   * @return a string describing the classifier\n");
    result.append("   */\n");
    result.append("  public String globalInfo() {\n");
    result.append("    return toString();\n");
    result.append("  }\n");

    // getCapabilities
    result.append("\n");
    result.append("  /**\n");
    result.append("   * Returns the capabilities of this classifier.\n");
    result.append("   *\n");
    result.append("   * @return the capabilities\n");
    result.append("   */\n");
    result.append("  public Capabilities getCapabilities() {\n");
    // The capabilities object renders itself as source, indented 4 spaces.
    result.append(((Classifier) classifier).getCapabilities().toSource(
      "result", 4));
    result.append("    return result;\n");
    result.append("  }\n");

    // buildClassifier
    result.append("\n");
    result.append("  /**\n");
    result.append("   * only checks the data against its capabilities.\n");
    result.append("   *\n");
    result.append("   * @param i the training data\n");
    result.append("   */\n");
    result
      .append("  public void buildClassifier(Instances i) throws Exception {\n");
    result.append("    // can classifier handle the data?\n");
    result.append("    getCapabilities().testWithFail(i);\n");
    result.append("  }\n");

    // classifyInstance: converts the instance to an Object[] and delegates
    // to the generated static classify(Object[]) method.
    result.append("\n");
    result.append("  /**\n");
    result.append("   * Classifies the given instance.\n");
    result.append("   *\n");
    result.append("   * @param i the instance to classify\n");
    result.append("   * @return the classification result\n");
    result.append("   */\n");
    result
      .append("  public double classifyInstance(Instance i) throws Exception {\n");
    result.append("    Object[] s = new Object[i.numAttributes()];\n");
    result.append("    \n");
    result.append("    for (int j = 0; j < s.length; j++) {\n");
    result.append("      if (!i.isMissing(j)) {\n");
    result.append("        if (i.attribute(j).isNominal())\n");
    result.append("          s[j] = new String(i.stringValue(j));\n");
    result.append("        else if (i.attribute(j).isNumeric())\n");
    result.append("          s[j] = new Double(i.value(j));\n");
    result.append("      }\n");
    result.append("    }\n");
    result.append("    \n");
    result.append("    // set class value to missing\n");
    result.append("    s[i.classIndex()] = null;\n");
    result.append("    \n");
    result.append("    return " + className + ".classify(s);\n");
    result.append("  }\n");

    // getRevision
    result.append("\n");
    result.append("  /**\n");
    result.append("   * Returns the revision string.\n");
    result.append("   * \n");
    result.append("   * @return the revision\n");
    result.append("   */\n");
    result.append("  public String getRevision() {\n");
    result.append("    return RevisionUtils.extract(\"1.0\");\n");
    result.append("  }\n");

    // toString
    result.append("\n");
    result.append("  /**\n");
    result
      .append("   * Returns only the classnames and what classifier it is based on.\n");
    result.append("   *\n");
    result.append("   * @return a short description\n");
    result.append("   */\n");
    result.append("  public String toString() {\n");
    result.append("    return \"Auto-generated classifier wrapper, based on "
      + classifier.getClass().getName() + " (generated with Weka "
      + Version.VERSION + ").\\n" + "\" + this.getClass().getName() + \"/"
      + className + "\";\n");
    result.append("  }\n");

    // main
    // NOTE(review): the "classfier" typo below is inside the emitted string
    // (runtime output of the generator), so it is intentionally left as-is.
    result.append("\n");
    result.append("  /**\n");
    result.append("   * Runs the classfier from commandline.\n");
    result.append("   *\n");
    result.append("   * @param args the commandline arguments\n");
    result.append("   */\n");
    result.append("  public static void main(String args[]) {\n");
    result.append("    runClassifier(new WekaWrapper(), args);\n");
    result.append("  }\n");
    result.append("}\n");

    // actual classifier code
    result.append("\n");
    result.append(staticClassifier);

    return result.toString();
  }

  /**
   * Gets the number of test instances that had a known class value (actually
   * the sum of the weights of test instances with known class value).
* * @return the number of test instances with known class */ public final double numInstances() { return m_WithClass; } /** * Gets the coverage of the test cases by the predicted regions at the * confidence level specified when evaluation was performed. * * @return the coverage of the test cases by the predicted regions */ public final double coverageOfTestCasesByPredictedRegions() { if (!m_CoverageStatisticsAvailable) return Double.NaN; return 100 * m_TotalCoverage / m_WithClass; } /** * Gets the average size of the predicted regions, relative to the range of * the target in the training data, at the confidence level specified when * evaluation was performed. * * @return the average size of the predicted regions */ public final double sizeOfPredictedRegions() { if (m_NoPriors || !m_CoverageStatisticsAvailable) return Double.NaN; return 100 * m_TotalSizeOfRegions / m_WithClass; } /** * Gets the number of instances incorrectly classified (that is, for which an * incorrect prediction was made). (Actually the sum of the weights of these * instances) * * @return the number of incorrectly classified instances */ public final double incorrect() { return m_Incorrect; } /** * Gets the percentage of instances incorrectly classified (that is, for which * an incorrect prediction was made). * * @return the percent of incorrectly classified instances (between 0 and 100) */ public final double pctIncorrect() { return 100 * m_Incorrect / m_WithClass; } /** * Gets the total cost, that is, the cost of each prediction times the weight * of the instance, summed over all instances. * * @return the total cost */ public final double totalCost() { return m_TotalCost; } /** * Gets the average cost, that is, total cost of misclassifications (incorrect * plus unclassified) over the total number of instances. * * @return the average cost. 
*/ public final double avgCost() { return m_TotalCost / m_WithClass; } /** * Gets the number of instances correctly classified (that is, for which a * correct prediction was made). (Actually the sum of the weights of these * instances) * * @return the number of correctly classified instances */ public final double correct() { return m_Correct; } /** * Gets the percentage of instances correctly classified (that is, for which a * correct prediction was made). * * @return the percent of correctly classified instances (between 0 and 100) */ public final double pctCorrect() { return 100 * m_Correct / m_WithClass; } /** * Gets the number of instances not classified (that is, for which no * prediction was made by the classifier). (Actually the sum of the weights of * these instances) * * @return the number of unclassified instances */ public final double unclassified() { return m_Unclassified; } /** * Gets the percentage of instances not classified (that is, for which no * prediction was made by the classifier). * * @return the percent of unclassified instances (between 0 and 100) */ public final double pctUnclassified() { return 100 * m_Unclassified / m_WithClass; } /** * Returns the estimated error rate or the root mean squared error (if the * class is numeric). If a cost matrix was given this error rate gives the * average cost. * * @return the estimated error rate (between 0 and 1, or between 0 and maximum * cost) */ public final double errorRate() { if (!m_ClassIsNominal) { return Math.sqrt(m_SumSqrErr / (m_WithClass - m_Unclassified)); } if (m_CostMatrix == null) { return m_Incorrect / m_WithClass; } else { return avgCost(); } } /** * Returns value of kappa statistic if class is nominal. 
* * @return the value of the kappa statistic */ public final double kappa() { double[] sumRows = new double[m_ConfusionMatrix.length]; double[] sumColumns = new double[m_ConfusionMatrix.length]; double sumOfWeights = 0; for (int i = 0; i < m_ConfusionMatrix.length; i++) { for (int j = 0; j < m_ConfusionMatrix.length; j++) { sumRows[i] += m_ConfusionMatrix[i][j]; sumColumns[j] += m_ConfusionMatrix[i][j]; sumOfWeights += m_ConfusionMatrix[i][j]; } } double correct = 0, chanceAgreement = 0; for (int i = 0; i < m_ConfusionMatrix.length; i++) { chanceAgreement += (sumRows[i] * sumColumns[i]); correct += m_ConfusionMatrix[i][i]; } chanceAgreement /= (sumOfWeights * sumOfWeights); correct /= sumOfWeights; if (chanceAgreement < 1) { return (correct - chanceAgreement) / (1 - chanceAgreement); } else { return 1; } } /** * Returns the correlation coefficient if the class is numeric. * * @return the correlation coefficient * @throws Exception if class is not numeric */ public final double correlationCoefficient() throws Exception { if (m_ClassIsNominal) { throw new Exception("Can't compute correlation coefficient: " + "class is nominal!"); } double correlation = 0; double varActual = m_SumSqrClass - m_SumClass * m_SumClass / (m_WithClass - m_Unclassified); double varPredicted = m_SumSqrPredicted - m_SumPredicted * m_SumPredicted / (m_WithClass - m_Unclassified); double varProd = m_SumClassPredicted - m_SumClass * m_SumPredicted / (m_WithClass - m_Unclassified); if (varActual * varPredicted <= 0) { correlation = 0.0; } else { correlation = varProd / Math.sqrt(varActual * varPredicted); } return correlation; } /** * Returns the mean absolute error. Refers to the error of the predicted * values for numeric classes, and the error of the predicted probability * distribution for nominal classes. * * @return the mean absolute error */ public final double meanAbsoluteError() { return m_SumAbsErr / (m_WithClass - m_Unclassified); } /** * Returns the mean absolute error of the prior. 
* * @return the mean absolute error */ public final double meanPriorAbsoluteError() { if (m_NoPriors) return Double.NaN; return m_SumPriorAbsErr / m_WithClass; } /** * Returns the relative absolute error. * * @return the relative absolute error * @throws Exception if it can't be computed */ public final double relativeAbsoluteError() throws Exception { if (m_NoPriors) return Double.NaN; return 100 * meanAbsoluteError() / meanPriorAbsoluteError(); } /** * Returns the root mean squared error. * * @return the root mean squared error */ public final double rootMeanSquaredError() { return Math.sqrt(m_SumSqrErr / (m_WithClass - m_Unclassified)); } /** * Returns the root mean prior squared error. * * @return the root mean prior squared error */ public final double rootMeanPriorSquaredError() { if (m_NoPriors) return Double.NaN; return Math.sqrt(m_SumPriorSqrErr / m_WithClass); } /** * Returns the root relative squared error if the class is numeric. * * @return the root relative squared error */ public final double rootRelativeSquaredError() { if (m_NoPriors) return Double.NaN; return 100.0 * rootMeanSquaredError() / rootMeanPriorSquaredError(); } /** * Calculate the entropy of the prior distribution. * * @return the entropy of the prior distribution * @throws Exception if the class is not nominal */ public final double priorEntropy() throws Exception { if (!m_ClassIsNominal) { throw new Exception("Can't compute entropy of class prior: " + "class numeric!"); } if (m_NoPriors) return Double.NaN; double entropy = 0; for (int i = 0; i < m_NumClasses; i++) { entropy -= m_ClassPriors[i] / m_ClassPriorsSum * Utils.log2(m_ClassPriors[i] / m_ClassPriorsSum); } return entropy; } /** * Return the total Kononenko & Bratko Information score in bits. 
* * @return the K&B information score * @throws Exception if the class is not nominal */ public final double KBInformation() throws Exception { if (!m_ClassIsNominal) { throw new Exception("Can't compute K&B Info score: " + "class numeric!"); } if (m_NoPriors) return Double.NaN; return m_SumKBInfo; } /** * Return the Kononenko & Bratko Information score in bits per instance. * * @return the K&B information score * @throws Exception if the class is not nominal */ public final double KBMeanInformation() throws Exception { if (!m_ClassIsNominal) { throw new Exception("Can't compute K&B Info score: class numeric!"); } if (m_NoPriors) return Double.NaN; return m_SumKBInfo / (m_WithClass - m_Unclassified); } /** * Return the Kononenko & Bratko Relative Information score. * * @return the K&B relative information score * @throws Exception if the class is not nominal */ public final double KBRelativeInformation() throws Exception { if (!m_ClassIsNominal) { throw new Exception("Can't compute K&B Info score: " + "class numeric!"); } if (m_NoPriors) return Double.NaN; return 100.0 * KBInformation() / priorEntropy(); } /** * Returns the total entropy for the null model. * * @return the total null model entropy */ public final double SFPriorEntropy() { if (m_NoPriors || !m_ComplexityStatisticsAvailable) return Double.NaN; return m_SumPriorEntropy; } /** * Returns the entropy per instance for the null model. * * @return the null model entropy per instance */ public final double SFMeanPriorEntropy() { if (m_NoPriors || !m_ComplexityStatisticsAvailable) return Double.NaN; return m_SumPriorEntropy / m_WithClass; } /** * Returns the total entropy for the scheme. * * @return the total scheme entropy */ public final double SFSchemeEntropy() { if (!m_ComplexityStatisticsAvailable) return Double.NaN; return m_SumSchemeEntropy; } /** * Returns the entropy per instance for the scheme. 
* * @return the scheme entropy per instance */ public final double SFMeanSchemeEntropy() { if (!m_ComplexityStatisticsAvailable) return Double.NaN; return m_SumSchemeEntropy / (m_WithClass - m_Unclassified); } /** * Returns the total SF, which is the null model entropy minus the scheme * entropy. * * @return the total SF */ public final double SFEntropyGain() { if (m_NoPriors || !m_ComplexityStatisticsAvailable) return Double.NaN; return m_SumPriorEntropy - m_SumSchemeEntropy; } /** * Returns the SF per instance, which is the null model entropy minus the * scheme entropy, per instance. * * @return the SF per instance */ public final double SFMeanEntropyGain() { if (m_NoPriors || !m_ComplexityStatisticsAvailable) return Double.NaN; return (m_SumPriorEntropy - m_SumSchemeEntropy) / (m_WithClass - m_Unclassified); } /** * Output the cumulative margin distribution as a string suitable for input * for gnuplot or similar package. * * @return the cumulative margin distribution * @throws Exception if the class attribute is nominal */ public String toCumulativeMarginDistributionString() throws Exception { if (!m_ClassIsNominal) { throw new Exception("Class must be nominal for margin distributions"); } String result = ""; double cumulativeCount = 0; double margin; for (int i = 0; i <= k_MarginResolution; i++) { if (m_MarginCounts[i] != 0) { cumulativeCount += m_MarginCounts[i]; margin = i * 2.0 / k_MarginResolution - 1.0; result = result + Utils.doubleToString(margin, 7, 3) + ' ' + Utils.doubleToString(cumulativeCount * 100 / m_WithClass, 7, 3) + '\n'; } else if (i == 0) { result = Utils.doubleToString(-1.0, 7, 3) + ' ' + Utils.doubleToString(0, 7, 3) + '\n'; } } return result; } /** * Calls toSummaryString() with no title and no complexity stats. * * @return a summary description of the classifier evaluation */ @Override public String toSummaryString() { return toSummaryString("", false); } /** * Calls toSummaryString() with a default title. 
   *
   * @param printComplexityStatistics if true, complexity statistics are
   *          returned as well
   * @return the summary string
   */
  public String toSummaryString(boolean printComplexityStatistics) {
    return toSummaryString("=== Summary ===\n", printComplexityStatistics);
  }

  /**
   * Outputs the performance statistics in summary form. Lists number (and
   * percentage) of instances classified correctly, incorrectly and
   * unclassified. Outputs the total number of instances classified, and the
   * number of instances (if any) that had no class value provided.
   *
   * @param title the title for the statistics
   * @param printComplexityStatistics if true, complexity statistics are
   *          returned as well
   * @return the summary as a String
   */
  public String toSummaryString(String title, boolean printComplexityStatistics) {
    StringBuffer text = new StringBuffer();

    // Complexity statistics require priors; silently downgrade if unavailable.
    if (printComplexityStatistics && m_NoPriors) {
      printComplexityStatistics = false;
      System.err.println("Priors disabled, cannot print complexity statistics!");
    }

    text.append(title + "\n");
    try {
      if (m_WithClass > 0) {
        if (m_ClassIsNominal) {
          // --- nominal class: accuracy-style statistics ---
          // m_metricsToDisplay gates each individual line of output.
          boolean displayCorrect = m_metricsToDisplay.contains("correct");
          boolean displayIncorrect = m_metricsToDisplay.contains("incorrect");
          boolean displayKappa = m_metricsToDisplay.contains("kappa");
          boolean displayTotalCost = m_metricsToDisplay.contains("total cost");
          boolean displayAverageCost = m_metricsToDisplay.contains("average cost");

          if (displayCorrect) {
            text.append("Correctly Classified Instances ");
            text.append(Utils.doubleToString(correct(), 12, 4) + " " + Utils.doubleToString(pctCorrect(), 12, 4) + " %\n");
          }
          if (displayIncorrect) {
            text.append("Incorrectly Classified Instances ");
            text.append(Utils.doubleToString(incorrect(), 12, 4) + " " + Utils.doubleToString(pctIncorrect(), 12, 4) + " %\n");
          }
          if (displayKappa) {
            text.append("Kappa statistic ");
            text.append(Utils.doubleToString(kappa(), 12, 4) + "\n");
          }
          // cost-based lines only when a cost matrix was supplied
          if (m_CostMatrix != null) {
            if (displayTotalCost) {
              text.append("Total Cost ");
              text.append(Utils.doubleToString(totalCost(), 12, 4) + "\n");
            }
            if (displayAverageCost) {
              text.append("Average Cost ");
              text.append(Utils.doubleToString(avgCost(), 12, 4) + "\n");
            }
          }
          if (printComplexityStatistics) {
            boolean displayKBRelative = m_metricsToDisplay.contains("kb relative");
            boolean displayKBInfo = m_metricsToDisplay.contains("kb information");
            if (displayKBRelative) {
              text.append("K&B Relative Info Score ");
              text.append(Utils.doubleToString(KBRelativeInformation(), 12, 4) + " %\n");
            }
            if (displayKBInfo) {
              text.append("K&B Information Score ");
              text.append(Utils.doubleToString(KBInformation(), 12, 4) + " bits");
              text.append(Utils.doubleToString(KBMeanInformation(), 12, 4) + " bits/instance\n");
            }
          }

          // plugin metrics that apply exclusively to nominal classes; a metric
          // is shown only if its name AND all of its statistic names are enabled
          if (m_pluginMetrics != null) {
            for (AbstractEvaluationMetric m : m_pluginMetrics) {
              if (m instanceof StandardEvaluationMetric && m.appliesToNominalClass() && !m.appliesToNumericClass()) {
                String metricName = m.getMetricName().toLowerCase();
                boolean display = m_metricsToDisplay.contains(metricName);
                List<String> statNames = m.getStatisticNames();
                for (String s : statNames) {
                  display = (display && m_metricsToDisplay.contains(s.toLowerCase()));
                }
                if (display) {
                  String formattedS = ((StandardEvaluationMetric) m).toSummaryString();
                  text.append(formattedS);
                }
              }
            }
          }
        } else {
          // --- numeric class: correlation plus numeric-only plugin metrics ---
          boolean displayCorrelation = m_metricsToDisplay.contains("correlation");
          if (displayCorrelation) {
            text.append("Correlation coefficient ");
            text.append(Utils.doubleToString(correlationCoefficient(), 12, 4) + "\n");
          }

          if (m_pluginMetrics != null) {
            for (AbstractEvaluationMetric m : m_pluginMetrics) {
              if (m instanceof StandardEvaluationMetric && !m.appliesToNominalClass() && m.appliesToNumericClass()) {
                String metricName = m.getMetricName().toLowerCase();
                boolean display = m_metricsToDisplay.contains(metricName);
                List<String> statNames = m.getStatisticNames();
                for (String s : statNames) {
                  display = (display && m_metricsToDisplay.contains(s.toLowerCase()));
                }
                if (display) {
                  String formattedS = ((StandardEvaluationMetric) m).toSummaryString();
                  text.append(formattedS);
                }
              }
            }
          }
        }

        // --- complexity (entropy-based) statistics, both class types ---
        if (printComplexityStatistics && m_ComplexityStatisticsAvailable) {
          boolean displayComplexityOrder0 = m_metricsToDisplay.contains("complexity 0");
          boolean displayComplexityScheme = m_metricsToDisplay.contains("complexity scheme");
          boolean displayComplexityImprovement = m_metricsToDisplay.contains("complexity improvement");
          if (displayComplexityOrder0) {
            text.append("Class complexity | order 0 ");
            text.append(Utils.doubleToString(SFPriorEntropy(), 12, 4) + " bits");
            text.append(Utils.doubleToString(SFMeanPriorEntropy(), 12, 4) + " bits/instance\n");
          }
          if (displayComplexityScheme) {
            text.append("Class complexity | scheme ");
            text.append(Utils.doubleToString(SFSchemeEntropy(), 12, 4) + " bits");
            text.append(Utils.doubleToString(SFMeanSchemeEntropy(), 12, 4) + " bits/instance\n");
          }
          if (displayComplexityImprovement) {
            text.append("Complexity improvement (Sf) ");
            text.append(Utils.doubleToString(SFEntropyGain(), 12, 4) + " bits");
            text.append(Utils.doubleToString(SFMeanEntropyGain(), 12, 4) + " bits/instance\n");
          }
        }

        // information-theoretic plugin metrics matching the class type
        if (printComplexityStatistics && m_pluginMetrics != null) {
          for (AbstractEvaluationMetric m : m_pluginMetrics) {
            if (m instanceof InformationTheoreticEvaluationMetric) {
              if ((m_ClassIsNominal && m.appliesToNominalClass()) || (!m_ClassIsNominal && m.appliesToNumericClass())) {
                String metricName = m.getMetricName().toLowerCase();
                boolean display = m_metricsToDisplay.contains(metricName);
                List<String> statNames = m.getStatisticNames();
                for (String s : statNames) {
                  display = (display && m_metricsToDisplay.contains(s.toLowerCase()));
                }
                if (display) {
                  String formattedS = ((InformationTheoreticEvaluationMetric) m).toSummaryString();
                  text.append(formattedS);
                }
              }
            }
          }
        }

        // --- error measures common to both class types ---
        boolean displayMAE = m_metricsToDisplay.contains("mae");
        boolean displayRMSE = m_metricsToDisplay.contains("rmse");
        boolean displayRAE = m_metricsToDisplay.contains("rae");
        boolean displayRRSE = m_metricsToDisplay.contains("rrse");
        if (displayMAE) {
          text.append("Mean absolute error ");
          text.append(Utils.doubleToString(meanAbsoluteError(), 12, 4) + "\n");
        }
        if (displayRMSE) {
          text.append("Root mean squared error ");
          text.append(Utils.doubleToString(rootMeanSquaredError(), 12, 4) + "\n");
        }
        // relative errors need priors
        if (!m_NoPriors) {
          if (displayRAE) {
            text.append("Relative absolute error ");
            text.append(Utils.doubleToString(relativeAbsoluteError(), 12, 4) + " %\n");
          }
          if (displayRRSE) {
            text.append("Root relative squared error ");
            text.append(Utils.doubleToString(rootRelativeSquaredError(), 12, 4) + " %\n");
          }
        }

        // plugin metrics that apply to BOTH nominal and numeric classes
        if (m_pluginMetrics != null) {
          for (AbstractEvaluationMetric m : m_pluginMetrics) {
            if (m instanceof StandardEvaluationMetric && m.appliesToNominalClass() && m.appliesToNumericClass()) {
              String metricName = m.getMetricName().toLowerCase();
              boolean display = m_metricsToDisplay.contains(metricName);
              List<String> statNames = m.getStatisticNames();
              for (String s : statNames) {
                display = (display && m_metricsToDisplay.contains(s.toLowerCase()));
              }
              if (display) {
                String formattedS = ((StandardEvaluationMetric) m).toSummaryString();
                text.append(formattedS);
              }
            }
          }
        }

        // --- prediction-interval (coverage) statistics ---
        if (m_CoverageStatisticsAvailable) {
          boolean displayCoverage = m_metricsToDisplay.contains("coverage");
          boolean displayRegionSize = m_metricsToDisplay.contains("region size");
          if (displayCoverage) {
            text.append("Coverage of cases (" + Utils.doubleToString(m_ConfLevel, 4, 2) + " level) ");
            text.append(Utils.doubleToString(coverageOfTestCasesByPredictedRegions(), 12, 4) + " %\n");
          }
          if (!m_NoPriors) {
            if (displayRegionSize) {
              text.append("Mean rel. region size (" + Utils.doubleToString(m_ConfLevel, 4, 2) + " level) ");
              text.append(Utils.doubleToString(sizeOfPredictedRegions(), 12, 4) + " %\n");
            }
          }
        }
      }
      // totals are printed even when no instance had a known class value
      if (Utils.gr(unclassified(), 0)) {
        text.append("UnClassified Instances ");
        text.append(Utils.doubleToString(unclassified(), 12, 4) + " " + Utils.doubleToString(pctUnclassified(), 12, 4) + " %\n");
      }
      text.append("Total Number of Instances ");
      text.append(Utils.doubleToString(m_WithClass, 12, 4) + "\n");
      if (m_MissingClass > 0) {
        text.append("Ignored Class Unknown Instances ");
        text.append(Utils.doubleToString(m_MissingClass, 12, 4) + "\n");
      }
    } catch (Exception ex) {
      // Should never occur since the class is known to be nominal
      // here
      System.err.println("Arggh - Must be a bug in Evaluation class");
    }

    return text.toString();
  }

  /**
   * Calls toMatrixString() with a default title.
   *
   * @return the confusion matrix as a string
   * @throws Exception if the class is numeric
   */
  public String toMatrixString() throws Exception {
    return toMatrixString("=== Confusion Matrix ===\n");
  }

  /**
   * Outputs the performance statistics as a classification confusion matrix.
   * For each class value, shows the distribution of predicted class values.
* * @param title the title for the confusion matrix * @return the confusion matrix as a String * @throws Exception if the class is numeric */ public String toMatrixString(String title) throws Exception { StringBuffer text = new StringBuffer(); char[] IDChars = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' }; int IDWidth; boolean fractional = false; if (!m_ClassIsNominal) { throw new Exception("Evaluation: No confusion matrix possible!"); } // Find the maximum value in the matrix // and check for fractional display requirement double maxval = 0; for (int i = 0; i < m_NumClasses; i++) { for (int j = 0; j < m_NumClasses; j++) { double current = m_ConfusionMatrix[i][j]; if (current < 0) { current *= -10; } if (current > maxval) { maxval = current; } double fract = current - Math.rint(current); if (!fractional && ((Math.log(fract) / Math.log(10)) >= -2)) { fractional = true; } } } IDWidth = 1 + Math.max( (int) (Math.log(maxval) / Math.log(10) + (fractional ? 3 : 0)), (int) (Math.log(m_NumClasses) / Math.log(IDChars.length))); text.append(title).append("\n"); for (int i = 0; i < m_NumClasses; i++) { if (fractional) { text.append(" ").append(num2ShortID(i, IDChars, IDWidth - 3)) .append(" "); } else { text.append(" ").append(num2ShortID(i, IDChars, IDWidth)); } } text.append(" <-- classified as\n"); for (int i = 0; i < m_NumClasses; i++) { for (int j = 0; j < m_NumClasses; j++) { text.append(" ").append( Utils.doubleToString(m_ConfusionMatrix[i][j], IDWidth, (fractional ? 2 : 0))); } text.append(" | ").append(num2ShortID(i, IDChars, IDWidth)).append(" = ") .append(m_ClassNames[i]).append("\n"); } return text.toString(); } /** * Generates a breakdown of the accuracy for each class (with default title), * incorporating various information-retrieval statistics, such as true/false * positive rate, precision/recall/F-Measure. Should be useful for ROC curves, * recall/precision curves. 
   *
   * @return the statistics presented as a string
   * @throws Exception if class is not nominal
   */
  public String toClassDetailsString() throws Exception {
    return toClassDetailsString("=== Detailed Accuracy By Class ===\n");
  }

  /**
   * Generates a breakdown of the accuracy for each class, incorporating various
   * information-retrieval statistics, such as true/false positive rate,
   * precision/recall/F-Measure. Should be useful for ROC curves,
   * recall/precision curves.
   *
   * @param title the title to prepend the stats string with
   * @return the statistics presented as a string
   * @throws Exception if class is not nominal
   */
  public String toClassDetailsString(String title) throws Exception {
    if (!m_ClassIsNominal) {
      throw new Exception("Evaluation: No per class statistics possible!");
    }

    // each flag gates one column of the per-class table
    boolean displayTP = m_metricsToDisplay.contains("tp rate");
    boolean displayFP = m_metricsToDisplay.contains("fp rate");
    boolean displayP = m_metricsToDisplay.contains("precision");
    boolean displayR = m_metricsToDisplay.contains("recall");
    boolean displayFM = m_metricsToDisplay.contains("f-measure");
    boolean displayMCC = m_metricsToDisplay.contains("mcc");
    boolean displayROC = m_metricsToDisplay.contains("roc area");
    boolean displayPRC = m_metricsToDisplay.contains("prc area");

    // header row with one label per enabled column
    StringBuffer text = new StringBuffer(title + "\n " + (displayTP ? "TP Rate " : "") + (displayFP ? "FP Rate " : "") + (displayP ? "Precision " : "") + (displayR ? "Recall " : "") + (displayFM ? "F-Measure " : "") + (displayMCC ? "MCC " : "") + (displayROC ? "ROC Area " : "") + (displayPRC ? "PRC Area " : ""));

    // header labels for enabled information-retrieval plugin statistics
    if (m_pluginMetrics != null && m_pluginMetrics.size() > 0) {
      for (AbstractEvaluationMetric m : m_pluginMetrics) {
        if (m instanceof InformationRetrievalEvaluationMetric && m.appliesToNominalClass()) {
          String metricName = m.getMetricName().toLowerCase();
          if (m_metricsToDisplay.contains(metricName)) {
            List<String> statNames = m.getStatisticNames();
            for (String name : statNames) {
              if (m_metricsToDisplay.contains(name.toLowerCase())) {
                if (name.length() < 7) {
                  name = Utils.padRight(name, 7);
                }
                text.append(name).append(" ");
              }
            }
          }
        }
      }
    }

    text.append("Class\n");
    // one row per class value
    for (int i = 0; i < m_NumClasses; i++) {
      text.append(" ");
      if (displayTP) {
        text.append(String.format("%-9.3f", truePositiveRate(i)));
      }
      if (displayFP) {
        text.append(String.format("%-9.3f", falsePositiveRate(i)));
      }
      if (displayP) {
        text.append(String.format("%-11.3f", precision(i)));
      }
      if (displayR) {
        text.append(String.format("%-9.3f", recall(i)));
      }
      if (displayFM) {
        text.append(String.format("%-11.3f", fMeasure(i)));
      }
      if (displayMCC) {
        double mat = matthewsCorrelationCoefficient(i);
        if (Utils.isMissingValue(mat)) {
          text.append("? ");
        } else {
          text.append(String.format("%-9.3f", matthewsCorrelationCoefficient(i)));
        }
      }
      if (displayROC) {
        double rocVal = areaUnderROC(i);
        if (Utils.isMissingValue(rocVal)) {
          text.append("? ");
        } else {
          text.append(String.format("%-10.3f", rocVal));
        }
      }
      if (displayPRC) {
        double prcVal = areaUnderPRC(i);
        if (Utils.isMissingValue(prcVal)) {
          text.append("? ");
        } else {
          text.append(String.format("%-10.3f", prcVal));
        }
      }

      // per-class values of enabled information-retrieval plugin statistics
      if (m_pluginMetrics != null && m_pluginMetrics.size() > 0) {
        for (AbstractEvaluationMetric m : m_pluginMetrics) {
          if (m instanceof InformationRetrievalEvaluationMetric && m.appliesToNominalClass()) {
            String metricName = m.getMetricName().toLowerCase();
            if (m_metricsToDisplay.contains(metricName)) {
              List<String> statNames = m.getStatisticNames();
              for (String name : statNames) {
                if (m_metricsToDisplay.contains(name.toLowerCase())) {
                  double stat = ((InformationRetrievalEvaluationMetric) m).getStatistic(name, i);
                  if (name.length() < 7) {
                    name = Utils.padRight(name, 7);
                  }
                  if (Utils.isMissingValue(stat)) {
                    // NOTE(review): the padded "?" is computed but never
                    // appended to text, so a missing stat produces no column
                    // output here — looks like a bug; presumably this should be
                    // text.append(Utils.padRight("?", name.length())) + spacer.
                    Utils.padRight("?", name.length());
                  } else {
                    text.append(String.format("%-" + name.length() + ".3f", stat)).append(" ");
                  }
                }
              }
            }
          }
        }
      }
      text.append(m_ClassNames[i]).append('\n');
    }

    // final row: class-frequency-weighted averages of each column
    text.append("Weighted Avg. ");
    if (displayTP) {
      text.append(String.format("%-9.3f", weightedTruePositiveRate()));
    }
    if (displayFP) {
      text.append(String.format("%-9.3f", weightedFalsePositiveRate()));
    }
    if (displayP) {
      text.append(String.format("%-11.3f", weightedPrecision()));
    }
    if (displayR) {
      text.append(String.format("%-9.3f", weightedRecall()));
    }
    if (displayFM) {
      text.append(String.format("%-11.3f", weightedFMeasure()));
    }
    if (displayMCC) {
      text.append(String.format("%-9.3f", weightedMatthewsCorrelation()));
    }
    if (displayROC) {
      text.append(String.format("%-10.3f", weightedAreaUnderROC()));
    }
    if (displayPRC) {
      text.append(String.format("%-10.3f", weightedAreaUnderPRC()));
    }

    if (m_pluginMetrics != null && m_pluginMetrics.size() > 0) {
      for (AbstractEvaluationMetric m : m_pluginMetrics) {
        if (m instanceof InformationRetrievalEvaluationMetric && m.appliesToNominalClass()) {
          String metricName = m.getMetricName().toLowerCase();
          if (m_metricsToDisplay.contains(metricName)) {
            List<String> statNames = m.getStatisticNames();
            for (String name : statNames) {
              if (m_metricsToDisplay.contains(name.toLowerCase())) {
                double stat = ((InformationRetrievalEvaluationMetric) m).getClassWeightedAverageStatistic(name);
                if (name.length() < 7) {
                  name = Utils.padRight(name, 7);
                }
                if (Utils.isMissingValue(stat)) {
                  // NOTE(review): same discarded-return issue as in the
                  // per-class loop above — the "?" placeholder is never
                  // appended; verify intended behavior.
                  Utils.padRight("?", name.length());
                } else {
                  text.append(String.format("%-" + name.length() + ".3f", stat)).append(" ");
                }
              }
            }
          }
        }
      }
    }

    text.append("\n");
    return text.toString();
  }

  /**
   * Calculate the number of true positives with respect to a particular class.
   * This is defined as
   * <p/>
   *
   * <pre>
   * correctly classified positives
   * </pre>
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the true positive rate
   */
  public double numTruePositives(int classIndex) {
    double correct = 0;
    // only the diagonal cell [classIndex][classIndex] contributes
    for (int j = 0; j < m_NumClasses; j++) {
      if (j == classIndex) {
        correct += m_ConfusionMatrix[classIndex][j];
      }
    }
    return correct;
  }

  /**
   * Calculate the true positive rate with respect to a particular class. This
   * is defined as
   * <p/>
   *
   * <pre>
   * correctly classified positives
   * ------------------------------
   * total positives
   * </pre>
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the true positive rate
   */
  public double truePositiveRate(int classIndex) {
    double correct = 0, total = 0;
    // total sums row classIndex (all actual positives); correct is the diagonal
    for (int j = 0; j < m_NumClasses; j++) {
      if (j == classIndex) {
        correct += m_ConfusionMatrix[classIndex][j];
      }
      total += m_ConfusionMatrix[classIndex][j];
    }
    if (total == 0) {
      return 0;
    }
    return correct / total;
  }

  /**
   * Calculates the weighted (by class size) true positive rate.
   *
   * @return the weighted true positive rate.
   */
  public double weightedTruePositiveRate() {
    // Row sums of the confusion matrix give the per-class instance counts
    // that are used as the weights for the average.
    double[] classCounts = new double[m_NumClasses];
    double classCountSum = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      for (int j = 0; j < m_NumClasses; j++) {
        classCounts[i] += m_ConfusionMatrix[i][j];
      }
      classCountSum += classCounts[i];
    }
    double truePosTotal = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      double temp = truePositiveRate(i);
      truePosTotal += (temp * classCounts[i]);
    }
    return truePosTotal / classCountSum;
  }

  /**
   * Calculate the number of true negatives with respect to a particular class.
   * This is defined as
   * <p/>
   *
   * <pre>
   * correctly classified negatives
   * </pre>
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the number of true negatives
   */
  public double numTrueNegatives(int classIndex) {
    // Sum every confusion-matrix cell whose actual AND predicted class are
    // both different from classIndex.
    double correct = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      if (i != classIndex) {
        for (int j = 0; j < m_NumClasses; j++) {
          if (j != classIndex) {
            correct += m_ConfusionMatrix[i][j];
          }
        }
      }
    }
    return correct;
  }

  /**
   * Calculate the true negative rate with respect to a particular class. This
   * is defined as
   * <p/>
   *
   * <pre>
   * correctly classified negatives
   * ------------------------------
   * total negatives
   * </pre>
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the true negative rate (0 if there are no negatives)
   */
  public double trueNegativeRate(int classIndex) {
    double correct = 0, total = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      if (i != classIndex) {
        for (int j = 0; j < m_NumClasses; j++) {
          if (j != classIndex) {
            correct += m_ConfusionMatrix[i][j];
          }
          total += m_ConfusionMatrix[i][j];
        }
      }
    }
    if (total == 0) {
      return 0;
    }
    return correct / total;
  }

  /**
   * Calculates the weighted (by class size) true negative rate.
   *
   * @return the weighted true negative rate.
   */
  public double weightedTrueNegativeRate() {
    double[] classCounts = new double[m_NumClasses];
    double classCountSum = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      for (int j = 0; j < m_NumClasses; j++) {
        classCounts[i] += m_ConfusionMatrix[i][j];
      }
      classCountSum += classCounts[i];
    }
    double trueNegTotal = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      double temp = trueNegativeRate(i);
      trueNegTotal += (temp * classCounts[i]);
    }
    return trueNegTotal / classCountSum;
  }

  /**
   * Calculate number of false positives with respect to a particular class.
   * This is defined as
   * <p/>
   *
   * <pre>
   * incorrectly classified negatives
   * </pre>
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the number of false positives
   */
  public double numFalsePositives(int classIndex) {
    // Instances of some other class that were predicted as classIndex.
    double incorrect = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      if (i != classIndex) {
        for (int j = 0; j < m_NumClasses; j++) {
          if (j == classIndex) {
            incorrect += m_ConfusionMatrix[i][j];
          }
        }
      }
    }
    return incorrect;
  }

  /**
   * Calculate the false positive rate with respect to a particular class. This
   * is defined as
   * <p/>
   *
   * <pre>
   * incorrectly classified negatives
   * --------------------------------
   * total negatives
   * </pre>
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the false positive rate (0 if there are no negatives)
   */
  public double falsePositiveRate(int classIndex) {
    double incorrect = 0, total = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      if (i != classIndex) {
        for (int j = 0; j < m_NumClasses; j++) {
          if (j == classIndex) {
            incorrect += m_ConfusionMatrix[i][j];
          }
          total += m_ConfusionMatrix[i][j];
        }
      }
    }
    if (total == 0) {
      return 0;
    }
    return incorrect / total;
  }

  /**
   * Calculates the weighted (by class size) false positive rate.
   *
   * @return the weighted false positive rate.
   */
  public double weightedFalsePositiveRate() {
    double[] classCounts = new double[m_NumClasses];
    double classCountSum = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      for (int j = 0; j < m_NumClasses; j++) {
        classCounts[i] += m_ConfusionMatrix[i][j];
      }
      classCountSum += classCounts[i];
    }
    double falsePosTotal = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      double temp = falsePositiveRate(i);
      falsePosTotal += (temp * classCounts[i]);
    }
    return falsePosTotal / classCountSum;
  }

  /**
   * Calculate number of false negatives with respect to a particular class.
   * This is defined as
   * <p/>
   *
   * <pre>
   * incorrectly classified positives
   * </pre>
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the number of false negatives
   */
  public double numFalseNegatives(int classIndex) {
    // Instances of classIndex that were predicted as some other class.
    double incorrect = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      if (i == classIndex) {
        for (int j = 0; j < m_NumClasses; j++) {
          if (j != classIndex) {
            incorrect += m_ConfusionMatrix[i][j];
          }
        }
      }
    }
    return incorrect;
  }

  /**
   * Calculate the false negative rate with respect to a particular class. This
   * is defined as
   * <p/>
   *
   * <pre>
   * incorrectly classified positives
   * --------------------------------
   * total positives
   * </pre>
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the false negative rate (0 if there are no positives)
   */
  public double falseNegativeRate(int classIndex) {
    double incorrect = 0, total = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      if (i == classIndex) {
        for (int j = 0; j < m_NumClasses; j++) {
          if (j != classIndex) {
            incorrect += m_ConfusionMatrix[i][j];
          }
          total += m_ConfusionMatrix[i][j];
        }
      }
    }
    if (total == 0) {
      return 0;
    }
    return incorrect / total;
  }

  /**
   * Calculates the weighted (by class size) false negative rate.
   *
   * @return the weighted false negative rate.
   */
  public double weightedFalseNegativeRate() {
    // Weights are the per-class instance counts (confusion-matrix row sums).
    double[] classCounts = new double[m_NumClasses];
    double classCountSum = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      for (int j = 0; j < m_NumClasses; j++) {
        classCounts[i] += m_ConfusionMatrix[i][j];
      }
      classCountSum += classCounts[i];
    }
    double falseNegTotal = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      double temp = falseNegativeRate(i);
      falseNegTotal += (temp * classCounts[i]);
    }
    return falseNegTotal / classCountSum;
  }

  /**
   * Calculates the matthews correlation coefficient (sometimes called phi
   * coefficient) for the supplied class
   *
   * @param classIndex the index of the class to compute the matthews
   *          correlation coefficient for
   *
   * @return the matthews correlation coefficient
   */
  public double matthewsCorrelationCoefficient(int classIndex) {
    double numTP = numTruePositives(classIndex);
    double numTN = numTrueNegatives(classIndex);
    double numFP = numFalsePositives(classIndex);
    double numFN = numFalseNegatives(classIndex);
    double n = (numTP * numTN) - (numFP * numFN);
    double d = (numTP + numFP) * (numTP + numFN) * (numTN + numFP)
      * (numTN + numFN);
    d = Math.sqrt(d);
    // When any marginal count is zero the denominator is 0; treating it as 1
    // makes the coefficient 0 instead of NaN.
    if (d == 0) {
      d = 1;
    }

    return n / d;
  }

  /**
   * Calculates the weighted (by class size) matthews correlation coefficient.
   *
   * @return the weighted matthews correlation coefficient.
   */
  public double weightedMatthewsCorrelation() {
    double[] classCounts = new double[m_NumClasses];
    double classCountSum = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      for (int j = 0; j < m_NumClasses; j++) {
        classCounts[i] += m_ConfusionMatrix[i][j];
      }
      classCountSum += classCounts[i];
    }
    double mccTotal = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      double temp = matthewsCorrelationCoefficient(i);
      // Missing per-class values are skipped but their class count still
      // contributes to the divisor below.
      if (!Utils.isMissingValue(temp)) {
        mccTotal += (temp * classCounts[i]);
      }
    }

    return mccTotal / classCountSum;
  }

  /**
   * Calculate the recall with respect to a particular class. This is defined as
   * <p/>
   *
   * <pre>
   * correctly classified positives
   * ------------------------------
   * total positives
   * </pre>
   * <p/>
   * (Which is also the same as the truePositiveRate.)
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the recall
   */
  public double recall(int classIndex) {
    return truePositiveRate(classIndex);
  }

  /**
   * Calculates the weighted (by class size) recall.
   *
   * @return the weighted recall.
   */
  public double weightedRecall() {
    return weightedTruePositiveRate();
  }

  /**
   * Calculate the precision with respect to a particular class. This is defined
   * as
   * <p/>
   *
   * <pre>
   * correctly classified positives
   * ------------------------------
   * total predicted as positive
   * </pre>
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the precision (0 if nothing was predicted as positive)
   */
  public double precision(int classIndex) {
    // Column classIndex of the confusion matrix: everything predicted as
    // classIndex; the diagonal cell is the correct part.
    double correct = 0, total = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      if (i == classIndex) {
        correct += m_ConfusionMatrix[i][classIndex];
      }
      total += m_ConfusionMatrix[i][classIndex];
    }
    if (total == 0) {
      return 0;
    }
    return correct / total;
  }

  /**
   * Calculates the weighted (by class size) precision.
   *
   * @return the weighted precision.
   */
  public double weightedPrecision() {
    double[] classCounts = new double[m_NumClasses];
    double classCountSum = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      for (int j = 0; j < m_NumClasses; j++) {
        classCounts[i] += m_ConfusionMatrix[i][j];
      }
      classCountSum += classCounts[i];
    }
    double precisionTotal = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      double temp = precision(i);
      precisionTotal += (temp * classCounts[i]);
    }
    return precisionTotal / classCountSum;
  }

  /**
   * Calculate the F-Measure with respect to a particular class.
   * This is defined
   * as
   * <p/>
   *
   * <pre>
   * 2 * recall * precision
   * ----------------------
   * recall + precision
   * </pre>
   *
   * @param classIndex the index of the class to consider as "positive"
   * @return the F-Measure (0 when precision + recall is 0)
   */
  public double fMeasure(int classIndex) {
    double precision = precision(classIndex);
    double recall = recall(classIndex);
    if ((precision + recall) == 0) {
      return 0;
    }
    return 2 * precision * recall / (precision + recall);
  }

  /**
   * Calculates the macro weighted (by class size) average F-Measure.
   *
   * @return the weighted F-Measure.
   */
  public double weightedFMeasure() {
    double[] classCounts = new double[m_NumClasses];
    double classCountSum = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      for (int j = 0; j < m_NumClasses; j++) {
        classCounts[i] += m_ConfusionMatrix[i][j];
      }
      classCountSum += classCounts[i];
    }
    double fMeasureTotal = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      double temp = fMeasure(i);
      fMeasureTotal += (temp * classCounts[i]);
    }
    return fMeasureTotal / classCountSum;
  }

  /**
   * Unweighted macro-averaged F-measure. If some classes not present in the
   * test set, they're just skipped (since recall is undefined there anyway) .
   *
   * @return unweighted macro-averaged F-measure.
   *
   */
  public double unweightedMacroFmeasure() {
    weka.core.Stats rr = new weka.core.Stats();
    for (int c = 0; c < m_NumClasses; c++) {
      // skip if no testing positive cases of this class
      if (numTruePositives(c) + numFalseNegatives(c) > 0) {
        rr.add(fMeasure(c));
      }
    }
    rr.calculateDerived();
    return rr.mean;
  }

  /**
   * Unweighted micro-averaged F-measure. If some classes not present in the
   * test set, they have no effect.
   *
   * Note: if the test set is *single-label*, then this is the same as accuracy.
   *
   * @return unweighted micro-averaged F-measure.
   */
  public double unweightedMicroFmeasure() {
    // Pool TP/FN/FP over all classes before forming the F-measure.
    double tp = 0;
    double fn = 0;
    double fp = 0;
    for (int c = 0; c < m_NumClasses; c++) {
      tp += numTruePositives(c);
      fn += numFalseNegatives(c);
      fp += numFalsePositives(c);
    }
    return 2 * tp / (2 * tp + fn + fp);
  }

  /**
   * Sets the class prior probabilities.
   *
   * @param train the training instances used to determine the prior
   *          probabilities
   * @throws Exception if the class attribute of the instances is not set
   */
  public void setPriors(Instances train) throws Exception {
    m_NoPriors = false;

    if (!m_ClassIsNominal) {

      // Numeric class: reset the buffered target values and accumulate the
      // weighted sum of class values as the "prior".
      m_NumTrainClassVals = 0;
      m_TrainClassVals = null;
      m_TrainClassWeights = null;
      m_PriorEstimator = null;

      m_MinTarget = Double.MAX_VALUE;
      m_MaxTarget = -Double.MAX_VALUE;

      for (int i = 0; i < train.numInstances(); i++) {
        Instance currentInst = train.instance(i);
        if (!currentInst.classIsMissing()) {
          addNumericTrainClass(currentInst.classValue(), currentInst.weight());
        }
      }

      m_ClassPriors[0] = m_ClassPriorsSum = 0;
      for (int i = 0; i < train.numInstances(); i++) {
        if (!train.instance(i).classIsMissing()) {
          m_ClassPriors[0] += train.instance(i).classValue()
            * train.instance(i).weight();
          m_ClassPriorsSum += train.instance(i).weight();
        }
      }

    } else {

      // Nominal class: Laplace-style initialisation with a count of 1 per
      // class, then add the observed (weighted) class frequencies.
      for (int i = 0; i < m_NumClasses; i++) {
        m_ClassPriors[i] = 1;
      }
      m_ClassPriorsSum = m_NumClasses;
      for (int i = 0; i < train.numInstances(); i++) {
        if (!train.instance(i).classIsMissing()) {
          m_ClassPriors[(int) train.instance(i).classValue()] += train
            .instance(i).weight();
          m_ClassPriorsSum += train.instance(i).weight();
        }
      }
      m_MaxTarget = m_NumClasses;
      m_MinTarget = 0;
    }
  }

  /**
   * Get the current weighted class counts.
   *
   * @return the weighted class counts
   */
  public double[] getClassPriors() {
    return m_ClassPriors;
  }

  /**
   * Updates the class prior probabilities or the mean respectively (when
   * incrementally training).
   *
   * @param instance the new training instance seen
   * @throws Exception if the class of the instance is not set
   */
  public void updatePriors(Instance instance) throws Exception {
    if (!instance.classIsMissing()) {
      if (!m_ClassIsNominal) {
        // Numeric class: buffer the value and grow the weighted sum/mean.
        addNumericTrainClass(instance.classValue(), instance.weight());
        m_ClassPriors[0] += instance.classValue() * instance.weight();
        m_ClassPriorsSum += instance.weight();
      } else {
        // Nominal class: bump the weighted count of the observed class.
        m_ClassPriors[(int) instance.classValue()] += instance.weight();
        m_ClassPriorsSum += instance.weight();
      }
    }
  }

  /**
   * disables the use of priors, e.g., in case of de-serialized schemes that
   * have no access to the original training set, but are evaluated on a test
   * set.
   */
  public void useNoPriors() {
    m_NoPriors = true;
  }

  /**
   * Tests whether the current evaluation object is equal to another evaluation
   * object.
   *
   * NOTE(review): equals() is overridden here without a matching hashCode()
   * override in this block — confirm hashCode consistency before using
   * Evaluation objects as hash keys. Double fields are compared with ==, so
   * any NaN statistic makes two otherwise identical objects unequal.
   *
   * @param obj the object to compare against
   * @return true if the two objects are equal
   */
  @Override
  public boolean equals(Object obj) {
    if ((obj == null) || !(obj.getClass().equals(this.getClass()))) {
      return false;
    }
    Evaluation cmp = (Evaluation) obj;
    if (m_ClassIsNominal != cmp.m_ClassIsNominal)
      return false;
    if (m_NumClasses != cmp.m_NumClasses)
      return false;

    if (m_Incorrect != cmp.m_Incorrect)
      return false;
    if (m_Correct != cmp.m_Correct)
      return false;
    if (m_Unclassified != cmp.m_Unclassified)
      return false;
    if (m_MissingClass != cmp.m_MissingClass)
      return false;
    if (m_WithClass != cmp.m_WithClass)
      return false;

    if (m_SumErr != cmp.m_SumErr)
      return false;
    if (m_SumAbsErr != cmp.m_SumAbsErr)
      return false;
    if (m_SumSqrErr != cmp.m_SumSqrErr)
      return false;
    if (m_SumClass != cmp.m_SumClass)
      return false;
    if (m_SumSqrClass != cmp.m_SumSqrClass)
      return false;
    if (m_SumPredicted != cmp.m_SumPredicted)
      return false;
    if (m_SumSqrPredicted != cmp.m_SumSqrPredicted)
      return false;
    if (m_SumClassPredicted != cmp.m_SumClassPredicted)
      return false;

    if (m_ClassIsNominal) {
      for (int i = 0; i < m_NumClasses; i++) {
        for (int j = 0; j < m_NumClasses; j++) {
          if
          (m_ConfusionMatrix[i][j] != cmp.m_ConfusionMatrix[i][j]) {
            return false;
          }
        }
      }
    }

    return true;
  }

  /**
   * Make up the help string giving all the command line options.
   *
   * @param classifier the classifier to include options for
   * @param globalInfo include the global information string for the classifier
   *          (if available).
   * @return a string detailing the valid command line options
   */
  protected static String makeOptionString(Classifier classifier,
    boolean globalInfo) {

    StringBuffer optionsText = new StringBuffer("");

    // General options
    optionsText.append("\n\nGeneral options:\n\n");
    optionsText.append("-h or -help\n");
    optionsText.append("\tOutput help information.\n");
    optionsText.append("-synopsis or -info\n");
    optionsText.append("\tOutput synopsis for classifier (use in conjunction "
      + " with -h)\n");
    optionsText.append("-t <name of training file>\n");
    optionsText.append("\tSets training file.\n");
    optionsText.append("-T <name of test file>\n");
    optionsText
      .append("\tSets test file. If missing, a cross-validation will be performed\n");
    optionsText.append("\ton the training data.\n");
    optionsText.append("-c <class index>\n");
    optionsText.append("\tSets index of class attribute (default: last).\n");
    optionsText.append("-x <number of folds>\n");
    optionsText
      .append("\tSets number of folds for cross-validation (default: 10).\n");
    optionsText.append("-no-cv\n");
    optionsText.append("\tDo not perform any cross validation.\n");
    optionsText.append("-force-batch-training\n");
    optionsText
      .append("\tAlways train classifier in batch mode, never incrementally.\n");
    optionsText.append("-split-percentage <percentage>\n");
    optionsText
      .append("\tSets the percentage for the train/test set split, e.g., 66.\n");
    optionsText.append("-preserve-order\n");
    optionsText.append("\tPreserves the order in the percentage split.\n");
    optionsText.append("-s <random number seed>\n");
    optionsText
      .append("\tSets random number seed for cross-validation or percentage split\n");
    optionsText.append("\t(default: 1).\n");
    optionsText.append("-m <name of file with cost matrix>\n");
    optionsText.append("\tSets file with cost matrix.\n");
    optionsText
      .append("-disable <comma-separated list of evaluation metric names>\n");
    optionsText
      .append("\tComma separated list of metric names not to print to the output.\n\t");
    optionsText.append("Available metrics:\n\t");

    // Built-in metric names plus any plugin metrics found on the classpath.
    List<String> metricsToDisplay = new ArrayList<String>(
      Arrays.asList(BUILT_IN_EVAL_METRICS));

    List<AbstractEvaluationMetric> pluginMetrics = AbstractEvaluationMetric
      .getPluginMetrics();
    if (pluginMetrics != null) {
      for (AbstractEvaluationMetric m : pluginMetrics) {
        if (m instanceof InformationRetrievalEvaluationMetric) {
          List<String> statNames = m.getStatisticNames();
          for (String s : statNames) {
            metricsToDisplay.add(s.toLowerCase());
          }
        } else {
          metricsToDisplay.add(m.getMetricName().toLowerCase());
        }
      }
    }

    // Wrap the metric list at roughly 60 characters per line.
    int length = 0;
    for (int i = 0; i < metricsToDisplay.size(); i++) {
      optionsText.append(metricsToDisplay.get(i));
      length += metricsToDisplay.get(i).length();
      if (i != metricsToDisplay.size() - 1) {
        optionsText.append(",");
      }
      if (length >= 60) {
        optionsText.append("\n\t");
        length = 0;
      }
    }
    optionsText.append("\n");
    optionsText.append("-l <name of input file>\n");
    optionsText
      .append("\tSets model input file. In case the filename ends with '.xml',\n");
    optionsText
      .append("\ta PMML file is loaded or, if that fails, options are loaded\n");
    optionsText.append("\tfrom the XML file.\n");
    optionsText.append("-d <name of output file>\n");
    optionsText
      .append("\tSets model output file. In case the filename ends with '.xml',\n");
    optionsText
      .append("\tonly the options are saved to the XML file, not the model.\n");
    optionsText.append("-v\n");
    optionsText.append("\tOutputs no statistics for training data.\n");
    optionsText.append("-o\n");
    optionsText.append("\tOutputs statistics only, not the classifier.\n");
    optionsText.append("-i\n");
    optionsText.append("\tOutputs detailed information-retrieval");
    optionsText.append(" statistics for each class.\n");
    optionsText.append("-k\n");
    optionsText.append("\tOutputs information-theoretic statistics.\n");
    optionsText
      .append("-classifications \"weka.classifiers.evaluation.output.prediction.AbstractOutput + options\"\n");
    optionsText
      .append("\tUses the specified class for generating the classification output.\n");
    optionsText.append("\tE.g.: " + PlainText.class.getName() + "\n");
    optionsText.append("-p range\n");
    optionsText
      .append("\tOutputs predictions for test instances (or the train instances if\n");
    optionsText
      .append("\tno test instances provided and -no-cv is used), along with the \n");
    optionsText
      .append("\tattributes in the specified range (and nothing else). \n");
    optionsText.append("\tUse '-p 0' if no attributes are desired.\n");
    optionsText.append("\tDeprecated: use \"-classifications ...\" instead.\n");
    optionsText.append("-distribution\n");
    optionsText
      .append("\tOutputs the distribution instead of only the prediction\n");
    optionsText
      .append("\tin conjunction with the '-p' option (only nominal classes).\n");
    optionsText.append("\tDeprecated: use \"-classifications ...\" instead.\n");
    optionsText.append("-r\n");
    optionsText.append("\tOnly outputs cumulative margin distribution.\n");
    if (classifier instanceof Sourcable) {
      optionsText.append("-z <class name>\n");
      optionsText.append("\tOnly outputs the source representation"
        + " of the classifier,\n\tgiving it the supplied" + " name.\n");
    }
    if (classifier instanceof Drawable) {
      optionsText.append("-g\n");
      optionsText.append("\tOnly outputs the graph representation"
        + " of the classifier.\n");
    }
    optionsText.append("-xml filename | xml-string\n");
    optionsText
      .append("\tRetrieves the options from the XML-data instead of the "
        + "command line.\n");
    optionsText.append("-threshold-file <file>\n");
    optionsText
      .append("\tThe file to save the threshold data to.\n"
        + "\tThe format is determined by the extensions, e.g., '.arff' for ARFF \n"
        + "\tformat or '.csv' for CSV.\n");
    optionsText.append("-threshold-label <label>\n");
    optionsText
      .append("\tThe class label to determine the threshold data for\n"
        + "\t(default is the first label)\n");

    // Get scheme-specific options
    if (classifier instanceof OptionHandler) {
      optionsText.append("\nOptions specific to "
        + classifier.getClass().getName() + ":\n\n");
      Enumeration enu = ((OptionHandler) classifier).listOptions();
      while (enu.hasMoreElements()) {
        Option option = (Option) enu.nextElement();
        optionsText.append(option.synopsis() + '\n');
        optionsText.append(option.description() + "\n");
      }
    }

    // Get global information (if available)
    if (globalInfo) {
      try {
        String gi = getGlobalInfo(classifier);
        optionsText.append(gi);
      } catch (Exception ex) {
        // quietly ignore
      }
    }

    return optionsText.toString();
  }

  /**
   * Return the global info (if it exists) for the supplied classifier.
   *
   * @param classifier the classifier to get the global info for
   * @return the global info (synopsis) for the classifier
   * @throws Exception if there is a problem reflecting on the classifier
   */
  protected static String getGlobalInfo(Classifier classifier) throws Exception {
    BeanInfo bi = Introspector.getBeanInfo(classifier.getClass());
    MethodDescriptor[] methods;
    methods = bi.getMethodDescriptors();
    Object[] args = {};
    String result = "\nSynopsis for " + classifier.getClass().getName()
      + ":\n\n";

    // Look for a no-argument globalInfo() method via bean introspection and
    // invoke it if present.
    for (int i = 0; i < methods.length; i++) {
      String name = methods[i].getDisplayName();
      Method meth = methods[i].getMethod();
      if (name.equals("globalInfo")) {
        String globalInfo = (String) (meth.invoke(classifier, args));
        result += globalInfo;
        break;
      }
    }
    return result;
  }

  /**
   * Method for generating indices for the confusion matrix.
   *
   * Encodes num in a base-IDChars.length representation, right-aligned in a
   * field of IDWidth characters (left-padded with spaces).
   *
   * @param num integer to format
   * @param IDChars the characters to use
   * @param IDWidth the width of the entry
   * @return the formatted integer as a string
   */
  protected String num2ShortID(int num, char[] IDChars, int IDWidth) {

    char ID[] = new char[IDWidth];
    int i;

    for (i = IDWidth - 1; i >= 0; i--) {
      ID[i] = IDChars[num % IDChars.length];
      num = num / IDChars.length - 1;
      if (num < 0) {
        break;
      }
    }
    for (i--; i >= 0; i--) {
      ID[i] = ' ';
    }

    return new String(ID);
  }

  /**
   * Convert a single prediction into a probability distribution with all zero
   * probabilities except the predicted value which has probability 1.0.
   *
   * @param predictedClass the index of the predicted class
   * @return the probability distribution
   */
  protected double[] makeDistribution(double predictedClass) {

    double[] result = new double[m_NumClasses];
    if (Utils.isMissingValue(predictedClass)) {
      // Missing prediction: all-zero distribution.
      return result;
    }
    if (m_ClassIsNominal) {
      result[(int) predictedClass] = 1.0;
    } else {
      // Numeric class: slot 0 carries the predicted value itself.
      result[0] = predictedClass;
    }
    return result;
  }

  /**
   * Updates all the statistics about a classifiers performance for the current
   * test instance.
   *
   * @param predictedDistribution the probabilities assigned to each class
   * @param instance the instance to be classified
   * @throws Exception if the class of the instance is not set
   */
  protected void updateStatsForClassifier(double[] predictedDistribution,
    Instance instance) throws Exception {

    int actualClass = (int) instance.classValue();

    if (!instance.classIsMissing()) {
      updateMargins(predictedDistribution, actualClass, instance.weight());

      // Determine the predicted class (doesn't detect multiple
      // classifications)
      int predictedClass = -1;
      double bestProb = 0.0;
      for (int i = 0; i < m_NumClasses; i++) {
        if (predictedDistribution[i] > bestProb) {
          predictedClass = i;
          bestProb = predictedDistribution[i];
        }
      }

      m_WithClass += instance.weight();

      // Determine misclassification cost
      if (m_CostMatrix != null) {
        if (predictedClass < 0) {
          // For missing predictions, we assume the worst possible cost.
          // This is pretty harsh.
          // Perhaps we could take the negative of the cost of a correct
          // prediction (-m_CostMatrix.getElement(actualClass,actualClass)),
          // although often this will be zero
          m_TotalCost += instance.weight()
            * m_CostMatrix.getMaxCost(actualClass, instance);
        } else {
          m_TotalCost += instance.weight()
            * m_CostMatrix.getElement(actualClass, predictedClass, instance);
        }
      }

      // Update counts when no class was predicted
      if (predictedClass < 0) {
        m_Unclassified += instance.weight();
        return;
      }

      // Clamp probabilities away from 0 before taking logs below.
      double predictedProb = Math.max(MIN_SF_PROB,
        predictedDistribution[actualClass]);
      double priorProb = Math.max(MIN_SF_PROB, m_ClassPriors[actualClass]
        / m_ClassPriorsSum);
      if (predictedProb >= priorProb) {
        m_SumKBInfo += (Utils.log2(predictedProb) - Utils.log2(priorProb))
          * instance.weight();
      } else {
        m_SumKBInfo -= (Utils.log2(1.0 - predictedProb) - Utils
          .log2(1.0 - priorProb)) * instance.weight();
      }

      m_SumSchemeEntropy -= Utils.log2(predictedProb) * instance.weight();
      m_SumPriorEntropy -= Utils.log2(priorProb) * instance.weight();

      updateNumericScores(predictedDistribution,
        makeDistribution(instance.classValue()), instance.weight());

      // Update coverage stats: collect classes in decreasing probability
      // order until the confidence level is reached.
      int[] indices = Utils.stableSort(predictedDistribution);
      double sum = 0, sizeOfRegions = 0;
      for (int i = predictedDistribution.length - 1; i >= 0; i--) {
        if (sum >= m_ConfLevel) {
          break;
        }
        sum += predictedDistribution[indices[i]];
        sizeOfRegions++;
        if (actualClass == indices[i]) {
          m_TotalCoverage += instance.weight();
        }
      }
      m_TotalSizeOfRegions += sizeOfRegions / (m_MaxTarget - m_MinTarget);

      // Update other stats
      m_ConfusionMatrix[actualClass][predictedClass] += instance.weight();
      if (predictedClass != actualClass) {
        m_Incorrect += instance.weight();
      } else {
        m_Correct += instance.weight();
      }
    } else {
      m_MissingClass += instance.weight();
    }

    // Forward the update to any plugin metrics as well.
    if (m_pluginMetrics != null) {
      for (AbstractEvaluationMetric m : m_pluginMetrics) {
        if (m instanceof StandardEvaluationMetric) {
          ((StandardEvaluationMetric) m).updateStatsForClassifier(
            predictedDistribution, instance);
        } else if (m instanceof InformationRetrievalEvaluationMetric) {
          ((InformationRetrievalEvaluationMetric) m).updateStatsForClassifier(
            predictedDistribution, instance);
        } else if (m instanceof InformationTheoreticEvaluationMetric) {
          ((InformationTheoreticEvaluationMetric) m).updateStatsForClassifier(
            predictedDistribution, instance);
        }
      }
    }
  }

  /**
   * Updates stats for interval estimator based on current test instance.
   *
   * @param classifier the interval estimator
   * @param classMissing the instance for which the intervals are computed,
   *          without a class value
   * @param classValue the class value of this instance
   * @throws Exception if intervals could not be computed successfully
   */
  protected void updateStatsForIntervalEstimator(IntervalEstimator classifier,
    Instance classMissing, double classValue) throws Exception {

    double[][] preds = classifier.predictIntervals(classMissing, m_ConfLevel);
    if (m_Predictions != null)
      ((NumericPrediction) m_Predictions.lastElement())
        .setPredictionIntervals(preds);
    // Accumulate normalised total interval width.
    for (int i = 0; i < preds.length; i++) {
      m_TotalSizeOfRegions += (preds[i][1] - preds[i][0])
        / (m_MaxTarget - m_MinTarget);
    }
    // Count the instance as covered if any interval contains the true value.
    for (int i = 0; i < preds.length; i++) {
      if ((preds[i][1] >= classValue) && (preds[i][0] <= classValue)) {
        m_TotalCoverage += classMissing.weight();
        break;
      }
    }

    if (m_pluginMetrics != null) {
      for (AbstractEvaluationMetric m : m_pluginMetrics) {
        if (m instanceof IntervalBasedEvaluationMetric) {
          ((IntervalBasedEvaluationMetric) m).updateStatsForIntervalEstimator(
            classifier, classMissing, classValue);
        }
      }
    }
  }

  /**
   * Updates stats for conditional density estimator based on current test
   * instance.
   *
   * @param classifier the conditional density estimator
   * @param classMissing the instance for which density is to be computed,
   *          without a class value
   * @param classValue the class value of this instance
   * @throws Exception if density could not be computed successfully
   */
  protected void updateStatsForConditionalDensityEstimator(
    ConditionalDensityEstimator classifier, Instance classMissing,
    double classValue) throws Exception {

    if (m_PriorEstimator == null) {
      // Lazily build the prior density from the buffered training targets.
      setNumericPriorsFromBuffer();
    }
    // Divide by Utils.log2 to convert natural-log densities to bits.
    m_SumSchemeEntropy -= classifier.logDensity(classMissing, classValue)
      * classMissing.weight() / Utils.log2;
    m_SumPriorEntropy -= m_PriorEstimator.logDensity(classValue)
      * classMissing.weight() / Utils.log2;
  }

  /**
   * Updates all the statistics about a predictors performance for the current
   * test instance.
   *
   * @param predictedValue the numeric value the classifier predicts
   * @param instance the instance to be classified
   * @throws Exception if the class of the instance is not set
   */
  protected void updateStatsForPredictor(double predictedValue,
    Instance instance) throws Exception {

    if (!instance.classIsMissing()) {

      // Update stats
      m_WithClass += instance.weight();
      if (Utils.isMissingValue(predictedValue)) {
        m_Unclassified += instance.weight();
        return;
      }
      m_SumClass += instance.weight() * instance.classValue();
      m_SumSqrClass += instance.weight() * instance.classValue()
        * instance.classValue();
      m_SumClassPredicted += instance.weight() * instance.classValue()
        * predictedValue;
      m_SumPredicted += instance.weight() * predictedValue;
      m_SumSqrPredicted += instance.weight() * predictedValue * predictedValue;

      updateNumericScores(makeDistribution(predictedValue),
        makeDistribution(instance.classValue()), instance.weight());

    } else
      m_MissingClass += instance.weight();

    // Forward the update to any plugin metrics as well.
    if (m_pluginMetrics != null) {
      for (AbstractEvaluationMetric m : m_pluginMetrics) {
        if (m instanceof StandardEvaluationMetric) {
          ((StandardEvaluationMetric) m).updateStatsForPredictor(
            predictedValue, instance);
        } else if (m instanceof InformationTheoreticEvaluationMetric) {
          ((InformationTheoreticEvaluationMetric) m).updateStatsForPredictor(
            predictedValue, instance);
        }
      }
    }
  }

  /**
   * Update the cumulative record of classification margins.
   *
   * @param predictedDistribution the probability distribution predicted for the
   *          current instance
   * @param actualClass the index of the actual instance class
   * @param weight the weight assigned to the instance
   */
  protected void updateMargins(double[] predictedDistribution, int actualClass,
    double weight) {

    double probActual = predictedDistribution[actualClass];
    double probNext = 0;

    for (int i = 0; i < m_NumClasses; i++)
      if ((i != actualClass) && (predictedDistribution[i] > probNext))
        probNext = predictedDistribution[i];

    double margin = probActual - probNext;
    // Maps margin in [-1, 1] onto bin [0, k_MarginResolution]; assumes
    // m_MarginCounts has k_MarginResolution + 1 slots — TODO confirm against
    // the field declaration.
    int bin = (int) ((margin + 1.0) / 2.0 * k_MarginResolution);
    m_MarginCounts[bin] += weight;
  }

  /**
   * Update the numeric accuracy measures. For numeric classes, the accuracy is
   * between the actual and predicted class values. For nominal classes, the
   * accuracy is between the actual and predicted class probabilities.
   *
   * @param predicted the predicted values
   * @param actual the actual value
   * @param weight the weight associated with this prediction
   */
  protected void updateNumericScores(double[] predicted, double[] actual,
    double weight) {

    double diff;
    double sumErr = 0, sumAbsErr = 0, sumSqrErr = 0;
    double sumPriorAbsErr = 0, sumPriorSqrErr = 0;
    for (int i = 0; i < m_NumClasses; i++) {
      diff = predicted[i] - actual[i];
      sumErr += diff;
      sumAbsErr += Math.abs(diff);
      sumSqrErr += diff * diff;
      // Error of the prior (class-frequency) prediction, for relative stats.
      diff = (m_ClassPriors[i] / m_ClassPriorsSum) - actual[i];
      sumPriorAbsErr += Math.abs(diff);
      sumPriorSqrErr += diff * diff;
    }
    m_SumErr += weight * sumErr / m_NumClasses;
    m_SumAbsErr += weight * sumAbsErr / m_NumClasses;
    m_SumSqrErr += weight * sumSqrErr / m_NumClasses;
    m_SumPriorAbsErr += weight * sumPriorAbsErr / m_NumClasses;
    m_SumPriorSqrErr += weight * sumPriorSqrErr / m_NumClasses;
  }

  /**
   * Adds a numeric (non-missing) training class value and weight to the buffer
   * of stored values. Also updates minimum and maximum target value.
   *
   * @param classValue the class value
   * @param weight the instance weight
   */
  protected void addNumericTrainClass(double classValue, double weight) {

    // Update minimum and maximum target value
    if (classValue > m_MaxTarget) {
      m_MaxTarget = classValue;
    }
    if (classValue < m_MinTarget) {
      m_MinTarget = classValue;
    }

    // Update buffer, doubling capacity when full.
    if (m_TrainClassVals == null) {
      m_TrainClassVals = new double[100];
      m_TrainClassWeights = new double[100];
    }
    if (m_NumTrainClassVals == m_TrainClassVals.length) {
      double[] temp = new double[m_TrainClassVals.length * 2];
      System.arraycopy(m_TrainClassVals, 0, temp, 0, m_TrainClassVals.length);
      m_TrainClassVals = temp;

      temp = new double[m_TrainClassWeights.length * 2];
      System.arraycopy(m_TrainClassWeights, 0, temp, 0,
        m_TrainClassWeights.length);
      m_TrainClassWeights = temp;
    }
    m_TrainClassVals[m_NumTrainClassVals] = classValue;
    m_TrainClassWeights[m_NumTrainClassVals] = weight;
    m_NumTrainClassVals++;
  }

  /**
   * Sets up the priors for numeric class attributes from the training class
   * values that have been seen so far.
   */
  protected void setNumericPriorsFromBuffer() {

    m_PriorEstimator = new UnivariateKernelEstimator();
    for (int i = 0; i < m_NumTrainClassVals; i++) {
      m_PriorEstimator.addValue(m_TrainClassVals[i], m_TrainClassWeights[i]);
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 9788 $");
  }
}
151,329
32.809205
176
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/EvaluationUtils.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * EvaluationUtils.java
 * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import java.util.Random;

import weka.classifiers.Classifier;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

/**
 * Contains utility functions for generating lists of predictions in
 * various manners.
 *
 * @author Len Trigg (len@reeltwo.com)
 * @version $Revision: 8034 $
 */
public class EvaluationUtils implements RevisionHandler {

  /** Seed used to randomize data in cross-validation */
  private int m_Seed = 1;

  /**
   * Sets the seed for randomization during cross-validation.
   *
   * @param seed the random seed
   */
  public void setSeed(int seed) {
    m_Seed = seed;
  }

  /**
   * Gets the seed for randomization during cross-validation.
   *
   * @return the random seed
   */
  public int getSeed() {
    return m_Seed;
  }

  /**
   * Generate a bunch of predictions ready for processing, by performing a
   * cross-validation on the supplied dataset.
   *
   * @param classifier the Classifier to evaluate
   * @param data the dataset
   * @param numFolds the number of folds in the cross-validation
   * @return the predictions gathered over all folds
   * @exception Exception if an error occurs
   */
  public FastVector getCVPredictions(Classifier classifier,
                                     Instances data,
                                     int numFolds) throws Exception {

    FastVector predictions = new FastVector();
    Instances runInstances = new Instances(data);
    Random random = new Random(m_Seed);
    runInstances.randomize(random);
    // Stratification only makes sense for nominal classes and >1 fold.
    if (runInstances.classAttribute().isNominal() && (numFolds > 1)) {
      runInstances.stratify(numFolds);
    }
    // (removed unused local "int inst = 0;" that was never read)
    for (int fold = 0; fold < numFolds; fold++) {
      Instances train = runInstances.trainCV(numFolds, fold, random);
      Instances test = runInstances.testCV(numFolds, fold);
      predictions.appendElements(getTrainTestPredictions(classifier, train,
                                                         test));
    }
    return predictions;
  }

  /**
   * Generate a bunch of predictions ready for processing, by performing a
   * evaluation on a test set after training on the given training set.
   *
   * @param classifier the Classifier to evaluate
   * @param train the training dataset
   * @param test the test dataset
   * @return the predictions on the test set
   * @exception Exception if an error occurs
   */
  public FastVector getTrainTestPredictions(Classifier classifier,
                                            Instances train,
                                            Instances test) throws Exception {

    classifier.buildClassifier(train);
    return getTestPredictions(classifier, test);
  }

  /**
   * Generate a bunch of predictions ready for processing, by performing a
   * evaluation on a test set assuming the classifier is already trained.
   * Instances with a missing class value are skipped.
   *
   * @param classifier the pre-trained Classifier to evaluate
   * @param test the test dataset
   * @return the predictions on the test set
   * @exception Exception if an error occurs
   */
  public FastVector getTestPredictions(Classifier classifier,
                                       Instances test) throws Exception {

    FastVector predictions = new FastVector();
    for (int i = 0; i < test.numInstances(); i++) {
      if (!test.instance(i).classIsMissing()) {
        predictions.addElement(getPrediction(classifier, test.instance(i)));
      }
    }
    return predictions;
  }

  /**
   * Generate a single prediction for a test instance given the pre-trained
   * classifier.
   *
   * @param classifier the pre-trained Classifier to evaluate
   * @param test the test instance
   * @return a NominalPrediction for nominal classes, otherwise a
   *         NumericPrediction
   * @exception Exception if an error occurs
   */
  public Prediction getPrediction(Classifier classifier,
                                  Instance test) throws Exception {

    double actual = test.classValue();
    double[] dist = classifier.distributionForInstance(test);
    if (test.classAttribute().isNominal()) {
      return new NominalPrediction(actual, dist, test.weight());
    } else {
      // For numeric classes the "distribution" holds the single predicted
      // value in slot 0.
      return new NumericPrediction(actual, dist[0], test.weight());
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
4,932
31.24183
77
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/InformationRetrievalEvaluationMetric.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * InformationRetrievalEvaluationMetric.java
 * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import weka.core.Instance;

/**
 * An interface for information retrieval evaluation metrics to implement.
 * Allows the command line interface to display these metrics or not based on
 * user-supplied options. These statistics will be displayed as new columns in
 * the table of information retrieval statistics. As such, a toSummaryString()
 * formatted representation is not required.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: 9320 $
 */
public interface InformationRetrievalEvaluationMetric {

  /**
   * Updates the statistics about a classifiers performance for the current
   * test instance. Implementers need only implement this method if it is not
   * possible to compute their statistics from what is stored in the base
   * Evaluation object.
   *
   * @param predictedDistribution the probabilities assigned to each class
   * @param instance the instance to be classified
   * @throws Exception if the class of the instance is not set
   */
  void updateStatsForClassifier(double[] predictedDistribution,
      Instance instance) throws Exception;

  /**
   * Get the value of the named statistic for the given class index.
   *
   * If the implementing class is extending AbstractEvaluationMetric then the
   * implementation of getStatistic(String statName) should just call this
   * method with a classIndex of 0.
   *
   * @param statName the name of the statistic to compute the value for
   * @param classIndex the class index for which to compute the statistic
   * @return the value of the named statistic for the given class index or
   *         Utils.missingValue() if the statistic can't be computed for some
   *         reason
   */
  double getStatistic(String statName, int classIndex);

  /**
   * Get the weighted (by class) average for this statistic.
   *
   * @param statName the name of the statistic to compute
   * @return the weighted (by class) average value of the statistic or
   *         Utils.missingValue() if this can't be computed (or isn't
   *         appropriate)
   */
  double getClassWeightedAverageStatistic(String statName);
}
2,973
38.131579
80
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/InformationTheoreticEvaluationMetric.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * InformationTheoreticEvaluationMetric.java
 * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import weka.classifiers.ConditionalDensityEstimator;
import weka.core.Instance;

/**
 * Primarily a marker interface for information theoretic evaluation metrics
 * to implement. Allows the command line interface to display these metrics or
 * not based on user-supplied options.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: 9320 $
 */
public interface InformationTheoreticEvaluationMetric {

  /**
   * Updates the statistics about a classifiers performance for the current
   * test instance. Gets called when the class is nominal. Implementers need
   * only implement this method if it is not possible to compute their
   * statistics from what is stored in the base Evaluation object.
   *
   * @param predictedDistribution the probabilities assigned to each class
   * @param instance the instance to be classified
   * @throws Exception if the class of the instance is not set
   */
  void updateStatsForClassifier(double[] predictedDistribution,
      Instance instance) throws Exception;

  /**
   * Updates the statistics about a predictors performance for the current
   * test instance. Gets called when the class is numeric. Implementers need
   * only implement this method if it is not possible to compute their
   * statistics from what is stored in the base Evaluation object.
   *
   * @param predictedValue the numeric value the classifier predicts
   * @param instance the instance to be classified
   * @throws Exception if the class of the instance is not set
   */
  void updateStatsForPredictor(double predictedValue, Instance instance)
      throws Exception;

  /**
   * Updates stats for conditional density estimator based on current test
   * instance. Gets called when the class is numeric and the classifier is a
   * ConditionalDensityEstimators. Implementers need only implement this
   * method if it is not possible to compute their statistics from what is
   * stored in the base Evaluation object.
   *
   * @param classifier the conditional density estimator
   * @param classMissing the instance for which density is to be computed,
   *          without a class value
   * @param classValue the class value of this instance
   * @throws Exception if density could not be computed successfully
   */
  void updateStatsForConditionalDensityEstimator(
      ConditionalDensityEstimator classifier, Instance classMissing,
      double classValue) throws Exception;

  /**
   * Return a formatted string (suitable for displaying in console or GUI
   * output) containing all the statistics that this metric computes.
   *
   * @return a formatted string containing all the computed statistics
   */
  String toSummaryString();
}
3,547
39.318182
80
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/IntervalBasedEvaluationMetric.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * IntervalBasedEvaluationMetric.java
 * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import weka.classifiers.IntervalEstimator;
import weka.core.Instance;

/**
 * Primarily a marker interface for interval-based evaluation metrics to
 * implement. Allows the command line interface to display these metrics or
 * not based on user-supplied options.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: 9320 $
 */
public interface IntervalBasedEvaluationMetric {

  /**
   * Updates stats for interval estimator based on current test instance.
   * Implementers need only implement this method if it is not possible to
   * compute their statistics from what is stored in the base Evaluation
   * object.
   *
   * @param classifier the interval estimator
   * @param classMissing the instance for which the intervals are computed,
   *          without a class value
   * @param classValue the class value of this instance
   * @throws Exception if intervals could not be computed successfully
   */
  void updateStatsForIntervalEstimator(IntervalEstimator classifier,
      Instance classMissing, double classValue) throws Exception;

  /**
   * Return a formatted string (suitable for displaying in console or GUI
   * output) containing all the statistics that this metric computes.
   *
   * @return a formatted string containing all the computed statistics
   */
  String toSummaryString();
}
2,173
35.847458
80
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/MarginCurve.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * MarginCurve.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.evaluation; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** * Generates points illustrating the prediction margin. The margin is defined * as the difference between the probability predicted for the actual class and * the highest probability predicted for the other classes. One hypothesis * as to the good performance of boosting algorithms is that they increaes the * margins on the training data and this gives better performance on test data. * * @author Len Trigg (len@reeltwo.com) * @version $Revision: 8034 $ */ public class MarginCurve implements RevisionHandler { /** * Calculates the cumulative margin distribution for the set of * predictions, returning the result as a set of Instances. 
The * structure of these Instances is as follows:<p> <ul> * <li> <b>Margin</b> contains the margin value (which should be plotted * as an x-coordinate) * <li> <b>Current</b> contains the count of instances with the current * margin (plot as y axis) * <li> <b>Cumulative</b> contains the count of instances with margin * less than or equal to the current margin (plot as y axis) * </ul> <p> * * @return datapoints as a set of instances, null if no predictions * have been made. */ public Instances getCurve(FastVector predictions) { if (predictions.size() == 0) { return null; } Instances insts = makeHeader(); double [] margins = getMargins(predictions); int [] sorted = Utils.sort(margins); int binMargin = 0; int totalMargin = 0; insts.add(makeInstance(-1, binMargin, totalMargin)); for (int i = 0; i < sorted.length; i++) { double current = margins[sorted[i]]; double weight = ((NominalPrediction)predictions.elementAt(sorted[i])) .weight(); totalMargin += weight; binMargin += weight; if (true) { insts.add(makeInstance(current, binMargin, totalMargin)); binMargin = 0; } } return insts; } /** * Pulls all the margin values out of a vector of NominalPredictions. * * @param predictions a FastVector containing NominalPredictions * @return an array of margin values. */ private double [] getMargins(FastVector predictions) { // sort by predicted probability of the desired class. double [] margins = new double [predictions.size()]; for (int i = 0; i < margins.length; i++) { NominalPrediction pred = (NominalPrediction)predictions.elementAt(i); margins[i] = pred.margin(); } return margins; } /** * Creates an Instances object with the attributes we will be calculating. * * @return the Instances structure. 
*/ private Instances makeHeader() { FastVector fv = new FastVector(); fv.addElement(new Attribute("Margin")); fv.addElement(new Attribute("Current")); fv.addElement(new Attribute("Cumulative")); return new Instances("MarginCurve", fv, 100); } /** * Creates an Instance object with the attributes calculated. * * @param margin the margin for this data point. * @param current the number of instances with this margin. * @param cumulative the number of instances with margin less than or equal * to this margin. * @return the Instance object. */ private Instance makeInstance(double margin, int current, int cumulative) { int count = 0; double [] vals = new double[3]; vals[count++] = margin; vals[count++] = current; vals[count++] = cumulative; return new DenseInstance(1.0, vals); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Tests the MarginCurve generation from the command line. * The classifier is currently hardcoded. Pipe in an arff file. * * @param args currently ignored */ public static void main(String [] args) { try { Utils.SMALL = 0; Instances inst = new Instances(new java.io.InputStreamReader(System.in)); inst.setClassIndex(inst.numAttributes() - 1); MarginCurve tc = new MarginCurve(); EvaluationUtils eu = new EvaluationUtils(); weka.classifiers.meta.LogitBoost classifier = new weka.classifiers.meta.LogitBoost(); classifier.setNumIterations(20); FastVector predictions = eu.getTrainTestPredictions(classifier, inst, inst); Instances result = tc.getCurve(predictions); System.out.println(result); } catch (Exception ex) { ex.printStackTrace(); } } }
5,567
31.372093
79
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/NominalPrediction.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * NominalPrediction.java
 * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import java.io.Serializable;

import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

/**
 * Encapsulates an evaluatable nominal prediction: the predicted probability
 * distribution plus the actual class value.
 *
 * @author Len Trigg (len@reeltwo.com)
 * @version $Revision: 8034 $
 */
public class NominalPrediction
  implements Prediction, Serializable, RevisionHandler {

  /**
   * Remove this if you change this class so that serialization would be
   * affected.
   */
  static final long serialVersionUID = -8871333992740492788L;

  /** The predicted probabilities */
  private double[] m_Distribution;

  /** The actual class value */
  private double m_Actual = MISSING_VALUE;

  /** The predicted class value */
  private double m_Predicted = MISSING_VALUE;

  /** The weight assigned to this prediction */
  private double m_Weight = 1;

  /**
   * Creates the NominalPrediction object with a default weight of 1.0.
   *
   * @param actual the actual value, or MISSING_VALUE.
   * @param distribution the predicted probability distribution. Use
   *          NominalPrediction.makeDistribution() if you only know the
   *          predicted value.
   */
  public NominalPrediction(double actual, double[] distribution) {
    this(actual, distribution, 1);
  }

  /**
   * Creates the NominalPrediction object.
   *
   * @param actual the actual value, or MISSING_VALUE.
   * @param distribution the predicted probability distribution. Use
   *          NominalPrediction.makeDistribution() if you only know the
   *          predicted value.
   * @param weight the weight assigned to the prediction.
   */
  public NominalPrediction(double actual, double[] distribution,
                           double weight) {

    if (distribution == null) {
      throw new NullPointerException("Null distribution in NominalPrediction.");
    }
    m_Actual = actual;
    // Defensive copy so later changes to the caller's array have no effect.
    m_Distribution = distribution.clone();
    m_Weight = weight;
    updatePredicted();
  }

  /**
   * Gets the predicted probabilities.
   *
   * @return the predicted probabilities
   */
  public double[] distribution() {
    return m_Distribution;
  }

  /**
   * Gets the actual class value.
   *
   * @return the actual class value, or MISSING_VALUE if no prediction was
   *         made.
   */
  public double actual() {
    return m_Actual;
  }

  /**
   * Gets the predicted class value.
   *
   * @return the predicted class value, or MISSING_VALUE if no prediction was
   *         made.
   */
  public double predicted() {
    return m_Predicted;
  }

  /**
   * Gets the weight assigned to this prediction. This is typically the
   * weight of the test instance the prediction was made for.
   *
   * @return the weight assigned to this prediction.
   */
  public double weight() {
    return m_Weight;
  }

  /**
   * Calculates the prediction margin: the probability predicted for the
   * actual class minus the highest probability predicted for any other
   * class.
   *
   * @return the margin for this prediction, or MISSING_VALUE if either the
   *         actual or predicted value is missing.
   */
  public double margin() {

    if ((m_Actual == MISSING_VALUE) || (m_Predicted == MISSING_VALUE)) {
      return MISSING_VALUE;
    }
    double actualProb = m_Distribution[(int) m_Actual];
    double runnerUp = 0;
    for (int idx = 0; idx < m_Distribution.length; idx++) {
      if (idx == m_Actual) {
        continue; // skip the actual class itself
      }
      if (m_Distribution[idx] > runnerUp) {
        runnerUp = m_Distribution[idx];
      }
    }
    return actualProb - runnerUp;
  }

  /**
   * Convert a single prediction into a probability distribution with all
   * zero probabilities except the predicted value which has probability 1.0.
   * If no prediction was made, all probabilities are zero.
   *
   * @param predictedClass the index of the predicted class, or MISSING_VALUE
   *          if no prediction was made.
   * @param numClasses the number of possible classes for this nominal
   *          prediction.
   * @return the probability distribution.
   */
  public static double[] makeDistribution(double predictedClass,
                                          int numClasses) {

    double[] dist = new double[numClasses];
    if (predictedClass != MISSING_VALUE) {
      dist[(int) predictedClass] = 1.0;
    }
    return dist;
  }

  /**
   * Creates a uniform probability distribution -- where each of the possible
   * classes is assigned equal probability.
   *
   * @param numClasses the number of possible classes for this nominal
   *          prediction.
   * @return the probability distribution.
   */
  public static double[] makeUniformDistribution(int numClasses) {

    double[] dist = new double[numClasses];
    for (int idx = 0; idx < numClasses; idx++) {
      dist[idx] = 1.0 / numClasses;
    }
    return dist;
  }

  /**
   * Determines the predicted class (doesn't detect multiple
   * classifications). If no prediction was made (i.e. all zero
   * probabilities in the distribution), m_Predicted is set to
   * MISSING_VALUE.
   */
  private void updatePredicted() {

    int best = -1;
    double bestProb = 0.0;
    for (int idx = 0; idx < m_Distribution.length; idx++) {
      if (m_Distribution[idx] > bestProb) {
        best = idx;
        bestProb = m_Distribution[idx];
      }
    }
    m_Predicted = (best != -1) ? best : MISSING_VALUE;
  }

  /**
   * Gets a human readable representation of this prediction.
   *
   * @return a human readable representation of this prediction.
   */
  public String toString() {

    StringBuffer buf = new StringBuffer("NOM: ");
    buf.append(actual()).append(" ").append(predicted());
    buf.append(' ').append(weight());
    double[] dist = distribution();
    for (int idx = 0; idx < dist.length; idx++) {
      buf.append(' ').append(dist[idx]);
    }
    return buf.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
6,901
26.830645
80
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/NumericPrediction.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * NumericPrediction.java
 * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import java.io.Serializable;

import weka.classifiers.IntervalEstimator;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

/**
 * Encapsulates an evaluatable numeric prediction: the predicted class value
 * plus the actual class value.
 *
 * @author Len Trigg (len@reeltwo.com)
 * @version $Revision: 8034 $
 */
public class NumericPrediction
  implements Prediction, Serializable, RevisionHandler {

  /** for serialization. */
  private static final long serialVersionUID = -4880216423674233887L;

  /** The actual class value. */
  private double m_Actual = MISSING_VALUE;

  /** The predicted class value. */
  private double m_Predicted = MISSING_VALUE;

  /** The weight assigned to this prediction. */
  private double m_Weight = 1;

  /** the prediction intervals. */
  private double[][] m_PredictionIntervals;

  /**
   * Creates the NumericPrediction object with a default weight of 1.0.
   *
   * @param actual the actual value, or MISSING_VALUE.
   * @param predicted the predicted value, or MISSING_VALUE.
   */
  public NumericPrediction(double actual, double predicted) {
    this(actual, predicted, 1);
  }

  /**
   * Creates the NumericPrediction object.
   *
   * @param actual the actual value, or MISSING_VALUE.
   * @param predicted the predicted value, or MISSING_VALUE.
   * @param weight the weight assigned to the prediction.
   */
  public NumericPrediction(double actual, double predicted, double weight) {
    this(actual, predicted, weight, new double[0][]);
  }

  /**
   * Creates the NumericPrediction object.
   *
   * @param actual the actual value, or MISSING_VALUE.
   * @param predicted the predicted value, or MISSING_VALUE.
   * @param weight the weight assigned to the prediction.
   * @param predInt the prediction intervals from classifiers implementing
   *          the <code>IntervalEstimator</code> interface.
   * @see IntervalEstimator
   */
  public NumericPrediction(double actual, double predicted, double weight,
                           double[][] predInt) {

    m_Actual = actual;
    m_Predicted = predicted;
    m_Weight = weight;
    setPredictionIntervals(predInt);
  }

  /**
   * Gets the actual class value.
   *
   * @return the actual class value, or MISSING_VALUE if no prediction was
   *         made.
   */
  public double actual() {
    return m_Actual;
  }

  /**
   * Gets the predicted class value.
   *
   * @return the predicted class value, or MISSING_VALUE if no prediction was
   *         made.
   */
  public double predicted() {
    return m_Predicted;
  }

  /**
   * Gets the weight assigned to this prediction. This is typically the
   * weight of the test instance the prediction was made for.
   *
   * @return the weight assigned to this prediction.
   */
  public double weight() {
    return m_Weight;
  }

  /**
   * Calculates the prediction error: the predicted value minus the actual
   * value.
   *
   * @return the error for this prediction, or MISSING_VALUE if either the
   *         actual or predicted value is missing.
   */
  public double error() {

    boolean missing =
      (m_Actual == MISSING_VALUE) || (m_Predicted == MISSING_VALUE);
    return missing ? MISSING_VALUE : m_Predicted - m_Actual;
  }

  /**
   * Sets the prediction intervals for this prediction.
   *
   * @param predInt the prediction intervals
   */
  public void setPredictionIntervals(double[][] predInt) {
    // shallow copy of the outer array, matching the original behaviour
    m_PredictionIntervals = predInt.clone();
  }

  /**
   * Returns the predictions intervals. Only classifiers implementing the
   * <code>IntervalEstimator</code> interface supply these.
   *
   * @return the prediction intervals.
   * @see IntervalEstimator
   */
  public double[][] predictionIntervals() {
    return m_PredictionIntervals;
  }

  /**
   * Gets a human readable representation of this prediction.
   *
   * @return a human readable representation of this prediction.
   */
  public String toString() {

    StringBuffer buf = new StringBuffer("NUM: ");
    buf.append(actual()).append(' ').append(predicted());
    buf.append(' ').append(weight());
    return buf.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
5,084
27.25
96
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/PluginManager.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * PluginManager.java
 * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.evaluation;

import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.TreeMap;

/**
 * Class that manages a global map of plugins. The knowledge flow uses this to
 * manage plugins other than step components and perspectives. Is general
 * purpose, so can be used by other Weka components. Provides static methods
 * for registering and instantiating plugins.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: 9312 $
 */
public class PluginManager {

  /**
   * Global map that is keyed by plugin base class/interface type. The inner
   * Map then stores individual plugin instances of the interface type, keyed
   * by plugin name/short title with values the actual fully qualified class
   * name
   */
  protected static Map<String, Map<String, String>> PLUGINS =
    new HashMap<String, Map<String, String>>();

  /**
   * Set of concrete fully qualified class names or abstract/interface base
   * types to "disable". Entries in this list wont ever be returned by any of
   * the getPlugin() methods. Registering an abstract/interface base name
   * will disable all concrete implementations of that type
   */
  protected static Set<String> DISABLED = new HashSet<String>();

  /**
   * Add the supplied list of fully qualified class names to the disabled
   * list
   *
   * @param classnames a list of class names to add
   */
  public static synchronized void addToDisabledList(List<String> classnames) {
    for (String s : classnames) {
      addToDisabledList(s);
    }
  }

  /**
   * Add the supplied fully qualified class name to the list of disabled
   * plugins
   *
   * @param classname the fully qualified name of a class to add
   */
  public static synchronized void addToDisabledList(String classname) {
    DISABLED.add(classname);
  }

  /**
   * Remove the supplied list of fully qualified class names to the disabled
   * list
   *
   * @param classnames a list of class names to remove
   */
  public static synchronized void removeFromDisabledList(
    List<String> classnames) {
    for (String s : classnames) {
      removeFromDisabledList(s);
    }
  }

  /**
   * Remove the supplied fully qualified class name from the list of disabled
   * plugins
   *
   * @param classname the fully qualified name of a class to remove
   */
  public static synchronized void removeFromDisabledList(String classname) {
    DISABLED.remove(classname);
  }

  /**
   * Returns true if the supplied fully qualified class name is in the
   * disabled list
   *
   * @param classname the name of the class to check
   * @return true if the supplied class name is in the disabled list
   */
  public static boolean isInDisabledList(String classname) {
    return DISABLED.contains(classname);
  }

  /**
   * Add all key value pairs from the supplied property file
   *
   * @param propsFile the properties file to add
   * @throws Exception if a problem occurs
   */
  public static synchronized void addFromProperties(File propsFile)
    throws Exception {
    BufferedInputStream bi = new BufferedInputStream(new FileInputStream(
      propsFile));

    addFromProperties(bi);
  }

  /**
   * Add all key value pairs from the supplied properties stream
   *
   * @param propsStream an input stream to a properties file
   * @throws Exception if a problem occurs
   */
  public static synchronized void addFromProperties(InputStream propsStream)
    throws Exception {
    Properties expProps = new Properties();

    expProps.load(propsStream);
    propsStream.close();
    propsStream = null;

    addFromProperties(expProps);
  }

  /**
   * Add all key value pairs from the supplied properties object. Each value
   * is treated as a comma-separated list of implementation class names for
   * the base type given by the key.
   *
   * @param props a Properties object
   * @throws Exception if a problem occurs
   */
  public static synchronized void addFromProperties(Properties props)
    throws Exception {
    Set keys = props.keySet();
    Iterator keysI = keys.iterator();
    while (keysI.hasNext()) {
      String baseType = (String) keysI.next();
      String implementations = props.getProperty(baseType);
      if (implementations != null && implementations.length() > 0) {
        String[] parts = implementations.split(",");
        for (String impl : parts) {
          PluginManager.addPlugin(baseType, impl.trim(), impl.trim());
        }
      }
    }
  }

  /**
   * Get a set of names of plugins that implement the supplied interface.
   * Disabled plugins are filtered out of the result.
   *
   * @param interfaceName the fully qualified name of the interface to list
   *          plugins for
   *
   * @return a set of names of plugins
   */
  public static Set<String> getPluginNamesOfType(String interfaceName) {
    if (PLUGINS.get(interfaceName) != null) {
      Set<String> match = PLUGINS.get(interfaceName).keySet();
      Set<String> result = new HashSet<String>();
      for (String s : match) {
        String impl = PLUGINS.get(interfaceName).get(s);
        if (!DISABLED.contains(impl)) {
          result.add(s);
        }
      }
      return result;
    }

    return null;
  }

  /**
   * Add a plugin.
* * @param interfaceName the fully qualified interface name that the plugin * implements * * @param name the name/short description of the plugin * @param concreteType the fully qualified class name of the actual concrete * implementation */ public static void addPlugin(String interfaceName, String name, String concreteType) { if (PLUGINS.get(interfaceName) == null) { Map<String, String> pluginsOfInterfaceType = new TreeMap<String, String>(); pluginsOfInterfaceType.put(name, concreteType); PLUGINS.put(interfaceName, pluginsOfInterfaceType); } else { PLUGINS.get(interfaceName).put(name, concreteType); } } /** * Remove plugins of a specific type. * * @param interfaceName the fully qualified interface name that the plugins to * be remove implement * @param names a list of named plugins to remove */ public static void removePlugins(String interfaceName, List<String> names) { for (String name : names) { removePlugin(interfaceName, name); } } /** * Remove a plugin. * * @param interfaceName the fully qualified interface name that the plugin * implements * * @param name the name/short description of the plugin */ public static void removePlugin(String interfaceName, String name) { if (PLUGINS.get(interfaceName) != null) { PLUGINS.get(interfaceName).remove(name); } } /** * Get an instance of a concrete implementation of a plugin type * * @param interfaceType the fully qualified interface name of the plugin type * @param name the name/short description of the plugin to get * @return the concrete plugin or null if the plugin is disabled * @throws Exception if the plugin can't be found or instantiated */ public static Object getPluginInstance(String interfaceType, String name) throws Exception { if (PLUGINS.get(interfaceType) == null || PLUGINS.get(interfaceType).size() == 0) { throw new Exception("No plugins of interface type: " + interfaceType + " available!!"); } Map<String, String> pluginsOfInterfaceType = PLUGINS.get(interfaceType); if (pluginsOfInterfaceType.get(name) == 
null) { throw new Exception("Can't find named plugin '" + name + "' of type '" + interfaceType + "'!"); } String concreteImpl = pluginsOfInterfaceType.get(name); Object plugin = null; if (!DISABLED.contains(concreteImpl)) { plugin = Class.forName(concreteImpl).newInstance(); } return plugin; } }
8,736
31.121324
105
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/Prediction.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Prediction.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.evaluation; /** * Encapsulates a single evaluatable prediction: the predicted value plus the * actual class value. * * @author Len Trigg (len@reeltwo.com) * @version $Revision: 8034 $ */ public interface Prediction { /** * Constant representing a missing value. This should have the same value * as weka.core.Instance.MISSING_VALUE */ double MISSING_VALUE = weka.core.Utils.missingValue(); /** * Gets the weight assigned to this prediction. This is typically the weight * of the test instance the prediction was made for. * * @return the weight assigned to this prediction. */ double weight(); /** * Gets the actual class value. * * @return the actual class value, or MISSING_VALUE if no * prediction was made. */ double actual(); /** * Gets the predicted class value. * * @return the predicted class value, or MISSING_VALUE if no * prediction was made. */ double predicted(); }
1,782
26.430769
78
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/StandardEvaluationMetric.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * StandardEvaluationMetric.java * Copyright (C) 2011-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.evaluation; import weka.core.Instance; /** * Primarily a marker interface for a "standard" evaluation metric - i.e. one * that would be part of the normal output in Weka without having to turn * specific display options. * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: 9320 $ */ public interface StandardEvaluationMetric { /** * Return a formatted string (suitable for displaying in console or GUI * output) containing all the statistics that this metric computes. * * @return a formatted string containing all the computed statistics */ String toSummaryString(); /** * Updates the statistics about a classifiers performance for the current test * instance. Gets called when the class is nominal. Implementers need only * implement this method if it is not possible to compute their statistics * from what is stored in the base Evaluation object. 
* * @param predictedDistribution the probabilities assigned to each class * @param instance the instance to be classified * @throws Exception if the class of the instance is not set */ void updateStatsForClassifier(double[] predictedDistribution, Instance instance) throws Exception; /** * Updates the statistics about a predictors performance for the current test * instance. Gets called when the class is numeric. Implementers need only * implement this method if it is not possible to compute their statistics * from what is stored in the base Evaluation object. * * @param predictedValue the numeric value the classifier predicts * @param instance the instance to be classified * @throws Exception if the class of the instance is not set */ void updateStatsForPredictor(double predictedValue, Instance instance) throws Exception; }
2,640
36.728571
80
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/ThresholdCurve.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ThresholdCurve.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.evaluation; import weka.classifiers.Classifier; import weka.core.Attribute; import weka.core.DenseInstance; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** * Generates points illustrating prediction tradeoffs that can be obtained * by varying the threshold value between classes. For example, the typical * threshold value of 0.5 means the predicted probability of "positive" must be * higher than 0.5 for the instance to be predicted as "positive". The * resulting dataset can be used to visualize precision/recall tradeoff, or * for ROC curve analysis (true positive rate vs false positive rate). * Weka just varies the threshold on the class probability estimates in each * case. The Mann Whitney statistic is used to calculate the AUC. 
* * @author Len Trigg (len@reeltwo.com) * @version $Revision: 8034 $ */ public class ThresholdCurve implements RevisionHandler { /** The name of the relation used in threshold curve datasets */ public static final String RELATION_NAME = "ThresholdCurve"; /** attribute name: True Positives */ public static final String TRUE_POS_NAME = "True Positives"; /** attribute name: False Negatives */ public static final String FALSE_NEG_NAME = "False Negatives"; /** attribute name: False Positives */ public static final String FALSE_POS_NAME = "False Positives"; /** attribute name: True Negatives */ public static final String TRUE_NEG_NAME = "True Negatives"; /** attribute name: False Positive Rate" */ public static final String FP_RATE_NAME = "False Positive Rate"; /** attribute name: True Positive Rate */ public static final String TP_RATE_NAME = "True Positive Rate"; /** attribute name: Precision */ public static final String PRECISION_NAME = "Precision"; /** attribute name: Recall */ public static final String RECALL_NAME = "Recall"; /** attribute name: Fallout */ public static final String FALLOUT_NAME = "Fallout"; /** attribute name: FMeasure */ public static final String FMEASURE_NAME = "FMeasure"; /** attribute name: Sample Size */ public static final String SAMPLE_SIZE_NAME = "Sample Size"; /** attribute name: Lift */ public static final String LIFT_NAME = "Lift"; /** attribute name: Threshold */ public static final String THRESHOLD_NAME = "Threshold"; /** * Calculates the performance stats for the default class and return * results as a set of Instances. 
The * structure of these Instances is as follows:<p> <ul> * <li> <b>True Positives </b> * <li> <b>False Negatives</b> * <li> <b>False Positives</b> * <li> <b>True Negatives</b> * <li> <b>False Positive Rate</b> * <li> <b>True Positive Rate</b> * <li> <b>Precision</b> * <li> <b>Recall</b> * <li> <b>Fallout</b> * <li> <b>Threshold</b> contains the probability threshold that gives * rise to the previous performance values. * </ul> <p> * For the definitions of these measures, see TwoClassStats <p> * * @see TwoClassStats * @param predictions the predictions to base the curve on * @return datapoints as a set of instances, null if no predictions * have been made. */ public Instances getCurve(FastVector predictions) { if (predictions.size() == 0) { return null; } return getCurve(predictions, ((NominalPrediction)predictions.elementAt(0)) .distribution().length - 1); } /** * Calculates the performance stats for the desired class and return * results as a set of Instances. * * @param predictions the predictions to base the curve on * @param classIndex index of the class of interest. * @return datapoints as a set of instances. 
*/ public Instances getCurve(FastVector predictions, int classIndex) { if ((predictions.size() == 0) || (((NominalPrediction)predictions.elementAt(0)) .distribution().length <= classIndex)) { return null; } double totPos = 0, totNeg = 0; double [] probs = getProbabilities(predictions, classIndex); // Get distribution of positive/negatives for (int i = 0; i < probs.length; i++) { NominalPrediction pred = (NominalPrediction)predictions.elementAt(i); if (pred.actual() == Prediction.MISSING_VALUE) { System.err.println(getClass().getName() + " Skipping prediction with missing class value"); continue; } if (pred.weight() < 0) { System.err.println(getClass().getName() + " Skipping prediction with negative weight"); continue; } if (pred.actual() == classIndex) { totPos += pred.weight(); } else { totNeg += pred.weight(); } } Instances insts = makeHeader(); int [] sorted = Utils.sort(probs); TwoClassStats tc = new TwoClassStats(totPos, totNeg, 0, 0); double threshold = 0; double cumulativePos = 0; double cumulativeNeg = 0; for (int i = 0; i < sorted.length; i++) { if ((i == 0) || (probs[sorted[i]] > threshold)) { tc.setTruePositive(tc.getTruePositive() - cumulativePos); tc.setFalseNegative(tc.getFalseNegative() + cumulativePos); tc.setFalsePositive(tc.getFalsePositive() - cumulativeNeg); tc.setTrueNegative(tc.getTrueNegative() + cumulativeNeg); threshold = probs[sorted[i]]; insts.add(makeInstance(tc, threshold)); cumulativePos = 0; cumulativeNeg = 0; if (i == sorted.length - 1) { break; } } NominalPrediction pred = (NominalPrediction)predictions.elementAt(sorted[i]); if (pred.actual() == Prediction.MISSING_VALUE) { System.err.println(getClass().getName() + " Skipping prediction with missing class value"); continue; } if (pred.weight() < 0) { System.err.println(getClass().getName() + " Skipping prediction with negative weight"); continue; } if (pred.actual() == classIndex) { cumulativePos += pred.weight(); } else { cumulativeNeg += pred.weight(); } /* System.out.println(tc + 
" " + probs[sorted[i]] + " " + (pred.actual() == classIndex)); */ /*if ((i != (sorted.length - 1)) && ((i == 0) || (probs[sorted[i]] != probs[sorted[i - 1]]))) { insts.add(makeInstance(tc, probs[sorted[i]])); }*/ } // make sure a zero point gets into the curve if (tc.getFalseNegative() != totPos || tc.getTrueNegative() != totNeg) { tc = new TwoClassStats(0, 0, totNeg, totPos); threshold = probs[sorted[sorted.length - 1]] + 10e-6; insts.add(makeInstance(tc, threshold)); } return insts; } /** * Calculates the n point precision result, which is the precision averaged * over n evenly spaced (w.r.t recall) samples of the curve. * * @param tcurve a previously extracted threshold curve Instances. * @param n the number of points to average over. * @return the n-point precision. */ public static double getNPointPrecision(Instances tcurve, int n) { if (!RELATION_NAME.equals(tcurve.relationName()) || (tcurve.numInstances() == 0)) { return Double.NaN; } int recallInd = tcurve.attribute(RECALL_NAME).index(); int precisInd = tcurve.attribute(PRECISION_NAME).index(); double [] recallVals = tcurve.attributeToDoubleArray(recallInd); int [] sorted = Utils.sort(recallVals); double isize = 1.0 / (n - 1); double psum = 0; for (int i = 0; i < n; i++) { int pos = binarySearch(sorted, recallVals, i * isize); double recall = recallVals[sorted[pos]]; double precis = tcurve.instance(sorted[pos]).value(precisInd); /* System.err.println("Point " + (i + 1) + ": i=" + pos + " r=" + (i * isize) + " p'=" + precis + " r'=" + recall); */ // interpolate figures for non-endpoints while ((pos != 0) && (pos < sorted.length - 1)) { pos++; double recall2 = recallVals[sorted[pos]]; if (recall2 != recall) { double precis2 = tcurve.instance(sorted[pos]).value(precisInd); double slope = (precis2 - precis) / (recall2 - recall); double offset = precis - recall * slope; precis = isize * i * slope + offset; /* System.err.println("Point2 " + (i + 1) + ": i=" + pos + " r=" + (i * isize) + " p'=" + precis2 + " r'=" 
+ recall2 + " p''=" + precis); */ break; } } psum += precis; } return psum / n; } /** * Calculates the area under the precision-recall curve (AUPRC). * * @param tcurve a previously extracted threshold curve Instances. * @return the PRC area, or Double.NaN if you don't pass in * a ThresholdCurve generated Instances. */ public static double getPRCArea(Instances tcurve) { final int n = tcurve.numInstances(); if (!RELATION_NAME.equals(tcurve.relationName()) || (n == 0)) { return Double.NaN; } final int pInd = tcurve.attribute(PRECISION_NAME).index(); final int rInd = tcurve.attribute(RECALL_NAME).index(); final double [] pVals = tcurve.attributeToDoubleArray(pInd); final double [] rVals = tcurve.attributeToDoubleArray(rInd); double area = 0; double xlast = rVals[n - 1]; // start from the first real p/r pair (not the artificial zero point) for (int i = n - 2; i >= 0; i--) { double recallDelta = rVals[i] - xlast; area += (pVals[i] * recallDelta); xlast = rVals[i]; } if (area == 0) { return Utils.missingValue(); } return area; } /** * Calculates the area under the ROC curve as the Wilcoxon-Mann-Whitney statistic. * * @param tcurve a previously extracted threshold curve Instances. * @return the ROC area, or Double.NaN if you don't pass in * a ThresholdCurve generated Instances. 
*/ public static double getROCArea(Instances tcurve) { final int n = tcurve.numInstances(); if (!RELATION_NAME.equals(tcurve.relationName()) || (n == 0)) { return Double.NaN; } final int tpInd = tcurve.attribute(TRUE_POS_NAME).index(); final int fpInd = tcurve.attribute(FALSE_POS_NAME).index(); final double [] tpVals = tcurve.attributeToDoubleArray(tpInd); final double [] fpVals = tcurve.attributeToDoubleArray(fpInd); double area = 0.0, cumNeg = 0.0; final double totalPos = tpVals[0]; final double totalNeg = fpVals[0]; for (int i = 0; i < n; i++) { double cip, cin; if (i < n - 1) { cip = tpVals[i] - tpVals[i + 1]; cin = fpVals[i] - fpVals[i + 1]; } else { cip = tpVals[n - 1]; cin = fpVals[n - 1]; } area += cip * (cumNeg + (0.5 * cin)); cumNeg += cin; } area /= (totalNeg * totalPos); return area; } /** * Gets the index of the instance with the closest threshold value to the * desired target * * @param tcurve a set of instances that have been generated by this class * @param threshold the target threshold * @return the index of the instance that has threshold closest to * the target, or -1 if this could not be found (i.e. 
no data, or * bad threshold target) */ public static int getThresholdInstance(Instances tcurve, double threshold) { if (!RELATION_NAME.equals(tcurve.relationName()) || (tcurve.numInstances() == 0) || (threshold < 0) || (threshold > 1.0)) { return -1; } if (tcurve.numInstances() == 1) { return 0; } double [] tvals = tcurve.attributeToDoubleArray(tcurve.numAttributes() - 1); int [] sorted = Utils.sort(tvals); return binarySearch(sorted, tvals, threshold); } /** * performs a binary search * * @param index the indices * @param vals the values * @param target the target to look for * @return the index of the target */ private static int binarySearch(int [] index, double [] vals, double target) { int lo = 0, hi = index.length - 1; while (hi - lo > 1) { int mid = lo + (hi - lo) / 2; double midval = vals[index[mid]]; if (target > midval) { lo = mid; } else if (target < midval) { hi = mid; } else { while ((mid > 0) && (vals[index[mid - 1]] == target)) { mid --; } return mid; } } return lo; } /** * * @param predictions the predictions to use * @param classIndex the class index * @return the probabilities */ private double [] getProbabilities(FastVector predictions, int classIndex) { // sort by predicted probability of the desired class. 
double [] probs = new double [predictions.size()]; for (int i = 0; i < probs.length; i++) { NominalPrediction pred = (NominalPrediction)predictions.elementAt(i); probs[i] = pred.distribution()[classIndex]; } return probs; } /** * generates the header * * @return the header */ private Instances makeHeader() { FastVector fv = new FastVector(); fv.addElement(new Attribute(TRUE_POS_NAME)); fv.addElement(new Attribute(FALSE_NEG_NAME)); fv.addElement(new Attribute(FALSE_POS_NAME)); fv.addElement(new Attribute(TRUE_NEG_NAME)); fv.addElement(new Attribute(FP_RATE_NAME)); fv.addElement(new Attribute(TP_RATE_NAME)); fv.addElement(new Attribute(PRECISION_NAME)); fv.addElement(new Attribute(RECALL_NAME)); fv.addElement(new Attribute(FALLOUT_NAME)); fv.addElement(new Attribute(FMEASURE_NAME)); fv.addElement(new Attribute(SAMPLE_SIZE_NAME)); fv.addElement(new Attribute(LIFT_NAME)); fv.addElement(new Attribute(THRESHOLD_NAME)); return new Instances(RELATION_NAME, fv, 100); } /** * generates an instance out of the given data * * @param tc the statistics * @param prob the probability * @return the generated instance */ private Instance makeInstance(TwoClassStats tc, double prob) { int count = 0; double [] vals = new double[13]; vals[count++] = tc.getTruePositive(); vals[count++] = tc.getFalseNegative(); vals[count++] = tc.getFalsePositive(); vals[count++] = tc.getTrueNegative(); vals[count++] = tc.getFalsePositiveRate(); vals[count++] = tc.getTruePositiveRate(); vals[count++] = tc.getPrecision(); vals[count++] = tc.getRecall(); vals[count++] = tc.getFallout(); vals[count++] = tc.getFMeasure(); double ss = (tc.getTruePositive() + tc.getFalsePositive()) / (tc.getTruePositive() + tc.getFalsePositive() + tc.getTrueNegative() + tc.getFalseNegative()); vals[count++] = ss; double expectedByChance = (ss * (tc.getTruePositive() + tc.getFalseNegative())); if (expectedByChance < 1) { vals[count++] = Utils.missingValue(); } else { vals[count++] = tc.getTruePositive() / expectedByChance; } 
vals[count++] = prob; return new DenseInstance(1.0, vals); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Tests the ThresholdCurve generation from the command line. * The classifier is currently hardcoded. Pipe in an arff file. * * @param args currently ignored */ public static void main(String [] args) { try { Instances inst = new Instances(new java.io.InputStreamReader(System.in)); if (false) { System.out.println(ThresholdCurve.getNPointPrecision(inst, 11)); } else { inst.setClassIndex(inst.numAttributes() - 1); ThresholdCurve tc = new ThresholdCurve(); EvaluationUtils eu = new EvaluationUtils(); Classifier classifier = new weka.classifiers.functions.Logistic(); FastVector predictions = new FastVector(); for (int i = 0; i < 2; i++) { // Do two runs. eu.setSeed(i); predictions.appendElements(eu.getCVPredictions(classifier, inst, 10)); //System.out.println("\n\n\n"); } Instances result = tc.getCurve(predictions); System.out.println(result); } } catch (Exception ex) { ex.printStackTrace(); } } }
17,370
32.79572
102
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/TwoClassStats.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * TwoClassStats.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.evaluation; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * Encapsulates performance functions for two-class problems. * * @author Len Trigg (len@reeltwo.com) * @version $Revision: 8034 $ */ public class TwoClassStats implements RevisionHandler { /** The names used when converting this object to a confusion matrix */ private static final String [] CATEGORY_NAMES = {"negative", "positive"}; /** Pos predicted as pos */ private double m_TruePos; /** Neg predicted as pos */ private double m_FalsePos; /** Neg predicted as neg */ private double m_TrueNeg; /** Pos predicted as neg */ private double m_FalseNeg; /** * Creates the TwoClassStats with the given initial performance values. 
* * @param tp the number of correctly classified positives * @param fp the number of incorrectly classified negatives * @param tn the number of correctly classified negatives * @param fn the number of incorrectly classified positives */ public TwoClassStats(double tp, double fp, double tn, double fn) { setTruePositive(tp); setFalsePositive(fp); setTrueNegative(tn); setFalseNegative(fn); } /** Sets the number of positive instances predicted as positive */ public void setTruePositive(double tp) { m_TruePos = tp; } /** Sets the number of negative instances predicted as positive */ public void setFalsePositive(double fp) { m_FalsePos = fp; } /** Sets the number of negative instances predicted as negative */ public void setTrueNegative(double tn) { m_TrueNeg = tn; } /** Sets the number of positive instances predicted as negative */ public void setFalseNegative(double fn) { m_FalseNeg = fn; } /** Gets the number of positive instances predicted as positive */ public double getTruePositive() { return m_TruePos; } /** Gets the number of negative instances predicted as positive */ public double getFalsePositive() { return m_FalsePos; } /** Gets the number of negative instances predicted as negative */ public double getTrueNegative() { return m_TrueNeg; } /** Gets the number of positive instances predicted as negative */ public double getFalseNegative() { return m_FalseNeg; } /** * Calculate the true positive rate. * This is defined as<p> * <pre> * correctly classified positives * ------------------------------ * total positives * </pre> * * @return the true positive rate */ public double getTruePositiveRate() { if (0 == (m_TruePos + m_FalseNeg)) { return 0; } else { return m_TruePos / (m_TruePos + m_FalseNeg); } } /** * Calculate the false positive rate. 
* This is defined as<p> * <pre> * incorrectly classified negatives * -------------------------------- * total negatives * </pre> * * @return the false positive rate */ public double getFalsePositiveRate() { if (0 == (m_FalsePos + m_TrueNeg)) { return 0; } else { return m_FalsePos / (m_FalsePos + m_TrueNeg); } } /** * Calculate the precision. * This is defined as<p> * <pre> * correctly classified positives * ------------------------------ * total predicted as positive * </pre> * * @return the precision */ public double getPrecision() { if (0 == (m_TruePos + m_FalsePos)) { return 0; } else { return m_TruePos / (m_TruePos + m_FalsePos); } } /** * Calculate the recall. * This is defined as<p> * <pre> * correctly classified positives * ------------------------------ * total positives * </pre><p> * (Which is also the same as the truePositiveRate.) * * @return the recall */ public double getRecall() { return getTruePositiveRate(); } /** * Calculate the F-Measure. * This is defined as<p> * <pre> * 2 * recall * precision * ---------------------- * recall + precision * </pre> * * @return the F-Measure */ public double getFMeasure() { double precision = getPrecision(); double recall = getRecall(); if ((precision + recall) == 0) { return 0; } return 2 * precision * recall / (precision + recall); } /** * Calculate the fallout. * This is defined as<p> * <pre> * incorrectly classified negatives * -------------------------------- * total predicted as positive * </pre> * * @return the fallout */ public double getFallout() { if (0 == (m_TruePos + m_FalsePos)) { return 0; } else { return m_FalsePos / (m_TruePos + m_FalsePos); } } /** * Generates a <code>ConfusionMatrix</code> representing the current * two-class statistics, using class names "negative" and "positive". * * @return a <code>ConfusionMatrix</code>. 
*/ public ConfusionMatrix getConfusionMatrix() { ConfusionMatrix cm = new ConfusionMatrix(CATEGORY_NAMES); cm.setElement(0, 0, m_TrueNeg); cm.setElement(0, 1, m_FalsePos); cm.setElement(1, 0, m_FalseNeg); cm.setElement(1, 1, m_TruePos); return cm; } /** * Returns a string containing the various performance measures * for the current object */ public String toString() { StringBuffer res = new StringBuffer(); res.append(getTruePositive()).append(' '); res.append(getFalseNegative()).append(' '); res.append(getTrueNegative()).append(' '); res.append(getFalsePositive()).append(' '); res.append(getFalsePositiveRate()).append(' '); res.append(getTruePositiveRate()).append(' '); res.append(getPrecision()).append(' '); res.append(getRecall()).append(' '); res.append(getFMeasure()).append(' '); res.append(getFallout()).append(' '); return res.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
6,777
26.441296
75
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/output/prediction/AbstractOutput.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * AbstractOutput.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.evaluation.output.prediction; import java.io.BufferedWriter; import java.io.File; import java.io.FileWriter; import java.io.Serializable; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.Classifier; import weka.core.BatchPredictor; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Range; import weka.core.Utils; import weka.core.WekaException; import weka.core.converters.ConverterUtils.DataSource; /** * A superclass for outputting the classifications of a classifier. * <p/> * Basic use with a classifier and a test set: * * <pre> * Classifier classifier = ... // trained classifier * Instances testset = ... // the test set to output the predictions for * StringBuffer buffer = ... // the string buffer to add the output to * AbstractOutput output = new FunkyOutput(); * output.setHeader(...); * output.printClassifications(classifier, testset); * </pre> * * Basic use with a classifier and a data source: * * <pre> * Classifier classifier = ... // trained classifier * DataSource testset = ... // the data source to obtain the test set from to output the predictions for * StringBuffer buffer = ... 
// the string buffer to add the output to * AbstractOutput output = new FunkyOutput(); * output.setHeader(...); * output.printClassifications(classifier, testset); * </pre> * * In order to make the output generation easily integrate into GUI components, * one can output the header, classifications and footer separately: * * <pre> * Classifier classifier = ... // trained classifier * Instances testset = ... // the test set to output the predictions for * StringBuffer buffer = ... // the string buffer to add the output to * AbstractOutput output = new FunkyOutput(); * output.setHeader(...); * // print the header * output.printHeader(); * // print the classifications one-by-one * for (int i = 0; i &lt; testset.numInstances(); i++) { * output.printClassification(classifier, testset.instance(i), i); * // output progress information * if ((i+1) % 100 == 0) * System.out.println((i+1) + "/" + testset.numInstances()); * } * // print the footer * output.printFooter(); * </pre> * * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision: 9788 $ */ public abstract class AbstractOutput implements Serializable, OptionHandler { /** for serialization. */ private static final long serialVersionUID = 752696986017306241L; /** the header of the dataset. */ protected Instances m_Header; /** the buffer to write to. */ protected StringBuffer m_Buffer; /** the file buffer to write to. */ protected StringBuffer m_FileBuffer; /** whether to output the class distribution. */ protected boolean m_OutputDistribution; /** the range of attributes to output. */ protected Range m_Attributes; /** the number of decimals after the decimal point. */ protected int m_NumDecimals; /** the file to store the output in. */ protected File m_OutputFile; /** whether to suppress the regular output and only store in file. */ protected boolean m_SuppressOutput; /** * Initializes the output class. 
*/ public AbstractOutput() { m_Header = null; m_OutputDistribution = false; m_Attributes = null; m_Buffer = null; m_NumDecimals = 3; m_OutputFile = new File("."); m_FileBuffer = new StringBuffer(); m_SuppressOutput = false; } /** * Returns a string describing the output generator. * * @return a description suitable for displaying in the GUI */ public abstract String globalInfo(); /** * Returns a short display text, to be used in comboboxes. * * @return a short display text */ public abstract String getDisplay(); /** * Returns an enumeration of all the available options.. * * @return an enumeration of all available options. */ @Override public Enumeration listOptions() { Vector result; result = new Vector(); result.addElement(new Option( "\tThe range of attributes to print in addition to the classification.\n" + "\t(default: none)", "p", 1, "-p <range>")); result.addElement(new Option( "\tWhether to turn on the output of the class distribution.\n" + "\tOnly for nominal class attributes.\n" + "\t(default: off)", "distribution", 0, "-distribution")); result.addElement(new Option( "\tThe number of digits after the decimal point.\n" + "\t(default: " + getDefaultNumDecimals() + ")", "decimals", 1, "-decimals <num>")); result.addElement(new Option( "\tThe file to store the output in, instead of outputting it on stdout.\n" + "\tGets ignored if the supplied path is a directory.\n" + "\t(default: .)", "file", 1, "-file <path>")); result.addElement(new Option( "\tIn case the data gets stored in a file, then this flag can be used\n" + "\tto suppress the regular output.\n" + "\t(default: not suppressed)", "suppress", 0, "-suppress")); return result.elements(); } /** * Sets the OptionHandler's options using the given list. All options will be * set (or reset) during this call (i.e. incremental setting of options is not * possible). 
* * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { String tmpStr; setAttributes(Utils.getOption("p", options)); setOutputDistribution(Utils.getFlag("distribution", options)); tmpStr = Utils.getOption("decimals", options); if (tmpStr.length() > 0) setNumDecimals(Integer.parseInt(tmpStr)); else setNumDecimals(getDefaultNumDecimals()); tmpStr = Utils.getOption("file", options); if (tmpStr.length() > 0) setOutputFile(new File(tmpStr)); else setOutputFile(new File(".")); setSuppressOutput(Utils.getFlag("suppress", options)); } /** * Gets the current option settings for the OptionHandler. * * @return the list of current option settings as an array of strings */ @Override public String[] getOptions() { Vector<String> result; result = new Vector<String>(); if (getAttributes().length() > 0) { result.add("-p"); result.add(getAttributes()); } if (getOutputDistribution()) result.add("-distribution"); if (getNumDecimals() != getDefaultNumDecimals()) { result.add("-decimals"); result.add("" + getNumDecimals()); } if (!getOutputFile().isDirectory()) { result.add("-file"); result.add(getOutputFile().getAbsolutePath()); if (getSuppressOutput()) result.add("-suppress"); } return result.toArray(new String[result.size()]); } /** * Sets the header of the dataset. * * @param value the header */ public void setHeader(Instances value) { m_Header = new Instances(value, 0); } /** * Returns the header of the dataset. * * @return the header */ public Instances getHeader() { return m_Header; } /** * Sets the buffer to use. * * @param value the buffer */ public void setBuffer(StringBuffer value) { m_Buffer = value; } /** * Returns the current buffer. * * @return the buffer, can be null */ public StringBuffer getBuffer() { return m_Buffer; } /** * Sets the range of attributes to output. 
* * @param value the range */ public void setAttributes(String value) { if (value.length() == 0) m_Attributes = null; else m_Attributes = new Range(value); } /** * Returns the range of attributes to output. * * @return the range */ public String getAttributes() { if (m_Attributes == null) return ""; else return m_Attributes.getRanges(); } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the GUI */ public String attributesTipText() { return "The indices of the attributes to print in addition."; } /** * Sets whether to output the class distribution or not. * * @param value true if the class distribution is to be output as well */ public void setOutputDistribution(boolean value) { m_OutputDistribution = value; } /** * Returns whether to output the class distribution as well. * * @return true if the class distribution is output as well */ public boolean getOutputDistribution() { return m_OutputDistribution; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the GUI */ public String outputDistributionTipText() { return "Whether to ouput the class distribution as well (only nominal class attributes)."; } /** * Returns the default number of digits to output after the decimal point. * * @return the default number of digits */ public int getDefaultNumDecimals() { return 3; } /** * Sets the number of digits to output after the decimal point. * * @param value the number of digits */ public void setNumDecimals(int value) { if (value >= 0) m_NumDecimals = value; else System.err.println("Number of decimals cannot be negative (provided: " + value + ")!"); } /** * Returns the number of digits to output after the decimal point. * * @return the number of digits */ public int getNumDecimals() { return m_NumDecimals; } /** * Returns the tip text for this property. 
* * @return tip text for this property suitable for displaying in the GUI */ public String numDecimalsTipText() { return "The number of digits to output after the decimal point."; } /** * Sets the output file to write to. A directory disables this feature. * * @param value the file to write to or a directory */ public void setOutputFile(File value) { m_OutputFile = value; } /** * Returns the output file to write to. A directory if turned off. * * @return the file to write to or a directory */ public File getOutputFile() { return m_OutputFile; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the GUI */ public String outputFileTipText() { return "The file to write the generated output to (disabled if path is a directory)."; } /** * Sets whether to the regular output is suppressed in case the output is * stored in a file. * * @param value true if the regular output is to be suppressed */ public void setSuppressOutput(boolean value) { m_SuppressOutput = value; } /** * Returns whether to the regular output is suppressed in case the output is * stored in a file. * * @return true if the regular output is to be suppressed */ public boolean getSuppressOutput() { return m_SuppressOutput; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the GUI */ public String suppressOutputTipText() { return "Whether to suppress the regular output when storing the output in a file."; } /** * Performs basic checks. * * @return null if everything is in order, otherwise the error message */ protected String checkBasic() { if (m_Buffer == null) return "Buffer is null!"; if (m_Header == null) return "No dataset structure provided!"; if (m_Attributes != null) m_Attributes.setUpper(m_Header.numAttributes() - 1); return null; } /** * Returns whether regular output is generated or not. 
* * @return true if regular output is generated */ public boolean generatesOutput() { return m_OutputFile.isDirectory() || (!m_OutputFile.isDirectory() && !m_SuppressOutput); } /** * If an output file was defined, then the string gets added to the file * buffer, otherwise to the actual buffer. * * @param s the string to append * @see #m_Buffer * @see #m_FileBuffer */ protected void append(String s) { if (generatesOutput()) m_Buffer.append(s); if (!m_OutputFile.isDirectory()) m_FileBuffer.append(s); } /** * Performs checks whether everything is correctly setup for the header. * * @return null if everything is in order, otherwise the error message */ protected String checkHeader() { return checkBasic(); } /** * Performs the actual printing of the header. */ protected abstract void doPrintHeader(); /** * Prints the header to the buffer. */ public void printHeader() { String error; if ((error = checkHeader()) != null) throw new IllegalStateException(error); doPrintHeader(); } /** * Performs the actual printing of the classification. * * @param classifier the classifier to use for printing the classification * @param inst the instance to print * @param index the index of the instance * @throws Exception if printing of classification fails */ protected abstract void doPrintClassification(Classifier classifier, Instance inst, int index) throws Exception; /** * Performs the actual printing of the classification. * * @param dist the distribution to use for printing the classification * @param inst the instance to print * @param index the index of the instance * @throws Exception if printing of classification fails */ protected abstract void doPrintClassification(double[] dist, Instance inst, int index) throws Exception; /** * Preprocesses an input instance and its copy (that will get its class value * set to missing for prediction purposes). Basically this only does something * special in the case when the classifier is an InputMappedClassifier. 
* * @param inst the original instance to predict * @param withMissing a copy of the instance to predict * @param classifier the classifier that will be used to make the prediction * @return the original instance unchanged or mapped (in the case of an * InputMappedClassifier) and the withMissing copy with the class * attribute set to missing value. * @throws Exception if a problem occurs. */ protected Instance preProcessInstance(Instance inst, Instance withMissing, Classifier classifier) throws Exception { if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) { inst = (Instance) inst.copy(); inst = ((weka.classifiers.misc.InputMappedClassifier) classifier) .constructMappedInstance(inst); int mappedClass = ((weka.classifiers.misc.InputMappedClassifier) classifier) .getMappedClassIndex(); withMissing.setMissing(mappedClass); } else { withMissing.setMissing(withMissing.classIndex()); } return inst; } /** * Prints the classification to the buffer. * * @param classifier the classifier to use for printing the classification * @param inst the instance to print * @param index the index of the instance * @throws Exception if check fails or error occurs during printing of * classification */ public void printClassification(Classifier classifier, Instance inst, int index) throws Exception { String error; if ((error = checkBasic()) != null) throw new WekaException(error); doPrintClassification(classifier, inst, index); } /** * Prints the classification to the buffer. 
* * @param dist the distribution from classifier for the supplied instance * @param inst the instance to print * @param index the index of the instance * @throws Exception if check fails or error occurs during printing of * classification */ public void printClassification(double[] dist, Instance inst, int index) throws Exception { String error; if ((error = checkBasic()) != null) throw new WekaException(error); doPrintClassification(dist, inst, index); } /** * Prints the classifications to the buffer. * * @param classifier the classifier to use for printing the classifications * @param testset the data source to obtain the test instances from * @throws Exception if check fails or error occurs during printing of * classifications */ public void printClassifications(Classifier classifier, DataSource testset) throws Exception { int i; Instances test; Instance inst; i = 0; testset.reset(); if (classifier instanceof BatchPredictor) { test = testset.getDataSet(m_Header.classIndex()); double[][] predictions = ((BatchPredictor) classifier) .distributionsForInstances(test); for (i = 0; i < test.numInstances(); i++) { printClassification(predictions[i], test.instance(i), i); } } else { test = testset.getStructure(m_Header.classIndex()); while (testset.hasMoreElements(test)) { inst = testset.nextElement(test); doPrintClassification(classifier, inst, i); i++; } } } /** * Prints the classifications to the buffer. 
* * @param classifier the classifier to use for printing the classifications * @param testset the test instances * @throws Exception if check fails or error occurs during printing of * classifications */ public void printClassifications(Classifier classifier, Instances testset) throws Exception { int i; if (classifier instanceof BatchPredictor) { double[][] predictions = ((BatchPredictor) classifier) .distributionsForInstances(testset); for (i = 0; i < testset.numInstances(); i++) { printClassification(predictions[i], testset.instance(i), i); } } else { for (i = 0; i < testset.numInstances(); i++) doPrintClassification(classifier, testset.instance(i), i); } } /** * Performs the actual printing of the footer. */ protected abstract void doPrintFooter(); /** * Prints the footer to the buffer. This will also store the generated output * in a file if an output file was specified. * * @throws Exception if check fails */ public void printFooter() throws Exception { String error; BufferedWriter writer; if ((error = checkBasic()) != null) throw new WekaException(error); doPrintFooter(); // write output to file if (!m_OutputFile.isDirectory()) { try { writer = new BufferedWriter(new FileWriter(m_OutputFile)); writer.write(m_FileBuffer.toString()); writer.newLine(); writer.flush(); writer.close(); } catch (Exception e) { e.printStackTrace(); } } } /** * Prints the header, classifications and footer to the buffer. * * @param classifier the classifier to use for printing the classifications * @param testset the data source to obtain the test instances from * @throws Exception if check fails or error occurs during printing of * classifications */ public void print(Classifier classifier, DataSource testset) throws Exception { printHeader(); printClassifications(classifier, testset); printFooter(); } /** * Prints the header, classifications and footer to the buffer. 
* * @param classifier the classifier to use for printing the classifications * @param testset the test instances * @throws Exception if check fails or error occurs during printing of * classifications */ public void print(Classifier classifier, Instances testset) throws Exception { printHeader(); printClassifications(classifier, testset); printFooter(); } /** * Returns a fully configured object from the given commandline. * * @param cmdline the commandline to turn into an object * @return the object or null in case of an error */ public static AbstractOutput fromCommandline(String cmdline) { AbstractOutput result; String[] options; String classname; try { options = Utils.splitOptions(cmdline); classname = options[0]; options[0] = ""; result = (AbstractOutput) Utils.forName(AbstractOutput.class, classname, options); } catch (Exception e) { result = null; } return result; } }
21,462
27.848118
104
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/output/prediction/CSV.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CSV.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.evaluation.output.prediction; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.Classifier; import weka.core.Instance; import weka.core.Option; import weka.core.Utils; /** <!-- globalinfo-start --> * Outputs the predictions as CSV. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -p &lt;range&gt; * The range of attributes to print in addition to the classification. * (default: none)</pre> * * <pre> -distribution * Whether to turn on the output of the class distribution. * Only for nominal class attributes. * (default: off)</pre> * * <pre> -decimals &lt;num&gt; * The number of digits after the decimal point. * (default: 3)</pre> * * <pre> -file &lt;path&gt; * The file to store the output in, instead of outputting it on stdout. * Gets ignored if the supplied path is a directory. * (default: .)</pre> * * <pre> -suppress * In case the data gets stored in a file, then this flag can be used * to suppress the regular output. * (default: not suppressed)</pre> * * <pre> -use-tab * Whether to use TAB as separator instead of comma. 
* (default: comma)</pre> * <!-- options-end --> * * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision: 8937 $ */ public class CSV extends AbstractOutput { /** for serialization. */ private static final long serialVersionUID = 3401604538169573720L; /** the delimiter. */ protected String m_Delimiter = ","; /** * Returns a string describing the output generator. * * @return a description suitable for * displaying in the GUI */ public String globalInfo() { return "Outputs the predictions as CSV."; } /** * Returns a short display text, to be used in comboboxes. * * @return a short display text */ public String getDisplay() { return "CSV"; } /** * Returns an enumeration of all the available options.. * * @return an enumeration of all available options. */ public Enumeration listOptions() { Vector result; Enumeration enm; result = new Vector(); enm = super.listOptions(); while (enm.hasMoreElements()) result.add(enm.nextElement()); result.addElement(new Option( "\tWhether to use TAB as separator instead of comma.\n" + "\t(default: comma)", "use-tab", 0, "-use-tab")); return result.elements(); } /** * Sets the OptionHandler's options using the given list. All options * will be set (or reset) during this call (i.e. incremental setting * of options is not possible). * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setUseTab(Utils.getFlag("use-tab", options)); super.setOptions(options); } /** * Gets the current option settings for the OptionHandler. 
* * @return the list of current option settings as an array of strings */ public String[] getOptions() { Vector<String> result; String[] options; int i; result = new Vector<String>(); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); if (getUseTab()) result.add("-use-tab"); return result.toArray(new String[result.size()]); } /** * Sets whether to use tab instead of comma as separator. * * @param value true if tab is to be used */ public void setUseTab(boolean value) { if (value) m_Delimiter = "\t"; else m_Delimiter = ","; } /** * Returns whether tab is used as separator. * * @return true if tab is used instead of comma */ public boolean getUseTab() { return m_Delimiter.equals("\t"); } /** * Returns the tip text for this property. * * @return tip text for this property suitable for * displaying in the GUI */ public String useTabTipText() { return "Whether to use TAB instead of COMMA as column separator."; } /** * Performs the actual printing of the header. 
*/ protected void doPrintHeader() { if (m_Header.classAttribute().isNominal()) { if (m_OutputDistribution) { append("inst#" + m_Delimiter + "actual" + m_Delimiter + "predicted" + m_Delimiter + "error" + m_Delimiter + "distribution"); for (int i = 1; i < m_Header.classAttribute().numValues(); i++) append(m_Delimiter); } else { append("inst#" + m_Delimiter + "actual" + m_Delimiter + "predicted" + m_Delimiter + "error" + m_Delimiter + "prediction"); } } else { append("inst#" + m_Delimiter + "actual" + m_Delimiter + "predicted" + m_Delimiter + "error"); } if (m_Attributes != null) { append(m_Delimiter); boolean first = true; for (int i = 0; i < m_Header.numAttributes(); i++) { if (i == m_Header.classIndex()) continue; if (m_Attributes.isInRange(i)) { if (!first) append(m_Delimiter); append(m_Header.attribute(i).name()); first = false; } } } append("\n"); } /** * Builds a string listing the attribute values in a specified range of indices, * separated by commas and enclosed in brackets. * * @param instance the instance to print the values from * @return a string listing values of the attributes in the range */ protected String attributeValuesString(Instance instance) { StringBuffer text = new StringBuffer(); if (m_Attributes != null) { m_Attributes.setUpper(instance.numAttributes() - 1); boolean first = true; for (int i=0; i<instance.numAttributes(); i++) if (m_Attributes.isInRange(i) && i != instance.classIndex()) { if (!first) text.append(m_Delimiter); text.append(instance.toString(i)); first = false; } } return text.toString(); } /** * Store the prediction made by the classifier as a string. 
* * @param dist the distribution to use * @param inst the instance to generate text from * @param index the index in the dataset * @throws Exception if something goes wrong */ protected void doPrintClassification(double[] dist, Instance inst, int index) throws Exception { int prec = m_NumDecimals; Instance withMissing = (Instance)inst.copy(); withMissing.setDataset(inst.dataset()); double predValue = 0; if (Utils.sum(dist) == 0) { predValue = Utils.missingValue(); } else { if (inst.classAttribute().isNominal()) { predValue = Utils.maxIndex(dist); } else { predValue = dist[0]; } } // index append("" + (index+1)); if (inst.dataset().classAttribute().isNumeric()) { // actual if (inst.classIsMissing()) append(m_Delimiter + "?"); else append(m_Delimiter + Utils.doubleToString(inst.classValue(), prec)); // predicted if (Utils.isMissingValue(predValue)) append(m_Delimiter + "?"); else append(m_Delimiter + Utils.doubleToString(predValue, prec)); // error if (Utils.isMissingValue(predValue) || inst.classIsMissing()) append(m_Delimiter + "?"); else append(m_Delimiter + Utils.doubleToString(predValue - inst.classValue(), prec)); } else { // actual append(m_Delimiter + ((int) inst.classValue()+1) + ":" + inst.toString(inst.classIndex())); // predicted if (Utils.isMissingValue(predValue)) append(m_Delimiter + "?"); else append(m_Delimiter + ((int) predValue+1) + ":" + inst.dataset().classAttribute().value((int)predValue)); // error? 
if (!Utils.isMissingValue(predValue) && !inst.classIsMissing() && ((int) predValue+1 != (int) inst.classValue()+1)) append(m_Delimiter + "+"); else append(m_Delimiter + ""); // prediction/distribution if (m_OutputDistribution) { if (Utils.isMissingValue(predValue)) { append(m_Delimiter + "?"); } else { append(m_Delimiter); for (int n = 0; n < dist.length; n++) { if (n > 0) append(m_Delimiter); if (n == (int) predValue) append("*"); append(Utils.doubleToString(dist[n], prec)); } } } else { if (Utils.isMissingValue(predValue)) append(m_Delimiter + "?"); else append(m_Delimiter + Utils.doubleToString(dist[(int)predValue], prec)); } } // attributes if (m_Attributes != null) append(m_Delimiter + attributeValuesString(withMissing)); append("\n"); } /** * Store the prediction made by the classifier as a string. * * @param classifier the classifier to use * @param inst the instance to generate text from * @param index the index in the dataset * @throws Exception if something goes wrong */ protected void doPrintClassification(Classifier classifier, Instance inst, int index) throws Exception { double[] d = classifier.distributionForInstance(inst); doPrintClassification(d, inst, index); } /** * Does nothing. */ protected void doPrintFooter() { } }
10,207
27.674157
125
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/output/prediction/HTML.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * HTML.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.evaluation.output.prediction; import weka.classifiers.Classifier; import weka.core.Instance; import weka.core.Utils; /** <!-- globalinfo-start --> * Outputs the predictions in HTML. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -p &lt;range&gt; * The range of attributes to print in addition to the classification. * (default: none)</pre> * * <pre> -distribution * Whether to turn on the output of the class distribution. * Only for nominal class attributes. * (default: off)</pre> * * <pre> -decimals &lt;num&gt; * The number of digits after the decimal point. * (default: 3)</pre> * * <pre> -file &lt;path&gt; * The file to store the output in, instead of outputting it on stdout. * Gets ignored if the supplied path is a directory. * (default: .)</pre> * * <pre> -suppress * In case the data gets stored in a file, then this flag can be used * to suppress the regular output. * (default: not suppressed)</pre> * <!-- options-end --> * * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision: 8937 $ */ public class HTML extends AbstractOutput { /** for serialization. 
*/ private static final long serialVersionUID = 7241252244954353300L; /** * Returns a string describing the output generator. * * @return a description suitable for * displaying in the GUI */ public String globalInfo() { return "Outputs the predictions in HTML."; } /** * Returns a short display text, to be used in comboboxes. * * @return a short display text */ public String getDisplay() { return "HTML"; } /** * Replaces certain characters with their HTML entities. * * @param s the string to process * @return the processed string */ protected String sanitize(String s) { String result; result = s; result = result.replaceAll("&", "&amp;"); result = result.replaceAll("<", "&lt;"); result = result.replaceAll(">", "&gt;"); result = result.replaceAll("\"", "&quot;"); return result; } /** * Performs the actual printing of the header. */ protected void doPrintHeader() { append("<html>\n"); append("<head>\n"); append("<title>Predictions for dataset " + sanitize(m_Header.relationName()) + "</title>\n"); append("</head>\n"); append("<body>\n"); append("<div align=\"center\">\n"); append("<h3>Predictions for dataset " + sanitize(m_Header.relationName()) + "</h3>\n"); append("<table border=\"1\">\n"); append("<tr>\n"); if (m_Header.classAttribute().isNominal()) if (m_OutputDistribution) append("<td>inst#</td><td>actual</td><td>predicted</td><td>error</td><td colspan=\"" + m_Header.classAttribute().numValues() + "\">distribution</td>"); else append("<td>inst#</td><td>actual</td><td>predicted</td><td>error</td><td>prediction</td>"); else append("<td>inst#</td><td>actual</td><td>predicted</td><td>error</td>"); if (m_Attributes != null) { append("<td>"); boolean first = true; for (int i = 0; i < m_Header.numAttributes(); i++) { if (i == m_Header.classIndex()) continue; if (m_Attributes.isInRange(i)) { if (!first) append("</td><td>"); append(sanitize(m_Header.attribute(i).name())); first = false; } } append("</td>"); } append("</tr>\n"); } /** * Builds a string listing the attribute 
values in a specified range of indices, * separated by commas and enclosed in brackets. * * @param instance the instance to print the values from * @return a string listing values of the attributes in the range */ protected String attributeValuesString(Instance instance) { StringBuffer text = new StringBuffer(); if (m_Attributes != null) { boolean firstOutput = true; m_Attributes.setUpper(instance.numAttributes() - 1); for (int i=0; i<instance.numAttributes(); i++) if (m_Attributes.isInRange(i) && i != instance.classIndex()) { if (!firstOutput) text.append("</td>"); if (m_Header.attribute(i).isNumeric()) text.append("<td align=\"right\">"); else text.append("<td>"); text.append(sanitize(instance.toString(i))); firstOutput = false; } if (!firstOutput) text.append("</td>"); } return text.toString(); } protected void doPrintClassification(double[] dist, Instance inst, int index) throws Exception { int prec = m_NumDecimals; Instance withMissing = (Instance)inst.copy(); withMissing.setDataset(inst.dataset()); double predValue = 0; if (Utils.sum(dist) == 0) { predValue = Utils.missingValue(); } else { if (inst.classAttribute().isNominal()) { predValue = Utils.maxIndex(dist); } else { predValue = dist[0]; } } // index append("<tr>"); append("<td>" + (index+1) + "</td>"); if (inst.dataset().classAttribute().isNumeric()) { // actual if (inst.classIsMissing()) append("<td align=\"right\">" + "?" + "</td>"); else append("<td align=\"right\">" + Utils.doubleToString(inst.classValue(), prec) + "</td>"); // predicted if (Utils.isMissingValue(predValue)) append("<td align=\"right\">" + "?" + "</td>"); else append("<td align=\"right\">" + Utils.doubleToString(predValue, prec) + "</td>"); // error if (Utils.isMissingValue(predValue) || inst.classIsMissing()) append("<td align=\"right\">" + "?" 
+ "</td>"); else append("<td align=\"right\">" + Utils.doubleToString(predValue - inst.classValue(), prec) + "</td>"); } else { // actual append("<td>" + ((int) inst.classValue()+1) + ":" + sanitize(inst.toString(inst.classIndex())) + "</td>"); // predicted if (Utils.isMissingValue(predValue)) append("<td>" + "?" + "</td>"); else append("<td>" + ((int) predValue+1) + ":" + sanitize(inst.dataset().classAttribute().value((int)predValue)) + "</td>"); // error? if (!Utils.isMissingValue(predValue) && !inst.classIsMissing() && ((int) predValue+1 != (int) inst.classValue()+1)) append("<td>" + "+" + "</td>"); else append("<td>" + "&nbsp;" + "</td>"); // prediction/distribution if (m_OutputDistribution) { if (Utils.isMissingValue(predValue)) { append("<td>" + "?" + "</td>"); } else { append("<td align=\"right\">"); for (int n = 0; n < dist.length; n++) { if (n > 0) append("</td><td align=\"right\">"); if (n == (int) predValue) append("*"); append(Utils.doubleToString(dist[n], prec)); } append("</td>"); } } else { if (Utils.isMissingValue(predValue)) append("<td align=\"right\">" + "?" + "</td>"); else append("<td align=\"right\">" + Utils.doubleToString(dist[(int)predValue], prec) + "</td>"); } } // attributes append(attributeValuesString(withMissing) + "</tr>\n"); } /** * Store the prediction made by the classifier as a string. * * @param classifier the classifier to use * @param inst the instance to generate text from * @param index the index in the dataset * @throws Exception if something goes wrong */ protected void doPrintClassification(Classifier classifier, Instance inst, int index) throws Exception { double[] d = classifier.distributionForInstance(inst); doPrintClassification(d, inst, index); } /** * Does nothing. */ protected void doPrintFooter() { append("</table>\n"); append("</div>\n"); append("</body>\n"); append("</html>\n"); } }
8,729
30.178571
152
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/output/prediction/Null.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Null.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.evaluation.output.prediction; import weka.classifiers.Classifier; import weka.core.Instance; /** <!-- globalinfo-start --> * Suppresses all output. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -p &lt;range&gt; * The range of attributes to print in addition to the classification. * (default: none)</pre> * * <pre> -distribution * Whether to turn on the output of the class distribution. * Only for nominal class attributes. * (default: off)</pre> * * <pre> -decimals &lt;num&gt; * The number of digits after the decimal point. * (default: 3)</pre> * * <pre> -file &lt;path&gt; * The file to store the output in, instead of outputting it on stdout. * Gets ignored if the supplied path is a directory. * (default: .)</pre> * * <pre> -suppress * In case the data gets stored in a file, then this flag can be used * to suppress the regular output. * (default: not suppressed)</pre> * <!-- options-end --> * * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision: 8937 $ */ public class Null extends AbstractOutput { /** for serialization. */ private static final long serialVersionUID = 4988413155999044966L; /** * Returns a string describing the output generator. 
* * @return a description suitable for * displaying in the GUI */ public String globalInfo() { return "Suppresses all output."; } /** * Returns a short display text, to be used in comboboxes. * * @return a short display text */ public String getDisplay() { return "No output"; } /** * Returns always false. * * @return always false */ public boolean generatesOutput() { return false; } /** * Does nothing. */ protected void doPrintHeader() { } /** * Does nothing. * * @param classifier the classifier to use * @param inst the instance to generate text from * @param index the index in the dataset * @throws Exception if something goes wrong */ protected void doPrintClassification(Classifier classifier, Instance inst, int index) throws Exception { } /** * Does nothing. * * @param dist the distribution to use * @param inst the instance to generate text from * @param index the index in the dataset * @throws Exception if something goes wrong */ protected void doPrintClassification(double[] dist, Instance inst, int index) throws Exception { } /** * Does nothing. */ protected void doPrintFooter() { } }
3,392
24.900763
106
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/output/prediction/PlainText.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * PlainText.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.evaluation.output.prediction; import weka.classifiers.Classifier; import weka.core.Instance; import weka.core.Utils; /** <!-- globalinfo-start --> * Outputs the predictions in plain text. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -p &lt;range&gt; * The range of attributes to print in addition to the classification. * (default: none)</pre> * * <pre> -distribution * Whether to turn on the output of the class distribution. * Only for nominal class attributes. * (default: off)</pre> * * <pre> -decimals &lt;num&gt; * The number of digits after the decimal point. * (default: 3)</pre> * * <pre> -file &lt;path&gt; * The file to store the output in, instead of outputting it on stdout. * Gets ignored if the supplied path is a directory. * (default: .)</pre> * * <pre> -suppress * In case the data gets stored in a file, then this flag can be used * to suppress the regular output. * (default: not suppressed)</pre> * <!-- options-end --> * * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision: 8937 $ */ public class PlainText extends AbstractOutput { /** for serialization. 
*/ private static final long serialVersionUID = 2033389864898242735L; /** * Returns a string describing the output generator. * * @return a description suitable for * displaying in the GUI */ public String globalInfo() { return "Outputs the predictions in plain text."; } /** * Returns a short display text, to be used in comboboxes. * * @return a short display text */ public String getDisplay() { return "Plain text"; } /** * Performs the actual printing of the header. */ protected void doPrintHeader() { if (m_Header.classAttribute().isNominal()) if (m_OutputDistribution) append(" inst# actual predicted error distribution"); else append(" inst# actual predicted error prediction"); else append(" inst# actual predicted error"); if (m_Attributes != null) { append(" ("); boolean first = true; for (int i = 0; i < m_Header.numAttributes(); i++) { if (i == m_Header.classIndex()) continue; if (m_Attributes.isInRange(i)) { if (!first) append(","); append(m_Header.attribute(i).name()); first = false; } } append(")"); } append("\n"); } /** * Builds a string listing the attribute values in a specified range of indices, * separated by commas and enclosed in brackets. * * @param instance the instance to print the values from * @return a string listing values of the attributes in the range */ protected String attributeValuesString(Instance instance) { StringBuffer text = new StringBuffer(); if (m_Attributes != null) { boolean firstOutput = true; m_Attributes.setUpper(instance.numAttributes() - 1); for (int i=0; i<instance.numAttributes(); i++) if (m_Attributes.isInRange(i) && i != instance.classIndex()) { if (firstOutput) text.append("("); else text.append(","); text.append(instance.toString(i)); firstOutput = false; } if (!firstOutput) text.append(")"); } return text.toString(); } /** * Store the prediction made by the classifier as a string. 
* * @param dist the distribution to use * @param inst the instance to generate text from * @param index the index in the dataset * @throws Exception if something goes wrong */ protected void doPrintClassification(double[] dist, Instance inst, int index) throws Exception { int width = 7 + m_NumDecimals; int prec = m_NumDecimals; Instance withMissing = (Instance)inst.copy(); withMissing.setDataset(inst.dataset()); double predValue = 0; if (Utils.sum(dist) == 0) { predValue = Utils.missingValue(); } else { if (inst.classAttribute().isNominal()) { predValue = Utils.maxIndex(dist); } else { predValue = dist[0]; } } // index append(Utils.padLeft("" + (index+1), 6)); if (inst.dataset().classAttribute().isNumeric()) { // actual if (inst.classIsMissing()) append(" " + Utils.padLeft("?", width)); else append(" " + Utils.doubleToString(inst.classValue(), width, prec)); // predicted if (Utils.isMissingValue(predValue)) append(" " + Utils.padLeft("?", width)); else append(" " + Utils.doubleToString(predValue, width, prec)); // error if (Utils.isMissingValue(predValue) || inst.classIsMissing()) append(" " + Utils.padLeft("?", width)); else append(" " + Utils.doubleToString(predValue - inst.classValue(), width, prec)); } else { // actual append(" " + Utils.padLeft(((int) inst.classValue()+1) + ":" + inst.toString(inst.classIndex()), width)); // predicted if (Utils.isMissingValue(predValue)) append(" " + Utils.padLeft("?", width)); else append(" " + Utils.padLeft(((int) predValue+1) + ":" + inst.dataset().classAttribute().value((int)predValue), width)); // error? 
if (!Utils.isMissingValue(predValue) && !inst.classIsMissing() && ((int) predValue+1 != (int) inst.classValue()+1)) append(" " + " + "); else append(" " + " "); // prediction/distribution if (m_OutputDistribution) { if (Utils.isMissingValue(predValue)) { append(" " + "?"); } else { append(" "); for (int n = 0; n < dist.length; n++) { if (n > 0) append(","); if (n == (int) predValue) append("*"); append(Utils.doubleToString(dist[n], prec)); } } } else { if (Utils.isMissingValue(predValue)) append(" " + "?"); else append(" " + Utils.doubleToString(dist[(int)predValue], prec)); } } // attributes append(" " + attributeValuesString(withMissing) + "\n"); } /** * Store the prediction made by the classifier as a string. * * @param classifier the classifier to use * @param inst the instance to generate text from * @param index the index in the dataset * @throws Exception if something goes wrong */ protected void doPrintClassification(Classifier classifier, Instance inst, int index) throws Exception { double[] d = classifier.distributionForInstance(inst); doPrintClassification(d, inst, index); } /** * Does nothing. */ protected void doPrintFooter() { } }
7,592
29.130952
126
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/evaluation/output/prediction/XML.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * XML.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.evaluation.output.prediction; import weka.classifiers.Classifier; import weka.core.Attribute; import weka.core.Instance; import weka.core.Utils; import weka.core.Version; import weka.core.xml.XMLDocument; /** <!-- globalinfo-start --> * Outputs the predictions in XML.<br/> * <br/> * The following DTD is used:<br/> * <br/> * &lt;!DOCTYPE predictions<br/> * [<br/> * &lt;!ELEMENT predictions (prediction*)&gt;<br/> * &lt;!ATTLIST predictions version CDATA "3.5.8"&gt;<br/> * &lt;!ATTLIST predictions name CDATA #REQUIRED&gt;<br/> * <br/> * &lt;!ELEMENT prediction ((actual_label,predicted_label,error,(prediction|distribution),attributes?)|(actual_value,predicted_value,error,attributes?))&gt;<br/> * &lt;!ATTLIST prediction index CDATA #REQUIRED&gt;<br/> * <br/> * &lt;!ELEMENT actual_label ANY&gt;<br/> * &lt;!ATTLIST actual_label index CDATA #REQUIRED&gt;<br/> * &lt;!ELEMENT predicted_label ANY&gt;<br/> * &lt;!ATTLIST predicted_label index CDATA #REQUIRED&gt;<br/> * &lt;!ELEMENT error ANY&gt;<br/> * &lt;!ELEMENT prediction ANY&gt;<br/> * &lt;!ELEMENT distribution (class_label+)&gt;<br/> * &lt;!ELEMENT class_label ANY&gt;<br/> * &lt;!ATTLIST class_label index CDATA #REQUIRED&gt;<br/> * &lt;!ATTLIST class_label predicted (yes|no) 
"no"&gt;<br/> * &lt;!ELEMENT actual_value ANY&gt;<br/> * &lt;!ELEMENT predicted_value ANY&gt;<br/> * &lt;!ELEMENT attributes (attribute+)&gt;<br/> * &lt;!ELEMENT attribute ANY&gt;<br/> * &lt;!ATTLIST attribute index CDATA #REQUIRED&gt;<br/> * &lt;!ATTLIST attribute name CDATA #REQUIRED&gt;<br/> * &lt;!ATTLIST attribute type (numeric|date|nominal|string|relational) #REQUIRED&gt;<br/> * ]<br/> * &gt; * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -p &lt;range&gt; * The range of attributes to print in addition to the classification. * (default: none)</pre> * * <pre> -distribution * Whether to turn on the output of the class distribution. * Only for nominal class attributes. * (default: off)</pre> * * <pre> -decimals &lt;num&gt; * The number of digits after the decimal point. * (default: 3)</pre> * * <pre> -file &lt;path&gt; * The file to store the output in, instead of outputting it on stdout. * Gets ignored if the supplied path is a directory. * (default: .)</pre> * * <pre> -suppress * In case the data gets stored in a file, then this flag can be used * to suppress the regular output. * (default: not suppressed)</pre> * <!-- options-end --> * * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision: 8937 $ */ public class XML extends AbstractOutput { /** for serialization. */ private static final long serialVersionUID = -3165514277316824801L; /** the DocType definition. */ public final static String DTD_DOCTYPE = XMLDocument.DTD_DOCTYPE; /** the Element definition. */ public final static String DTD_ELEMENT = XMLDocument.DTD_ELEMENT; /** the AttList definition. */ public final static String DTD_ATTLIST = XMLDocument.DTD_ATTLIST; /** the optional marker. */ public final static String DTD_OPTIONAL = XMLDocument.DTD_OPTIONAL; /** the at least one marker. */ public final static String DTD_AT_LEAST_ONE = XMLDocument.DTD_AT_LEAST_ONE; /** the zero or more marker. 
*/ public final static String DTD_ZERO_OR_MORE = XMLDocument.DTD_ZERO_OR_MORE; /** the option separator. */ public final static String DTD_SEPARATOR = XMLDocument.DTD_SEPARATOR; /** the CDATA placeholder. */ public final static String DTD_CDATA = XMLDocument.DTD_CDATA; /** the ANY placeholder. */ public final static String DTD_ANY = XMLDocument.DTD_ANY; /** the #PCDATA placeholder. */ public final static String DTD_PCDATA = XMLDocument.DTD_PCDATA; /** the #IMPLIED placeholder. */ public final static String DTD_IMPLIED = XMLDocument.DTD_IMPLIED; /** the #REQUIRED placeholder. */ public final static String DTD_REQUIRED = XMLDocument.DTD_REQUIRED; /** the "version" attribute. */ public final static String ATT_VERSION = XMLDocument.ATT_VERSION; /** the "name" attribute. */ public final static String ATT_NAME = XMLDocument.ATT_NAME; /** the "type" attribute. */ public final static String ATT_TYPE = "type"; /** the value "yes". */ public final static String VAL_YES = XMLDocument.VAL_YES; /** the value "no". */ public final static String VAL_NO = XMLDocument.VAL_NO; /** the predictions tag. */ public final static String TAG_PREDICTIONS = "predictions"; /** the prediction tag. */ public final static String TAG_PREDICTION = "prediction"; /** the actual_nominal tag. */ public final static String TAG_ACTUAL_LABEL = "actual_label"; /** the predicted_nominal tag. */ public final static String TAG_PREDICTED_LABEL = "predicted_label"; /** the error tag. */ public final static String TAG_ERROR = "error"; /** the distribution tag. */ public final static String TAG_DISTRIBUTION = "distribution"; /** the class_label tag. */ public final static String TAG_CLASS_LABEL = "class_label"; /** the actual_numeric tag. */ public final static String TAG_ACTUAL_VALUE = "actual_value"; /** the predicted_numeric tag. */ public final static String TAG_PREDICTED_VALUE = "predicted_value"; /** the attributes tag. */ public final static String TAG_ATTRIBUTES = "attributes"; /** the attribute tag. 
*/ public final static String TAG_ATTRIBUTE = "attribute"; /** the index attribute. */ public final static String ATT_INDEX = "index"; /** the predicted attribute. */ public final static String ATT_PREDICTED = "predicted"; /** the DTD. */ public final static String DTD = "<!" + DTD_DOCTYPE + " " + TAG_PREDICTIONS + "\n" + "[\n" + " <!" + DTD_ELEMENT + " " + TAG_PREDICTIONS + " (" + TAG_PREDICTION + DTD_ZERO_OR_MORE + ")" + ">\n" + " <!" + DTD_ATTLIST + " " + TAG_PREDICTIONS + " " + ATT_VERSION + " " + DTD_CDATA + " \"" + Version.VERSION + "\"" + ">\n" + " <!" + DTD_ATTLIST + " " + TAG_PREDICTIONS + " " + ATT_NAME + " " + DTD_CDATA + " " + DTD_REQUIRED + ">\n" + "\n" + " <!" + DTD_ELEMENT + " " + TAG_PREDICTION + " " + "(" + "(" + TAG_ACTUAL_LABEL + "," + TAG_PREDICTED_LABEL + "," + TAG_ERROR + "," + "(" + TAG_PREDICTION + DTD_SEPARATOR + TAG_DISTRIBUTION + ")" + "," + TAG_ATTRIBUTES + DTD_OPTIONAL + ")" + DTD_SEPARATOR + "(" + TAG_ACTUAL_VALUE + "," + TAG_PREDICTED_VALUE + "," + TAG_ERROR + "," + TAG_ATTRIBUTES + DTD_OPTIONAL + ")" + ")" + ">\n" + " <!" + DTD_ATTLIST + " " + TAG_PREDICTION + " " + ATT_INDEX + " " + DTD_CDATA + " " + DTD_REQUIRED + ">\n" + "\n" + " <!" + DTD_ELEMENT + " " + TAG_ACTUAL_LABEL + " " + DTD_ANY + ">\n" + " <!" + DTD_ATTLIST + " " + TAG_ACTUAL_LABEL + " " + ATT_INDEX + " " + DTD_CDATA + " " + DTD_REQUIRED + ">\n" + " <!" + DTD_ELEMENT + " " + TAG_PREDICTED_LABEL + " " + DTD_ANY + ">\n" + " <!" + DTD_ATTLIST + " " + TAG_PREDICTED_LABEL + " " + ATT_INDEX + " " + DTD_CDATA + " " + DTD_REQUIRED + ">\n" + " <!" + DTD_ELEMENT + " " + TAG_ERROR + " " + DTD_ANY + ">\n" + " <!" + DTD_ELEMENT + " " + TAG_PREDICTION + " " + DTD_ANY + ">\n" + " <!" + DTD_ELEMENT + " " + TAG_DISTRIBUTION + " (" + TAG_CLASS_LABEL + DTD_AT_LEAST_ONE + ")" + ">\n" + " <!" + DTD_ELEMENT + " " + TAG_CLASS_LABEL + " " + DTD_ANY + ">\n" + " <!" + DTD_ATTLIST + " " + TAG_CLASS_LABEL + " " + ATT_INDEX + " " + DTD_CDATA + " " + DTD_REQUIRED + ">\n" + " <!" 
+ DTD_ATTLIST + " " + TAG_CLASS_LABEL + " " + ATT_PREDICTED + " (" + VAL_YES + DTD_SEPARATOR + VAL_NO + ") " + "\"" + VAL_NO + "\"" + ">\n" + " <!" + DTD_ELEMENT + " " + TAG_ACTUAL_VALUE + " " + DTD_ANY + ">\n" + " <!" + DTD_ELEMENT + " " + TAG_PREDICTED_VALUE + " " + DTD_ANY + ">\n" + " <!" + DTD_ELEMENT + " " + TAG_ATTRIBUTES + " (" + TAG_ATTRIBUTE + DTD_AT_LEAST_ONE + ")" + ">\n" + " <!" + DTD_ELEMENT + " " + TAG_ATTRIBUTE + " " + DTD_ANY + ">\n" + " <!" + DTD_ATTLIST + " " + TAG_ATTRIBUTE + " " + ATT_INDEX + " " + DTD_CDATA + " " + DTD_REQUIRED + ">\n" + " <!" + DTD_ATTLIST + " " + TAG_ATTRIBUTE + " " + ATT_NAME + " " + DTD_CDATA + " " + DTD_REQUIRED + ">\n" + " <!" + DTD_ATTLIST + " " + TAG_ATTRIBUTE + " " + ATT_TYPE + " " + "(" + Attribute.typeToString(Attribute.NUMERIC) + DTD_SEPARATOR + Attribute.typeToString(Attribute.DATE) + DTD_SEPARATOR + Attribute.typeToString(Attribute.NOMINAL) + DTD_SEPARATOR + Attribute.typeToString(Attribute.STRING) + DTD_SEPARATOR + Attribute.typeToString(Attribute.RELATIONAL) + ")" + " " + DTD_REQUIRED + ">\n" + "]\n" + ">"; /** * Returns a string describing the output generator. * * @return a description suitable for * displaying in the GUI */ public String globalInfo() { return "Outputs the predictions in XML.\n\n" + "The following DTD is used:\n\n" + DTD; } /** * Returns a short display text, to be used in comboboxes. * * @return a short display text */ public String getDisplay() { return "XML"; } /** * Replaces certain characters with their XML entities. * * @param s the string to process * @return the processed string */ protected String sanitize(String s) { String result; result = s; result = result.replaceAll("&", "&amp;"); result = result.replaceAll("<", "&lt;"); result = result.replaceAll(">", "&gt;"); result = result.replaceAll("\"", "&quot;"); return result; } /** * Performs the actual printing of the header. 
*/ protected void doPrintHeader() { append("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"); append("\n"); append(DTD + "\n\n"); append("<" + TAG_PREDICTIONS + " " + ATT_VERSION + "=\"" + Version.VERSION + "\"" + " " + ATT_NAME + "=\"" + sanitize(m_Header.relationName()) + "\">\n"); } /** * Builds a string listing the attribute values in a specified range of indices, * separated by commas and enclosed in brackets. * * @param instance the instance to print the values from * @return a string listing values of the attributes in the range */ protected String attributeValuesString(Instance instance) { StringBuffer text = new StringBuffer(); if (m_Attributes != null) { text.append(" <" + TAG_ATTRIBUTES + ">\n"); m_Attributes.setUpper(instance.numAttributes() - 1); for (int i=0; i<instance.numAttributes(); i++) { if (m_Attributes.isInRange(i) && i != instance.classIndex()) { text.append(" <" + TAG_ATTRIBUTE + " " + ATT_INDEX + "=\"" + (i+1) + "\"" + " " + ATT_NAME + "=\"" + sanitize(instance.attribute(i).name()) + "\"" + " " + ATT_TYPE + "=\"" + Attribute.typeToString(instance.attribute(i).type()) + "\"" + ">"); text.append(sanitize(instance.toString(i))); text.append("</" + TAG_ATTRIBUTE + ">\n"); } } text.append(" </" + TAG_ATTRIBUTES + ">\n"); } return text.toString(); } /** * Store the prediction made by the classifier as a string. 
* * @param dist the distribution to use * @param inst the instance to generate text from * @param index the index in the dataset * @throws Exception if something goes wrong */ protected void doPrintClassification(double[] dist, Instance inst, int index) throws Exception { int prec = m_NumDecimals; Instance withMissing = (Instance)inst.copy(); withMissing.setDataset(inst.dataset()); double predValue = 0; if (Utils.sum(dist) == 0) { predValue = Utils.missingValue(); } else { if (inst.classAttribute().isNominal()) { predValue = Utils.maxIndex(dist); } else { predValue = dist[0]; } } // opening tag append(" <" + TAG_PREDICTION + " " + ATT_INDEX + "=\"" + (index+1) + "\">\n"); if (inst.dataset().classAttribute().isNumeric()) { // actual append(" <" + TAG_ACTUAL_VALUE + ">"); if (inst.classIsMissing()) append("?"); else append(Utils.doubleToString(inst.classValue(), prec)); append("</" + TAG_ACTUAL_VALUE + ">\n"); // predicted append(" <" + TAG_PREDICTED_VALUE + ">"); if (inst.classIsMissing()) append("?"); else append(Utils.doubleToString(predValue, prec)); append("</" + TAG_PREDICTED_VALUE + ">\n"); // error append(" <" + TAG_ERROR + ">"); if (Utils.isMissingValue(predValue) || inst.classIsMissing()) append("?"); else append(Utils.doubleToString(predValue - inst.classValue(), prec)); append("</" + TAG_ERROR + ">\n"); } else { // actual append(" <" + TAG_ACTUAL_LABEL + " " + ATT_INDEX + "=\"" + ((int) inst.classValue()+1) + "\"" + ">"); append(sanitize(inst.toString(inst.classIndex()))); append("</" + TAG_ACTUAL_LABEL + ">\n"); // predicted append(" <" + TAG_PREDICTED_LABEL + " " + ATT_INDEX + "=\"" + ((int) predValue+1) + "\"" + ">"); if (Utils.isMissingValue(predValue)) append("?"); else append(sanitize(inst.dataset().classAttribute().value((int)predValue))); append("</" + TAG_PREDICTED_LABEL + ">\n"); // error? 
append(" <" + TAG_ERROR + ">"); if (!Utils.isMissingValue(predValue) && !inst.classIsMissing() && ((int) predValue+1 != (int) inst.classValue()+1)) append(VAL_YES); else append(VAL_NO); append("</" + TAG_ERROR + ">\n"); // prediction/distribution if (m_OutputDistribution) { append(" <" + TAG_DISTRIBUTION + ">\n"); for (int n = 0; n < dist.length; n++) { append(" <" + TAG_CLASS_LABEL + " " + ATT_INDEX + "=\"" + (n+1) + "\""); if (!Utils.isMissingValue(predValue) && (n == (int) predValue)) append(" " + ATT_PREDICTED + "=\"" + VAL_YES + "\""); append(">"); append(Utils.doubleToString(dist[n], prec)); append("</" + TAG_CLASS_LABEL + ">\n"); } append(" </" + TAG_DISTRIBUTION + ">\n"); } else { append(" <" + TAG_PREDICTION + ">"); if (Utils.isMissingValue(predValue)) append("?"); else append(Utils.doubleToString(dist[(int)predValue], prec)); append("</" + TAG_PREDICTION + ">\n"); } } // attributes if (m_Attributes != null) append(attributeValuesString(withMissing)); // closing tag append(" </" + TAG_PREDICTION + ">\n"); } /** * Store the prediction made by the classifier as a string. * * @param classifier the classifier to use * @param inst the instance to generate text from * @param index the index in the dataset * @throws Exception if something goes wrong */ protected void doPrintClassification(Classifier classifier, Instance inst, int index) throws Exception { double[] d = classifier.distributionForInstance(inst); doPrintClassification(d, inst, index); } /** * Does nothing. */ protected void doPrintFooter() { append("</" + TAG_PREDICTIONS + ">\n"); } }
16,156
37.016471
395
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/GaussianProcesses.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * GaussianProcesses.java * Copyright (C) 2005-2012 University of Waikato */ package weka.classifiers.functions; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.ConditionalDensityEstimator; import weka.classifiers.IntervalEstimator; import weka.classifiers.functions.supportVector.CachedKernel; import weka.classifiers.functions.supportVector.Kernel; import weka.classifiers.functions.supportVector.PolyKernel; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.SelectedTag; import weka.core.Statistics; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.matrix.Matrix; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import weka.filters.unsupervised.attribute.Standardize; /** * <!-- globalinfo-start --> * Implements Gaussian processes for * 
regression without hyperparameter-tuning. To make choosing an * appropriate noise level easier, this implementation applies * normalization/standardization to the target attribute as well (if * normalization/standardizaton is turned on). Missing values * are replaced by the global mean/mode. Nominal attributes are * converted to binary ones. * <!-- globalinfo-end --> * * <!-- technical-bibtex-start --> BibTeX: * * <pre> * @misc{Mackay1998, * address = {Dept. of Physics, Cambridge University, UK}, * author = {David J.C. Mackay}, * title = {Introduction to Gaussian Processes}, * year = {1998}, * PS = {http://wol.ra.phy.cam.ac.uk/mackay/gpB.ps.gz} * } * </pre> * * <p/> <!-- technical-bibtex-end --> * * <!-- options-start --> Valid options are: <p/> * * <pre> * -D * If set, classifier is run in debug mode and * may output additional info to the console * </pre> * * <pre> * -L &lt;double&gt; * Level of Gaussian Noise. (default 0.1) * </pre> * * <pre> * -N * Whether to 0=normalize/1=standardize/2=neither. (default 0=normalize) * </pre> * * <pre> * -K &lt;classname and parameters&gt; * The Kernel to use. * (default: weka.classifiers.functions.supportVector.PolyKernel) * </pre> * * <pre> * * Options specific to kernel weka.classifiers.functions.supportVector.RBFKernel: * </pre> * * <pre> * -D * Enables debugging output (if available) to be printed. * (default: off) * </pre> * * <pre> * -no-checks * Turns off all checks - use with caution! * (default: checks on) * </pre> * * <pre> * -C &lt;num&gt; * The size of the cache (a prime number). * (default: 250007) * </pre> * * <pre> * -G &lt;num&gt; * The Gamma parameter. 
* (default: 0.01) * </pre> * * <!-- options-end --> * * @author Kurt Driessens (kurtd@cs.waikato.ac.nz) * @author Remco Bouckaert (remco@cs.waikato.ac.nz) * @version $Revision: 9562 $ */ public class GaussianProcesses extends AbstractClassifier implements OptionHandler, IntervalEstimator, ConditionalDensityEstimator, TechnicalInformationHandler, WeightedInstancesHandler { /** for serialization */ static final long serialVersionUID = -8620066949967678545L; /** The filter used to make attributes numeric. */ protected NominalToBinary m_NominalToBinary; /** normalizes the data */ public static final int FILTER_NORMALIZE = 0; /** standardizes the data */ public static final int FILTER_STANDARDIZE = 1; /** no filter */ public static final int FILTER_NONE = 2; /** The filter to apply to the training data */ public static final Tag[] TAGS_FILTER = { new Tag(FILTER_NORMALIZE, "Normalize training data"), new Tag(FILTER_STANDARDIZE, "Standardize training data"), new Tag(FILTER_NONE, "No normalization/standardization"), }; /** The filter used to standardize/normalize all values. */ protected Filter m_Filter = null; /** Whether to normalize/standardize/neither */ protected int m_filterType = FILTER_NORMALIZE; /** The filter used to get rid of missing values. */ protected ReplaceMissingValues m_Missing; /** * Turn off all checks and conversions? Turning them off assumes that data * is purely numeric, doesn't contain any missing values, and has a numeric * class. */ protected boolean m_checksTurnedOff = false; /** Gaussian Noise Value. */ protected double m_delta = 1; /** * The parameters of the linear transforamtion realized by the filter on the * class attribute */ protected double m_Alin; protected double m_Blin; /** Kernel to use * */ protected Kernel m_kernel = new PolyKernel(); /** The number of training instances */ protected int m_NumTrain = 0; /** The training data. 
*/ protected double m_avg_target; /** (negative) covariance matrix in symmetric matrix representation **/ public double[][] m_L; /** The vector of target values. */ protected Matrix m_t; /** * Returns a string describing classifier * * @return a description suitable for displaying in the * explorer/experimenter gui */ public String globalInfo() { return " Implements Gaussian processes for " + "regression without hyperparameter-tuning. To make choosing an " + "appropriate noise level easier, this implementation applies " + "normalization/standardization to the target attribute as well " + "as the other attributes (if " + " normalization/standardizaton is turned on). Missing values " + "are replaced by the global mean/mode. Nominal attributes are " + "converted to binary ones. Note that kernel caching is turned off " + "if the kernel used implements CachedKernel."; } /** * Returns an instance of a TechnicalInformation object, containing detailed * information about the technical background of this class, e.g., paper * reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.MISC); result.setValue(Field.AUTHOR, "David J.C. Mackay"); result.setValue(Field.YEAR, "1998"); result.setValue(Field.TITLE, "Introduction to Gaussian Processes"); result.setValue(Field.ADDRESS, "Dept. of Physics, Cambridge University, UK"); result.setValue(Field.PS, "http://wol.ra.phy.cam.ac.uk/mackay/gpB.ps.gz"); return result; } /** * Returns default capabilities of the classifier. 
 *
 * @return the capabilities of this classifier
 */
public Capabilities getCapabilities() {
  // Start from the kernel's capabilities: this GP can only handle what its
  // kernel can handle.
  Capabilities result = getKernel().getCapabilities();
  result.setOwner(this);

  // attribute
  result.enableAllAttributeDependencies();
  // with NominalToBinary we can also handle nominal attributes, but only
  // if the kernel can handle numeric attributes
  if (result.handles(Capability.NUMERIC_ATTRIBUTES))
    result.enable(Capability.NOMINAL_ATTRIBUTES);
  result.enable(Capability.MISSING_VALUES);

  // class
  result.disableAllClasses();
  result.disableAllClassDependencies();
  result.enable(Capability.NUMERIC_CLASS);
  result.enable(Capability.DATE_CLASS);
  result.enable(Capability.MISSING_CLASS_VALUES);

  return result;
}

/**
 * Method for building the classifier.
 *
 * Pipeline: (1) optional data checks and missing-value replacement,
 * (2) optional NominalToBinary conversion, (3) optional
 * normalization/standardization of all attributes including the class,
 * (4) kernel evaluation of the (noise-augmented) covariance matrix,
 * (5) in-place inversion of that matrix exploiting symmetry, and
 * (6) computation of the weight vector m_t used at prediction time.
 *
 * @param insts the set of training instances
 * @throws Exception if the classifier can't be built successfully
 */
public void buildClassifier(Instances insts) throws Exception {

  /* check the set of training instances */
  if (!m_checksTurnedOff) {
    // can classifier handle the data?
    getCapabilities().testWithFail(insts);

    // remove instances with missing class
    insts = new Instances(insts);
    insts.deleteWithMissingClass();
  }

  // Replace missing values globally (mean/mode) unless checks are off.
  if (!m_checksTurnedOff) {
    m_Missing = new ReplaceMissingValues();
    m_Missing.setInputFormat(insts);
    insts = Filter.useFilter(insts, m_Missing);
  } else {
    m_Missing = null;
  }

  // Convert nominal attributes to binary ones, but only when the kernel
  // handles numeric attributes and the data is not already all-numeric.
  if (getCapabilities().handles(Capability.NUMERIC_ATTRIBUTES)) {
    boolean onlyNumeric = true;
    if (!m_checksTurnedOff) {
      for (int i = 0; i < insts.numAttributes(); i++) {
        if (i != insts.classIndex()) {
          if (!insts.attribute(i).isNumeric()) {
            onlyNumeric = false;
            break;
          }
        }
      }
    }
    if (!onlyNumeric) {
      m_NominalToBinary = new NominalToBinary();
      m_NominalToBinary.setInputFormat(insts);
      insts = Filter.useFilter(insts, m_NominalToBinary);
    } else {
      m_NominalToBinary = null;
    }
  } else {
    m_NominalToBinary = null;
  }

  // Optional scaling; note setIgnoreClass(true) means the CLASS is scaled
  // too — the linear back-transformation is recovered below (m_Alin/m_Blin).
  if (m_filterType == FILTER_STANDARDIZE) {
    m_Filter = new Standardize();
    ((Standardize) m_Filter).setIgnoreClass(true);
    m_Filter.setInputFormat(insts);
    insts = Filter.useFilter(insts, m_Filter);
  } else if (m_filterType == FILTER_NORMALIZE) {
    m_Filter = new Normalize();
    ((Normalize) m_Filter).setIgnoreClass(true);
    m_Filter.setInputFormat(insts);
    insts = Filter.useFilter(insts, m_Filter);
  } else {
    m_Filter = null;
  }

  m_NumTrain = insts.numInstances();

  // determine which linear transformation has been
  // applied to the class by the filter: push class values 0 and 1 through
  // the filter; filtered = m_Alin * raw + m_Blin.
  if (m_Filter != null) {
    Instance witness = (Instance) insts.instance(0).copy();
    witness.setValue(insts.classIndex(), 0);
    m_Filter.input(witness);
    m_Filter.batchFinished();
    Instance res = m_Filter.output();
    m_Blin = res.value(insts.classIndex());
    witness.setValue(insts.classIndex(), 1);
    m_Filter.input(witness);
    m_Filter.batchFinished();
    res = m_Filter.output();
    m_Alin = res.value(insts.classIndex()) - m_Blin;
  } else {
    m_Alin = 1.0;
    m_Blin = 0.0;
  }

  // Initialize kernel; caching is disabled because every pair is
  // evaluated exactly once below.
  try {
    CachedKernel cachedKernel = (CachedKernel) m_kernel;
    cachedKernel.setCacheSize(0);
  } catch (Exception e) {
    // ignore — kernel simply isn't a CachedKernel
  }
  m_kernel.buildKernel(insts);

  // Compute average target value (predictions are made around this mean).
  double sum = 0.0;
  for (int i = 0; i < insts.numInstances(); i++) {
    sum += insts.instance(i).classValue();
  }
  m_avg_target = sum / insts.numInstances();

  // initialize kernel matrix/covariance matrix: lower-triangular storage,
  // row i holds columns 0..i; noise delta^2 is added on the diagonal.
  int n = insts.numInstances();
  m_L = new double[n][];
  for (int i = 0; i < n; i++) {
    m_L[i] = new double[i + 1];
  }
  double kv = 0;
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < i; j++) {
      kv = m_kernel.eval(i, j, insts.instance(i));
      m_L[i][j] = kv;
    }
    kv = m_kernel.eval(i, i, insts.instance(i));
    m_L[i][i] = kv + m_delta * m_delta;
  }

  // Save memory (can't use Kernel.clean() because of polynominal kernel with exponent 1)
  if (m_kernel instanceof CachedKernel) {
    m_kernel = Kernel.makeCopy(m_kernel);
    ((CachedKernel) m_kernel).setCacheSize(-1);
    m_kernel.buildKernel(insts);
  }

  // Calculate inverse matrix exploiting symmetry of covariance matrix
  // NB this replaces the kernel matrix with (the negative of) its inverse and does
  // not require any extra memory for a solution matrix.
  // The update order below is significant — do not reorder.
  double[] tmprow = new double[n];
  double tmp2 = 0, tmp = 0;
  for (int i = 0; i < n; i++) {
    tmp = -m_L[i][i];
    m_L[i][i] = 1.0 / tmp;
    for (int j = 0; j < n; j++) {
      if (j != i) {
        if (j < i) {
          tmprow[j] = m_L[i][j];
          m_L[i][j] /= tmp;
          tmp2 = m_L[i][j];
          m_L[j][j] += tmp2 * tmp2 * tmp;
        } else if (j > i) {
          // symmetric entry lives in the lower triangle as m_L[j][i]
          tmprow[j] = m_L[j][i];
          m_L[j][i] /= tmp;
          tmp2 = m_L[j][i];
          m_L[j][j] += tmp2 * tmp2 * tmp;
        }
      }
    }
    for (int j = 0; j < n; j++) {
      if (j != i) {
        if (i < j) {
          for (int k = 0; k < i; k++) {
            m_L[j][k] += tmprow[j] * m_L[i][k];
          }
        } else {
          for (int k = 0; k < j; k++) {
            m_L[j][k] += tmprow[j] * m_L[i][k];
          }
        }
        for (int k = i + 1; k < j; k++) {
          m_L[j][k] += tmprow[j] * m_L[k][i];
        }
      }
    }
  }

  // Target vector centered on the mean.
  m_t = new Matrix(insts.numInstances(), 1);
  double[] tt = new double[n];
  for (int i = 0; i < n; i++) {
    tt[i] = insts.instance(i).classValue() - m_avg_target;
  }

  // calculate m_t = tt . m_L   (signs flipped because m_L holds the
  // NEGATIVE inverse — see note above)
  for (int i = 0; i < n; i++) {
    double s = 0;
    for (int k = 0; k < i; k++) {
      s -= m_L[i][k] * tt[k];
    }
    for (int k = i; k < n; k++) {
      s -= m_L[k][i] * tt[k];
    }
    m_t.set(i, 0, s);
  }
} // buildClassifier

/**
 * Classifies a given instance: mean prediction k' * m_t + avg target,
 * mapped back through the inverse of the class scaling (m_Alin/m_Blin).
 *
 * @param inst the instance to be classified
 * @return the classification
 * @throws Exception if instance could not be classified successfully
 */
public double classifyInstance(Instance inst) throws Exception {

  // Filter instance
  inst = filterInstance(inst);

  // Build K vector of kernel values between inst and each training instance
  Matrix k = new Matrix(m_NumTrain, 1);
  for (int i = 0; i < m_NumTrain; i++) {
    k.set(i, 0, m_kernel.eval(-1, i, inst));
  }

  double result = k.transpose().times(m_t).get(0, 0) + m_avg_target;
  // undo the linear class transformation applied by the filter
  result = (result - m_Blin) / m_Alin;

  return result;
}

/**
 * Filters an instance through the same chain (missing-value replacement,
 * nominal-to-binary, scaling) that was applied to the training data.
 */
protected Instance filterInstance(Instance inst) throws Exception {

  if (!m_checksTurnedOff) {
    m_Missing.input(inst);
    m_Missing.batchFinished();
    inst = m_Missing.output();
  }

  if (m_NominalToBinary != null) {
    m_NominalToBinary.input(inst);
    m_NominalToBinary.batchFinished();
    inst = m_NominalToBinary.output();
  }

  if (m_Filter != null) {
    m_Filter.input(inst);
    m_Filter.batchFinished();
    inst = m_Filter.output();
  }
  return inst;
}

/**
 * Computes standard deviation for given instance, without
 * transforming target back into original space:
 * sigma = sqrt(kappa - k' * C^-1 * k), floored at m_delta when the
 * quadratic form exceeds kappa (numerical safety).
 */
protected double computeStdDev(Instance inst, Matrix k) throws Exception {

  double kappa = m_kernel.eval(-1, -1, inst) + m_delta * m_delta;

  double s = 0;
  int n = m_L.length;
  for (int i = 0; i < n; i++) {
    double t = 0;
    for (int j = 0; j < n; j++) {
      // m_L is lower-triangular storage of the (negative) inverse;
      // the minus sign here restores the positive quadratic form
      t -= k.get(j, 0) * (i > j ? m_L[i][j] : m_L[j][i]);
    }
    s += t * k.get(i, 0);
  }

  double sigma = m_delta;
  if (kappa > s) {
    sigma = Math.sqrt(kappa - s);
  }

  return sigma;
}

/**
 * Computes a prediction interval for the given instance and confidence
 * level.
 *
 * @param inst the instance to make the prediction for
 * @param confidenceLevel the percentage of cases the interval should cover
 * @return a 1*2 array that contains the boundaries of the interval
 * @throws Exception if interval could not be estimated successfully
 */
public double[][] predictIntervals(Instance inst, double confidenceLevel) throws Exception {

  inst = filterInstance(inst);

  // Build K vector (and Kappa)
  Matrix k = new Matrix(m_NumTrain, 1);
  for (int i = 0; i < m_NumTrain; i++) {
    k.set(i, 0, m_kernel.eval(-1, i, inst));
  }

  double estimate = k.transpose().times(m_t).get(0, 0) + m_avg_target;
  double sigma = computeStdDev(inst, k);

  // two-sided interval: convert coverage to the upper-tail quantile
  confidenceLevel = 1.0 - ((1.0 - confidenceLevel) / 2.0);
  double z = Statistics.normalInverse(confidenceLevel);

  double[][] interval = new double[1][2];
  interval[0][0] = estimate - z * sigma;
  interval[0][1] = estimate + z * sigma;

  // map both bounds back to the original target space
  interval[0][0] = (interval[0][0] - m_Blin) / m_Alin;
  interval[0][1] = (interval[0][1] - m_Blin) / m_Alin;

  return interval;
}

/**
 * Gives standard deviation of the prediction at the given instance,
 * in the ORIGINAL target space (divided by m_Alin).
 *
 * @param inst the instance to get the standard deviation for
 * @return the standard deviation
 * @throws Exception if computation fails
 */
public double getStandardDeviation(Instance inst) throws Exception {

  inst = filterInstance(inst);

  // Build K vector (and Kappa)
  Matrix k = new Matrix(m_NumTrain, 1);
  for (int i = 0; i < m_NumTrain; i++) {
    k.set(i, 0, m_kernel.eval(-1, i, inst));
  }

  return computeStdDev(inst, k) / m_Alin;
}

/**
 * Returns natural logarithm of density estimate for given value based on
 * given instance: a Gaussian density centered on the GP mean prediction,
 * evaluated in the transformed target space with the Jacobian term
 * log(m_Alin) added to account for the change of variables.
 *
 * @param inst the instance to make the prediction for.
 * @param value the value to make the prediction for.
 * @return the natural logarithm of the density estimate
 * @exception Exception if the density cannot be computed
 */
public double logDensity(Instance inst, double value) throws Exception {

  inst = filterInstance(inst);

  // Build K vector (and Kappa)
  Matrix k = new Matrix(m_NumTrain, 1);
  for (int i = 0; i < m_NumTrain; i++) {
    k.set(i, 0, m_kernel.eval(-1, i, inst));
  }

  double estimate = k.transpose().times(m_t).get(0, 0) + m_avg_target;
  double sigma = computeStdDev(inst, k);

  // transform to GP space
  value = value * m_Alin + m_Blin;
  // center around estimate
  value = value - estimate;
  double z = -Math.log(sigma * Math.sqrt(2 * Math.PI)) - value * value / (2.0 * sigma * sigma);

  return z + Math.log(m_Alin);
}

/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
public Enumeration listOptions() {
  Vector<Option> result = new Vector<Option>();

  Enumeration enm = super.listOptions();
  while (enm.hasMoreElements())
    result.addElement((Option) enm.nextElement());

  result.addElement(new Option("\tLevel of Gaussian Noise wrt transformed target." + " (default 1)", "L", 1, "-L <double>"));

  result.addElement(new Option("\tWhether to 0=normalize/1=standardize/2=neither. " + "(default 0=normalize)", "N", 1, "-N"));

  result.addElement(new Option("\tThe Kernel to use.\n" + "\t(default: weka.classifiers.functions.supportVector.PolyKernel)", "K", 1, "-K <classname and parameters>"));

  result.addElement(new Option("", "", 0, "\nOptions specific to kernel " + getKernel().getClass().getName() + ":"));

  enm = ((OptionHandler) getKernel()).listOptions();
  while (enm.hasMoreElements())
    result.addElement((Option) enm.nextElement());

  return result.elements();
}

/**
 * Parses a given list of options. Valid options are:
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 * <pre> -L &lt;double&gt;
 *  Level of Gaussian Noise wrt transformed target. (default 1)</pre>
 *
 * <pre> -N
 *  Whether to 0=normalize/1=standardize/2=neither. (default 0=normalize)</pre>
 *
 * <pre> -K &lt;classname and parameters&gt;
 *  The Kernel to use.
 *  (default: weka.classifiers.functions.supportVector.PolyKernel)</pre>
 *
 * Remaining options are passed on to the selected kernel.
 *
 * NOTE(review): the previous javadoc claimed "(default 0.1)" for -L and
 * documented a -M option that is never parsed; both have been corrected
 * to match the code below.
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
public void setOptions(String[] options) throws Exception {
  String tmpStr;
  String[] tmpOptions;

  tmpStr = Utils.getOption('L', options);
  if (tmpStr.length() != 0)
    setNoise(Double.parseDouble(tmpStr));
  else
    setNoise(1);

  tmpStr = Utils.getOption('N', options);
  if (tmpStr.length() != 0)
    setFilterType(new SelectedTag(Integer.parseInt(tmpStr), TAGS_FILTER));
  else
    setFilterType(new SelectedTag(FILTER_NORMALIZE, TAGS_FILTER));

  tmpStr = Utils.getOption('K', options);
  tmpOptions = Utils.splitOptions(tmpStr);
  if (tmpOptions.length != 0) {
    // first token is the kernel class name, the rest are its options
    tmpStr = tmpOptions[0];
    tmpOptions[0] = "";
    setKernel(Kernel.forName(tmpStr, tmpOptions));
  }

  super.setOptions(options);
}

/**
 * Gets the current settings of the classifier.
 *
 * @return an array of strings suitable for passing to setOptions
 */
public String[] getOptions() {
  int i;
  Vector<String> result;
  String[] options;

  result = new Vector<String>();

  options = super.getOptions();
  for (i = 0; i < options.length; i++)
    result.addElement(options[i]);

  result.addElement("-L");
  result.addElement("" + getNoise());

  result.addElement("-N");
  result.addElement("" + m_filterType);

  result.addElement("-K");
  result.addElement("" + m_kernel.getClass().getName() + " " + Utils.joinOptions(m_kernel.getOptions()));

  return (String[]) result.toArray(new String[result.size()]);
}

/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String kernelTipText() {
  return "The kernel to use.";
}

/**
 * Gets the kernel to use.
 *
 * @return the kernel
 */
public Kernel getKernel() {
  return m_kernel;
}

/**
 * Sets the kernel to use.
 *
 * @param value the new kernel
 */
public void setKernel(Kernel value) {
  m_kernel = value;
}

/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String filterTypeTipText() {
  return "Determines how/if the data will be transformed.";
}

/**
 * Gets how the training data will be transformed. Will be one of
 * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE.
 *
 * @return the filtering mode
 */
public SelectedTag getFilterType() {
  return new SelectedTag(m_filterType, TAGS_FILTER);
}

/**
 * Sets how the training data will be transformed. Should be one of
 * FILTER_NORMALIZE, FILTER_STANDARDIZE, FILTER_NONE.
 *
 * @param newType the new filtering mode
 */
public void setFilterType(SelectedTag newType) {
  // silently ignores tags from a different tag set
  if (newType.getTags() == TAGS_FILTER) {
    m_filterType = newType.getSelectedTag().getID();
  }
}

/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for displaying in the
 *         explorer/experimenter gui
 */
public String noiseTipText() {
  return "The level of Gaussian Noise (added to the diagonal of the Covariance Matrix), after the " + "target has been normalized/standardized/left unchanged).";
}

/**
 * Get the value of noise.
 *
 * @return Value of noise.
 */
public double getNoise() {
  return m_delta;
}

/**
 * Set the level of Gaussian Noise.
 *
 * @param v Value to assign to noise.
 */
public void setNoise(double v) {
  m_delta = v;
}

/**
 * Prints out the classifier: kernel, filter mode, average target, and the
 * value ranges of the inverted covariance matrix and the weight vector.
 *
 * @return a description of the classifier as a string
 */
public String toString() {
  StringBuffer text = new StringBuffer();

  if (m_t == null)
    return "Gaussian Processes: No model built yet.";

  try {
    text.append("Gaussian Processes\n\n");
    text.append("Kernel used:\n " + m_kernel.toString() + "\n\n");

    text.append("All values shown based on: " + TAGS_FILTER[m_filterType].getReadable() + "\n\n");

    text.append("Average Target Value : " + m_avg_target + "\n");

    text.append("Inverted Covariance Matrix:\n");
    // scan the lower triangle only — that is all that is stored
    double min = -m_L[0][0];
    double max = -m_L[0][0];
    for (int i = 0; i < m_NumTrain; i++)
      for (int j = 0; j <= i; j++) {
        if (-m_L[i][j] < min)
          min = -m_L[i][j];
        else if (-m_L[i][j] > max)
          max = -m_L[i][j];
      }
    text.append(" Lowest Value = " + min + "\n");
    text.append(" Highest Value = " + max + "\n");

    text.append("Inverted Covariance Matrix * Target-value Vector:\n");
    min = m_t.get(0, 0);
    max = m_t.get(0, 0);
    for (int i = 0; i < m_NumTrain; i++) {
      if (m_t.get(i, 0) < min)
        min = m_t.get(i, 0);
      else if (m_t.get(i, 0) > max)
        max = m_t.get(i, 0);
    }
    text.append(" Lowest Value = " + min + "\n");
    text.append(" Highest Value = " + max + "\n \n");

  } catch (Exception e) {
    return "Can't print the classifier.";
  }

  return text.toString();
}

/**
 * Main method for testing this class.
 *
 * @param argv the commandline parameters
 */
public static void main(String[] argv) {
  runClassifier(new GaussianProcesses(), argv);
}
}
27,290
27.848837
127
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/IsotonicRegression.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * IsotonicRegression.java
 * Copyright (C) 2006 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.functions;

import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;
import weka.core.Capabilities.Capability;

import java.util.Arrays;
import weka.classifiers.AbstractClassifier;

/**
 <!-- globalinfo-start -->
 * Learns an isotonic regression model. Picks the attribute that results
 * in the lowest squared error. Missing values are not allowed. Can only
 * deal with numeric attributes. Considers the monotonically increasing
 * case as well as the monotonically decreasing case.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 <!-- options-end -->
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 5523 $
 */
public class IsotonicRegression extends AbstractClassifier
  implements WeightedInstancesHandler {

  /** for serialization */
  static final long serialVersionUID = 1679336022835454137L;

  /** The chosen attribute */
  private Attribute m_attribute;

  /** The array of cut points */
  private double[] m_cuts;

  /** The predicted value in each interval. */
  private double[] m_values;

  /** The minimum mean squared error that has been achieved. */
  private double m_minMsq;

  /** a ZeroR model in case no model can be built from the data */
  private Classifier m_ZeroR;

  /**
   * Returns a string describing this classifier
   *
   * BUG FIX: the original string concatenation produced
   * "attributes.Considers" and "monotonicallydecreasing" (missing spaces)
   * in the user-facing GUI description; the spacing is corrected here.
   *
   * @return a description of the classifier suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "Learns an isotonic regression model. "
      + "Picks the attribute that results in the lowest squared error. "
      + "Missing values are not allowed. Can only deal with numeric attributes. "
      + "Considers the monotonically increasing case as well as the monotonically "
      + "decreasing case";
  }

  /**
   * Generate a prediction for the supplied instance.
   *
   * Uses Arrays.binarySearch on the sorted cut points: a negative return
   * value encodes the insertion point as -(index) - 1, which is exactly
   * the interval the value falls into; an exact hit on cut point i is
   * assigned to the interval above it (m_values[i + 1]).
   *
   * @param inst the instance to predict.
   * @return the prediction
   * @throws Exception if an error occurs
   */
  public double classifyInstance(Instance inst) throws Exception {

    // default model?
    if (m_ZeroR != null) {
      return m_ZeroR.classifyInstance(inst);
    }

    if (inst.isMissing(m_attribute.index())) {
      throw new Exception("IsotonicRegression: No missing values!");
    }
    int index = Arrays.binarySearch(m_cuts, inst.value(m_attribute));
    if (index < 0) {
      return m_values[-index - 1];
    } else {
      return m_values[index + 1];
    }
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);

    // class
    result.enable(Capability.NUMERIC_CLASS);
    result.enable(Capability.DATE_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * Does the actual regression for one candidate attribute and direction,
   * using pool-adjacent-violators: adjacent intervals whose means violate
   * the requested monotonicity are merged until none remain. Keeps the
   * result in m_attribute/m_cuts/m_values only if it beats the best error
   * seen so far (m_minMsq); otherwise the previous best is restored.
   *
   * @param attribute the attribute to regress on
   * @param insts the training data (will be sorted by the attribute)
   * @param ascending true for the monotonically increasing fit
   * @throws Exception if evaluation fails
   */
  protected void regress(Attribute attribute, Instances insts, boolean ascending)
    throws Exception {

    // Sort values according to current attribute
    insts.sort(attribute);

    // Initialize arrays: one (weighted-sum, weight) pair per distinct
    // attribute value; cut points sit halfway between distinct values.
    double[] values = new double[insts.numInstances()];
    double[] weights = new double[insts.numInstances()];
    double[] cuts = new double[insts.numInstances() - 1];
    int size = 0;
    values[0] = insts.instance(0).classValue();
    weights[0] = insts.instance(0).weight();
    for (int i = 1; i < insts.numInstances(); i++) {
      if (insts.instance(i).value(attribute) >
          insts.instance(i - 1).value(attribute)) {
        cuts[size] = (insts.instance(i).value(attribute) +
                      insts.instance(i - 1).value(attribute)) / 2;
        size++;
      }
      values[size] += insts.instance(i).classValue();
      weights[size] += insts.instance(i).weight();
    }
    size++;

    // While there is a pair of adjacent violators, merge them.
    boolean violators;
    do {
      violators = false;

      // Initialize arrays
      double[] tempValues = new double[size];
      double[] tempWeights = new double[size];
      double[] tempCuts = new double[size - 1];

      // Merge adjacent violators: an interval whose (weighted) mean keeps
      // the required order starts a new pool; otherwise it is folded into
      // the current pool and another pass is triggered.
      int newSize = 0;
      tempValues[0] = values[0];
      tempWeights[0] = weights[0];
      for (int j = 1; j < size; j++) {
        if ((ascending && (values[j] / weights[j] >
                           tempValues[newSize] / tempWeights[newSize])) ||
            (!ascending && (values[j] / weights[j] <
                            tempValues[newSize] / tempWeights[newSize]))) {
          tempCuts[newSize] = cuts[j - 1];
          newSize++;
          tempValues[newSize] = values[j];
          tempWeights[newSize] = weights[j];
        } else {
          tempWeights[newSize] += weights[j];
          tempValues[newSize] += values[j];
          violators = true;
        }
      }
      newSize++;

      // Copy references
      values = tempValues;
      weights = tempWeights;
      cuts = tempCuts;
      size = newSize;
    } while (violators);

    // Compute actual predictions (weighted means of each pool)
    for (int i = 0; i < size; i++) {
      values[i] /= weights[i];
    }

    // Backup best instance variables
    Attribute attributeBackedup = m_attribute;
    double[] cutsBackedup = m_cuts;
    double[] valuesBackedup = m_values;

    // Set instance variables to values computed for this attribute
    m_attribute = attribute;
    m_cuts = cuts;
    m_values = values;

    // Compute error on the training data. NOTE(review): despite the
    // m_minMsq name this is the ROOT mean squared error; RMSE is monotone
    // in MSE, so attribute selection is unaffected.
    Evaluation eval = new Evaluation(insts);
    eval.evaluateModel(this, insts);
    double msq = eval.rootMeanSquaredError();

    // Check whether this is the best attribute
    if (msq < m_minMsq) {
      m_minMsq = msq;
    } else {
      m_attribute = attributeBackedup;
      m_cuts = cutsBackedup;
      m_values = valuesBackedup;
    }
  }

  /**
   * Builds an isotonic regression model given the supplied training data.
   * Tries every non-class attribute in both the increasing and decreasing
   * direction and keeps whichever single-attribute fit has the lowest
   * (root mean squared) error.
   *
   * @param insts the training data.
   * @throws Exception if an error occurs
   */
  public void buildClassifier(Instances insts) throws Exception {

    // can classifier handle the data?
    getCapabilities().testWithFail(insts);

    // remove instances with missing class
    insts = new Instances(insts);
    insts.deleteWithMissingClass();

    // only class? -> build ZeroR model
    if (insts.numAttributes() == 1) {
      System.err.println(
        "Cannot build model (only class attribute present in data!), "
        + "using ZeroR model instead!");
      m_ZeroR = new weka.classifiers.rules.ZeroR();
      m_ZeroR.buildClassifier(insts);
      return;
    } else {
      m_ZeroR = null;
    }

    // Choose best attribute and mode
    m_minMsq = Double.MAX_VALUE;
    m_attribute = null;
    for (int a = 0; a < insts.numAttributes(); a++) {
      if (a != insts.classIndex()) {
        regress(insts.attribute(a), insts, true);
        regress(insts.attribute(a), insts, false);
      }
    }
  }

  /**
   * Returns a description of this classifier as a string
   *
   * @return a description of the classifier.
   */
  public String toString() {

    // only ZeroR model?
    if (m_ZeroR != null) {
      StringBuffer buf = new StringBuffer();
      buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n");
      buf.append(this.getClass().getName().replaceAll(".*\\.", "")
                 .replaceAll(".", "=") + "\n\n");
      buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n");
      buf.append(m_ZeroR.toString());
      return buf.toString();
    }

    StringBuffer text = new StringBuffer();
    text.append("Isotonic regression\n\n");
    if (m_attribute == null) {
      text.append("No model built yet!");
    } else {
      text.append("Based on attribute: " + m_attribute.name() + "\n\n");
      for (int i = 0; i < m_values.length; i++) {
        text.append("prediction: " + Utils.doubleToString(m_values[i], 10, 2));
        if (i < m_cuts.length) {
          text.append("\t\tcut point: " + Utils.doubleToString(m_cuts[i], 10, 2) + "\n");
        }
      }
    }
    return text.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 5523 $");
  }

  /**
   * Main method for testing this class
   *
   * @param argv options
   */
  public static void main(String [] argv) {
    runClassifier(new IsotonicRegression(), argv);
  }
}
9,749
29.185759
259
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/LeastMedSq.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * LeastMedSq.java
 *
 * Copyright (C) 2001 University of Waikato
 */

package weka.classifiers.functions;

import weka.classifiers.Classifier;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.filters.Filter;
import weka.filters.supervised.attribute.NominalToBinary;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;
import weka.filters.unsupervised.instance.RemoveRange;

import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.classifiers.AbstractClassifier;

/**
 <!-- globalinfo-start -->
 * Implements a least median squared linear regression utilising the
 * existing weka LinearRegression class to form predictions. <br/>
 * Least squared regression functions are generated from random subsamples
 * of the data. The least squared regression with the lowest median squared
 * error is chosen as the final model.<br/>
 * <br/>
 * The basis of the algorithm is <br/>
 * <br/>
 * Peter J. Rousseeuw, Annick M. Leroy (1987). Robust regression and
 * outlier detection.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;book{Rousseeuw1987,
 *    author = {Peter J. Rousseeuw and Annick M. Leroy},
 *    title = {Robust regression and outlier detection},
 *    year = {1987}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -S &lt;sample size&gt;
 *  Set sample size
 *  (default: 4)
 * </pre>
 *
 * <pre> -G &lt;seed&gt;
 *  Set the seed used to generate samples
 *  (default: 0)
 * </pre>
 *
 * <pre> -D
 *  Produce debugging output
 *  (default no debugging output)
 * </pre>
 *
 <!-- options-end -->
 *
 * @author Tony Voyle (tv6@waikato.ac.nz)
 * @version $Revision: 5523 $
 */
public class LeastMedSq extends AbstractClassifier
  implements OptionHandler, TechnicalInformationHandler {

  /** for serialization */
  static final long serialVersionUID = 4288954049987652970L;

  /** squared residuals of m_currentRegression on m_Data */
  private double[] m_Residuals;

  /** 0/1 inlier mask computed by buildWeight() */
  private double[] m_weight;

  /** sum of squared residuals (accumulated by findResiduals) */
  private double m_SSR;

  /** robust scale estimate used to flag outliers */
  private double m_scalefactor;

  /** lowest median squared residual found so far */
  private double m_bestMedian = Double.POSITIVE_INFINITY;

  private LinearRegression m_currentRegression;

  private LinearRegression m_bestRegression;

  /** the final (reweighted) regression used for prediction */
  private LinearRegression m_ls;

  private Instances m_Data;

  /** data restricted to inliers for the final regression */
  private Instances m_RLSData;

  private Instances m_SubSample;

  private ReplaceMissingValues m_MissingFilter;

  private NominalToBinary m_TransformFilter;

  private RemoveRange m_SplitFilter;

  /** size of each random subsample */
  private int m_samplesize = 4;

  /** number of subsamples to draw */
  private int m_samples;

  private boolean m_israndom = false;

  private boolean m_debug = false;

  private Random m_random;

  private long m_randomseed = 0;

  /**
   * Returns a string describing this classifier.
   *
   * NOTE(review): the returned text contains the typos "sqaured" and
   * "meadian"; they are user-visible runtime strings and are left intact
   * here — fix separately if desired.
   *
   * @return a description of the classifier suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "Implements a least median sqaured linear regression utilising the "
      + "existing weka LinearRegression class to form predictions. \n"
      + "Least squared regression functions are generated from random subsamples of "
      + "the data. The least squared regression with the lowest meadian squared error "
      + "is chosen as the final model.\n\n"
      + "The basis of the algorithm is \n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.BOOK);
    result.setValue(Field.AUTHOR, "Peter J. Rousseeuw and Annick M. Leroy");
    result.setValue(Field.YEAR, "1987");
    result.setValue(Field.TITLE, "Robust regression and outlier detection");

    return result;
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class
    result.enable(Capability.NUMERIC_CLASS);
    result.enable(Capability.DATE_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * Build lms regression: clean the data, decide how many subsamples to
   * draw, pick the subsample regression with the lowest median squared
   * residual, then refit on the inliers only.
   *
   * @param data training data
   * @throws Exception if an error occurs
   */
  public void buildClassifier(Instances data) throws Exception {

    // can classifier handle the data?
    getCapabilities().testWithFail(data);

    // remove instances with missing class
    data = new Instances(data);
    data.deleteWithMissingClass();

    cleanUpData(data);

    getSamples();

    findBestRegression();

    buildRLSRegression();
  } // buildClassifier

  /**
   * Classify a given instance using the best generated
   * LinearRegression Classifier. The instance is pushed through the same
   * nominal-to-binary and missing-value filters used at training time.
   *
   * @param instance instance to be classified
   * @return class value
   * @throws Exception if an error occurs
   */
  public double classifyInstance(Instance instance) throws Exception {

    Instance transformedInstance = instance;
    m_TransformFilter.input(transformedInstance);
    transformedInstance = m_TransformFilter.output();
    m_MissingFilter.input(transformedInstance);
    transformedInstance = m_MissingFilter.output();

    return m_ls.classifyInstance(transformedInstance);
  } // classifyInstance

  /**
   * Cleans up data: binarizes nominal attributes, replaces missing values,
   * and drops instances without a class value. Stores the result in m_Data.
   *
   * @param data data to be cleaned up
   * @throws Exception if an error occurs
   */
  private void cleanUpData(Instances data) throws Exception {

    m_Data = data;
    m_TransformFilter = new NominalToBinary();
    m_TransformFilter.setInputFormat(m_Data);
    m_Data = Filter.useFilter(m_Data, m_TransformFilter);
    m_MissingFilter = new ReplaceMissingValues();
    m_MissingFilter.setInputFormat(m_Data);
    m_Data = Filter.useFilter(m_Data, m_MissingFilter);
    m_Data.deleteWithMissingClass();
  }

  /**
   * Gets the number of samples to use.
* * @throws Exception if an error occurs */ private void getSamples()throws Exception{ int stuf[] = new int[] {500,50,22,17,15,14}; if ( m_samplesize < 7){ if ( m_Data.numInstances() < stuf[m_samplesize - 1]) m_samples = combinations(m_Data.numInstances(), m_samplesize); else m_samples = m_samplesize * 500; } else m_samples = 3000; if (m_debug){ System.out.println("m_samplesize: " + m_samplesize); System.out.println("m_samples: " + m_samples); System.out.println("m_randomseed: " + m_randomseed); } } /** * Set up the random number generator * */ private void setRandom(){ m_random = new Random(getRandomSeed()); } /** * Finds the best regression generated from m_samples * random samples from the training data * * @throws Exception if an error occurs */ private void findBestRegression()throws Exception{ setRandom(); m_bestMedian = Double.POSITIVE_INFINITY; if (m_debug) { System.out.println("Starting:"); } for(int s = 0, r = 0; s < m_samples; s++, r++){ if (m_debug) { if(s%(m_samples/100)==0) System.out.print("*"); } genRegression(); getMedian(); } if (m_debug) { System.out.println(""); } m_currentRegression = m_bestRegression; } /** * Generates a LinearRegression classifier from * the current m_SubSample * * @throws Exception if an error occurs */ private void genRegression()throws Exception{ m_currentRegression = new LinearRegression(); m_currentRegression.setOptions(new String[]{"-S", "1"}); selectSubSample(m_Data); m_currentRegression.buildClassifier(m_SubSample); } /** * Finds residuals (squared) for the current * regression. 
   *
   * @throws Exception if an error occurs
   */
  private void findResiduals()throws Exception{

    m_SSR = 0;
    m_Residuals = new double [m_Data.numInstances()];
    for(int i = 0; i < m_Data.numInstances(); i++){
      // Squared difference between the model's prediction and the actual
      // class value; m_SSR accumulates the sum of squared residuals.
      m_Residuals[i] = m_currentRegression.classifyInstance(m_Data.instance(i));
      m_Residuals[i] -= m_Data.instance(i).value(m_Data.classAttribute());
      m_Residuals[i] *= m_Residuals[i];
      m_SSR += m_Residuals[i];
    }
  }

  /**
   * finds the median residual squared for the
   * current regression; keeps the regression if its median is the
   * smallest seen so far.
   *
   * @throws Exception if an error occurs
   */
  private void getMedian()throws Exception{

    findResiduals();
    int p = m_Residuals.length;
    // Quickselect places the element of rank p/2 at index p/2
    // (a full sort is not needed).
    select(m_Residuals, 0, p - 1, p / 2);
    if(m_Residuals[p / 2] < m_bestMedian){
      m_bestMedian = m_Residuals[p / 2];
      m_bestRegression = m_currentRegression;
    }
  }

  /**
   * Returns a string representing the best
   * LinearRegression classifier found.
   *
   * @return String representing the regression
   */
  public String toString(){

    if( m_ls == null){
      return "model has not been built";
    }
    return m_ls.toString();
  }

  /**
   * Builds a weight function removing instances with an
   * abnormally high scaled residual
   *
   * @throws Exception if weight building fails
   */
  private void buildWeight()throws Exception{

    findResiduals();
    // Robust scale estimate derived from the best median squared residual
    // (1.4826 is the usual consistency factor for Gaussian noise).
    // NOTE(review): 5 / (numInstances - numAttributes) is INTEGER division,
    // so the correction term is 0 whenever the denominator exceeds 5 —
    // presumably 5.0 was intended; confirm against the LMS literature.
    m_scalefactor = 1.4826 * ( 1 + 5 / (m_Data.numInstances() - m_Data.numAttributes())) * Math.sqrt(m_bestMedian);
    m_weight = new double[m_Residuals.length];
    for (int i = 0; i < m_Residuals.length; i++)
      // Weight 1 for inliers (scaled residual below 2.5), 0 for outliers.
      m_weight[i] = ((Math.sqrt(m_Residuals[i])/m_scalefactor < 2.5)?1.0:0.0);
  }

  /**
   * Builds a new LinearRegression without the 'bad' data
   * found by buildWeight
   *
   * @throws Exception if building fails
   */
  private void buildRLSRegression()throws Exception{

    buildWeight();
    m_RLSData = new Instances(m_Data);
    int x = 0;
    int y = 0;
    int n = m_RLSData.numInstances();
    // Remove every instance whose weight is 0: x walks the original data,
    // y the shrinking copy (y is stepped back after each deletion).
    while(y < n){
      if (m_weight[x] == 0){
        m_RLSData.delete(y);
        n = m_RLSData.numInstances();
        y--;
      }
      x++;
      y++;
    }
    if ( m_RLSData.numInstances() == 0){
      // Every instance was flagged as an outlier: fall back to the
      // best sampled regression instead of an RLS refit.
      System.err.println("rls regression unbuilt");
      m_ls = m_currentRegression;
    }else{
      m_ls = new LinearRegression();
      m_ls.setOptions(new String[]{"-S", "1"});
      m_ls.buildClassifier(m_RLSData);
      m_currentRegression = m_ls;
    }
  }

  /**
   * Finds the kth number in an array
   * (Hoare quickselect; partially reorders a in place).
   *
   * @param a an array of numbers
   * @param l left pointer
   * @param r right pointer
   * @param k position of number to be found
   */
  private static void select( double [] a, int l, int r, int k){

    if (r <=l)
      return;
    int i = partition( a, l, r);
    if (i > k) select(a, l, i-1, k);
    if (i < k) select(a, i+1, r, k);
  }

  /**
   * Partitions an array of numbers such that all numbers
   * less than that at index r, between indexes l and r
   * will have a smaller index and all numbers greater than
   * will have a larger index
   *
   * @param a an array of numbers
   * @param l left pointer
   * @param r right pointer
   * @return final index of number originally at r
   */
  private static int partition( double [] a, int l, int r ){

    int i = l-1, j = r;
    double v = a[r], temp;  // v is the pivot (last element)
    while(true){
      while(a[++i] < v);
      while(v < a[--j])
        if(j == l)
          break;
      if(i >= j)
        break;
      temp = a[i];
      a[i] = a[j];
      a[j] = temp;
    }
    // Swap the pivot into its final position i.
    temp = a[i];
    a[i] = a[r];
    a[r] = temp;
    return i;
  }

  /**
   * Produces a random sample from m_Data
   * in m_SubSample
   *
   * @param data data from which to take sample
   * @throws Exception if an error occurs
   */
  private void selectSubSample(Instances data)throws Exception{

    m_SplitFilter = new RemoveRange();
    // Invert the selection so only the chosen indices are KEPT.
    m_SplitFilter.setInvertSelection(true);
    m_SubSample = data;
    m_SplitFilter.setInputFormat(m_SubSample);
    m_SplitFilter.setInstancesIndices(selectIndices(m_SubSample));
    m_SubSample = Filter.useFilter(m_SubSample, m_SplitFilter);
  }

  /**
   * Returns a string suitable for passing to RemoveRange consisting
   * of m_samplesize indices.
   *
   * @param data dataset from which to take indices
   * @return string of indices suitable for passing to RemoveRange
   */
  private String selectIndices(Instances data){

    StringBuffer text = new StringBuffer();
    // Draw m_samplesize random 1-based indices (duplicates are possible).
    // NOTE(review): x is drawn from 0..numInstances()-1 and 0 is re-drawn,
    // so the 1-based index numInstances() is never selected — the last
    // instance can never appear in a subsample; confirm this is intended.
    for(int i = 0, x = 0; i < m_samplesize; i++){
      do{x = (int) (m_random.nextDouble() * data.numInstances());}
      while(x==0);
      text.append(Integer.toString(x));
      if(i < m_samplesize - 1)
        text.append(",");
      else
        text.append("\n");
    }
    return text.toString();
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String sampleSizeTipText() {
    // NOTE(review): "sqaured" is a typo in the user-visible tip text
    // (runtime string, deliberately left unchanged here).
    return "Set the size of the random samples used to generate the least sqaured "
      +"regression functions.";
  }

  /**
   * sets number of samples
   *
   * @param samplesize value
   */
  public void setSampleSize(int samplesize){
    m_samplesize = samplesize;
  }

  /**
   * gets number of samples
   *
   * @return value
   */
  public int getSampleSize(){
    return m_samplesize;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String randomSeedTipText() {
    return "Set the seed for selecting random subsamples of the training data.";
  }

  /**
   * Set the seed for the random number generator
   *
   * @param randomseed the seed
   */
  public void setRandomSeed(long randomseed){
    m_randomseed = randomseed;
  }

  /**
   * get the seed for the random number generator
   *
   * @return the seed value
   */
  public long getRandomSeed(){
    return m_randomseed;
  }

  /**
   * sets whether or not debugging output should be printed
   *
   * @param debug true if debugging output selected
   */
  public void setDebug(boolean debug){
    m_debug = debug;
  }

  /**
   * Returns whether or not debugging output should be printed
   *
   * @return true if debugging output selected
   */
  public boolean getDebug(){
    return m_debug;
  }

  /**
   * Returns an enumeration of all the available options.
   *
   * @return an enumeration of all available options.
   */
  public Enumeration listOptions(){

    // NOTE: the initial capacity of 1 is only a hint; Vector grows as
    // needed to hold all three options.
    Vector newVector = new Vector(1);
    newVector.addElement(new Option("\tSet sample size\n"
                                    + "\t(default: 4)\n",
                                    "S", 4, "-S <sample size>"));
    newVector.addElement(new Option("\tSet the seed used to generate samples\n"
                                    + "\t(default: 0)\n",
                                    "G", 0, "-G <seed>"));
    newVector.addElement(new Option("\tProduce debugging output\n"
                                    + "\t(default no debugging output)\n",
                                    "D", 0, "-D"));
    return newVector.elements();
  }

  /**
   * Sets the OptionHandler's options using the given list. All options
   * will be set (or reset) during this call (i.e. incremental setting
   * of options is not possible).
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -S &lt;sample size&gt;
   *  Set sample size
   *  (default: 4)
   * </pre>
   *
   * <pre> -G &lt;seed&gt;
   *  Set the seed used to generate samples
   *  (default: 0)
   * </pre>
   *
   * <pre> -D
   *  Produce debugging output
   *  (default no debugging output)
   * </pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {

    // -S: sample size, defaulting to 4 when absent.
    String curropt = Utils.getOption('S', options);
    if ( curropt.length() != 0){
      setSampleSize(Integer.parseInt(curropt));
    } else
      setSampleSize(4);

    // -G: random seed, defaulting to 0 when absent.
    curropt = Utils.getOption('G', options);
    if ( curropt.length() != 0){
      setRandomSeed(Long.parseLong(curropt));
    } else {
      setRandomSeed(0);
    }

    // -D: debug flag.
    setDebug(Utils.getFlag('D', options));
  }

  /**
   * Gets the current option settings for the OptionHandler.
* * @return the list of current option settings as an array of strings */ public String[] getOptions(){ String options[] = new String[9]; int current = 0; options[current++] = "-S"; options[current++] = "" + getSampleSize(); options[current++] = "-G"; options[current++] = "" + getRandomSeed(); if (getDebug()) { options[current++] = "-D"; } while (current < options.length) { options[current++] = ""; } return options; } /** * Produces the combination nCr * * @param n * @param r * @return the combination * @throws Exception if r is greater than n */ public static int combinations (int n, int r)throws Exception { int c = 1, denom = 1, num = 1, i,orig=r; if (r > n) throw new Exception("r must be less that or equal to n."); r = Math.min( r , n - r); for (i = 1 ; i <= r; i++){ num *= n-i+1; denom *= i; } c = num / denom; if(false) System.out.println( "n: "+n+" r: "+orig+" num: "+num+ " denom: "+denom+" c: "+c); return c; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5523 $"); } /** * generate a Linear regression predictor for testing * * @param argv options */ public static void main(String [] argv){ runClassifier(new LeastMedSq(), argv); } // main } // lmr
18,908
24.796726
189
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/LibLINEAR.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * LibLINEAR.java * Copyright (C) Benedikt Waldvogel */ package weka.classifiers.functions; import java.lang.reflect.Array; import java.lang.reflect.Constructor; import java.lang.reflect.Field; import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Enumeration; import java.util.List; import java.util.StringTokenizer; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WekaException; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.ReplaceMissingValues; /** <!-- globalinfo-start --> * A wrapper class for the liblinear tools (the liblinear classes, typically the jar file, need to be in the classpath to use this classifier).<br/> * Rong-En Fan, Kai-Wei Chang, Cho-Jui Hsieh, Xiang-Rui 
Wang, Chih-Jen Lin (2008). LIBLINEAR - A Library for Large Linear Classification. URL http://www.csie.ntu.edu.tw/~cjlin/liblinear/. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;misc{Fan2008, * author = {Rong-En Fan and Kai-Wei Chang and Cho-Jui Hsieh and Xiang-Rui Wang and Chih-Jen Lin}, * note = {The Weka classifier works with version 1.33 of LIBLINEAR}, * title = {LIBLINEAR - A Library for Large Linear Classification}, * year = {2008}, * URL = {http://www.csie.ntu.edu.tw/\~cjlin/liblinear/} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -S &lt;int&gt; * Set type of solver (default: 1) * 0 = L2-regularized logistic regression * 1 = L2-loss support vector machines (dual) * 2 = L2-loss support vector machines (primal) * 3 = L1-loss support vector machines (dual) * 4 = multi-class support vector machines by Crammer and Singer</pre> * * <pre> -C &lt;double&gt; * Set the cost parameter C * (default: 1)</pre> * * <pre> -Z * Turn on normalization of input data (default: off)</pre> * * <pre> -N * Turn on nominal to binary conversion.</pre> * * <pre> -M * Turn off missing value replacement. * WARNING: use only if your data has no missing values.</pre> * * <pre> -P * Use probability estimation (default: off) * currently for L2-regularized logistic regression only! 
 </pre>
 *
 * <pre> -E &lt;double&gt;
 *  Set tolerance of termination criterion (default: 0.01)</pre>
 *
 * <pre> -W &lt;double&gt;
 *  Set the parameters C of class i to weight[i]*C
 *  (default: 1)</pre>
 *
 * <pre> -B &lt;double&gt;
 *  Add Bias term with the given value if &gt;= 0; if &lt; 0, no bias term added (default: 1)</pre>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 <!-- options-end -->
 *
 * @author  Benedikt Waldvogel (mail at bwaldvogel.de)
 * @version $Revision: 5917 $
 */
public class LibLINEAR
  extends AbstractClassifier
  implements TechnicalInformationHandler {

  // All liblinear classes are accessed purely via reflection (see the
  // CLASS_* constants below), so this classifier compiles without the
  // liblinear jar; its presence is probed once in the static initializer.

  /** the svm classname */
  protected final static String CLASS_LINEAR = "liblinear.Linear";

  /** the svm_model classname */
  protected final static String CLASS_MODEL = "liblinear.Model";

  /** the svm_problem classname */
  protected final static String CLASS_PROBLEM = "liblinear.Problem";

  /** the svm_parameter classname */
  protected final static String CLASS_PARAMETER = "liblinear.Parameter";

  /** the solver type classname (enum) */
  protected final static String CLASS_SOLVERTYPE = "liblinear.SolverType";

  /** the svm_node classname */
  protected final static String CLASS_FEATURENODE = "liblinear.FeatureNode";

  /** serial UID */
  protected static final long serialVersionUID = 230504711;

  /** LibLINEAR Model (a liblinear.Model instance, handled via reflection) */
  protected Object m_Model;

  /**
   * Returns the learned liblinear model object.
   *
   * @return the model, or null if the classifier has not been built yet
   */
  public Object getModel() {
    return m_Model;
  }

  /** for normalizing the data */
  protected Filter m_Filter = null;

  /** normalize input data */
  protected boolean m_Normalize = false;

  /** SVM solver type L2-regularized logistic regression */
  public static final int SVMTYPE_L2_LR = 0;
  /** SVM solver type L2-loss support vector machines (dual) */
  public static final int SVMTYPE_L2LOSS_SVM_DUAL = 1;
  /** SVM solver type L2-loss support vector machines (primal) */
  public static final int SVMTYPE_L2LOSS_SVM = 2;
  /** SVM solver type L1-loss support vector machines (dual) */
  public static final int SVMTYPE_L1LOSS_SVM_DUAL = 3;
  /** SVM solver type multi-class support vector machines by Crammer and Singer */
  public static final int SVMTYPE_MCSVM_CS = 4;

  /** SVM solver types (IDs match the ordinals of liblinear.SolverType) */
  public static final Tag[] TAGS_SVMTYPE = {
    new Tag(SVMTYPE_L2_LR, "L2-regularized logistic regression"),
    new Tag(SVMTYPE_L2LOSS_SVM_DUAL, "L2-loss support vector machines (dual)"),
    new Tag(SVMTYPE_L2LOSS_SVM, "L2-loss support vector machines (primal)"),
    new Tag(SVMTYPE_L1LOSS_SVM_DUAL, "L1-loss support vector machines (dual)"),
    new Tag(SVMTYPE_MCSVM_CS, "multi-class support vector machines by Crammer and Singer")
  };

  /** the SVM solver type */
  protected int m_SVMType = SVMTYPE_L2LOSS_SVM_DUAL;

  /** stopping criteria */
  protected double m_eps = 0.01;

  /** cost Parameter C */
  protected double m_Cost = 1;

  /** bias term value */
  protected double m_Bias = 1;

  /** labels of the per-class weights */
  protected int[] m_WeightLabel = new int[0];

  /** per-class weight multipliers for C */
  protected double[] m_Weight = new double[0];

  /** whether to generate probability estimates instead of +1/-1 in case of
   * classification problems */
  protected boolean m_ProbabilityEstimates = false;

  /** The filter used to get rid of missing values. */
  protected ReplaceMissingValues m_ReplaceMissingValues;

  /** The filter used to make attributes numeric. */
  protected NominalToBinary m_NominalToBinary;

  /** If true, the nominal to binary filter is applied */
  private boolean m_nominalToBinary = false;

  /** If true, the replace missing values filter is not applied */
  private boolean m_noReplaceMissingValues;

  /** whether the liblinear classes are in the Classpath */
  protected static boolean m_Present = false;
  static {
    // One-time classpath probe for the liblinear jar.
    try {
      Class.forName(CLASS_LINEAR);
      m_Present = true;
    }
    catch (Exception e) {
      m_Present = false;
    }
  }

  /**
   * Returns a string describing classifier
   *
   * @return a description suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String globalInfo() {
    return "A wrapper class for the liblinear tools (the liblinear classes, typically "
      + "the jar file, need to be in the classpath to use this classifier).\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.MISC);
    result.setValue(TechnicalInformation.Field.AUTHOR, "Rong-En Fan and Kai-Wei Chang and Cho-Jui Hsieh and Xiang-Rui Wang and Chih-Jen Lin");
    result.setValue(TechnicalInformation.Field.TITLE, "LIBLINEAR - A Library for Large Linear Classification");
    result.setValue(TechnicalInformation.Field.YEAR, "2008");
    result.setValue(TechnicalInformation.Field.URL, "http://www.csie.ntu.edu.tw/~cjlin/liblinear/");
    result.setValue(TechnicalInformation.Field.NOTE, "The Weka classifier works with version 1.33 of LIBLINEAR");

    return result;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector result;

    result = new Vector();

    result.addElement(
        new Option(
            "\tSet type of solver (default: 1)\n"
          + "\t\t 0 = L2-regularized logistic regression\n"
          + "\t\t 1 = L2-loss support vector machines (dual)\n"
          + "\t\t 2 = L2-loss support vector machines (primal)\n"
          + "\t\t 3 = L1-loss support vector machines (dual)\n"
          + "\t\t 4 = multi-class support vector machines by Crammer and Singer",
            "S", 1, "-S <int>"));

    result.addElement(
        new Option(
            "\tSet the cost parameter C\n"
          + "\t (default: 1)",
            "C", 1, "-C <double>"));

    result.addElement(
        new Option(
            "\tTurn on normalization of input data (default: off)",
            "Z", 0, "-Z"));

    result.addElement(
        new Option("\tTurn on nominal to binary conversion.",
            "N", 0, "-N"));

    result.addElement(
        new Option("\tTurn off missing value replacement."
          + "\n\tWARNING: use only if your data has no missing "
          + "values.", "M", 0, "-M"));

    result.addElement(
        new Option(
            "\tUse probability estimation (default: off)\n"
          + "currently for L2-regularized logistic regression only! ",
            "P", 0, "-P"));

    result.addElement(
        new Option(
            "\tSet tolerance of termination criterion (default: 0.01)",
            "E", 1, "-E <double>"));

    result.addElement(
        new Option(
            "\tSet the parameters C of class i to weight[i]*C\n"
          + "\t (default: 1)",
            "W", 1, "-W <double>"));

    result.addElement(
        new Option(
            "\tAdd Bias term with the given value if >= 0; if < 0, no bias term added (default: 1)",
            "B", 1, "-B <double>"));

    // Append the options of the superclass (e.g. the debug flag -D).
    Enumeration en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement(en.nextElement());

    return result.elements();
  }

  /**
   * Sets the classifier options <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -S &lt;int&gt;
   *  Set type of solver (default: 1)
   *   0 = L2-regularized logistic regression
   *   1 = L2-loss support vector machines (dual)
   *   2 = L2-loss support vector machines (primal)
   *   3 = L1-loss support vector machines (dual)
   *   4 = multi-class support vector machines by Crammer and Singer</pre>
   *
   * <pre> -C &lt;double&gt;
   *  Set the cost parameter C
   *   (default: 1)</pre>
   *
   * <pre> -Z
   *  Turn on normalization of input data (default: off)</pre>
   *
   * <pre> -N
   *  Turn on nominal to binary conversion.</pre>
   *
   * <pre> -M
   *  Turn off missing value replacement.
   *  WARNING: use only if your data has no missing values.</pre>
   *
   * <pre> -P
   *  Use probability estimation (default: off)
   * currently for L2-regularized logistic regression only! </pre>
   *
   * <pre> -E &lt;double&gt;
   *  Set tolerance of termination criterion (default: 0.01)</pre>
   *
   * <pre> -W &lt;double&gt;
   *  Set the parameters C of class i to weight[i]*C
   *   (default: 1)</pre>
   *
   * <pre> -B &lt;double&gt;
   *  Add Bias term with the given value if &gt;= 0; if &lt; 0, no bias term added (default: 1)</pre>
   *
   * <pre> -D
   *  If set, classifier is run in debug mode and
   *  may output additional info to the console</pre>
   *
   <!-- options-end -->
   *
   * @param options     the options to parse
   * @throws Exception  if parsing fails
   */
  public void setOptions(String[] options) throws Exception {
    String      tmpStr;

    tmpStr = Utils.getOption('S', options);
    if (tmpStr.length() != 0)
      setSVMType(
          new SelectedTag(Integer.parseInt(tmpStr), TAGS_SVMTYPE));
    else
      setSVMType(
          new SelectedTag(SVMTYPE_L2LOSS_SVM_DUAL, TAGS_SVMTYPE));

    tmpStr = Utils.getOption('C', options);
    if (tmpStr.length() != 0)
      setCost(Double.parseDouble(tmpStr));
    else
      setCost(1);

    // NOTE(review): this fallback (1e-3) disagrees with the field
    // initializer and the documented default of 0.01 — confirm which
    // default is intended.
    tmpStr = Utils.getOption('E', options);
    if (tmpStr.length() != 0)
      setEps(Double.parseDouble(tmpStr));
    else
      setEps(1e-3);

    setNormalize(Utils.getFlag('Z', options));

    setConvertNominalToBinary(Utils.getFlag('N', options));

    setDoNotReplaceMissingValues(Utils.getFlag('M', options));

    tmpStr = Utils.getOption('B', options);
    if (tmpStr.length() != 0)
      setBias(Double.parseDouble(tmpStr));
    else
      setBias(1);

    setWeights(Utils.getOption('W', options));

    setProbabilityEstimates(Utils.getFlag('P', options));

    super.setOptions(options);
  }

  /**
   * Returns the current options
   *
   * @return the current setup
   */
  public String[] getOptions() {
    Vector result;

    result = new Vector();

    result.add("-S");
    result.add("" + m_SVMType);

    result.add("-C");
    result.add("" + getCost());

    result.add("-E");
    result.add("" + getEps());

    result.add("-B");
    result.add("" + getBias());

    if (getNormalize())
      result.add("-Z");

    if (getConvertNominalToBinary())
      result.add("-N");

    if (getDoNotReplaceMissingValues())
      result.add("-M");

    if (getWeights().length() != 0) {
      result.add("-W");
      result.add("" + getWeights());
    }

    if (getProbabilityEstimates())
      result.add("-P");

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * returns whether the liblinear classes are present or not, i.e. whether the
   * classes are in the classpath or not
   *
   * @return whether the liblinear classes are available
   */
  public static boolean isPresent() {
    return m_Present;
  }

  /**
   * Sets type of SVM (default SVMTYPE_L2)
   *
   * @param value       the type of the SVM
   */
  public void setSVMType(SelectedTag value) {
    if (value.getTags() == TAGS_SVMTYPE)
      m_SVMType = value.getSelectedTag().getID();
  }

  /**
   * Gets type of SVM
   *
   * @return            the type of the SVM
   */
  public SelectedTag getSVMType() {
    return new SelectedTag(m_SVMType, TAGS_SVMTYPE);
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String SVMTypeTipText() {
    return "The type of SVM to use.";
  }

  /**
   * Sets the cost parameter C (default 1)
   *
   * @param value       the cost value
   */
  public void setCost(double value) {
    m_Cost = value;
  }

  /**
   * Returns the cost parameter C
   *
   * @return            the cost value
   */
  public double getCost() {
    return m_Cost;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String costTipText() {
    return "The cost parameter C.";
  }

  /**
   * Sets tolerance of termination criterion (default 0.001)
   *
   * @param value       the tolerance
   */
  public void setEps(double value) {
    m_eps = value;
  }

  /**
   * Gets tolerance of termination criterion
   *
   * @return            the current tolerance
   */
  public double getEps() {
    return m_eps;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String epsTipText() {
    return "The tolerance of the termination criterion.";
  }

  /**
   * Sets bias term value (default 1)
   * No bias term is added if value &lt; 0
   *
   * @param value       the bias term
   *                    value
   */
  public void setBias(double value) {
    m_Bias = value;
  }

  /**
   * Returns bias term value (default 1)
   * No bias term is added if value &lt; 0
   *
   * @return            the bias term value
   */
  public double getBias() {
    return m_Bias;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String biasTipText() {
    return "If >= 0, a bias term with that value is added; "
      + "otherwise (<0) no bias term is added (default: 1).";
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String normalizeTipText() {
    return "Whether to normalize the data.";
  }

  /**
   * whether to normalize input data
   *
   * @param value       whether to normalize the data
   */
  public void setNormalize(boolean value) {
    m_Normalize = value;
  }

  /**
   * whether to normalize input data
   *
   * @return            true, if the data is normalized
   */
  public boolean getNormalize() {
    return m_Normalize;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String convertNominalToBinaryTipText() {
    return "Whether to turn on conversion of nominal attributes "
      + "to binary.";
  }

  /**
   * Whether to turn on conversion of nominal attributes
   * to binary.
   *
   * @param b true if nominal to binary conversion is to be
   *          turned on
   */
  public void setConvertNominalToBinary(boolean b) {
    m_nominalToBinary = b;
  }

  /**
   * Gets whether conversion of nominal to binary is
   * turned on.
   *
   * @return true if nominal to binary conversion is turned
   *         on.
   */
  public boolean getConvertNominalToBinary() {
    return m_nominalToBinary;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String doNotReplaceMissingValuesTipText() {
    return "Whether to turn off automatic replacement of missing "
      + "values. WARNING: set to true only if the data does not "
      + "contain missing values.";
  }

  /**
   * Whether to turn off automatic replacement of missing values.
   * Set to true only if the data does not contain missing values.
   *
   * @param b true if automatic missing values replacement is
   *          to be disabled.
   */
  public void setDoNotReplaceMissingValues(boolean b) {
    m_noReplaceMissingValues = b;
  }

  /**
   * Gets whether automatic replacement of missing values is
   * disabled.
   *
   * @return true if automatic replacement of missing values
   *         is disabled.
   */
  public boolean getDoNotReplaceMissingValues() {
    return m_noReplaceMissingValues;
  }

  /**
   * Sets the parameters C of class i to weight[i]*C (default 1).
   * Blank separated list of doubles.
   *
   * @param weightsStr  the weights (doubles, separated by blanks)
   */
  public void setWeights(String weightsStr) {
    StringTokenizer tok;
    int             i;

    tok           = new StringTokenizer(weightsStr, " ");
    m_Weight      = new double[tok.countTokens()];
    m_WeightLabel = new int[tok.countTokens()];

    if (m_Weight.length == 0)
      System.out.println(
          "Zero Weights processed. Default weights will be used");

    for (i = 0; i < m_Weight.length; i++) {
      m_Weight[i] = Double.parseDouble(tok.nextToken());
      // NOTE(review): labels are simply 0..n-1 here — presumably matched to
      // class indices when passed to liblinear's setWeights; confirm against
      // the liblinear Parameter API.
      m_WeightLabel[i] = i;
    }
  }

  /**
   * Gets the parameters C of class i to weight[i]*C (default 1).
   * Blank separated doubles.
   *
   * @return the weights (doubles separated by blanks)
   */
  public String getWeights() {
    String      result;
    int         i;

    result = "";
    for (i = 0; i < m_Weight.length; i++) {
      if (i > 0)
        result += " ";
      result += Double.toString(m_Weight[i]);
    }

    return result;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String weightsTipText() {
    return "The weights to use for the classes, if empty 1 is used by default.";
  }

  /**
   * Sets whether probability estimates are generated instead of -1/+1 for
   * classification problems.
   *
   * @param value       whether to predict probabilities
   */
  public void setProbabilityEstimates(boolean value) {
    m_ProbabilityEstimates = value;
  }

  /**
   * Returns whether to generate probability estimates instead of -1/+1 for
   * classification problems.
   *
   * @return            true, if probability estimates should be returned
   */
  public boolean getProbabilityEstimates() {
    return m_ProbabilityEstimates;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String probabilityEstimatesTipText() {
    return "Whether to generate probability estimates instead of -1/+1 for classification problems "
      + "(currently for L2-regularized logistic regression only!)";
  }

  /**
   * sets the specified field via reflection; reflection errors are only
   * printed, not propagated.
   *
   * @param o           the object to set the field for
   * @param name        the name of the field
   * @param value       the new value of the field
   */
  protected void setField(Object o, String name, Object value) {
    Field       f;

    try {
      f = o.getClass().getField(name);
      f.set(o, value);
    }
    catch (Exception e) {
      e.printStackTrace();
    }
  }

  /**
   * sets the specified field in an array (reflection errors are only
   * printed, not propagated)
   *
   * @param o           the object to set the field for
   * @param name        the name of the field
   * @param index       the index in the array
   * @param value       the new value of the field
   */
  protected void setField(Object o, String name, int index, Object value) {
    Field       f;

    try {
      f = o.getClass().getField(name);
      Array.set(f.get(o), index, value);
    }
    catch (Exception e) {
      e.printStackTrace();
    }
  }

  /**
   * returns the current value of the specified field
   *
   * @param o           the object the field is member of
   * @param name        the name of the field
   * @return            the value, or null if reflection failed
   */
  protected Object getField(Object o, String name) {
    Field       f;
    Object      result;

    try {
      f      = o.getClass().getField(name);
      result = f.get(o);
    }
    catch (Exception e) {
      e.printStackTrace();
      result = null;
    }

    return result;
  }

  /**
   * sets a new array for the field
   *
   * @param o           the object to set the array for
   * @param name        the name of the field
   * @param type        the type of the array
   * @param length      the length of the one-dimensional array
   */
  protected void newArray(Object o, String name, Class type, int length) {
    newArray(o, name, type, new int[]{length});
  }

  /**
   * sets a new array for the field
   *
   * @param o           the object to set the array for
   * @param name        the name of the field
   * @param type        the type of the array
   * @param dimensions  the dimensions of the array
   */
  protected void newArray(Object o, String name, Class type, int[] dimensions) {
    Field       f;

    try {
      f = o.getClass().getField(name);
      f.set(o, Array.newInstance(type, dimensions));
    }
    catch (Exception e) {
      e.printStackTrace();
    }
  }

  /**
   * executes the specified method and returns the result, if any
   * (reflection errors are printed and null is returned)
   *
   * @param o             the object the method should be called from
   * @param name          the name of the method
   * @param paramClasses  the classes of the parameters
   * @param paramValues   the values of the parameters
   * @return              the return value of the method, if any (in that case null)
   */
  protected Object invokeMethod(Object o, String name, Class[] paramClasses, Object[] paramValues) {
    Method      m;
    Object      result;

    result = null;

    try {
      m      = o.getClass().getMethod(name, paramClasses);
      result = m.invoke(o, paramValues);
    }
    catch (Exception e) {
      e.printStackTrace();
      result = null;
    }

    return result;
  }

  /**
   * transfers the local variables into a svm_parameter object
   *
   * @return the configured svm_parameter object
   */
  protected Object getParameters() {
    Object      result;
    int         i;

    try {
      Class solverTypeEnumClass = Class.forName(CLASS_SOLVERTYPE);
      Object[] enumValues = solverTypeEnumClass.getEnumConstants();
      // m_SVMType doubles as the ordinal of the liblinear SolverType enum.
      Object solverType = enumValues[m_SVMType];

      Class[] constructorClasses = new Class[] { solverTypeEnumClass, double.class, double.class };
      Constructor parameterConstructor = Class.forName(CLASS_PARAMETER).getConstructor(constructorClasses);
      result = parameterConstructor.newInstance(solverType, Double.valueOf(m_Cost), Double.valueOf(m_eps));

      if (m_Weight.length > 0) {
        invokeMethod(result, "setWeights",
            new Class[] { double[].class, int[].class },
            new Object[] { m_Weight, m_WeightLabel });
      }
    }
    catch (Exception e) {
      e.printStackTrace();
      result = null;
    }

    return result;
  }

  /**
   * returns the svm_problem
   *
   * @param vx          the x values (one sparse FeatureNode array per instance)
   * @param vy          the y values (class indices)
   * @param max_index   the highest feature index in use
   * @return            the Problem object, or null if reflection failed
   */
  protected Object getProblem(List<Object> vx, List<Integer> vy, int max_index) {
    Object      result;

    try {
      result = Class.forName(CLASS_PROBLEM).newInstance();

      setField(result, "l", Integer.valueOf(vy.size()));
      setField(result, "n", Integer.valueOf(max_index));
      setField(result, "bias", getBias());

      newArray(result, "x", Class.forName(CLASS_FEATURENODE), new int[]{vy.size(), 0});
      for (int i = 0; i < vy.size(); i++)
        setField(result, "x", i, vx.get(i));

      newArray(result, "y", Integer.TYPE, vy.size());
      for (int i = 0; i < vy.size(); i++)
        setField(result, "y", i, vy.get(i));
    }
    catch (Exception e) {
      e.printStackTrace();
      result = null;
    }

    return result;
  }

  /**
   * returns an instance into a sparse liblinear array
   *
   * @param instance	the instance to work on
   * @return		the liblinear array
   * @throws Exception	if setup of array fails
   */
  protected Object instanceToArray(Instance instance) throws Exception {
    int     index;
    int     count;
    int     i;
    Object  result;

    // determine number of non-zero attributes
    count = 0;
    for (i = 0; i < instance.numValues(); i++) {
      if (instance.index(i) == instance.classIndex())
        continue;
      if (instance.valueSparse(i) != 0)
        count++;
    }
    if (m_Bias >= 0) {
      count++;
    }

    Class[] intDouble = new Class[] { int.class, double.class };
    Constructor nodeConstructor = Class.forName(CLASS_FEATURENODE).getConstructor(intDouble);

    // fill array
    result = Array.newInstance(Class.forName(CLASS_FEATURENODE), count);
    index = 0;
    for (i = 0; i < instance.numValues(); i++) {
      int idx = instance.index(i);
      double val = instance.valueSparse(i);
      if (idx == instance.classIndex())
        continue;
      if (val == 0)
        continue;
      // liblinear feature indices are 1-based, hence idx+1.
      Object node = nodeConstructor.newInstance(Integer.valueOf(idx+1), Double.valueOf(val));
      Array.set(result, index, node);
      index++;
    }

    // add bias term
    if (m_Bias >= 0) {
      Integer idx = Integer.valueOf(instance.numAttributes()+1);
      Double value = Double.valueOf(m_Bias);
      Object node = nodeConstructor.newInstance(idx, value);
      Array.set(result, index, node);
    }

    return result;
  }

  /**
   * Computes the distribution for a given instance.
   *
   * @param instance 		the instance for which distribution is computed
   * @return 			the distribution
   * @throws Exception 		if the distribution can't be computed successfully
   */
  public double[] distributionForInstance (Instance instance) throws Exception {
    // Apply the same preprocessing chain used during training.
    if (!getDoNotReplaceMissingValues()) {
      m_ReplaceMissingValues.input(instance);
      m_ReplaceMissingValues.batchFinished();
      instance = m_ReplaceMissingValues.output();
    }

    if (getConvertNominalToBinary()
        && m_NominalToBinary != null) {
      m_NominalToBinary.input(instance);
      m_NominalToBinary.batchFinished();
      instance = m_NominalToBinary.output();
    }

    if (m_Filter != null) {
      m_Filter.input(instance);
      m_Filter.batchFinished();
      instance = m_Filter.output();
    }

    Object x = instanceToArray(instance);
    double v;
    double[] result = new double[instance.numClasses()];
    if (m_ProbabilityEstimates) {
      if (m_SVMType != SVMTYPE_L2_LR) {
        throw new WekaException("probability estimation is currently only " +
          "supported for L2-regularized logistic regression");
      }

      // liblinear reports probabilities in its own label order; fetch the
      // labels so the estimates can be mapped back to Weka's class order.
      int[] labels = (int[])invokeMethod(m_Model, "getLabels", null, null);
      double[] prob_estimates = new double[instance.numClasses()];

      v = ((Integer) invokeMethod(
          Class.forName(CLASS_LINEAR).newInstance(),
          "predictProbability",
          new Class[]{
            Class.forName(CLASS_MODEL),
            Array.newInstance(Class.forName(CLASS_FEATURENODE), Array.getLength(x)).getClass(),
            Array.newInstance(Double.TYPE, prob_estimates.length).getClass()},
          new Object[]{
            m_Model, x, prob_estimates})).doubleValue();

      // Return order of probabilities to canonical weka attribute order
      for (int k = 0; k < prob_estimates.length; k++) {
        result[labels[k]] = prob_estimates[k];
      }
    }
    else {
      v = ((Integer) invokeMethod(
          Class.forName(CLASS_LINEAR).newInstance(),
          "predict",
          new Class[]{
            Class.forName(CLASS_MODEL),
            Array.newInstance(Class.forName(CLASS_FEATURENODE), Array.getLength(x)).getClass()},
          new Object[]{
            m_Model, x})).doubleValue();

      assert (instance.classAttribute().isNominal());
      // Hard 0/1 distribution over the predicted class.
      result[(int) v] = 1;
    }

    return result;
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    // result.enable(Capability.MISSING_VALUES);

    // class
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * builds the classifier
   *
   * @param insts       the training instances
   * @throws Exception  if liblinear classes not in classpath or liblinear
   *                    encountered a problem
   */
  public void buildClassifier(Instances insts) throws Exception {
    m_NominalToBinary = null;
    m_Filter = null;

    if (!isPresent())
      throw new Exception("liblinear classes not in CLASSPATH!");

    // remove instances with missing class
    insts = new Instances(insts);
    insts.deleteWithMissingClass();

    if (!getDoNotReplaceMissingValues()) {
      m_ReplaceMissingValues = new ReplaceMissingValues();
m_ReplaceMissingValues.setInputFormat(insts); insts = Filter.useFilter(insts, m_ReplaceMissingValues); } // can classifier handle the data? // we check this here so that if the user turns off // replace missing values filtering, it will fail // if the data actually does have missing values getCapabilities().testWithFail(insts); if (getConvertNominalToBinary()) { insts = nominalToBinary(insts); } if (getNormalize()) { m_Filter = new Normalize(); m_Filter.setInputFormat(insts); insts = Filter.useFilter(insts, m_Filter); } List<Integer> vy = new ArrayList<Integer>(insts.numInstances()); List<Object> vx = new ArrayList<Object>(insts.numInstances()); int max_index = 0; for (int d = 0; d < insts.numInstances(); d++) { Instance inst = insts.instance(d); Object x = instanceToArray(inst); int m = Array.getLength(x); if (m > 0) max_index = Math.max(max_index, ((Integer) getField(Array.get(x, m - 1), "index")).intValue()); vx.add(x); double classValue = inst.classValue(); int classValueInt = (int)classValue; if (classValueInt != classValue) throw new RuntimeException("unsupported class value: " + classValue); vy.add(Integer.valueOf(classValueInt)); } if (!m_Debug) { invokeMethod( Class.forName(CLASS_LINEAR).newInstance(), "disableDebugOutput", null, null); } else { invokeMethod( Class.forName(CLASS_LINEAR).newInstance(), "enableDebugOutput", null, null); } // reset the PRNG for regression-stable results invokeMethod( Class.forName(CLASS_LINEAR).newInstance(), "resetRandom", null, null); // train model m_Model = invokeMethod( Class.forName(CLASS_LINEAR).newInstance(), "train", new Class[]{ Class.forName(CLASS_PROBLEM), Class.forName(CLASS_PARAMETER)}, new Object[]{ getProblem(vx, vy, max_index), getParameters()}); } /** * turns on nominal to binary filtering * if there are not only numeric attributes */ private Instances nominalToBinary( Instances insts ) throws Exception { boolean onlyNumeric = true; for (int i = 0; i < insts.numAttributes(); i++) { if (i != 
insts.classIndex()) { if (!insts.attribute(i).isNumeric()) { onlyNumeric = false; break; } } } if (!onlyNumeric) { m_NominalToBinary = new NominalToBinary(); m_NominalToBinary.setInputFormat(insts); insts = Filter.useFilter(insts, m_NominalToBinary); } return insts; } /** * returns a string representation * * @return a string representation */ public String toString() { return "LibLINEAR wrapper"; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5917 $"); } /** * Main method for testing this class. * * @param args the options */ public static void main(String[] args) { runClassifier(new LibLINEAR(), args); } }
35,668
27.788539
188
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/LibSVM.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * LibSVM.java * Copyright (C) 2005 Yasser EL-Manzalawy (original code) * Copyright (C) 2005 University of Waikato, Hamilton, NZ (adapted code) * */ package weka.classifiers.functions; import weka.classifiers.Classifier; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import java.lang.reflect.Array; import java.lang.reflect.Field; import java.lang.reflect.Method; import java.util.Enumeration; import java.util.StringTokenizer; import java.util.Vector; import weka.classifiers.AbstractClassifier; /* * Modifications by FracPete: * - complete overhaul to make it useable in Weka * - accesses libsvm classes only via Reflection to make Weka compile without * the libsvm classes * - uses more efficient code to transfer the data into the libsvm sparse format */ /** <!-- globalinfo-start --> * A wrapper class for the 
libsvm tools (the libsvm classes, typically the jar file, need to be in the classpath to use this classifier).<br/> * LibSVM runs faster than SMO since it uses LibSVM to build the SVM classifier.<br/> * LibSVM allows users to experiment with One-class SVM, Regressing SVM, and nu-SVM supported by LibSVM tool. LibSVM reports many useful statistics about LibSVM classifier (e.g., confusion matrix,precision, recall, ROC score, etc.).<br/> * <br/> * Yasser EL-Manzalawy (2005). WLSVM. URL http://www.cs.iastate.edu/~yasser/wlsvm/.<br/> * <br/> * Chih-Chung Chang, Chih-Jen Lin (2001). LIBSVM - A Library for Support Vector Machines. URL http://www.csie.ntu.edu.tw/~cjlin/libsvm/. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;misc{EL-Manzalawy2005, * author = {Yasser EL-Manzalawy}, * note = {You don't need to include the WLSVM package in the CLASSPATH}, * title = {WLSVM}, * year = {2005}, * URL = {http://www.cs.iastate.edu/\~yasser/wlsvm/} * } * * &#64;misc{Chang2001, * author = {Chih-Chung Chang and Chih-Jen Lin}, * note = {The Weka classifier works with version 2.82 of LIBSVM}, * title = {LIBSVM - A Library for Support Vector Machines}, * year = {2001}, * URL = {http://www.csie.ntu.edu.tw/\~cjlin/libsvm/} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -S &lt;int&gt; * Set type of SVM (default: 0) * 0 = C-SVC * 1 = nu-SVC * 2 = one-class SVM * 3 = epsilon-SVR * 4 = nu-SVR</pre> * * <pre> -K &lt;int&gt; * Set type of kernel function (default: 2) * 0 = linear: u'*v * 1 = polynomial: (gamma*u'*v + coef0)^degree * 2 = radial basis function: exp(-gamma*|u-v|^2) * 3 = sigmoid: tanh(gamma*u'*v + coef0)</pre> * * <pre> -D &lt;int&gt; * Set degree in kernel function (default: 3)</pre> * * <pre> -G &lt;double&gt; * Set gamma in kernel function (default: 1/k)</pre> * * <pre> -R &lt;double&gt; * Set coef0 in kernel function (default: 0)</pre> * * <pre> -C &lt;double&gt; * Set the 
parameter C of C-SVC, epsilon-SVR, and nu-SVR * (default: 1)</pre> * * <pre> -N &lt;double&gt; * Set the parameter nu of nu-SVC, one-class SVM, and nu-SVR * (default: 0.5)</pre> * * <pre> -Z * Turns on normalization of input data (default: off)</pre> * * <pre> -J * Turn off nominal to binary conversion. * WARNING: use only if your data is all numeric!</pre> * * <pre> -V * Turn off missing value replacement. * WARNING: use only if your data has no missing values.</pre> * * <pre> -P &lt;double&gt; * Set the epsilon in loss function of epsilon-SVR (default: 0.1)</pre> * * <pre> -M &lt;double&gt; * Set cache memory size in MB (default: 40)</pre> * * <pre> -E &lt;double&gt; * Set tolerance of termination criterion (default: 0.001)</pre> * * <pre> -H * Turns the shrinking heuristics off (default: on)</pre> * * <pre> -W &lt;double&gt; * Set the parameters C of class i to weight[i]*C, for C-SVC * E.g., for a 3-class problem, you could use "1 1 1" for equally * weighted classes. * (default: 1 for all classes)</pre> * * <pre> -B * Trains a SVC model instead of a SVR one (default: SVR)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Yasser EL-Manzalawy * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision: 5523 $ * @see weka.core.converters.LibSVMLoader * @see weka.core.converters.LibSVMSaver */ public class LibSVM extends AbstractClassifier implements TechnicalInformationHandler { /** the svm classname */ protected final static String CLASS_SVM = "libsvm.svm"; /** the svm_model classname */ protected final static String CLASS_SVMMODEL = "libsvm.svm_model"; /** the svm_problem classname */ protected final static String CLASS_SVMPROBLEM = "libsvm.svm_problem"; /** the svm_parameter classname */ protected final static String CLASS_SVMPARAMETER = "libsvm.svm_parameter"; /** the svm_node classname */ protected final static String CLASS_SVMNODE = 
"libsvm.svm_node"; /** serial UID */ protected static final long serialVersionUID = 14172; /** LibSVM Model */ protected Object m_Model; /** for normalizing the data */ protected Filter m_Filter = null; /** The filter used to get rid of missing values. */ protected ReplaceMissingValues m_ReplaceMissingValues; /** normalize input data */ protected boolean m_Normalize = false; /** If true, the replace missing values filter is not applied */ private boolean m_noReplaceMissingValues; /** SVM type C-SVC (classification) */ public static final int SVMTYPE_C_SVC = 0; /** SVM type nu-SVC (classification) */ public static final int SVMTYPE_NU_SVC = 1; /** SVM type one-class SVM (classification) */ public static final int SVMTYPE_ONE_CLASS_SVM = 2; /** SVM type epsilon-SVR (regression) */ public static final int SVMTYPE_EPSILON_SVR = 3; /** SVM type nu-SVR (regression) */ public static final int SVMTYPE_NU_SVR = 4; /** SVM types */ public static final Tag[] TAGS_SVMTYPE = { new Tag(SVMTYPE_C_SVC, "C-SVC (classification)"), new Tag(SVMTYPE_NU_SVC, "nu-SVC (classification)"), new Tag(SVMTYPE_ONE_CLASS_SVM, "one-class SVM (classification)"), new Tag(SVMTYPE_EPSILON_SVR, "epsilon-SVR (regression)"), new Tag(SVMTYPE_NU_SVR, "nu-SVR (regression)") }; /** the SVM type */ protected int m_SVMType = SVMTYPE_C_SVC; /** kernel type linear: u'*v */ public static final int KERNELTYPE_LINEAR = 0; /** kernel type polynomial: (gamma*u'*v + coef0)^degree */ public static final int KERNELTYPE_POLYNOMIAL = 1; /** kernel type radial basis function: exp(-gamma*|u-v|^2) */ public static final int KERNELTYPE_RBF = 2; /** kernel type sigmoid: tanh(gamma*u'*v + coef0) */ public static final int KERNELTYPE_SIGMOID = 3; /** the different kernel types */ public static final Tag[] TAGS_KERNELTYPE = { new Tag(KERNELTYPE_LINEAR, "linear: u'*v"), new Tag(KERNELTYPE_POLYNOMIAL, "polynomial: (gamma*u'*v + coef0)^degree"), new Tag(KERNELTYPE_RBF, "radial basis function: exp(-gamma*|u-v|^2)"), new 
Tag(KERNELTYPE_SIGMOID, "sigmoid: tanh(gamma*u'*v + coef0)") }; /** the kernel type */ protected int m_KernelType = KERNELTYPE_RBF; /** for poly - in older versions of libsvm declared as a double. * At least since 2.82 it is an int. */ protected int m_Degree = 3; /** for poly/rbf/sigmoid */ protected double m_Gamma = 0; /** for poly/rbf/sigmoid (the actual gamma) */ protected double m_GammaActual = 0; /** for poly/sigmoid */ protected double m_Coef0 = 0; /** in MB */ protected double m_CacheSize = 40; /** stopping criteria */ protected double m_eps = 1e-3; /** cost, for C_SVC, EPSILON_SVR and NU_SVR */ protected double m_Cost = 1; /** for C_SVC */ protected int[] m_WeightLabel = new int[0]; /** for C_SVC */ protected double[] m_Weight = new double[0]; /** for NU_SVC, ONE_CLASS, and NU_SVR */ protected double m_nu = 0.5; /** loss, for EPSILON_SVR */ protected double m_Loss = 0.1; /** use the shrinking heuristics */ protected boolean m_Shrinking = true; /** whether to generate probability estimates instead of +1/-1 in case of * classification problems */ protected boolean m_ProbabilityEstimates = false; /** whether the libsvm classes are in the Classpath */ protected static boolean m_Present = false; static { try { Class.forName(CLASS_SVM); m_Present = true; } catch (Exception e) { m_Present = false; } } /** * Returns a string describing classifier * * @return a description suitable for displaying in the * explorer/experimenter gui */ public String globalInfo() { return "A wrapper class for the libsvm tools (the libsvm classes, typically " + "the jar file, need to be in the classpath to use this classifier).\n" + "LibSVM runs faster than SMO since it uses LibSVM to build the SVM " + "classifier.\n" + "LibSVM allows users to experiment with One-class SVM, Regressing SVM, " + "and nu-SVM supported by LibSVM tool. 
LibSVM reports many useful " + "statistics about LibSVM classifier (e.g., confusion matrix," + "precision, recall, ROC score, etc.).\n" + "\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.MISC); result.setValue(TechnicalInformation.Field.AUTHOR, "Yasser EL-Manzalawy"); result.setValue(TechnicalInformation.Field.YEAR, "2005"); result.setValue(TechnicalInformation.Field.TITLE, "WLSVM"); result.setValue(TechnicalInformation.Field.NOTE, "LibSVM was originally developed as 'WLSVM'"); result.setValue(TechnicalInformation.Field.URL, "http://www.cs.iastate.edu/~yasser/wlsvm/"); result.setValue(TechnicalInformation.Field.NOTE, "You don't need to include the WLSVM package in the CLASSPATH"); additional = result.add(Type.MISC); additional.setValue(TechnicalInformation.Field.AUTHOR, "Chih-Chung Chang and Chih-Jen Lin"); additional.setValue(TechnicalInformation.Field.TITLE, "LIBSVM - A Library for Support Vector Machines"); additional.setValue(TechnicalInformation.Field.YEAR, "2001"); additional.setValue(TechnicalInformation.Field.URL, "http://www.csie.ntu.edu.tw/~cjlin/libsvm/"); additional.setValue(TechnicalInformation.Field.NOTE, "The Weka classifier works with version 2.82 of LIBSVM"); return result; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/
  public Enumeration listOptions() {
    Vector result = new Vector();

    result.addElement(new Option(
        "\tSet type of SVM (default: 0)\n"
            + "\t\t 0 = C-SVC\n"
            + "\t\t 1 = nu-SVC\n"
            + "\t\t 2 = one-class SVM\n"
            + "\t\t 3 = epsilon-SVR\n"
            + "\t\t 4 = nu-SVR",
        "S", 1, "-S <int>"));

    result.addElement(new Option(
        "\tSet type of kernel function (default: 2)\n"
            + "\t\t 0 = linear: u'*v\n"
            + "\t\t 1 = polynomial: (gamma*u'*v + coef0)^degree\n"
            + "\t\t 2 = radial basis function: exp(-gamma*|u-v|^2)\n"
            + "\t\t 3 = sigmoid: tanh(gamma*u'*v + coef0)",
        "K", 1, "-K <int>"));

    result.addElement(new Option(
        "\tSet degree in kernel function (default: 3)",
        "D", 1, "-D <int>"));

    result.addElement(new Option(
        "\tSet gamma in kernel function (default: 1/k)",
        "G", 1, "-G <double>"));

    result.addElement(new Option(
        "\tSet coef0 in kernel function (default: 0)",
        "R", 1, "-R <double>"));

    result.addElement(new Option(
        "\tSet the parameter C of C-SVC, epsilon-SVR, and nu-SVR\n"
            + "\t (default: 1)",
        "C", 1, "-C <double>"));

    result.addElement(new Option(
        "\tSet the parameter nu of nu-SVC, one-class SVM, and nu-SVR\n"
            + "\t (default: 0.5)",
        "N", 1, "-N <double>"));

    result.addElement(new Option(
        "\tTurns on normalization of input data (default: off)",
        "Z", 0, "-Z"));

    result.addElement(new Option(
        "\tTurn off nominal to binary conversion."
            + "\n\tWARNING: use only if your data is all numeric!",
        "J", 0, "-J"));

    result.addElement(new Option(
        "\tTurn off missing value replacement."
            + "\n\tWARNING: use only if your data has no missing "
            + "values.",
        "V", 0, "-V"));

    result.addElement(new Option(
        "\tSet the epsilon in loss function of epsilon-SVR (default: 0.1)",
        "P", 1, "-P <double>"));

    result.addElement(new Option(
        "\tSet cache memory size in MB (default: 40)",
        "M", 1, "-M <double>"));

    result.addElement(new Option(
        "\tSet tolerance of termination criterion (default: 0.001)",
        "E", 1, "-E <double>"));

    result.addElement(new Option(
        "\tTurns the shrinking heuristics off (default: on)",
        "H", 0, "-H"));

    result.addElement(new Option(
        "\tSet the parameters C of class i to weight[i]*C, for C-SVC\n"
            + "\tE.g., for a 3-class problem, you could use \"1 1 1\" for equally\n"
            + "\tweighted classes.\n"
            + "\t(default: 1 for all classes)",
        "W", 1, "-W <double>"));

    result.addElement(new Option(
        "\tTrains a SVC model instead of a SVR one (default: SVR)",
        "B", 0, "-B"));

    // append the superclass options
    Enumeration en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement(en.nextElement());

    return result.elements();
  }

  /**
   * Sets the classifier options <p/>
   *
   * <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -S &lt;int&gt;
   * Set type of SVM (default: 0)
   * 0 = C-SVC
   * 1 = nu-SVC
   * 2 = one-class SVM
   * 3 = epsilon-SVR
   * 4 = nu-SVR</pre>
   *
   * <pre> -K &lt;int&gt;
   * Set type of kernel function (default: 2)
   * 0 = linear: u'*v
   * 1 = polynomial: (gamma*u'*v + coef0)^degree
   * 2 = radial basis function: exp(-gamma*|u-v|^2)
   * 3 = sigmoid: tanh(gamma*u'*v + coef0)</pre>
   *
   * <pre> -D &lt;int&gt;
   * Set degree in kernel function (default: 3)</pre>
   *
   * <pre> -G &lt;double&gt;
   * Set gamma in kernel function (default: 1/k)</pre>
   *
   * <pre> -R &lt;double&gt;
   * Set coef0 in kernel function (default: 0)</pre>
   *
   * <pre> -C &lt;double&gt;
   * Set the parameter C of C-SVC, epsilon-SVR, and nu-SVR
   * (default: 1)</pre>
   *
   * <pre> -N &lt;double&gt;
   * Set the parameter nu of nu-SVC, one-class SVM, and nu-SVR
   * (default: 0.5)</pre>
   *
   * <pre> -Z
   * Turns on normalization of input data (default:
off)</pre> * * <pre> -J * Turn off nominal to binary conversion. * WARNING: use only if your data is all numeric!</pre> * * <pre> -V * Turn off missing value replacement. * WARNING: use only if your data has no missing values.</pre> * * <pre> -P &lt;double&gt; * Set the epsilon in loss function of epsilon-SVR (default: 0.1)</pre> * * <pre> -M &lt;double&gt; * Set cache memory size in MB (default: 40)</pre> * * <pre> -E &lt;double&gt; * Set tolerance of termination criterion (default: 0.001)</pre> * * <pre> -H * Turns the shrinking heuristics off (default: on)</pre> * * <pre> -W &lt;double&gt; * Set the parameters C of class i to weight[i]*C, for C-SVC * E.g., for a 3-class problem, you could use "1 1 1" for equally * weighted classes. * (default: 1 for all classes)</pre> * * <pre> -B * Trains a SVC model instead of a SVR one (default: SVR)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @param options the options to parse * @throws Exception if parsing fails */ public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('S', options); if (tmpStr.length() != 0) setSVMType( new SelectedTag(Integer.parseInt(tmpStr), TAGS_SVMTYPE)); else setSVMType( new SelectedTag(SVMTYPE_C_SVC, TAGS_SVMTYPE)); tmpStr = Utils.getOption('K', options); if (tmpStr.length() != 0) setKernelType( new SelectedTag(Integer.parseInt(tmpStr), TAGS_KERNELTYPE)); else setKernelType( new SelectedTag(KERNELTYPE_RBF, TAGS_KERNELTYPE)); tmpStr = Utils.getOption('D', options); if (tmpStr.length() != 0) setDegree(Integer.parseInt(tmpStr)); else setDegree(3); tmpStr = Utils.getOption('G', options); if (tmpStr.length() != 0) setGamma(Double.parseDouble(tmpStr)); else setGamma(0); tmpStr = Utils.getOption('R', options); if (tmpStr.length() != 0) setCoef0(Double.parseDouble(tmpStr)); else setCoef0(0); tmpStr = Utils.getOption('N', options); if (tmpStr.length() != 0) 
setNu(Double.parseDouble(tmpStr)); else setNu(0.5); tmpStr = Utils.getOption('M', options); if (tmpStr.length() != 0) setCacheSize(Double.parseDouble(tmpStr)); else setCacheSize(40); tmpStr = Utils.getOption('C', options); if (tmpStr.length() != 0) setCost(Double.parseDouble(tmpStr)); else setCost(1); tmpStr = Utils.getOption('E', options); if (tmpStr.length() != 0) setEps(Double.parseDouble(tmpStr)); else setEps(1e-3); setNormalize(Utils.getFlag('Z', options)); setDoNotReplaceMissingValues(Utils.getFlag("V", options)); tmpStr = Utils.getOption('P', options); if (tmpStr.length() != 0) setLoss(Double.parseDouble(tmpStr)); else setLoss(0.1); setShrinking(!Utils.getFlag('H', options)); setWeights(Utils.getOption('W', options)); setProbabilityEstimates(Utils.getFlag('B', options)); } /** * Returns the current options * * @return the current setup */ public String[] getOptions() { Vector result; result = new Vector(); result.add("-S"); result.add("" + m_SVMType); result.add("-K"); result.add("" + m_KernelType); result.add("-D"); result.add("" + getDegree()); result.add("-G"); result.add("" + getGamma()); result.add("-R"); result.add("" + getCoef0()); result.add("-N"); result.add("" + getNu()); result.add("-M"); result.add("" + getCacheSize()); result.add("-C"); result.add("" + getCost()); result.add("-E"); result.add("" + getEps()); result.add("-P"); result.add("" + getLoss()); if (!getShrinking()) result.add("-H"); if (getNormalize()) result.add("-Z"); if (getDoNotReplaceMissingValues()) result.add("-V"); if (getWeights().length() != 0) { result.add("-W"); result.add("" + getWeights()); } if (getProbabilityEstimates()) result.add("-B"); return (String[]) result.toArray(new String[result.size()]); } /** * returns whether the libsvm classes are present or not, i.e. 
whether the * classes are in the classpath or not * * @return whether the libsvm classes are available */ public static boolean isPresent() { return m_Present; } /** * Sets type of SVM (default SVMTYPE_C_SVC) * * @param value the type of the SVM */ public void setSVMType(SelectedTag value) { if (value.getTags() == TAGS_SVMTYPE) m_SVMType = value.getSelectedTag().getID(); } /** * Gets type of SVM * * @return the type of the SVM */ public SelectedTag getSVMType() { return new SelectedTag(m_SVMType, TAGS_SVMTYPE); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String SVMTypeTipText() { return "The type of SVM to use."; } /** * Sets type of kernel function (default KERNELTYPE_RBF) * * @param value the kernel type */ public void setKernelType(SelectedTag value) { if (value.getTags() == TAGS_KERNELTYPE) m_KernelType = value.getSelectedTag().getID(); } /** * Gets type of kernel function * * @return the kernel type */ public SelectedTag getKernelType() { return new SelectedTag(m_KernelType, TAGS_KERNELTYPE); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String kernelTypeTipText() { return "The type of kernel to use"; } /** * Sets the degree of the kernel * * @param value the degree of the kernel */ public void setDegree(int value) { m_Degree = value; } /** * Gets the degree of the kernel * * @return the degree of the kernel */ public int getDegree() { return m_Degree; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String degreeTipText() { return "The degree of the kernel."; } /** * Sets gamma (default = 1/no of attributes) * * @param value the gamma value */ public void setGamma(double value) { m_Gamma = value; } /** * Gets gamma * * @return the current 
gamma */ public double getGamma() { return m_Gamma; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String gammaTipText() { return "The gamma to use, if 0 then 1/max_index is used."; } /** * Sets coef (default 0) * * @param value the coef */ public void setCoef0(double value) { m_Coef0 = value; } /** * Gets coef * * @return the coef */ public double getCoef0() { return m_Coef0; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String coef0TipText() { return "The coefficient to use."; } /** * Sets nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5) * * @param value the new nu value */ public void setNu(double value) { m_nu = value; } /** * Gets nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5) * * @return the current nu value */ public double getNu() { return m_nu; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String nuTipText() { return "The value of nu for nu-SVC, one-class SVM and nu-SVR."; } /** * Sets cache memory size in MB (default 40) * * @param value the memory size in MB */ public void setCacheSize(double value) { m_CacheSize = value; } /** * Gets cache memory size in MB * * @return the memory size in MB */ public double getCacheSize() { return m_CacheSize; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String cacheSizeTipText() { return "The cache size in MB."; } /** * Sets the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1) * * @param value the cost value */ public void setCost(double value) { m_Cost = value; } /** * Sets the parameter C of C-SVC, epsilon-SVR, and nu-SVR * * @return the cost value */ public double 
getCost() { return m_Cost; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String costTipText() { return "The cost parameter C for C-SVC, epsilon-SVR and nu-SVR."; } /** * Sets tolerance of termination criterion (default 0.001) * * @param value the tolerance */ public void setEps(double value) { m_eps = value; } /** * Gets tolerance of termination criterion * * @return the current tolerance */ public double getEps() { return m_eps; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String epsTipText() { return "The tolerance of the termination criterion."; } /** * Sets the epsilon in loss function of epsilon-SVR (default 0.1) * * @param value the loss epsilon */ public void setLoss(double value) { m_Loss = value; } /** * Gets the epsilon in loss function of epsilon-SVR * * @return the loss epsilon */ public double getLoss() { return m_Loss; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String lossTipText() { return "The epsilon for the loss function in epsilon-SVR."; } /** * whether to use the shrinking heuristics * * @param value true uses shrinking */ public void setShrinking(boolean value) { m_Shrinking = value; } /** * whether to use the shrinking heuristics * * @return true, if shrinking is used */ public boolean getShrinking() { return m_Shrinking; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String shrinkingTipText() { return "Whether to use the shrinking heuristic."; } /** * whether to normalize input data * * @param value whether to normalize the data */ public void setNormalize(boolean value) { m_Normalize = value; } /** * whether to 
normalize input data * * @return true, if the data is normalized */ public boolean getNormalize() { return m_Normalize; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String normalizeTipText() { return "Whether to normalize the data."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String doNotReplaceMissingValuesTipText() { return "Whether to turn off automatic replacement of missing " + "values. WARNING: set to true only if the data does not " + "contain missing values."; } /** * Whether to turn off automatic replacement of missing values. * Set to true only if the data does not contain missing values. * * @param b true if automatic missing values replacement is * to be disabled. */ public void setDoNotReplaceMissingValues(boolean b) { m_noReplaceMissingValues = b; } /** * Gets whether automatic replacement of missing values is * disabled. * * @return true if automatic replacement of missing values * is disabled. */ public boolean getDoNotReplaceMissingValues() { return m_noReplaceMissingValues; } /** * Sets the parameters C of class i to weight[i]*C, for C-SVC (default 1). * Blank separated list of doubles. * * @param weightsStr the weights (doubles, separated by blanks) */ public void setWeights(String weightsStr) { StringTokenizer tok; int i; tok = new StringTokenizer(weightsStr, " "); m_Weight = new double[tok.countTokens()]; m_WeightLabel = new int[tok.countTokens()]; if (m_Weight.length == 0) System.out.println( "Zero Weights processed. Default weights will be used"); for (i = 0; i < m_Weight.length; i++) { m_Weight[i] = Double.parseDouble(tok.nextToken()); m_WeightLabel[i] = i; } } /** * Gets the parameters C of class i to weight[i]*C, for C-SVC (default 1). * Blank separated doubles. 
* * @return the weights (doubles separated by blanks) */ public String getWeights() { String result; int i; result = ""; for (i = 0; i < m_Weight.length; i++) { if (i > 0) result += " "; result += Double.toString(m_Weight[i]); } return result; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String weightsTipText() { return "The weights to use for the classes (blank-separated list, eg, \"1 1 1\" for a 3-class problem), if empty 1 is used by default."; } /** * Returns whether probability estimates are generated instead of -1/+1 for * classification problems. * * @param value whether to predict probabilities */ public void setProbabilityEstimates(boolean value) { m_ProbabilityEstimates = value; } /** * Sets whether to generate probability estimates instead of -1/+1 for * classification problems. * * @return true, if probability estimates should be returned */ public boolean getProbabilityEstimates() { return m_ProbabilityEstimates; } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String probabilityEstimatesTipText() { return "Whether to generate probability estimates instead of -1/+1 for classification problems."; } /** * sets the specified field * * @param o the object to set the field for * @param name the name of the field * @param value the new value of the field */ protected void setField(Object o, String name, Object value) { Field f; try { f = o.getClass().getField(name); f.set(o, value); } catch (Exception e) { e.printStackTrace(); } } /** * sets the specified field in an array * * @param o the object to set the field for * @param name the name of the field * @param index the index in the array * @param value the new value of the field */ protected void setField(Object o, String name, int index, Object value) { Field f; try { f = 
o.getClass().getField(name); Array.set(f.get(o), index, value); } catch (Exception e) { e.printStackTrace(); } } /** * returns the current value of the specified field * * @param o the object the field is member of * @param name the name of the field * @return the value */ protected Object getField(Object o, String name) { Field f; Object result; try { f = o.getClass().getField(name); result = f.get(o); } catch (Exception e) { e.printStackTrace(); result = null; } return result; } /** * sets a new array for the field * * @param o the object to set the array for * @param name the name of the field * @param type the type of the array * @param length the length of the one-dimensional array */ protected void newArray(Object o, String name, Class type, int length) { newArray(o, name, type, new int[]{length}); } /** * sets a new array for the field * * @param o the object to set the array for * @param name the name of the field * @param type the type of the array * @param dimensions the dimensions of the array */ protected void newArray(Object o, String name, Class type, int[] dimensions) { Field f; try { f = o.getClass().getField(name); f.set(o, Array.newInstance(type, dimensions)); } catch (Exception e) { e.printStackTrace(); } } /** * executes the specified method and returns the result, if any * * @param o the object the method should be called from * @param name the name of the method * @param paramClasses the classes of the parameters * @param paramValues the values of the parameters * @return the return value of the method, if any (in that case null) */ protected Object invokeMethod(Object o, String name, Class[] paramClasses, Object[] paramValues) { Method m; Object result; result = null; try { m = o.getClass().getMethod(name, paramClasses); result = m.invoke(o, paramValues); } catch (Exception e) { e.printStackTrace(); result = null; } return result; } /** * transfers the local variables into a svm_parameter object * * @return the configured svm_parameter object 
*/ protected Object getParameters() { Object result; int i; try { result = Class.forName(CLASS_SVMPARAMETER).newInstance(); setField(result, "svm_type", new Integer(m_SVMType)); setField(result, "kernel_type", new Integer(m_KernelType)); setField(result, "degree", new Integer(m_Degree)); setField(result, "gamma", new Double(m_GammaActual)); setField(result, "coef0", new Double(m_Coef0)); setField(result, "nu", new Double(m_nu)); setField(result, "cache_size", new Double(m_CacheSize)); setField(result, "C", new Double(m_Cost)); setField(result, "eps", new Double(m_eps)); setField(result, "p", new Double(m_Loss)); setField(result, "shrinking", new Integer(m_Shrinking ? 1 : 0)); setField(result, "nr_weight", new Integer(m_Weight.length)); setField(result, "probability", new Integer(m_ProbabilityEstimates ? 1 : 0)); newArray(result, "weight", Double.TYPE, m_Weight.length); newArray(result, "weight_label", Integer.TYPE, m_Weight.length); for (i = 0; i < m_Weight.length; i++) { setField(result, "weight", i, new Double(m_Weight[i])); setField(result, "weight_label", i, new Integer(m_WeightLabel[i])); } } catch (Exception e) { e.printStackTrace(); result = null; } return result; } /** * returns the svm_problem * * @param vx the x values * @param vy the y values * @return the svm_problem object */ protected Object getProblem(Vector vx, Vector vy) { Object result; try { result = Class.forName(CLASS_SVMPROBLEM).newInstance(); setField(result, "l", new Integer(vy.size())); newArray(result, "x", Class.forName(CLASS_SVMNODE), new int[]{vy.size(), 0}); for (int i = 0; i < vy.size(); i++) setField(result, "x", i, vx.elementAt(i)); newArray(result, "y", Double.TYPE, vy.size()); for (int i = 0; i < vy.size(); i++) setField(result, "y", i, vy.elementAt(i)); } catch (Exception e) { e.printStackTrace(); result = null; } return result; } /** * returns an instance into a sparse libsvm array * * @param instance the instance to work on * @return the libsvm array * @throws Exception if 
setup of array fails */ protected Object instanceToArray(Instance instance) throws Exception { int index; int count; int i; Object result; // determine number of non-zero attributes /*for (i = 0; i < instance.numAttributes(); i++) { if (i == instance.classIndex()) continue; if (instance.value(i) != 0) count++; } */ count = 0; for (i = 0; i < instance.numValues(); i++) { if (instance.index(i) == instance.classIndex()) continue; if (instance.valueSparse(i) != 0) count++; } // fill array /* result = Array.newInstance(Class.forName(CLASS_SVMNODE), count); index = 0; for (i = 0; i < instance.numAttributes(); i++) { if (i == instance.classIndex()) continue; if (instance.value(i) == 0) continue; Array.set(result, index, Class.forName(CLASS_SVMNODE).newInstance()); setField(Array.get(result, index), "index", new Integer(i + 1)); setField(Array.get(result, index), "value", new Double(instance.value(i))); index++; } */ result = Array.newInstance(Class.forName(CLASS_SVMNODE), count); index = 0; for (i = 0; i < instance.numValues(); i++) { int idx = instance.index(i); if (idx == instance.classIndex()) continue; if (instance.valueSparse(i) == 0) continue; Array.set(result, index, Class.forName(CLASS_SVMNODE).newInstance()); setField(Array.get(result, index), "index", new Integer(idx + 1)); setField(Array.get(result, index), "value", new Double(instance.valueSparse(i))); index++; } return result; } /** * Computes the distribution for a given instance. * In case of 1-class classification, 1 is returned at index 0 if libsvm * returns 1 and NaN (= missing) if libsvm returns -1. 
* * @param instance the instance for which distribution is computed * @return the distribution * @throws Exception if the distribution can't be computed successfully */ public double[] distributionForInstance (Instance instance) throws Exception { int[] labels = new int[instance.numClasses()]; double[] prob_estimates = null; if (m_ProbabilityEstimates) { invokeMethod( Class.forName(CLASS_SVM).newInstance(), "svm_get_labels", new Class[]{ Class.forName(CLASS_SVMMODEL), Array.newInstance(Integer.TYPE, instance.numClasses()).getClass()}, new Object[]{ m_Model, labels}); prob_estimates = new double[instance.numClasses()]; } if (!getDoNotReplaceMissingValues()) { m_ReplaceMissingValues.input(instance); m_ReplaceMissingValues.batchFinished(); instance = m_ReplaceMissingValues.output(); } if (m_Filter != null) { m_Filter.input(instance); m_Filter.batchFinished(); instance = m_Filter.output(); } Object x = instanceToArray(instance); double v; double[] result = new double[instance.numClasses()]; if ( m_ProbabilityEstimates && ((m_SVMType == SVMTYPE_C_SVC) || (m_SVMType == SVMTYPE_NU_SVC)) ) { v = ((Double) invokeMethod( Class.forName(CLASS_SVM).newInstance(), "svm_predict_probability", new Class[]{ Class.forName(CLASS_SVMMODEL), Array.newInstance(Class.forName(CLASS_SVMNODE), Array.getLength(x)).getClass(), Array.newInstance(Double.TYPE, prob_estimates.length).getClass()}, new Object[]{ m_Model, x, prob_estimates})).doubleValue(); // Return order of probabilities to canonical weka attribute order for (int k = 0; k < prob_estimates.length; k++) { result[labels[k]] = prob_estimates[k]; } } else { v = ((Double) invokeMethod( Class.forName(CLASS_SVM).newInstance(), "svm_predict", new Class[]{ Class.forName(CLASS_SVMMODEL), Array.newInstance(Class.forName(CLASS_SVMNODE), Array.getLength(x)).getClass()}, new Object[]{ m_Model, x})).doubleValue(); if (instance.classAttribute().isNominal()) { if (m_SVMType == SVMTYPE_ONE_CLASS_SVM) { if (v > 0) result[0] = 1; else result[0] = 
Double.NaN; // outlier } else { result[(int) v] = 1; } } else { result[0] = v; } } return result; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); // class result.enableDependency(Capability.UNARY_CLASS); result.enableDependency(Capability.NOMINAL_CLASS); result.enableDependency(Capability.NUMERIC_CLASS); result.enableDependency(Capability.DATE_CLASS); switch (m_SVMType) { case SVMTYPE_C_SVC: case SVMTYPE_NU_SVC: result.enable(Capability.NOMINAL_CLASS); break; case SVMTYPE_ONE_CLASS_SVM: result.enable(Capability.UNARY_CLASS); break; case SVMTYPE_EPSILON_SVR: case SVMTYPE_NU_SVR: result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); break; default: throw new IllegalArgumentException("SVMType " + m_SVMType + " is not supported!"); } result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * builds the classifier * * @param insts the training instances * @throws Exception if libsvm classes not in classpath or libsvm * encountered a problem */ public void buildClassifier(Instances insts) throws Exception { m_Filter = null; if (!isPresent()) throw new Exception("libsvm classes not in CLASSPATH!"); // remove instances with missing class insts = new Instances(insts); insts.deleteWithMissingClass(); if (!getDoNotReplaceMissingValues()) { m_ReplaceMissingValues = new ReplaceMissingValues(); m_ReplaceMissingValues.setInputFormat(insts); insts = Filter.useFilter(insts, m_ReplaceMissingValues); } // can classifier handle the data? 
// we check this here so that if the user turns off // replace missing values filtering, it will fail // if the data actually does have missing values getCapabilities().testWithFail(insts); if (getNormalize()) { m_Filter = new Normalize(); m_Filter.setInputFormat(insts); insts = Filter.useFilter(insts, m_Filter); } Vector vy = new Vector(); Vector vx = new Vector(); int max_index = 0; for (int d = 0; d < insts.numInstances(); d++) { Instance inst = insts.instance(d); Object x = instanceToArray(inst); int m = Array.getLength(x); if (m > 0) max_index = Math.max(max_index, ((Integer) getField(Array.get(x, m - 1), "index")).intValue()); vx.addElement(x); vy.addElement(new Double(inst.classValue())); } // calculate actual gamma if (getGamma() == 0) m_GammaActual = 1.0 / max_index; else m_GammaActual = m_Gamma; // check parameter String error_msg = (String) invokeMethod( Class.forName(CLASS_SVM).newInstance(), "svm_check_parameter", new Class[]{ Class.forName(CLASS_SVMPROBLEM), Class.forName(CLASS_SVMPARAMETER)}, new Object[]{ getProblem(vx, vy), getParameters()}); if (error_msg != null) throw new Exception("Error: " + error_msg); // train model m_Model = invokeMethod( Class.forName(CLASS_SVM).newInstance(), "svm_train", new Class[]{ Class.forName(CLASS_SVMPROBLEM), Class.forName(CLASS_SVMPARAMETER)}, new Object[]{ getProblem(vx, vy), getParameters()}); } /** * returns a string representation * * @return a string representation */ public String toString() { return "LibSVM wrapper, original code by Yasser EL-Manzalawy (= WLSVM)"; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5523 $"); } /** * Main method for testing this class. * * @param args the options */ public static void main(String[] args) { runClassifier(new LibSVM(), args); } }
47,040
27.631163
237
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/LinearRegression.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LinearRegression.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.matrix.Matrix; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.filters.Filter; import weka.filters.supervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.ReplaceMissingValues; /** <!-- globalinfo-start --> * Class for using linear regression for prediction. Uses the Akaike criterion for model selection, and is able to deal with weighted instances. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Produce debugging output. * (default no debugging output)</pre> * * <pre> -S &lt;number of selection method&gt; * Set the attribute selection method to use. 1 = None, 2 = Greedy. * (default 0 = M5' method)</pre> * * <pre> -C * Do not try to eliminate colinear attributes. 
* </pre> * * <pre> -R &lt;double&gt; * Set ridge parameter (default 1.0e-8). * </pre> * * <pre> -minimal * Conserve memory, don't keep dataset header and means/stdevs. * Model cannot be printed out if this option is enabled. (default: keep data)</pre> * <!-- options-end --> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @author Len Trigg (trigg@cs.waikato.ac.nz) * @version $Revision: 9768 $ */ public class LinearRegression extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler { /** for serialization */ static final long serialVersionUID = -3364580862046573747L; /** Array for storing coefficients of linear regression. */ protected double[] m_Coefficients; /** Which attributes are relevant? */ protected boolean[] m_SelectedAttributes; /** Variable for storing transformed training data. */ protected Instances m_TransformedData; /** The filter for removing missing values. */ protected ReplaceMissingValues m_MissingFilter; /** The filter storing the transformation from nominal to binary attributes. 
*/ protected NominalToBinary m_TransformFilter; /** The standard deviations of the class attribute */ protected double m_ClassStdDev; /** The mean of the class attribute */ protected double m_ClassMean; /** The index of the class attribute */ protected int m_ClassIndex; /** The attributes means */ protected double[] m_Means; /** The attribute standard deviations */ protected double[] m_StdDevs; /** The current attribute selection method */ protected int m_AttributeSelection; /** Attribute selection method: M5 method */ public static final int SELECTION_M5 = 0; /** Attribute selection method: No attribute selection */ public static final int SELECTION_NONE = 1; /** Attribute selection method: Greedy method */ public static final int SELECTION_GREEDY = 2; /** Attribute selection methods */ public static final Tag[] TAGS_SELECTION = { new Tag(SELECTION_NONE, "No attribute selection"), new Tag(SELECTION_M5, "M5 method"), new Tag(SELECTION_GREEDY, "Greedy method") }; /** Try to eliminate correlated attributes? */ protected boolean m_EliminateColinearAttributes = true; /** Turn off all checks and conversions? */ protected boolean m_checksTurnedOff = false; /** The ridge parameter */ protected double m_Ridge = 1.0e-8; /** Conserve memory? */ protected boolean m_Minimal = false; /** Model already built? */ protected boolean m_ModelBuilt = false; /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for using linear regression for prediction. Uses the Akaike " +"criterion for model selection, and is able to deal with weighted " +"instances."; } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Builds a regression model for the given data. * * @param data the training data to be used for generating the * linear regression function * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { m_ModelBuilt = false; if (!m_checksTurnedOff) { // can classifier handle the data? getCapabilities().testWithFail(data); // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); } // Preprocess instances if (!m_checksTurnedOff) { m_TransformFilter = new NominalToBinary(); m_TransformFilter.setInputFormat(data); data = Filter.useFilter(data, m_TransformFilter); m_MissingFilter = new ReplaceMissingValues(); m_MissingFilter.setInputFormat(data); data = Filter.useFilter(data, m_MissingFilter); data.deleteWithMissingClass(); } else { m_TransformFilter = null; m_MissingFilter = null; } m_ClassIndex = data.classIndex(); m_TransformedData = data; // Turn all attributes on for a start m_SelectedAttributes = new boolean[data.numAttributes()]; for (int i = 0; i < data.numAttributes(); i++) { if (i != m_ClassIndex) { m_SelectedAttributes[i] = true; } } m_Coefficients = null; // Compute means and standard deviations m_Means = new double[data.numAttributes()]; m_StdDevs = new double[data.numAttributes()]; for (int j = 0; j < data.numAttributes(); j++) { if (j != data.classIndex()) { m_Means[j] = data.meanOrMode(j); m_StdDevs[j] = Math.sqrt(data.variance(j)); if 
(m_StdDevs[j] == 0) { m_SelectedAttributes[j] = false; } } } m_ClassStdDev = Math.sqrt(data.variance(m_TransformedData.classIndex())); m_ClassMean = data.meanOrMode(m_TransformedData.classIndex()); // Perform the regression findBestModel(); // Save memory if (m_Minimal) { m_TransformedData = null; m_Means = null; m_StdDevs = null; } else { m_TransformedData = new Instances(data, 0); } m_ModelBuilt = true; } /** * Classifies the given instance using the linear regression function. * * @param instance the test instance * @return the classification * @throws Exception if classification can't be done successfully */ public double classifyInstance(Instance instance) throws Exception { // Transform the input instance Instance transformedInstance = instance; if (!m_checksTurnedOff) { m_TransformFilter.input(transformedInstance); m_TransformFilter.batchFinished(); transformedInstance = m_TransformFilter.output(); m_MissingFilter.input(transformedInstance); m_MissingFilter.batchFinished(); transformedInstance = m_MissingFilter.output(); } // Calculate the dependent variable from the regression model return regressionPrediction(transformedInstance, m_SelectedAttributes, m_Coefficients); } /** * Outputs the linear regression model as a string. 
* * @return the model as string */ public String toString() { if (!m_ModelBuilt) return "Linear Regression: No model built yet."; if (m_Minimal) return "Linear Regression: Model built."; try { StringBuilder text = new StringBuilder(); int column = 0; boolean first = true; text.append("\nLinear Regression Model\n\n"); text.append(m_TransformedData.classAttribute().name()+" =\n\n"); for (int i = 0; i < m_TransformedData.numAttributes(); i++) { if ((i != m_ClassIndex) && (m_SelectedAttributes[i])) { if (!first) text.append(" +\n"); else first = false; text.append(Utils.doubleToString(m_Coefficients[column], 12, 4) + " * "); text.append(m_TransformedData.attribute(i).name()); column++; } } text.append(" +\n" + Utils.doubleToString(m_Coefficients[column], 12, 4)); return text.toString(); } catch (Exception e) { return "Can't print Linear Regression!"; } } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(); newVector.addElement( new Option( "\tProduce debugging output.\n" + "\t(default no debugging output)", "D", 0, "-D")); newVector.addElement( new Option( "\tSet the attribute selection method" + " to use. 1 = None, 2 = Greedy.\n" + "\t(default 0 = M5' method)", "S", 1, "-S <number of selection method>")); newVector.addElement( new Option( "\tDo not try to eliminate colinear" + " attributes.\n", "C", 0, "-C")); newVector.addElement( new Option( "\tSet ridge parameter (default 1.0e-8).\n", "R", 1, "-R <double>")); newVector.addElement( new Option( "\tConserve memory, don't keep dataset header and means/stdevs.\n" + "\tModel cannot be printed out if this option is enabled." + "\t(default: keep data)", "minimal", 0, "-minimal")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Produce debugging output. 
* (default no debugging output)</pre> * * <pre> -S &lt;number of selection method&gt; * Set the attribute selection method to use. 1 = None, 2 = Greedy. * (default 0 = M5' method)</pre> * * <pre> -C * Do not try to eliminate colinear attributes. * </pre> * * <pre> -R &lt;double&gt; * Set ridge parameter (default 1.0e-8). * </pre> * * <pre> -minimal * Conserve memory, don't keep dataset header and means/stdevs. * Model cannot be printed out if this option is enabled. (default: keep data)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String selectionString = Utils.getOption('S', options); if (selectionString.length() != 0) { setAttributeSelectionMethod(new SelectedTag(Integer .parseInt(selectionString), TAGS_SELECTION)); } else { setAttributeSelectionMethod(new SelectedTag(SELECTION_M5, TAGS_SELECTION)); } String ridgeString = Utils.getOption('R', options); if (ridgeString.length() != 0) { setRidge(new Double(ridgeString).doubleValue()); } else { setRidge(1.0e-8); } setDebug(Utils.getFlag('D', options)); setEliminateColinearAttributes(!Utils.getFlag('C', options)); setMinimal(Utils.getFlag("minimal", options)); } /** * Returns the coefficients for this linear model. * * @return the coefficients for this linear model */ public double[] coefficients() { double[] coefficients = new double[m_SelectedAttributes.length + 1]; int counter = 0; for (int i = 0; i < m_SelectedAttributes.length; i++) { if ((m_SelectedAttributes[i]) && ((i != m_ClassIndex))) { coefficients[i] = m_Coefficients[counter++]; } } coefficients[m_SelectedAttributes.length] = m_Coefficients[counter]; return coefficients; } /** * Gets the current settings of the classifier. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector<String> result; result = new Vector<String>(); result.add("-S"); result.add("" + getAttributeSelectionMethod().getSelectedTag().getID()); if (getDebug()) result.add("-D"); if (!getEliminateColinearAttributes()) result.add("-C"); result.add("-R"); result.add("" + getRidge()); if (getMinimal()) result.add("-minimal"); return result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String ridgeTipText() { return "The value of the Ridge parameter."; } /** * Get the value of Ridge. * * @return Value of Ridge. */ public double getRidge() { return m_Ridge; } /** * Set the value of Ridge. * * @param newRidge Value to assign to Ridge. */ public void setRidge(double newRidge) { m_Ridge = newRidge; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String eliminateColinearAttributesTipText() { return "Eliminate colinear attributes."; } /** * Get the value of EliminateColinearAttributes. * * @return Value of EliminateColinearAttributes. */ public boolean getEliminateColinearAttributes() { return m_EliminateColinearAttributes; } /** * Set the value of EliminateColinearAttributes. * * @param newEliminateColinearAttributes Value to assign to EliminateColinearAttributes. 
*/ public void setEliminateColinearAttributes(boolean newEliminateColinearAttributes) { m_EliminateColinearAttributes = newEliminateColinearAttributes; } /** * Get the number of coefficients used in the model * * @return the number of coefficients */ public int numParameters() { return m_Coefficients.length-1; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String attributeSelectionMethodTipText() { return "Set the method used to select attributes for use in the linear " +"regression. Available methods are: no attribute selection, attribute " +"selection using M5's method (step through the attributes removing the one " +"with the smallest standardised coefficient until no improvement is observed " +"in the estimate of the error given by the Akaike " +"information criterion), and a greedy selection using the Akaike information " +"metric."; } /** * Sets the method used to select attributes for use in the * linear regression. * * @param method the attribute selection method to use. */ public void setAttributeSelectionMethod(SelectedTag method) { if (method.getTags() == TAGS_SELECTION) { m_AttributeSelection = method.getSelectedTag().getID(); } } /** * Gets the method used to select attributes for use in the * linear regression. * * @return the method to use. */ public SelectedTag getAttributeSelectionMethod() { return new SelectedTag(m_AttributeSelection, TAGS_SELECTION); } /** * Returns the tip text for this property. * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String minimalTipText() { return "If enabled, dataset header, means and stdevs get discarded to conserve memory; also, the model cannot be printed out."; } /** * Sets whether to be more memory conservative or being able to output * the model as string. 
* * @param value if true memory will be conserved */ public void setMinimal(boolean value) { m_Minimal = value; } /** * Returns whether to be more memory conservative or being able to output * the model as string. * * @return true if memory conservation is preferred over * outputting model description */ public boolean getMinimal() { return m_Minimal; } /** * Turns off checks for missing values, etc. Use with caution. * Also turns off scaling. */ public void turnChecksOff() { m_checksTurnedOff = true; } /** * Turns on checks for missing values, etc. Also turns * on scaling. */ public void turnChecksOn() { m_checksTurnedOff = false; } /** * Removes the attribute with the highest standardised coefficient * greater than 1.5 from the selected attributes. * * @param selectedAttributes an array of flags indicating which * attributes are included in the regression model * @param coefficients an array of coefficients for the regression * model * @return true if an attribute was removed */ protected boolean deselectColinearAttributes(boolean[] selectedAttributes, double[] coefficients) { double maxSC = 1.5; int maxAttr = -1, coeff = 0; for (int i = 0; i < selectedAttributes.length; i++) { if (selectedAttributes[i]) { double SC = Math.abs(coefficients[coeff] * m_StdDevs[i] / m_ClassStdDev); if (SC > maxSC) { maxSC = SC; maxAttr = i; } coeff++; } } if (maxAttr >= 0) { selectedAttributes[maxAttr] = false; if (m_Debug) { System.out.println("Deselected colinear attribute:" + (maxAttr + 1) + " with standardised coefficient: " + maxSC); } return true; } return false; } /** * Performs a greedy search for the best regression model using * Akaike's criterion. * * @throws Exception if regression can't be done */ protected void findBestModel() throws Exception { // For the weighted case we still use numInstances in // the calculation of the Akaike criterion. 
int numInstances = m_TransformedData.numInstances(); if (m_Debug) { System.out.println((new Instances(m_TransformedData, 0)).toString()); } // Perform a regression for the full model, and remove colinear attributes do { m_Coefficients = doRegression(m_SelectedAttributes); } while (m_EliminateColinearAttributes && deselectColinearAttributes(m_SelectedAttributes, m_Coefficients)); // Figure out current number of attributes + 1. (We treat this model // as the full model for the Akaike-based methods.) int numAttributes = 1; for (int i = 0; i < m_SelectedAttributes.length; i++) { if (m_SelectedAttributes[i]) { numAttributes++; } } double fullMSE = calculateSE(m_SelectedAttributes, m_Coefficients); double akaike = (numInstances - numAttributes) + 2 * numAttributes; if (m_Debug) { System.out.println("Initial Akaike value: " + akaike); } boolean improved; int currentNumAttributes = numAttributes; switch (m_AttributeSelection) { case SELECTION_GREEDY: // Greedy attribute removal do { boolean[] currentSelected = (boolean[]) m_SelectedAttributes.clone(); improved = false; currentNumAttributes--; for (int i = 0; i < m_SelectedAttributes.length; i++) { if (currentSelected[i]) { // Calculate the akaike rating without this attribute currentSelected[i] = false; double[] currentCoeffs = doRegression(currentSelected); double currentMSE = calculateSE(currentSelected, currentCoeffs); double currentAkaike = currentMSE / fullMSE * (numInstances - numAttributes) + 2 * currentNumAttributes; if (m_Debug) { System.out.println("(akaike: " + currentAkaike); } // If it is better than the current best if (currentAkaike < akaike) { if (m_Debug) { System.err.println("Removing attribute " + (i + 1) + " improved Akaike: " + currentAkaike); } improved = true; akaike = currentAkaike; System.arraycopy(currentSelected, 0, m_SelectedAttributes, 0, m_SelectedAttributes.length); m_Coefficients = currentCoeffs; } currentSelected[i] = true; } } } while (improved); break; case SELECTION_M5: // Step through 
the attributes removing the one with the smallest // standardised coefficient until no improvement in Akaike do { improved = false; currentNumAttributes--; // Find attribute with smallest SC double minSC = 0; int minAttr = -1, coeff = 0; for (int i = 0; i < m_SelectedAttributes.length; i++) { if (m_SelectedAttributes[i]) { double SC = Math.abs(m_Coefficients[coeff] * m_StdDevs[i] / m_ClassStdDev); if ((coeff == 0) || (SC < minSC)) { minSC = SC; minAttr = i; } coeff++; } } // See whether removing it improves the Akaike score if (minAttr >= 0) { m_SelectedAttributes[minAttr] = false; double[] currentCoeffs = doRegression(m_SelectedAttributes); double currentMSE = calculateSE(m_SelectedAttributes, currentCoeffs); double currentAkaike = currentMSE / fullMSE * (numInstances - numAttributes) + 2 * currentNumAttributes; if (m_Debug) { System.out.println("(akaike: " + currentAkaike); } // If it is better than the current best if (currentAkaike < akaike) { if (m_Debug) { System.err.println("Removing attribute " + (minAttr + 1) + " improved Akaike: " + currentAkaike); } improved = true; akaike = currentAkaike; m_Coefficients = currentCoeffs; } else { m_SelectedAttributes[minAttr] = true; } } } while (improved); break; case SELECTION_NONE: break; } } /** * Calculate the squared error of a regression model on the * training data * * @param selectedAttributes an array of flags indicating which * attributes are included in the regression model * @param coefficients an array of coefficients for the regression * model * @return the mean squared error on the training data * @throws Exception if there is a missing class value in the training * data */ protected double calculateSE(boolean[] selectedAttributes, double[] coefficients) throws Exception { double mse = 0; for (int i = 0; i < m_TransformedData.numInstances(); i++) { double prediction = regressionPrediction(m_TransformedData.instance(i), selectedAttributes, coefficients); double error = prediction - 
m_TransformedData.instance(i).classValue(); mse += error * error; } return mse; } /** * Calculate the dependent value for a given instance for a * given regression model. * * @param transformedInstance the input instance * @param selectedAttributes an array of flags indicating which * attributes are included in the regression model * @param coefficients an array of coefficients for the regression * model * @return the regression value for the instance. * @throws Exception if the class attribute of the input instance * is not assigned */ protected double regressionPrediction(Instance transformedInstance, boolean[] selectedAttributes, double[] coefficients) throws Exception { double result = 0; int column = 0; for (int j = 0; j < transformedInstance.numAttributes(); j++) { if ((m_ClassIndex != j) && (selectedAttributes[j])) { result += coefficients[column] * transformedInstance.value(j); column++; } } result += coefficients[column]; return result; } /** * Calculate a linear regression using the selected attributes * * @param selectedAttributes an array of booleans where each element * is true if the corresponding attribute should be included in the * regression. * @return an array of coefficients for the linear regression model. * @throws Exception if an error occurred during the regression. 
*/ protected double[] doRegression(boolean[] selectedAttributes) throws Exception { if (m_Debug) { System.out.print("doRegression("); for (int i = 0; i < selectedAttributes.length; i++) { System.out.print(" " + selectedAttributes[i]); } System.out.println(" )"); } int numAttributes = 0; for (int i = 0; i < selectedAttributes.length; i++) { if (selectedAttributes[i]) { numAttributes++; } } // Check whether there are still attributes left Matrix independent = null, dependent = null; if (numAttributes > 0) { independent = new Matrix(m_TransformedData.numInstances(), numAttributes); dependent = new Matrix(m_TransformedData.numInstances(), 1); for (int i = 0; i < m_TransformedData.numInstances(); i ++) { Instance inst = m_TransformedData.instance(i); double sqrt_weight = Math.sqrt(inst.weight()); int column = 0; for (int j = 0; j < m_TransformedData.numAttributes(); j++) { if (j == m_ClassIndex) { dependent.set(i, 0, inst.classValue() * sqrt_weight); } else { if (selectedAttributes[j]) { double value = inst.value(j) - m_Means[j]; // We only need to do this if we want to // scale the input if (!m_checksTurnedOff) { value /= m_StdDevs[j]; } independent.set(i, column, value * sqrt_weight); column++; } } } } } // Compute coefficients (note that we have to treat the // intercept separately so that it doesn't get affected // by the ridge constant.) double[] coefficients = new double[numAttributes + 1]; if (numAttributes > 0) { double[] coeffsWithoutIntercept = independent.regression(dependent, m_Ridge).getCoefficients(); System.arraycopy(coeffsWithoutIntercept, 0, coefficients, 0, numAttributes); } coefficients[numAttributes] = m_ClassMean; // Convert coefficients into original scale int column = 0; for(int i = 0; i < m_TransformedData.numAttributes(); i++) { if ((i != m_TransformedData.classIndex()) && (selectedAttributes[i])) { // We only need to do this if we have scaled the // input. 
if (!m_checksTurnedOff) { coefficients[column] /= m_StdDevs[i]; } // We have centred the input coefficients[coefficients.length - 1] -= coefficients[column] * m_Means[i]; column++; } } return coefficients; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9768 $"); } /** * Generates a linear regression function predictor. * * @param argv the options */ public static void main(String argv[]) { runClassifier(new LinearRegression(), argv); } }
28,002
28.139438
144
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/Logistic.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Logistic.java * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import weka.classifiers.Classifier; import weka.classifiers.AbstractClassifier; import weka.core.Aggregateable; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Optimization; import weka.core.ConjugateGradientOptimization; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; import weka.filters.unsupervised.attribute.RemoveUseless; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import java.util.Enumeration; import java.util.Vector; /** <!-- globalinfo-start --> * Class for building and using a multinomial logistic regression model with a ridge estimator.<br/> * <br/> * There are some modifications, however, compared to the paper of leCessie and van Houwelingen(1992): <br/> * <br/> * If there are k classes for n instances with m attributes, 
the parameter matrix B to be calculated will be an m*(k-1) matrix.<br/> * <br/> * The probability for class j with the exception of the last class is<br/> * <br/> * Pj(Xi) = exp(XiBj)/((sum[j=1..(k-1)]exp(Xi*Bj))+1) <br/> * <br/> * The last class has probability<br/> * <br/> * 1-(sum[j=1..(k-1)]Pj(Xi)) <br/> * = 1/((sum[j=1..(k-1)]exp(Xi*Bj))+1)<br/> * <br/> * The (negative) multinomial log-likelihood is thus: <br/> * <br/> * L = -sum[i=1..n]{<br/> * sum[j=1..(k-1)](Yij * ln(Pj(Xi)))<br/> * +(1 - (sum[j=1..(k-1)]Yij)) <br/> * * ln(1 - sum[j=1..(k-1)]Pj(Xi))<br/> * } + ridge * (B^2)<br/> * <br/> * In order to find the matrix B for which L is minimised, a Quasi-Newton Method is used to search for the optimized values of the m*(k-1) variables. Note that before we use the optimization procedure, we 'squeeze' the matrix B into a m*(k-1) vector. For details of the optimization procedure, please check weka.core.Optimization class.<br/> * <br/> * Although original Logistic Regression does not deal with instance weights, we modify the algorithm a little bit to handle the instance weights.<br/> * <br/> * For more information see:<br/> * <br/> * le Cessie, S., van Houwelingen, J.C. (1992). Ridge Estimators in Logistic Regression. Applied Statistics. 41(1):191-201.<br/> * <br/> * Note: Missing values are replaced using a ReplaceMissingValuesFilter, and nominal attributes are transformed into numeric attributes using a NominalToBinaryFilter. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;article{leCessie1992, * author = {le Cessie, S. 
and van Houwelingen, J.C.}, * journal = {Applied Statistics}, * number = {1}, * pages = {191-201}, * title = {Ridge Estimators in Logistic Regression}, * volume = {41}, * year = {1992} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -R &lt;ridge&gt; * Set the ridge in the log-likelihood.</pre> * * <pre> -M &lt;number&gt; * Set the maximum number of iterations (default -1, until convergence).</pre> * <!-- options-end --> * * @author Xin Xu (xx5@cs.waikato.ac.nz) * @version $Revision: 9785 $ */ public class Logistic extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, TechnicalInformationHandler, Aggregateable<Logistic> { /** for serialization */ static final long serialVersionUID = 3932117032546553727L; /** The coefficients (optimized parameters) of the model */ protected double [][] m_Par; /** The data saved as a matrix */ protected double [][] m_Data; /** The number of attributes in the model */ protected int m_NumPredictors; /** The index of the class attribute */ protected int m_ClassIndex; /** The number of the class labels */ protected int m_NumClasses; /** The ridge parameter. */ protected double m_Ridge = 1e-8; /** An attribute filter */ private RemoveUseless m_AttFilter; /** The filter used to make attributes numeric. */ private NominalToBinary m_NominalToBinary; /** The filter used to get rid of missing values. */ private ReplaceMissingValues m_ReplaceMissingValues; /** Debugging output */ protected boolean m_Debug; /** Log-likelihood of the searched model */ protected double m_LL; /** The maximum number of iterations. */ private int m_MaxIts = -1; /** Wether to use conjugate gradient descent rather than BFGS updates. 
*/ private boolean m_useConjugateGradientDescent = false; private Instances m_structure; /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building and using a multinomial logistic " +"regression model with a ridge estimator.\n\n" +"There are some modifications, however, compared to the paper of " +"leCessie and van Houwelingen(1992): \n\n" +"If there are k classes for n instances with m attributes, the " +"parameter matrix B to be calculated will be an m*(k-1) matrix.\n\n" +"The probability for class j with the exception of the last class is\n\n" +"Pj(Xi) = exp(XiBj)/((sum[j=1..(k-1)]exp(Xi*Bj))+1) \n\n" +"The last class has probability\n\n" +"1-(sum[j=1..(k-1)]Pj(Xi)) \n\t= 1/((sum[j=1..(k-1)]exp(Xi*Bj))+1)\n\n" +"The (negative) multinomial log-likelihood is thus: \n\n" +"L = -sum[i=1..n]{\n\tsum[j=1..(k-1)](Yij * ln(Pj(Xi)))" +"\n\t+(1 - (sum[j=1..(k-1)]Yij)) \n\t* ln(1 - sum[j=1..(k-1)]Pj(Xi))" +"\n\t} + ridge * (B^2)\n\n" +"In order to find the matrix B for which L is minimised, a " +"Quasi-Newton Method is used to search for the optimized values of " +"the m*(k-1) variables. Note that before we use the optimization " +"procedure, we 'squeeze' the matrix B into a m*(k-1) vector. 
For " +"details of the optimization procedure, please check " +"weka.core.Optimization class.\n\n" +"Although original Logistic Regression does not deal with instance " +"weights, we modify the algorithm a little bit to handle the " +"instance weights.\n\n" +"For more information see:\n\n" + getTechnicalInformation().toString() + "\n\n" +"Note: Missing values are replaced using a ReplaceMissingValuesFilter, and " +"nominal attributes are transformed into numeric attributes using a " +"NominalToBinaryFilter."; } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "le Cessie, S. and van Houwelingen, J.C."); result.setValue(Field.YEAR, "1992"); result.setValue(Field.TITLE, "Ridge Estimators in Logistic Regression"); result.setValue(Field.JOURNAL, "Applied Statistics"); result.setValue(Field.VOLUME, "41"); result.setValue(Field.NUMBER, "1"); result.setValue(Field.PAGES, "191-201"); return result; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Vector newVector = new Vector(4); newVector.addElement(new Option("\tTurn on debugging output.", "D", 0, "-D")); newVector.addElement(new Option("\tUse conjugate gradient descent rather than BFGS updates.", "C", 0, "-C")); newVector.addElement(new Option("\tSet the ridge in the log-likelihood.", "R", 1, "-R <ridge>")); newVector.addElement(new Option("\tSet the maximum number of iterations"+ " (default -1, until convergence).", "M", 1, "-M <number>")); return newVector.elements(); } /** * Parses a given list of options. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -R &lt;ridge&gt; * Set the ridge in the log-likelihood.</pre> * * <pre> -M &lt;number&gt; * Set the maximum number of iterations (default -1, until convergence).</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setDebug(Utils.getFlag('D', options)); setUseConjugateGradientDescent(Utils.getFlag('C', options)); String ridgeString = Utils.getOption('R', options); if (ridgeString.length() != 0) m_Ridge = Double.parseDouble(ridgeString); else m_Ridge = 1.0e-8; String maxItsString = Utils.getOption('M', options); if (maxItsString.length() != 0) m_MaxIts = Integer.parseInt(maxItsString); else m_MaxIts = -1; } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] options = new String [6]; int current = 0; if (getDebug()) options[current++] = "-D"; if (getUseConjugateGradientDescent()) { options[current++] = "-C"; } options[current++] = "-R"; options[current++] = ""+m_Ridge; options[current++] = "-M"; options[current++] = ""+m_MaxIts; while (current < options.length) options[current++] = ""; return options; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String debugTipText() { return "Output debug information to the console."; } /** * Sets whether debugging output will be printed. * * @param debug true if debugging output should be printed */ public void setDebug(boolean debug) { m_Debug = debug; } /** * Gets whether debugging output will be printed. 
* * @return true if debugging output will be printed */ public boolean getDebug() { return m_Debug; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String useConjugateGradientDescentTipText() { return "Use conjugate gradient descent rather than BFGS updates; faster for problems with many parameters."; } /** * Sets whether conjugate gradient descent is used. * * @param useConjugateGradientDescent true if CGD is to be used. */ public void setUseConjugateGradientDescent(boolean useConjugateGradientDescent) { m_useConjugateGradientDescent = useConjugateGradientDescent; } /** * Gets whether to use conjugate gradient descent rather than BFGS updates. * * @return true if CGD is used */ public boolean getUseConjugateGradientDescent() { return m_useConjugateGradientDescent; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String ridgeTipText() { return "Set the Ridge value in the log-likelihood."; } /** * Sets the ridge in the log-likelihood. * * @param ridge the ridge */ public void setRidge(double ridge) { m_Ridge = ridge; } /** * Gets the ridge in the log-likelihood. * * @return the ridge */ public double getRidge() { return m_Ridge; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String maxItsTipText() { return "Maximum number of iterations to perform."; } /** * Get the value of MaxIts. * * @return Value of MaxIts. */ public int getMaxIts() { return m_MaxIts; } /** * Set the value of MaxIts. * * @param newMaxIts Value to assign to MaxIts. 
*/ public void setMaxIts(int newMaxIts) { m_MaxIts = newMaxIts; } private class OptEng extends Optimization { OptObject m_oO = null; private OptEng(OptObject oO) { m_oO = oO; } protected double objectiveFunction(double[] x){ return m_oO.objectiveFunction(x); } protected double[] evaluateGradient(double[] x){ return m_oO.evaluateGradient(x); } public String getRevision() { return RevisionUtils.extract("$Revision: 9785 $"); } } private class OptEngCG extends ConjugateGradientOptimization { OptObject m_oO = null; private OptEngCG(OptObject oO) { m_oO = oO; } protected double objectiveFunction(double[] x){ return m_oO.objectiveFunction(x); } protected double[] evaluateGradient(double[] x){ return m_oO.evaluateGradient(x); } public String getRevision() { return RevisionUtils.extract("$Revision: 9785 $"); } } private class OptObject { /** Weights of instances in the data */ private double[] weights; /** Class labels of instances */ private int[] cls; /** * Set the weights of instances * @param w the weights to be set */ public void setWeights(double[] w) { weights = w; } /** * Set the class labels of instances * @param c the class labels to be set */ public void setClassLabels(int[] c) { cls = c; } /** * Evaluate objective function * @param x the current values of variables * @return the value of the objective function */ protected double objectiveFunction(double[] x){ double nll = 0; // -LogLikelihood int dim = m_NumPredictors+1; // Number of variables per class for(int i=0; i<cls.length; i++){ // ith instance double[] exp = new double[m_NumClasses-1]; int index; for(int offset=0; offset<m_NumClasses-1; offset++){ index = offset * dim; for(int j=0; j<dim; j++) exp[offset] += m_Data[i][j]*x[index + j]; } double max = exp[Utils.maxIndex(exp)]; double denom = Math.exp(-max); double num; if (cls[i] == m_NumClasses - 1) { // Class of this instance num = -max; } else { num = exp[cls[i]] - max; } for(int offset=0; offset<m_NumClasses-1; offset++){ denom += Math.exp(exp[offset] 
- max); } nll -= weights[i]*(num - Math.log(denom)); // Weighted NLL } // Ridge: note that intercepts NOT included for(int offset=0; offset<m_NumClasses-1; offset++){ for(int r=1; r<dim; r++) nll += m_Ridge*x[offset*dim+r]*x[offset*dim+r]; } return nll; } /** * Evaluate Jacobian vector * @param x the current values of variables * @return the gradient vector */ protected double[] evaluateGradient(double[] x){ double[] grad = new double[x.length]; int dim = m_NumPredictors+1; // Number of variables per class for(int i=0; i<cls.length; i++){ // ith instance double[] num=new double[m_NumClasses-1]; // numerator of [-log(1+sum(exp))]' int index; for(int offset=0; offset<m_NumClasses-1; offset++){ // Which part of x double exp=0.0; index = offset * dim; for(int j=0; j<dim; j++) exp += m_Data[i][j]*x[index + j]; num[offset] = exp; } double max = num[Utils.maxIndex(num)]; double denom = Math.exp(-max); // Denominator of [-log(1+sum(exp))]' for(int offset=0; offset<m_NumClasses-1; offset++){ num[offset] = Math.exp(num[offset] - max); denom += num[offset]; } Utils.normalize(num, denom); // Update denominator of the gradient of -log(Posterior) double firstTerm; for(int offset=0; offset<m_NumClasses-1; offset++){ // Which part of x index = offset * dim; firstTerm = weights[i] * num[offset]; for(int q=0; q<dim; q++){ grad[index + q] += firstTerm * m_Data[i][q]; } } if(cls[i] != m_NumClasses-1){ // Not the last class for(int p=0; p<dim; p++){ grad[cls[i]*dim+p] -= weights[i]*m_Data[i][p]; } } } // Ridge: note that intercepts NOT included for(int offset=0; offset<m_NumClasses-1; offset++){ for(int r=1; r<dim; r++) grad[offset*dim+r] += 2*m_Ridge*x[offset*dim+r]; } return grad; } } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.DATE_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class result.enable(Capability.NOMINAL_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Builds the classifier * * @param train the training data to be used for generating the * boosted classifier. * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances train) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(train); // remove instances with missing class train = new Instances(train); train.deleteWithMissingClass(); // Replace missing values m_ReplaceMissingValues = new ReplaceMissingValues(); m_ReplaceMissingValues.setInputFormat(train); train = Filter.useFilter(train, m_ReplaceMissingValues); // Remove useless attributes m_AttFilter = new RemoveUseless(); m_AttFilter.setInputFormat(train); train = Filter.useFilter(train, m_AttFilter); // Transform attributes m_NominalToBinary = new NominalToBinary(); m_NominalToBinary.setInputFormat(train); train = Filter.useFilter(train, m_NominalToBinary); // Save the structure for printing the model m_structure = new Instances(train, 0); // Extract data m_ClassIndex = train.classIndex(); m_NumClasses = train.numClasses(); int nK = m_NumClasses - 1; // Only K-1 class labels needed int nR = m_NumPredictors = train.numAttributes() - 1; int nC = train.numInstances(); m_Data = new double[nC][nR + 1]; // Data values int [] Y = new int[nC]; // Class labels double [] xMean= new double[nR + 1]; // Attribute means double [] xSD = new double[nR + 1]; // Attribute stddev's double [] sY = new double[nK + 1]; // Number of classes double [] weights = new double[nC]; // Weights 
of instances double totWeights = 0; // Total weights of the instances m_Par = new double[nR + 1][nK]; // Optimized parameter values if (m_Debug) { System.out.println("Extracting data..."); } for (int i = 0; i < nC; i++) { // initialize X[][] Instance current = train.instance(i); Y[i] = (int)current.classValue(); // Class value starts from 0 weights[i] = current.weight(); // Dealing with weights totWeights += weights[i]; m_Data[i][0] = 1; int j = 1; for (int k = 0; k <= nR; k++) { if (k != m_ClassIndex) { double x = current.value(k); m_Data[i][j] = x; xMean[j] += weights[i]*x; xSD[j] += weights[i]*x*x; j++; } } // Class count sY[Y[i]]++; } if((totWeights <= 1) && (nC > 1)) throw new Exception("Sum of weights of instances less than 1, please reweight!"); xMean[0] = 0; xSD[0] = 1; for (int j = 1; j <= nR; j++) { xMean[j] = xMean[j] / totWeights; if(totWeights > 1) xSD[j] = Math.sqrt(Math.abs(xSD[j] - totWeights*xMean[j]*xMean[j])/(totWeights-1)); else xSD[j] = 0; } if (m_Debug) { // Output stats about input data System.out.println("Descriptives..."); for (int m = 0; m <= nK; m++) System.out.println(sY[m] + " cases have class " + m); System.out.println("\n Variable Avg SD "); for (int j = 1; j <= nR; j++) System.out.println(Utils.doubleToString(j,8,4) + Utils.doubleToString(xMean[j], 10, 4) + Utils.doubleToString(xSD[j], 10, 4) ); } // Normalise input data for (int i = 0; i < nC; i++) { for (int j = 0; j <= nR; j++) { if (xSD[j] != 0) { m_Data[i][j] = (m_Data[i][j] - xMean[j]) / xSD[j]; } } } if (m_Debug) { System.out.println("\nIteration History..." 
); } double x[] = new double[(nR+1)*nK]; double[][] b = new double[2][x.length]; // Boundary constraints, N/A here // Initialize for(int p=0; p<nK; p++){ int offset=p*(nR+1); x[offset] = Math.log(sY[p]+1.0) - Math.log(sY[nK]+1.0); // Null model b[0][offset] = Double.NaN; b[1][offset] = Double.NaN; for (int q=1; q <= nR; q++){ x[offset+q] = 0.0; b[0][offset+q] = Double.NaN; b[1][offset+q] = Double.NaN; } } OptObject oO = new OptObject(); oO.setWeights(weights); oO.setClassLabels(Y); Optimization opt = null; if (m_useConjugateGradientDescent) { opt = new OptEngCG(oO); } else { opt = new OptEng(oO); } opt.setDebug(m_Debug); if(m_MaxIts == -1){ // Search until convergence x = opt.findArgmin(x, b); while(x==null){ x = opt.getVarbValues(); if (m_Debug) System.out.println("First set of iterations finished, not enough!"); x = opt.findArgmin(x, b); } if (m_Debug) System.out.println(" -------------<Converged>--------------"); } else{ opt.setMaxIteration(m_MaxIts); x = opt.findArgmin(x, b); if(x==null) // Not enough, but use the current value x = opt.getVarbValues(); } m_LL = -opt.getMinFunction(); // Log-likelihood // Don't need data matrix anymore m_Data = null; // Convert coefficients back to non-normalized attribute units for(int i=0; i < nK; i++){ m_Par[0][i] = x[i*(nR+1)]; for(int j = 1; j <= nR; j++) { m_Par[j][i] = x[i*(nR+1)+j]; if (xSD[j] != 0) { m_Par[j][i] /= xSD[j]; m_Par[0][i] -= m_Par[j][i] * xMean[j]; } } } } /** * Computes the distribution for a given instance * * @param instance the instance for which distribution is computed * @return the distribution * @throws Exception if the distribution can't be computed successfully */ public double [] distributionForInstance(Instance instance) throws Exception { m_ReplaceMissingValues.input(instance); instance = m_ReplaceMissingValues.output(); m_AttFilter.input(instance); instance = m_AttFilter.output(); m_NominalToBinary.input(instance); instance = m_NominalToBinary.output(); // Extract the predictor columns into an 
array double [] instDat = new double [m_NumPredictors + 1]; int j = 1; instDat[0] = 1; for (int k = 0; k <= m_NumPredictors; k++) { if (k != m_ClassIndex) { instDat[j++] = instance.value(k); } } double [] distribution = evaluateProbability(instDat); return distribution; } /** * Compute the posterior distribution using optimized parameter values * and the testing instance. * @param data the testing instance * @return the posterior probability distribution */ private double[] evaluateProbability(double[] data){ double[] prob = new double[m_NumClasses], v = new double[m_NumClasses]; // Log-posterior before normalizing for(int j = 0; j < m_NumClasses-1; j++){ for(int k = 0; k <= m_NumPredictors; k++){ v[j] += m_Par[k][j] * data[k]; } } v[m_NumClasses-1] = 0; // Do so to avoid scaling problems for(int m=0; m < m_NumClasses; m++){ double sum = 0; for(int n=0; n < m_NumClasses-1; n++) sum += Math.exp(v[n] - v[m]); prob[m] = 1 / (sum + Math.exp(-v[m])); } return prob; } /** * Returns the coefficients for this logistic model. * The first dimension indexes the attributes, and * the second the classes. * * @return the coefficients for this logistic model */ public double [][] coefficients() { return m_Par; } /** * Gets a string describing the classifier. * * @return a string describing the classifer built. 
*/ public String toString() { StringBuffer temp = new StringBuffer(); String result = ""; temp.append("Logistic Regression with ridge parameter of " + m_Ridge); if (m_Par == null) { return result + ": No model built yet."; } // find longest attribute name int attLength = 0; for (int i = 0; i < m_structure.numAttributes(); i++) { if (i != m_structure.classIndex() && m_structure.attribute(i).name().length() > attLength) { attLength = m_structure.attribute(i).name().length(); } } if ("Intercept".length() > attLength) { attLength = "Intercept".length(); } if ("Variable".length() > attLength) { attLength = "Variable".length(); } attLength += 2; int colWidth = 0; // check length of class names for (int i = 0; i < m_structure.classAttribute().numValues() - 1; i++) { if (m_structure.classAttribute().value(i).length() > colWidth) { colWidth = m_structure.classAttribute().value(i).length(); } } // check against coefficients and odds ratios for (int j = 1; j <= m_NumPredictors; j++) { for (int k = 0; k < m_NumClasses - 1; k++) { if (Utils.doubleToString(m_Par[j][k], 12, 4).trim().length() > colWidth) { colWidth = Utils.doubleToString(m_Par[j][k], 12, 4).trim().length(); } double ORc = Math.exp(m_Par[j][k]); String t = " " + ((ORc > 1e10) ? 
"" + ORc : Utils.doubleToString(ORc, 12, 4)); if (t.trim().length() > colWidth) { colWidth = t.trim().length(); } } } if ("Class".length() > colWidth) { colWidth = "Class".length(); } colWidth += 2; temp.append("\nCoefficients...\n"); temp.append(Utils.padLeft(" ", attLength) + Utils.padLeft("Class", colWidth) + "\n"); temp.append(Utils.padRight("Variable", attLength)); for (int i = 0; i < m_NumClasses - 1; i++) { String className = m_structure.classAttribute().value(i); temp.append(Utils.padLeft(className, colWidth)); } temp.append("\n"); int separatorL = attLength + ((m_NumClasses - 1) * colWidth); for (int i = 0; i < separatorL; i++) { temp.append("="); } temp.append("\n"); int j = 1; for (int i = 0; i < m_structure.numAttributes(); i++) { if (i != m_structure.classIndex()) { temp.append(Utils.padRight(m_structure.attribute(i).name(), attLength)); for (int k = 0; k < m_NumClasses-1; k++) { temp.append(Utils.padLeft(Utils.doubleToString(m_Par[j][k], 12, 4).trim(), colWidth)); } temp.append("\n"); j++; } } temp.append(Utils.padRight("Intercept", attLength)); for (int k = 0; k < m_NumClasses-1; k++) { temp.append(Utils.padLeft(Utils.doubleToString(m_Par[0][k], 10, 4).trim(), colWidth)); } temp.append("\n"); temp.append("\n\nOdds Ratios...\n"); temp.append(Utils.padLeft(" ", attLength) + Utils.padLeft("Class", colWidth) + "\n"); temp.append(Utils.padRight("Variable", attLength)); for (int i = 0; i < m_NumClasses - 1; i++) { String className = m_structure.classAttribute().value(i); temp.append(Utils.padLeft(className, colWidth)); } temp.append("\n"); for (int i = 0; i < separatorL; i++) { temp.append("="); } temp.append("\n"); j = 1; for (int i = 0; i < m_structure.numAttributes(); i++) { if (i != m_structure.classIndex()) { temp.append(Utils.padRight(m_structure.attribute(i).name(), attLength)); for (int k = 0; k < m_NumClasses-1; k++) { double ORc = Math.exp(m_Par[j][k]); String ORs = " " + ((ORc > 1e10) ? 
"" + ORc : Utils.doubleToString(ORc, 12, 4)); temp.append(Utils.padLeft(ORs.trim(), colWidth)); } temp.append("\n"); j++; } } return temp.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9785 $"); } protected int m_numModels = 0; /** * Aggregate an object with this one * * @param toAggregate the object to aggregate * @return the result of aggregation * @throws Exception if the supplied object can't be aggregated for some * reason */ @Override public Logistic aggregate(Logistic toAggregate) throws Exception { if (m_numModels == Integer.MIN_VALUE) { throw new Exception( "Can't aggregate further - model has already been " + "aggregated and finalized"); } if (m_Par == null) { throw new Exception("No model built yet, can't aggregate"); } if (!m_structure.equalHeaders(toAggregate.m_structure)) { throw new Exception("Can't aggregate - data headers dont match: " + m_structure.equalHeadersMsg(toAggregate.m_structure)); } for (int i = 0; i < m_Par.length; i++) { for (int j = 0; j < m_Par[i].length; j++) { m_Par[i][j] += toAggregate.m_Par[i][j]; } } m_numModels++; return this; } /** * Call to complete the aggregation process. Allows implementers to do any * final processing based on how many objects were aggregated. * * @throws Exception if the aggregation can't be finalized for some reason */ @Override public void finalizeAggregation() throws Exception { if (m_numModels == Integer.MIN_VALUE) { throw new Exception("Aggregation has already been finalized"); } if (m_numModels == 0) { throw new Exception("Unable to finalize aggregation - " + "haven't seen any models to aggregate"); } for (int i = 0; i < m_Par.length; i++) { for (int j = 0; j < m_Par[i].length; j++) { m_Par[i][j] /= (m_numModels + 1); } } // aggregation complete m_numModels = Integer.MIN_VALUE; } /** * Main method for testing this class. 
* * @param argv should contain the command line arguments to the * scheme (see Evaluation) */ public static void main(String [] argv) { runClassifier(new Logistic(), argv); } }
32,171
29.293785
343
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/MultilayerPerceptron.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * MultilayerPerceptron.java * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.functions; import java.awt.BorderLayout; import java.awt.Color; import java.awt.Component; import java.awt.Dimension; import java.awt.FontMetrics; import java.awt.Graphics; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import java.awt.event.MouseAdapter; import java.awt.event.MouseEvent; import java.awt.event.WindowAdapter; import java.awt.event.WindowEvent; import java.util.Enumeration; import java.util.Random; import java.util.StringTokenizer; import java.util.Vector; import javax.swing.BorderFactory; import javax.swing.Box; import javax.swing.BoxLayout; import javax.swing.JButton; import javax.swing.JFrame; import javax.swing.JLabel; import javax.swing.JOptionPane; import javax.swing.JPanel; import javax.swing.JScrollPane; import javax.swing.JTextField; import weka.classifiers.AbstractClassifier; import weka.classifiers.Classifier; import weka.classifiers.functions.neural.LinearUnit; import weka.classifiers.functions.neural.NeuralConnection; import weka.classifiers.functions.neural.NeuralNode; import weka.classifiers.functions.neural.SigmoidUnit; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.DenseInstance; import weka.core.FastVector; 
import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Randomizable; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.filters.Filter; import weka.filters.unsupervised.attribute.NominalToBinary; /** <!-- globalinfo-start --> * A Classifier that uses backpropagation to classify instances.<br/> * This network can be built by hand, created by an algorithm or both. The network can also be monitored and modified during training time. The nodes in this network are all sigmoid (except for when the class is numeric in which case the the output nodes become unthresholded linear units). * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -L &lt;learning rate&gt; * Learning Rate for the backpropagation algorithm. * (Value should be between 0 - 1, Default = 0.3).</pre> * * <pre> -M &lt;momentum&gt; * Momentum Rate for the backpropagation algorithm. * (Value should be between 0 - 1, Default = 0.2).</pre> * * <pre> -N &lt;number of epochs&gt; * Number of epochs to train through. * (Default = 500).</pre> * * <pre> -V &lt;percentage size of validation set&gt; * Percentage size of validation set to use to terminate * training (if this is non zero it can pre-empt num of epochs. * (Value should be between 0 - 100, Default = 0).</pre> * * <pre> -S &lt;seed&gt; * The value used to seed the random number generator * (Value should be &gt;= 0 and and a long, Default = 0).</pre> * * <pre> -E &lt;threshold for number of consequetive errors&gt; * The consequetive number of errors allowed for validation * testing before the netwrok terminates. * (Value should be &gt; 0, Default = 20).</pre> * * <pre> -G * GUI will be opened. * (Use this to bring up a GUI).</pre> * * <pre> -A * Autocreation of the network connections will NOT be done. 
* (This will be ignored if -G is NOT set)</pre> * * <pre> -B * A NominalToBinary filter will NOT automatically be used. * (Set this to not use a NominalToBinary filter).</pre> * * <pre> -H &lt;comma seperated numbers for nodes on each layer&gt; * The hidden layers to be created for the network. * (Value should be a list of comma separated Natural * numbers or the letters 'a' = (attribs + classes) / 2, * 'i' = attribs, 'o' = classes, 't' = attribs .+ classes) * for wildcard values, Default = a).</pre> * * <pre> -C * Normalizing a numeric class will NOT be done. * (Set this to not normalize the class if it's numeric).</pre> * * <pre> -I * Normalizing the attributes will NOT be done. * (Set this to not normalize the attributes).</pre> * * <pre> -R * Reseting the network will NOT be allowed. * (Set this to not allow the network to reset).</pre> * * <pre> -D * Learning rate decay will occur. * (Set this to cause the learning rate to decay).</pre> * <!-- options-end --> * * @author Malcolm Ware (mfw4@cs.waikato.ac.nz) * @version $Revision: 9444 $ */ public class MultilayerPerceptron extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, Randomizable { /** for serialization */ private static final long serialVersionUID = -5990607817048210779L; /** * Main method for testing this class. * * @param argv should contain command line options (see setOptions) */ public static void main(String [] argv) { runClassifier(new MultilayerPerceptron(), argv); } /** * This inner class is used to connect the nodes in the network up to * the data that they are classifying, Note that objects of this class are * only suitable to go on the attribute side or class side of the network * and not both. */ protected class NeuralEnd extends NeuralConnection { /** for serialization */ static final long serialVersionUID = 7305185603191183338L; /** * the value that represents the instance value this node represents. 
* For an input it is the attribute number, for an output, if nominal * it is the class value. */ private int m_link; /** True if node is an input, False if it's an output. */ private boolean m_input; /** * Constructor */ public NeuralEnd(String id) { super(id); m_link = 0; m_input = true; } /** * Call this function to determine if the point at x,y is on the unit. * @param g The graphics context for font size info. * @param x The x coord. * @param y The y coord. * @param w The width of the display. * @param h The height of the display. * @return True if the point is on the unit, false otherwise. */ public boolean onUnit(Graphics g, int x, int y, int w, int h) { FontMetrics fm = g.getFontMetrics(); int l = (int)(m_x * w) - fm.stringWidth(m_id) / 2; int t = (int)(m_y * h) - fm.getHeight() / 2; if (x < l || x > l + fm.stringWidth(m_id) + 4 || y < t || y > t + fm.getHeight() + fm.getDescent() + 4) { return false; } return true; } /** * This will draw the node id to the graphics context. * @param g The graphics context. * @param w The width of the drawing area. * @param h The height of the drawing area. */ public void drawNode(Graphics g, int w, int h) { if ((m_type & PURE_INPUT) == PURE_INPUT) { g.setColor(Color.green); } else { g.setColor(Color.orange); } FontMetrics fm = g.getFontMetrics(); int l = (int)(m_x * w) - fm.stringWidth(m_id) / 2; int t = (int)(m_y * h) - fm.getHeight() / 2; g.fill3DRect(l, t, fm.stringWidth(m_id) + 4 , fm.getHeight() + fm.getDescent() + 4 , true); g.setColor(Color.black); g.drawString(m_id, l + 2, t + fm.getHeight() + 2); } /** * Call this function to draw the node highlighted. * @param g The graphics context. * @param w The width of the drawing area. * @param h The height of the drawing area. 
*/ public void drawHighlight(Graphics g, int w, int h) { g.setColor(Color.black); FontMetrics fm = g.getFontMetrics(); int l = (int)(m_x * w) - fm.stringWidth(m_id) / 2; int t = (int)(m_y * h) - fm.getHeight() / 2; g.fillRect(l - 2, t - 2, fm.stringWidth(m_id) + 8 , fm.getHeight() + fm.getDescent() + 8); drawNode(g, w, h); } /** * Call this to get the output value of this unit. * @param calculate True if the value should be calculated if it hasn't * been already. * @return The output value, or NaN, if the value has not been calculated. */ public double outputValue(boolean calculate) { if (Double.isNaN(m_unitValue) && calculate) { if (m_input) { if (m_currentInstance.isMissing(m_link)) { m_unitValue = 0; } else { m_unitValue = m_currentInstance.value(m_link); } } else { //node is an output. m_unitValue = 0; for (int noa = 0; noa < m_numInputs; noa++) { m_unitValue += m_inputList[noa].outputValue(true); } if (m_numeric && m_normalizeClass) { //then scale the value; //this scales linearly from between -1 and 1 m_unitValue = m_unitValue * m_attributeRanges[m_instances.classIndex()] + m_attributeBases[m_instances.classIndex()]; } } } return m_unitValue; } /** * Call this to get the error value of this unit, which in this case is * the difference between the predicted class, and the actual class. * @param calculate True if the value should be calculated if it hasn't * been already. * @return The error value, or NaN, if the value has not been calculated. 
*/ public double errorValue(boolean calculate) { if (!Double.isNaN(m_unitValue) && Double.isNaN(m_unitError) && calculate) { if (m_input) { m_unitError = 0; for (int noa = 0; noa < m_numOutputs; noa++) { m_unitError += m_outputList[noa].errorValue(true); } } else { if (m_currentInstance.classIsMissing()) { m_unitError = .1; } else if (m_instances.classAttribute().isNominal()) { if (m_currentInstance.classValue() == m_link) { m_unitError = 1 - m_unitValue; } else { m_unitError = 0 - m_unitValue; } } else if (m_numeric) { if (m_normalizeClass) { if (m_attributeRanges[m_instances.classIndex()] == 0) { m_unitError = 0; } else { m_unitError = (m_currentInstance.classValue() - m_unitValue ) / m_attributeRanges[m_instances.classIndex()]; //m_numericRange; } } else { m_unitError = m_currentInstance.classValue() - m_unitValue; } } } } return m_unitError; } /** * Call this to reset the value and error for this unit, ready for the next * run. This will also call the reset function of all units that are * connected as inputs to this one. * This is also the time that the update for the listeners will be * performed. */ public void reset() { if (!Double.isNaN(m_unitValue) || !Double.isNaN(m_unitError)) { m_unitValue = Double.NaN; m_unitError = Double.NaN; m_weightsUpdated = false; for (int noa = 0; noa < m_numInputs; noa++) { m_inputList[noa].reset(); } } } /** * Call this to have the connection save the current * weights. */ public void saveWeights() { for (int i = 0; i < m_numInputs; i++) { m_inputList[i].saveWeights(); } } /** * Call this to have the connection restore from the saved * weights. */ public void restoreWeights() { for (int i = 0; i < m_numInputs; i++) { m_inputList[i].restoreWeights(); } } /** * Call this function to set What this end unit represents. * @param input True if this unit is used for entering an attribute, * False if it's used for determining a class value. * @param val The attribute number or class type that this unit represents. 
* (for nominal attributes). */ public void setLink(boolean input, int val) throws Exception { m_input = input; if (input) { m_type = PURE_INPUT; } else { m_type = PURE_OUTPUT; } if (val < 0 || (input && val > m_instances.numAttributes()) || (!input && m_instances.classAttribute().isNominal() && val > m_instances.classAttribute().numValues())) { m_link = 0; } else { m_link = val; } } /** * @return link for this node. */ public int getLink() { return m_link; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9444 $"); } } /** Inner class used to draw the nodes onto.(uses the node lists!!) * This will also handle the user input. */ private class NodePanel extends JPanel implements RevisionHandler { /** for serialization */ static final long serialVersionUID = -3067621833388149984L; /** * The constructor. */ public NodePanel() { addMouseListener(new MouseAdapter() { public void mousePressed(MouseEvent e) { if (!m_stopped) { return; } if ((e.getModifiers() & MouseEvent.BUTTON1_MASK) == MouseEvent.BUTTON1_MASK && !e.isAltDown()) { Graphics g = NodePanel.this.getGraphics(); int x = e.getX(); int y = e.getY(); int w = NodePanel.this.getWidth(); int h = NodePanel.this.getHeight(); FastVector tmp = new FastVector(4); for (int noa = 0; noa < m_numAttributes; noa++) { if (m_inputs[noa].onUnit(g, x, y, w, h)) { tmp.addElement(m_inputs[noa]); selection(tmp, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK , true); return; } } for (int noa = 0; noa < m_numClasses; noa++) { if (m_outputs[noa].onUnit(g, x, y, w, h)) { tmp.addElement(m_outputs[noa]); selection(tmp, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK , true); return; } } for (int noa = 0; noa < m_neuralNodes.length; noa++) { if (m_neuralNodes[noa].onUnit(g, x, y, w, h)) { tmp.addElement(m_neuralNodes[noa]); selection(tmp, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK , true); return; 
} } NeuralNode temp = new NeuralNode(String.valueOf(m_nextId), m_random, m_sigmoidUnit); m_nextId++; temp.setX((double)e.getX() / w); temp.setY((double)e.getY() / h); tmp.addElement(temp); addNode(temp); selection(tmp, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK , true); } else { //then right click Graphics g = NodePanel.this.getGraphics(); int x = e.getX(); int y = e.getY(); int w = NodePanel.this.getWidth(); int h = NodePanel.this.getHeight(); FastVector tmp = new FastVector(4); for (int noa = 0; noa < m_numAttributes; noa++) { if (m_inputs[noa].onUnit(g, x, y, w, h)) { tmp.addElement(m_inputs[noa]); selection(tmp, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK , false); return; } } for (int noa = 0; noa < m_numClasses; noa++) { if (m_outputs[noa].onUnit(g, x, y, w, h)) { tmp.addElement(m_outputs[noa]); selection(tmp, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK , false); return; } } for (int noa = 0; noa < m_neuralNodes.length; noa++) { if (m_neuralNodes[noa].onUnit(g, x, y, w, h)) { tmp.addElement(m_neuralNodes[noa]); selection(tmp, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK , false); return; } } selection(null, (e.getModifiers() & MouseEvent.CTRL_MASK) == MouseEvent.CTRL_MASK , false); } } }); } /** * This function gets called when the user has clicked something * It will amend the current selection or connect the current selection * to the new selection. * Or if nothing was selected and the right button was used it will * delete the node. * @param v The units that were selected. * @param ctrl True if ctrl was held down. * @param left True if it was the left mouse button. */ private void selection(FastVector v, boolean ctrl, boolean left) { if (v == null) { //then unselect all. m_selected.removeAllElements(); repaint(); return; } //then exclusive or the new selection with the current one. 
if ((ctrl || m_selected.size() == 0) && left) { boolean removed = false; for (int noa = 0; noa < v.size(); noa++) { removed = false; for (int nob = 0; nob < m_selected.size(); nob++) { if (v.elementAt(noa) == m_selected.elementAt(nob)) { //then remove that element m_selected.removeElementAt(nob); removed = true; break; } } if (!removed) { m_selected.addElement(v.elementAt(noa)); } } repaint(); return; } if (left) { //then connect the current selection to the new one. for (int noa = 0; noa < m_selected.size(); noa++) { for (int nob = 0; nob < v.size(); nob++) { NeuralConnection .connect((NeuralConnection)m_selected.elementAt(noa) , (NeuralConnection)v.elementAt(nob)); } } } else if (m_selected.size() > 0) { //then disconnect the current selection from the new one. for (int noa = 0; noa < m_selected.size(); noa++) { for (int nob = 0; nob < v.size(); nob++) { NeuralConnection .disconnect((NeuralConnection)m_selected.elementAt(noa) , (NeuralConnection)v.elementAt(nob)); NeuralConnection .disconnect((NeuralConnection)v.elementAt(nob) , (NeuralConnection)m_selected.elementAt(noa)); } } } else { //then remove the selected node. (it was right clicked while //no other units were selected for (int noa = 0; noa < v.size(); noa++) { ((NeuralConnection)v.elementAt(noa)).removeAllInputs(); ((NeuralConnection)v.elementAt(noa)).removeAllOutputs(); removeNode((NeuralConnection)v.elementAt(noa)); } } repaint(); } /** * This will paint the nodes ontot the panel. * @param g The graphics context. 
*/ public void paintComponent(Graphics g) { super.paintComponent(g); int x = getWidth(); int y = getHeight(); if (25 * m_numAttributes > 25 * m_numClasses && 25 * m_numAttributes > y) { setSize(x, 25 * m_numAttributes); } else if (25 * m_numClasses > y) { setSize(x, 25 * m_numClasses); } else { setSize(x, y); } y = getHeight(); for (int noa = 0; noa < m_numAttributes; noa++) { m_inputs[noa].drawInputLines(g, x, y); } for (int noa = 0; noa < m_numClasses; noa++) { m_outputs[noa].drawInputLines(g, x, y); m_outputs[noa].drawOutputLines(g, x, y); } for (int noa = 0; noa < m_neuralNodes.length; noa++) { m_neuralNodes[noa].drawInputLines(g, x, y); } for (int noa = 0; noa < m_numAttributes; noa++) { m_inputs[noa].drawNode(g, x, y); } for (int noa = 0; noa < m_numClasses; noa++) { m_outputs[noa].drawNode(g, x, y); } for (int noa = 0; noa < m_neuralNodes.length; noa++) { m_neuralNodes[noa].drawNode(g, x, y); } for (int noa = 0; noa < m_selected.size(); noa++) { ((NeuralConnection)m_selected.elementAt(noa)).drawHighlight(g, x, y); } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9444 $"); } } /** * This provides the basic controls for working with the neuralnetwork * @author Malcolm Ware (mfw4@cs.waikato.ac.nz) * @version $Revision: 9444 $ */ class ControlPanel extends JPanel implements RevisionHandler { /** for serialization */ static final long serialVersionUID = 7393543302294142271L; /** The start stop button. */ public JButton m_startStop; /** The button to accept the network (even if it hasn't done all epochs. */ public JButton m_acceptButton; /** A label to state the number of epochs processed so far. */ public JPanel m_epochsLabel; /** A label to state the total number of epochs to be processed. */ public JLabel m_totalEpochsLabel; /** A text field to allow the changing of the total number of epochs. */ public JTextField m_changeEpochs; /** A label to state the learning rate. 
*/ public JLabel m_learningLabel; /** A label to state the momentum. */ public JLabel m_momentumLabel; /** A text field to allow the changing of the learning rate. */ public JTextField m_changeLearning; /** A text field to allow the changing of the momentum. */ public JTextField m_changeMomentum; /** A label to state roughly the accuracy of the network.(because the accuracy is calculated per epoch, but the network is changing throughout each epoch train). */ public JPanel m_errorLabel; /** The constructor. */ public ControlPanel() { setBorder(BorderFactory.createTitledBorder("Controls")); m_totalEpochsLabel = new JLabel("Num Of Epochs "); m_epochsLabel = new JPanel(){ /** for serialization */ private static final long serialVersionUID = 2562773937093221399L; public void paintComponent(Graphics g) { super.paintComponent(g); g.setColor(m_controlPanel.m_totalEpochsLabel.getForeground()); g.drawString("Epoch " + m_epoch, 0, 10); } }; m_epochsLabel.setFont(m_totalEpochsLabel.getFont()); m_changeEpochs = new JTextField(); m_changeEpochs.setText("" + m_numEpochs); m_errorLabel = new JPanel(){ /** for serialization */ private static final long serialVersionUID = 4390239056336679189L; public void paintComponent(Graphics g) { super.paintComponent(g); g.setColor(m_controlPanel.m_totalEpochsLabel.getForeground()); if (m_valSize == 0) { g.drawString("Error per Epoch = " + Utils.doubleToString(m_error, 7), 0, 10); } else { g.drawString("Validation Error per Epoch = " + Utils.doubleToString(m_error, 7), 0, 10); } } }; m_errorLabel.setFont(m_epochsLabel.getFont()); m_learningLabel = new JLabel("Learning Rate = "); m_momentumLabel = new JLabel("Momentum = "); m_changeLearning = new JTextField(); m_changeMomentum = new JTextField(); m_changeLearning.setText("" + m_learningRate); m_changeMomentum.setText("" + m_momentum); setLayout(new BorderLayout(15, 10)); m_stopIt = true; m_accepted = false; m_startStop = new JButton("Start"); m_startStop.setActionCommand("Start"); m_acceptButton 
= new JButton("Accept"); m_acceptButton.setActionCommand("Accept"); JPanel buttons = new JPanel(); buttons.setLayout(new BoxLayout(buttons, BoxLayout.Y_AXIS)); buttons.add(m_startStop); buttons.add(m_acceptButton); add(buttons, BorderLayout.WEST); JPanel data = new JPanel(); data.setLayout(new BoxLayout(data, BoxLayout.Y_AXIS)); Box ab = new Box(BoxLayout.X_AXIS); ab.add(m_epochsLabel); data.add(ab); ab = new Box(BoxLayout.X_AXIS); Component b = Box.createGlue(); ab.add(m_totalEpochsLabel); ab.add(m_changeEpochs); m_changeEpochs.setMaximumSize(new Dimension(200, 20)); ab.add(b); data.add(ab); ab = new Box(BoxLayout.X_AXIS); ab.add(m_errorLabel); data.add(ab); add(data, BorderLayout.CENTER); data = new JPanel(); data.setLayout(new BoxLayout(data, BoxLayout.Y_AXIS)); ab = new Box(BoxLayout.X_AXIS); b = Box.createGlue(); ab.add(m_learningLabel); ab.add(m_changeLearning); m_changeLearning.setMaximumSize(new Dimension(200, 20)); ab.add(b); data.add(ab); ab = new Box(BoxLayout.X_AXIS); b = Box.createGlue(); ab.add(m_momentumLabel); ab.add(m_changeMomentum); m_changeMomentum.setMaximumSize(new Dimension(200, 20)); ab.add(b); data.add(ab); add(data, BorderLayout.EAST); m_startStop.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { if (e.getActionCommand().equals("Start")) { m_stopIt = false; m_startStop.setText("Stop"); m_startStop.setActionCommand("Stop"); int n = Integer.valueOf(m_changeEpochs.getText()).intValue(); m_numEpochs = n; m_changeEpochs.setText("" + m_numEpochs); double m=Double.valueOf(m_changeLearning.getText()). 
doubleValue(); setLearningRate(m); m_changeLearning.setText("" + m_learningRate); m = Double.valueOf(m_changeMomentum.getText()).doubleValue(); setMomentum(m); m_changeMomentum.setText("" + m_momentum); blocker(false); } else if (e.getActionCommand().equals("Stop")) { m_stopIt = true; m_startStop.setText("Start"); m_startStop.setActionCommand("Start"); } } }); m_acceptButton.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { m_accepted = true; blocker(false); } }); m_changeEpochs.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { int n = Integer.valueOf(m_changeEpochs.getText()).intValue(); if (n > 0) { m_numEpochs = n; blocker(false); } } }); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9444 $"); } } /** a ZeroR model in case no model can be built from the data * or the network predicts all zeros for the classes */ private Classifier m_ZeroR; /** Whether to use the default ZeroR model */ private boolean m_useDefaultModel = false; /** The training instances. */ private Instances m_instances; /** The current instance running through the network. */ private Instance m_currentInstance; /** A flag to say that it's a numeric class. */ private boolean m_numeric; /** The ranges for all the attributes. */ private double[] m_attributeRanges; /** The base values for all the attributes. */ private double[] m_attributeBases; /** The output units.(only feeds the errors, does no calcs) */ private NeuralEnd[] m_outputs; /** The input units.(only feeds the inputs does no calcs) */ private NeuralEnd[] m_inputs; /** All the nodes that actually comprise the logical neural net. */ private NeuralConnection[] m_neuralNodes; /** The number of classes. */ private int m_numClasses = 0; /** The number of attributes. */ private int m_numAttributes = 0; //note the number doesn't include the class. 
/** The panel the nodes are displayed on. */ private NodePanel m_nodePanel; /** The control panel. */ private ControlPanel m_controlPanel; /** The next id number available for default naming. */ private int m_nextId; /** A Vector list of the units currently selected. */ private FastVector m_selected; /** A Vector list of the graphers. */ private FastVector m_graphers; /** The number of epochs to train through. */ private int m_numEpochs; /** a flag to state if the network should be running, or stopped. */ private boolean m_stopIt; /** a flag to state that the network has in fact stopped. */ private boolean m_stopped; /** a flag to state that the network should be accepted the way it is. */ private boolean m_accepted; /** The window for the network. */ private JFrame m_win; /** A flag to tell the build classifier to automatically build a neural net. */ private boolean m_autoBuild; /** A flag to state that the gui for the network should be brought up. To allow interaction while training. */ private boolean m_gui; /** An int to say how big the validation set should be. */ private int m_valSize; /** The number to to use to quit on validation testing. */ private int m_driftThreshold; /** The number used to seed the random number generator. */ private int m_randomSeed; /** The actual random number generator. */ private Random m_random; /** A flag to state that a nominal to binary filter should be used. */ private boolean m_useNomToBin; /** The actual filter. */ private NominalToBinary m_nominalToBinaryFilter; /** The string that defines the hidden layers */ private String m_hiddenLayers; /** This flag states that the user wants the input values normalized. */ private boolean m_normalizeAttributes; /** This flag states that the user wants the learning rate to decay. */ private boolean m_decay; /** This is the learning rate for the network. */ private double m_learningRate; /** This is the momentum for the network. 
*/ private double m_momentum; /** Shows the number of the epoch that the network just finished. */ private int m_epoch; /** Shows the error of the epoch that the network just finished. */ private double m_error; /** This flag states that the user wants the network to restart if it * is found to be generating infinity or NaN for the error value. This * would restart the network with the current options except that the * learning rate would be smaller than before, (perhaps half of its current * value). This option will not be available if the gui is chosen (if the * gui is open the user can fix the network themselves, it is an * architectural minefield for the network to be reset with the gui open). */ private boolean m_reset; /** This flag states that the user wants the class to be normalized while * processing in the network is done. (the final answer will be in the * original range regardless). This option will only be used when the class * is numeric. */ private boolean m_normalizeClass; /** * this is a sigmoid unit. */ private SigmoidUnit m_sigmoidUnit; /** * This is a linear unit. */ private LinearUnit m_linearUnit; /** * The constructor. */ public MultilayerPerceptron() { m_instances = null; m_currentInstance = null; m_controlPanel = null; m_nodePanel = null; m_epoch = 0; m_error = 0; m_outputs = new NeuralEnd[0]; m_inputs = new NeuralEnd[0]; m_numAttributes = 0; m_numClasses = 0; m_neuralNodes = new NeuralConnection[0]; m_selected = new FastVector(4); m_graphers = new FastVector(2); m_nextId = 0; m_stopIt = true; m_stopped = true; m_accepted = false; m_numeric = false; m_random = null; m_nominalToBinaryFilter = new NominalToBinary(); m_sigmoidUnit = new SigmoidUnit(); m_linearUnit = new LinearUnit(); //setting all the options to their defaults. 
To completely change these //defaults they will also need to be changed down the bottom in the //setoptions function (the text info in the accompanying functions should //also be changed to reflect the new defaults m_normalizeClass = true; m_normalizeAttributes = true; m_autoBuild = true; m_gui = false; m_useNomToBin = true; m_driftThreshold = 20; m_numEpochs = 500; m_valSize = 0; m_randomSeed = 0; m_hiddenLayers = "a"; m_learningRate = .3; m_momentum = .2; m_reset = true; m_decay = false; } /** * @param d True if the learning rate should decay. */ public void setDecay(boolean d) { m_decay = d; } /** * @return the flag for having the learning rate decay. */ public boolean getDecay() { return m_decay; } /** * This sets the network up to be able to reset itself with the current * settings and the learning rate at half of what it is currently. This * will only happen if the network creates NaN or infinite errors. Also this * will continue to happen until the network is trained properly. The * learning rate will also get set back to it's original value at the end of * this. This can only be set to true if the GUI is not brought up. * @param r True if the network should restart with it's current options * and set the learning rate to half what it currently is. */ public void setReset(boolean r) { if (m_gui) { r = false; } m_reset = r; } /** * @return The flag for reseting the network. */ public boolean getReset() { return m_reset; } /** * @param c True if the class should be normalized (the class will only ever * be normalized if it is numeric). (Normalization puts the range between * -1 - 1). */ public void setNormalizeNumericClass(boolean c) { m_normalizeClass = c; } /** * @return The flag for normalizing a numeric class. */ public boolean getNormalizeNumericClass() { return m_normalizeClass; } /** * @param a True if the attributes should be normalized (even nominal * attributes will get normalized here) (range goes between -1 - 1). 
*/ public void setNormalizeAttributes(boolean a) { m_normalizeAttributes = a; } /** * @return The flag for normalizing attributes. */ public boolean getNormalizeAttributes() { return m_normalizeAttributes; } /** * @param f True if a nominalToBinary filter should be used on the * data. */ public void setNominalToBinaryFilter(boolean f) { m_useNomToBin = f; } /** * @return The flag for nominal to binary filter use. */ public boolean getNominalToBinaryFilter() { return m_useNomToBin; } /** * This seeds the random number generator, that is used when a random * number is needed for the network. * @param l The seed. */ public void setSeed(int l) { if (l >= 0) { m_randomSeed = l; } } /** * @return The seed for the random number generator. */ public int getSeed() { return m_randomSeed; } /** * This sets the threshold to use for when validation testing is being done. * It works by ending testing once the error on the validation set has * consecutively increased a certain number of times. * @param t The threshold to use for this. */ public void setValidationThreshold(int t) { if (t > 0) { m_driftThreshold = t; } } /** * @return The threshold used for validation testing. */ public int getValidationThreshold() { return m_driftThreshold; } /** * The learning rate can be set using this command. * NOTE That this is a static variable so it affect all networks that are * running. * Must be greater than 0 and no more than 1. * @param l The New learning rate. */ public void setLearningRate(double l) { if (l > 0 && l <= 1) { m_learningRate = l; if (m_controlPanel != null) { m_controlPanel.m_changeLearning.setText("" + l); } } } /** * @return The learning rate for the nodes. */ public double getLearningRate() { return m_learningRate; } /** * The momentum can be set using this command. * THE same conditions apply to this as to the learning rate. * @param m The new Momentum. 
*/ public void setMomentum(double m) { if (m >= 0 && m <= 1) { m_momentum = m; if (m_controlPanel != null) { m_controlPanel.m_changeMomentum.setText("" + m); } } } /** * @return The momentum for the nodes. */ public double getMomentum() { return m_momentum; } /** * This will set whether the network is automatically built * or if it is left up to the user. (there is nothing to stop a user * from altering an autobuilt network however). * @param a True if the network should be auto built. */ public void setAutoBuild(boolean a) { if (!m_gui) { a = true; } m_autoBuild = a; } /** * @return The auto build state. */ public boolean getAutoBuild() { return m_autoBuild; } /** * This will set what the hidden layers are made up of when auto build is * enabled. Note to have no hidden units, just put a single 0, Any more * 0's will indicate that the string is badly formed and make it unaccepted. * Negative numbers, and floats will do the same. There are also some * wildcards. These are 'a' = (number of attributes + number of classes) / 2, * 'i' = number of attributes, 'o' = number of classes, and 't' = number of * attributes + number of classes. * @param h A string with a comma seperated list of numbers. Each number is * the number of nodes to be on a hidden layer. */ public void setHiddenLayers(String h) { String tmp = ""; StringTokenizer tok = new StringTokenizer(h, ","); if (tok.countTokens() == 0) { return; } double dval; int val; String c; boolean first = true; while (tok.hasMoreTokens()) { c = tok.nextToken().trim(); if (c.equals("a") || c.equals("i") || c.equals("o") || c.equals("t")) { tmp += c; } else { dval = Double.valueOf(c).doubleValue(); val = (int)dval; if ((val == dval && (val != 0 || (tok.countTokens() == 0 && first)) && val >= 0)) { tmp += val; } else { return; } } first = false; if (tok.hasMoreTokens()) { tmp += ", "; } } m_hiddenLayers = tmp; } /** * @return A string representing the hidden layers, each number is the number * of nodes on a hidden layer. 
*/ public String getHiddenLayers() { return m_hiddenLayers; } /** * This will set whether A GUI is brought up to allow interaction by the user * with the neural network during training. * @param a True if gui should be created. */ public void setGUI(boolean a) { m_gui = a; if (!a) { setAutoBuild(true); } else { setReset(false); } } /** * @return The true if should show gui. */ public boolean getGUI() { return m_gui; } /** * This will set the size of the validation set. * @param a The size of the validation set, as a percentage of the whole. */ public void setValidationSetSize(int a) { if (a < 0 || a > 99) { return; } m_valSize = a; } /** * @return The percentage size of the validation set. */ public int getValidationSetSize() { return m_valSize; } /** * Set the number of training epochs to perform. * Must be greater than 0. * @param n The number of epochs to train through. */ public void setTrainingTime(int n) { if (n > 0) { m_numEpochs = n; } } /** * @return The number of epochs to train through. */ public int getTrainingTime() { return m_numEpochs; } /** * Call this function to place a node into the network list. * @param n The node to place in the list. */ private void addNode(NeuralConnection n) { NeuralConnection[] temp1 = new NeuralConnection[m_neuralNodes.length + 1]; for (int noa = 0; noa < m_neuralNodes.length; noa++) { temp1[noa] = m_neuralNodes[noa]; } temp1[temp1.length-1] = n; m_neuralNodes = temp1; } /** * Call this function to remove the passed node from the list. * This will only remove the node if it is in the neuralnodes list. * @param n The neuralConnection to remove. * @return True if removed false if not (because it wasn't there). 
*/ private boolean removeNode(NeuralConnection n) { NeuralConnection[] temp1 = new NeuralConnection[m_neuralNodes.length - 1]; int skip = 0; for (int noa = 0; noa < m_neuralNodes.length; noa++) { if (n == m_neuralNodes[noa]) { skip++; } else if (!((noa - skip) >= temp1.length)) { temp1[noa - skip] = m_neuralNodes[noa]; } else { return false; } } m_neuralNodes = temp1; return true; } /** * This function sets what the m_numeric flag to represent the passed class * it also performs the normalization of the attributes if applicable * and sets up the info to normalize the class. (note that regardless of * the options it will fill an array with the range and base, set to * normalize all attributes and the class to be between -1 and 1) * @param inst the instances. * @return The modified instances. This needs to be done. If the attributes * are normalized then deep copies will be made of all the instances which * will need to be passed back out. */ private Instances setClassType(Instances inst) throws Exception { if (inst != null) { // x bounds double min=Double.POSITIVE_INFINITY; double max=Double.NEGATIVE_INFINITY; double value; m_attributeRanges = new double[inst.numAttributes()]; m_attributeBases = new double[inst.numAttributes()]; for (int noa = 0; noa < inst.numAttributes(); noa++) { min = Double.POSITIVE_INFINITY; max = Double.NEGATIVE_INFINITY; for (int i=0; i < inst.numInstances();i++) { if (!inst.instance(i).isMissing(noa)) { value = inst.instance(i).value(noa); if (value < min) { min = value; } if (value > max) { max = value; } } } m_attributeRanges[noa] = (max - min) / 2; m_attributeBases[noa] = (max + min) / 2; if (noa != inst.classIndex() && m_normalizeAttributes) { for (int i = 0; i < inst.numInstances(); i++) { if (m_attributeRanges[noa] != 0) { inst.instance(i).setValue(noa, (inst.instance(i).value(noa) - m_attributeBases[noa]) / m_attributeRanges[noa]); } else { inst.instance(i).setValue(noa, inst.instance(i).value(noa) - m_attributeBases[noa]); } } } } 
if (inst.classAttribute().isNumeric()) { m_numeric = true; } else { m_numeric = false; } } return inst; } /** * A function used to stop the code that called buildclassifier * from continuing on before the user has finished the decision tree. * @param tf True to stop the thread, False to release the thread that is * waiting there (if one). */ public synchronized void blocker(boolean tf) { if (tf) { try { wait(); } catch(InterruptedException e) { } } else { notifyAll(); } } /** * Call this function to update the control panel for the gui. */ private void updateDisplay() { if (m_gui) { m_controlPanel.m_errorLabel.repaint(); m_controlPanel.m_epochsLabel.repaint(); } } /** * this will reset all the nodes in the network. */ private void resetNetwork() { for (int noc = 0; noc < m_numClasses; noc++) { m_outputs[noc].reset(); } } /** * This will cause the output values of all the nodes to be calculated. * Note that the m_currentInstance is used to calculate these values. */ private void calculateOutputs() { for (int noc = 0; noc < m_numClasses; noc++) { //get the values. m_outputs[noc].outputValue(true); } } /** * This will cause the error values to be calculated for all nodes. * Note that the m_currentInstance is used to calculate these values. * Also the output values should have been calculated first. * @return The squared error. */ private double calculateErrors() throws Exception { double ret = 0, temp = 0; for (int noc = 0; noc < m_numAttributes; noc++) { //get the errors. m_inputs[noc].errorValue(true); } for (int noc = 0; noc < m_numClasses; noc++) { temp = m_outputs[noc].errorValue(false); ret += temp * temp; } return ret; } /** * This will cause the weight values to be updated based on the learning * rate, momentum and the errors that have been calculated for each node. * @param l The learning rate to update with. * @param m The momentum to update with. 
*/
  private void updateNetworkWeights(double l, double m) {
    for (int noc = 0; noc < m_numClasses; noc++) {
      //update weights
      m_outputs[noc].updateWeights(l, m);
    }
  }

  /**
   * This creates the required input units, one per non-class attribute,
   * laid out vertically down the left of the display area.
   */
  private void setupInputs() throws Exception {
    m_inputs = new NeuralEnd[m_numAttributes];
    int now = 0;
    // 'now' becomes 1 once the class index has been passed, so that the
    // class attribute is skipped without leaving a gap in m_inputs
    for (int noa = 0; noa < m_numAttributes + 1; noa++) {
      if (m_instances.classIndex() != noa) {
        m_inputs[noa - now] = new NeuralEnd(m_instances.attribute(noa).name());
        m_inputs[noa - now].setX(.1);
        m_inputs[noa - now].setY((noa - now + 1.0) / (m_numAttributes + 1));
        m_inputs[noa - now].setLink(true, noa);
      } else {
        now = 1;
      }
    }
  }

  /**
   * This creates the required output units (one per class value, or a single
   * unit for a numeric class), each fed by a fresh sigmoid node so that the
   * output end itself stays a pass-through.
   */
  private void setupOutputs() throws Exception {
    m_outputs = new NeuralEnd[m_numClasses];
    for (int noa = 0; noa < m_numClasses; noa++) {
      if (m_numeric) {
        m_outputs[noa] = new NeuralEnd(m_instances.classAttribute().name());
      } else {
        m_outputs[noa] = new NeuralEnd(m_instances.classAttribute().value(noa));
      }
      m_outputs[noa].setX(.9);
      m_outputs[noa].setY((noa + 1.0) / (m_numClasses + 1));
      m_outputs[noa].setLink(false, noa);
      NeuralNode temp = new NeuralNode(String.valueOf(m_nextId), m_random,
                                       m_sigmoidUnit);
      m_nextId++;
      temp.setX(.75);
      temp.setY((noa + 1.0) / (m_numClasses + 1));
      addNode(temp);
      NeuralConnection.connect(temp, m_outputs[noa]);
    }
  }

  /**
   * Call this function to automatically generate the hidden units.
   * Parses m_hiddenLayers (already validated by setHiddenLayers), creates
   * each layer's nodes, wires consecutive layers together, then wires the
   * input ends to the first layer and the last layer to the output nodes.
   */
  private void setupHiddenLayer() {
    StringTokenizer tok = new StringTokenizer(m_hiddenLayers, ",");
    int val = 0;  //num of nodes in a layer
    int prev = 0; //used to remember the previous layer
    int num = tok.countTokens(); //number of layers
    String c;
    for (int noa = 0; noa < num; noa++) {
      //note that I am using the Double to get the value rather than the
      //Integer class, because for some reason the Double implementation can
      //handle leading white space and the integer version can't!?!
      c = tok.nextToken().trim();
      if (c.equals("a")) {
        val = (m_numAttributes + m_numClasses) / 2;
      } else if (c.equals("i")) {
        val = m_numAttributes;
      } else if (c.equals("o")) {
        val = m_numClasses;
      } else if (c.equals("t")) {
        val = m_numAttributes + m_numClasses;
      } else {
        val = Double.valueOf(c).intValue();
      }
      for (int nob = 0; nob < val; nob++) {
        NeuralNode temp = new NeuralNode(String.valueOf(m_nextId), m_random,
                                         m_sigmoidUnit);
        m_nextId++;
        temp.setX(.5 / (num) * noa + .25);
        temp.setY((nob + 1.0) / (val + 1));
        addNode(temp);
        if (noa > 0) {
          //then do connections
          // the previous layer occupies the 'prev' slots just before the
          // nodes of the current layer already added to m_neuralNodes
          for (int noc = m_neuralNodes.length - nob - 1 - prev;
               noc < m_neuralNodes.length - nob - 1; noc++) {
            NeuralConnection.connect(m_neuralNodes[noc], temp);
          }
        }
      }
      prev = val;
    }
    // re-parse only the first token to find the size of the first layer
    // (NOTE(review): this second pass does not trim the token — assumed
    // harmless because setHiddenLayers already normalized the string)
    tok = new StringTokenizer(m_hiddenLayers, ",");
    c = tok.nextToken();
    if (c.equals("a")) {
      val = (m_numAttributes + m_numClasses) / 2;
    } else if (c.equals("i")) {
      val = m_numAttributes;
    } else if (c.equals("o")) {
      val = m_numClasses;
    } else if (c.equals("t")) {
      val = m_numAttributes + m_numClasses;
    } else {
      val = Double.valueOf(c).intValue();
    }
    if (val == 0) {
      // no hidden layer: connect inputs straight to the output-side nodes
      // (the first m_numClasses entries of m_neuralNodes, see setupOutputs)
      for (int noa = 0; noa < m_numAttributes; noa++) {
        for (int nob = 0; nob < m_numClasses; nob++) {
          NeuralConnection.connect(m_inputs[noa], m_neuralNodes[nob]);
        }
      }
    } else {
      // inputs -> first hidden layer
      for (int noa = 0; noa < m_numAttributes; noa++) {
        for (int nob = m_numClasses; nob < m_numClasses + val; nob++) {
          NeuralConnection.connect(m_inputs[noa], m_neuralNodes[nob]);
        }
      }
      // last hidden layer (final 'prev' nodes) -> output-side nodes
      for (int noa = m_neuralNodes.length - prev; noa < m_neuralNodes.length;
           noa++) {
        for (int nob = 0; nob < m_numClasses; nob++) {
          NeuralConnection.connect(m_neuralNodes[noa], m_neuralNodes[nob]);
        }
      }
    }
  }

  /**
   * This will go through all the nodes and check if they are connected
   * to a pure output unit. If so they will be set to be linear units.
   * If not they will be set to be sigmoid units.
*/
  private void setEndsToLinear() {
    for (int noa = 0; noa < m_neuralNodes.length; noa++) {
      if ((m_neuralNodes[noa].getType() & NeuralConnection.OUTPUT)
          == NeuralConnection.OUTPUT) {
        // node feeds an output end directly -> unthresholded linear unit
        ((NeuralNode) m_neuralNodes[noa]).setMethod(m_linearUnit);
      } else {
        ((NeuralNode) m_neuralNodes[noa]).setMethod(m_sigmoidUnit);
      }
    }
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.NUMERIC_CLASS);
    result.enable(Capability.DATE_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    return result;
  }

  /**
   * Call this function to build and train a neural network for the training
   * data provided. Trains by backpropagation for up to m_numEpochs epochs,
   * optionally holding out the first numInVal instances as a validation set,
   * and (when m_reset is on) restarts itself with a halved learning rate if
   * the training error ever becomes NaN/infinite. When the GUI is enabled
   * this method blocks on user interaction via blocker().
   * @param i The training data.
   * @throws Exception if can't build classification properly.
   */
  public void buildClassifier(Instances i) throws Exception {
    // can classifier handle the data?
    getCapabilities().testWithFail(i);

    // remove instances with missing class
    i = new Instances(i);
    i.deleteWithMissingClass();

    // ZeroR is always built: it is the fallback model and also the backup
    // prediction in distributionForInstance when the net outputs all zeros
    m_ZeroR = new weka.classifiers.rules.ZeroR();
    m_ZeroR.buildClassifier(i);
    // only class? -> use ZeroR model
    if (i.numAttributes() == 1) {
      System.err.println(
          "Cannot build model (only class attribute present in data!), "
          + "using ZeroR model instead!");
      m_useDefaultModel = true;
      return;
    } else {
      m_useDefaultModel = false;
    }

    // wipe all state left over from any previous build
    m_epoch = 0;
    m_error = 0;
    m_instances = null;
    m_currentInstance = null;
    m_controlPanel = null;
    m_nodePanel = null;
    m_outputs = new NeuralEnd[0];
    m_inputs = new NeuralEnd[0];
    m_numAttributes = 0;
    m_numClasses = 0;
    m_neuralNodes = new NeuralConnection[0];
    m_selected = new FastVector(4);
    m_graphers = new FastVector(2);
    m_nextId = 0;
    m_stopIt = true;
    m_stopped = true;
    m_accepted = false;
    m_instances = new Instances(i);
    m_random = new Random(m_randomSeed);
    m_instances.randomize(m_random);

    if (m_useNomToBin) {
      m_nominalToBinaryFilter = new NominalToBinary();
      m_nominalToBinaryFilter.setInputFormat(m_instances);
      m_instances = Filter.useFilter(m_instances, m_nominalToBinaryFilter);
    }

    m_numAttributes = m_instances.numAttributes() - 1;
    m_numClasses = m_instances.numClasses();

    // normalizes attributes in place and records range/base for predictions
    setClassType(m_instances);

    //this sets up the validation set.
    Instances valSet = null;
    //numinval is needed later
    int numInVal = (int) (m_valSize / 100.0 * m_instances.numInstances());
    if (m_valSize > 0) {
      if (numInVal == 0) {
        numInVal = 1;
      }
      valSet = new Instances(m_instances, 0, numInVal);
    }
    ///////////

    setupInputs();
    setupOutputs();
    if (m_autoBuild) {
      setupHiddenLayer();
    }

    /////////////////////////////
    //this sets up the gui for usage
    if (m_gui) {
      m_win = new JFrame();

      m_win.addWindowListener(new WindowAdapter() {
          public void windowClosing(WindowEvent e) {
            // pause training while the user decides whether to accept
            boolean k = m_stopIt;
            m_stopIt = true;
            int well = JOptionPane.showConfirmDialog(m_win,
                "Are You Sure...\n"
                + "Click Yes To Accept" + " The Neural Network"
                + "\n Click No To Return",
                "Accept Neural Network",
                JOptionPane.YES_NO_OPTION);
            // 0 == JOptionPane.YES_OPTION
            if (well == 0) {
              m_win.setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE);
              m_accepted = true;
              blocker(false);
            } else {
              m_win.setDefaultCloseOperation(JFrame.DO_NOTHING_ON_CLOSE);
            }
            m_stopIt = k;
          }
        });

      m_win.getContentPane().setLayout(new BorderLayout());
      m_win.setTitle("Neural Network");
      m_nodePanel = new NodePanel();
      // without the following two lines, the NodePanel.paintComponents(Graphics)
      // method will go berserk if the network doesn't fit completely: it will
      // get called on a constant basis, using 100% of the CPU
      // see the following forum thread:
      // http://forum.java.sun.com/thread.jspa?threadID=580929&messageID=2945011
      m_nodePanel.setPreferredSize(new Dimension(640, 480));
      m_nodePanel.revalidate();

      JScrollPane sp = new JScrollPane(
          m_nodePanel,
          JScrollPane.VERTICAL_SCROLLBAR_ALWAYS,
          JScrollPane.HORIZONTAL_SCROLLBAR_NEVER);
      m_controlPanel = new ControlPanel();

      m_win.getContentPane().add(sp, BorderLayout.CENTER);
      m_win.getContentPane().add(m_controlPanel, BorderLayout.SOUTH);
      m_win.setSize(640, 480);
      m_win.setVisible(true);
    }

    //This sets up the initial state of the gui
    if (m_gui) {
      // block here until the user presses start
      blocker(true);
      m_controlPanel.m_changeEpochs.setEnabled(false);
      m_controlPanel.m_changeLearning.setEnabled(false);
      m_controlPanel.m_changeMomentum.setEnabled(false);
    }

    //For silly situations in which the network gets accepted before training
    //commenses
    if (m_numeric) {
      setEndsToLinear();
    }
    if (m_accepted) {
      m_win.dispose();
      m_controlPanel = null;
      m_nodePanel = null;
      m_instances = new Instances(m_instances, 0);
      m_currentInstance = null;
      return;
    }

    //connections done.

    double right = 0;
    double driftOff = 0;
    double lastRight = Double.POSITIVE_INFINITY;
    double bestError = Double.POSITIVE_INFINITY;
    double tempRate;
    double totalWeight = 0;
    double totalValWeight = 0;
    double origRate = m_learningRate; //only used for when reset

    //ensure that at least 1 instance is trained through.
    if (numInVal == m_instances.numInstances()) {
      numInVal--;
    }
    if (numInVal < 0) {
      numInVal = 0;
    }
    for (int noa = numInVal; noa < m_instances.numInstances(); noa++) {
      if (!m_instances.instance(noa).classIsMissing()) {
        totalWeight += m_instances.instance(noa).weight();
      }
    }
    if (m_valSize != 0) {
      for (int noa = 0; noa < valSet.numInstances(); noa++) {
        if (!valSet.instance(noa).classIsMissing()) {
          totalValWeight += valSet.instance(noa).weight();
        }
      }
    }
    m_stopped = false;

    for (int noa = 1; noa < m_numEpochs + 1; noa++) {
      right = 0;
      for (int nob = numInVal; nob < m_instances.numInstances(); nob++) {
        m_currentInstance = m_instances.instance(nob);
        if (!m_currentInstance.classIsMissing()) {
          //this is where the network updating (and training occurs, for the
          //training set
          resetNetwork();
          calculateOutputs();
          tempRate = m_learningRate * m_currentInstance.weight();
          if (m_decay) {
            // learning rate decays as 1/epoch
            tempRate /= noa;
          }
          right += (calculateErrors() / m_instances.numClasses())
              * m_currentInstance.weight();
          updateNetworkWeights(tempRate, m_momentum);
        }
      }
      right /= totalWeight;
      if (Double.isInfinite(right) || Double.isNaN(right)) {
        if (!m_reset) {
          m_instances = null;
          throw new Exception("Network cannot train. Try restarting with a"
                              + " smaller learning rate.");
        } else {
          //reset the network if possible
          if (m_learningRate <= Utils.SMALL)
            throw new IllegalStateException(
                "Learning rate got too small ("
                + m_learningRate + " <= " + Utils.SMALL + ")!");
          // recursive restart with a halved rate; origRate is restored after
          m_learningRate /= 2;
          buildClassifier(i);
          m_learningRate = origRate;
          m_instances = new Instances(m_instances, 0);
          m_currentInstance = null;
          return;
        }
      }

      ////////////////////////do validation testing if applicable
      if (m_valSize != 0) {
        right = 0;
        for (int nob = 0; nob < valSet.numInstances(); nob++) {
          m_currentInstance = valSet.instance(nob);
          if (!m_currentInstance.classIsMissing()) {
            //this is where the network updating occurs, for the validation set
            resetNetwork();
            calculateOutputs();
            right += (calculateErrors() / valSet.numClasses())
                * m_currentInstance.weight();
            //note 'right' could be calculated here just using
            //the calculate output values. This would be faster.
            //be less modular
          }
        }

        if (right < lastRight) {
          if (right < bestError) {
            bestError = right;
            // save the network weights at this point
            for (int noc = 0; noc < m_numClasses; noc++) {
              m_outputs[noc].saveWeights();
            }
            driftOff = 0;
          }
        } else {
          driftOff++;
        }
        lastRight = right;
        // stop once validation error has drifted upward too many times in a
        // row (or at the final epoch), restoring the best weights seen
        if (driftOff > m_driftThreshold || noa + 1 >= m_numEpochs) {
          for (int noc = 0; noc < m_numClasses; noc++) {
            m_outputs[noc].restoreWeights();
          }
          m_accepted = true;
        }
        right /= totalValWeight;
      }
      m_epoch = noa;
      m_error = right;
      //shows what the neuralnet is upto if a gui exists.
      updateDisplay();
      //This junction controls what state the gui is in at the end of each
      //epoch, Such as if it is paused, if it is resumable etc...
      if (m_gui) {
        while ((m_stopIt || (m_epoch >= m_numEpochs && m_valSize == 0))
               && !m_accepted) {
          m_stopIt = true;
          m_stopped = true;
          if (m_epoch >= m_numEpochs && m_valSize == 0) {
            m_controlPanel.m_startStop.setEnabled(false);
          } else {
            m_controlPanel.m_startStop.setEnabled(true);
          }
          m_controlPanel.m_startStop.setText("Start");
          m_controlPanel.m_startStop.setActionCommand("Start");
          m_controlPanel.m_changeEpochs.setEnabled(true);
          m_controlPanel.m_changeLearning.setEnabled(true);
          m_controlPanel.m_changeMomentum.setEnabled(true);

          blocker(true);

          if (m_numeric) {
            setEndsToLinear();
          }
        }
        m_controlPanel.m_changeEpochs.setEnabled(false);
        m_controlPanel.m_changeLearning.setEnabled(false);
        m_controlPanel.m_changeMomentum.setEnabled(false);

        m_stopped = false;
        //if the network has been accepted stop the training loop
        if (m_accepted) {
          m_win.dispose();
          m_controlPanel = null;
          m_nodePanel = null;
          m_instances = new Instances(m_instances, 0);
          m_currentInstance = null;
          return;
        }
      }
      if (m_accepted) {
        m_instances = new Instances(m_instances, 0);
        m_currentInstance = null;
        return;
      }
    }
    if (m_gui) {
      m_win.dispose();
      m_controlPanel = null;
      m_nodePanel = null;
    }
    // keep only the header of the training data
    m_instances = new Instances(m_instances, 0);
    m_currentInstance = null;
  }

  /**
   * Call this function to predict the class of an instance once a
   * classification model has been built with the buildClassifier call.
   * @param i The instance to classify.
   * @return A double array filled with the probabilities of each class type.
   * @throws Exception if can't classify instance.
   */
  public double[] distributionForInstance(Instance i) throws Exception {
    // default model?
if (m_useDefaultModel) {
      return m_ZeroR.distributionForInstance(i);
    }

    // work on a copy so the caller's instance is never mutated
    m_currentInstance = new DenseInstance(i);

    if (m_useNomToBin) {
      m_nominalToBinaryFilter.input(m_currentInstance);
      m_currentInstance = m_nominalToBinaryFilter.output();
    }

    if (m_normalizeAttributes) {
      // apply the same range/base mapping recorded during training
      for (int noa = 0; noa < m_instances.numAttributes(); noa++) {
        if (noa != m_instances.classIndex()) {
          if (m_attributeRanges[noa] != 0) {
            m_currentInstance.setValue(
                noa,
                (m_currentInstance.value(noa) - m_attributeBases[noa])
                / m_attributeRanges[noa]);
          } else {
            m_currentInstance.setValue(
                noa,
                m_currentInstance.value(noa) - m_attributeBases[noa]);
          }
        }
      }
    }
    resetNetwork();

    //since all the output values are needed.
    //They are calculated manually here and the values collected.
    double[] theArray = new double[m_numClasses];
    for (int noa = 0; noa < m_numClasses; noa++) {
      theArray[noa] = m_outputs[noa].outputValue(true);
    }

    if (m_instances.classAttribute().isNumeric()) {
      return theArray;
    }

    //now normalize the array
    double count = 0;
    for (int noa = 0; noa < m_numClasses; noa++) {
      count += theArray[noa];
    }
    if (count <= 0) {
      // degenerate output: fall back on the ZeroR prior
      return m_ZeroR.distributionForInstance(i);
    }
    for (int noa = 0; noa < m_numClasses; noa++) {
      theArray[noa] /= count;
    }
    return theArray;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(14);

    newVector.addElement(new Option(
        "\tLearning Rate for the backpropagation algorithm.\n"
        + "\t(Value should be between 0 - 1, Default = 0.3).",
        "L", 1, "-L <learning rate>"));
    newVector.addElement(new Option(
        "\tMomentum Rate for the backpropagation algorithm.\n"
        + "\t(Value should be between 0 - 1, Default = 0.2).",
        "M", 1, "-M <momentum>"));
    newVector.addElement(new Option(
        "\tNumber of epochs to train through.\n"
        + "\t(Default = 500).",
        "N", 1, "-N <number of epochs>"));
    newVector.addElement(new Option(
        "\tPercentage size of validation set to use to terminate\n"
        + "\ttraining (if this is non zero it can pre-empt num of epochs.\n"
        + "\t(Value should be between 0 - 100, Default = 0).",
        "V", 1, "-V <percentage size of validation set>"));
    newVector.addElement(new Option(
        "\tThe value used to seed the random number generator\n"
        + "\t(Value should be >= 0 and and a long, Default = 0).",
        "S", 1, "-S <seed>"));
    newVector.addElement(new Option(
        "\tThe consequetive number of errors allowed for validation\n"
        + "\ttesting before the netwrok terminates.\n"
        + "\t(Value should be > 0, Default = 20).",
        "E", 1, "-E <threshold for number of consequetive errors>"));
    newVector.addElement(new Option(
        "\tGUI will be opened.\n"
        + "\t(Use this to bring up a GUI).",
        "G", 0, "-G"));
    newVector.addElement(new Option(
        "\tAutocreation of the network connections will NOT be done.\n"
        + "\t(This will be ignored if -G is NOT set)",
        "A", 0, "-A"));
    newVector.addElement(new Option(
        "\tA NominalToBinary filter will NOT automatically be used.\n"
        + "\t(Set this to not use a NominalToBinary filter).",
        "B", 0, "-B"));
    newVector.addElement(new Option(
        "\tThe hidden layers to be created for the network.\n"
        + "\t(Value should be a list of comma separated Natural \n"
        + "\tnumbers or the letters 'a' = (attribs + classes) / 2, \n"
        + "\t'i' = attribs, 'o' = classes, 't' = attribs .+ classes)\n"
        + "\tfor wildcard values, Default = a).",
        "H", 1, "-H <comma seperated numbers for nodes on each layer>"));
    newVector.addElement(new Option(
        "\tNormalizing a numeric class will NOT be done.\n"
        + "\t(Set this to not normalize the class if it's numeric).",
        "C", 0, "-C"));
    newVector.addElement(new Option(
        "\tNormalizing the attributes will NOT be done.\n"
        + "\t(Set this to not normalize the attributes).",
        "I", 0, "-I"));
    newVector.addElement(new Option(
        "\tReseting the network will NOT be allowed.\n"
        + "\t(Set this to not allow the network to reset).",
        "R", 0, "-R"));
    newVector.addElement(new Option(
        "\tLearning rate decay will occur.\n"
        + "\t(Set this to cause the learning rate to decay).",
        "D", 0, "-D"));

    return newVector.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -L &lt;learning rate&gt;
   *  Learning Rate for the backpropagation algorithm.
   *  (Value should be between 0 - 1, Default = 0.3).</pre>
   *
   * <pre> -M &lt;momentum&gt;
   *  Momentum Rate for the backpropagation algorithm.
   *  (Value should be between 0 - 1, Default = 0.2).</pre>
   *
   * <pre> -N &lt;number of epochs&gt;
   *  Number of epochs to train through.
   *  (Default = 500).</pre>
   *
   * <pre> -V &lt;percentage size of validation set&gt;
   *  Percentage size of validation set to use to terminate
   *  training (if this is non zero it can pre-empt num of epochs.
   *  (Value should be between 0 - 100, Default = 0).</pre>
   *
   * <pre> -S &lt;seed&gt;
   *  The value used to seed the random number generator
   *  (Value should be &gt;= 0 and and a long, Default = 0).</pre>
   *
   * <pre> -E &lt;threshold for number of consequetive errors&gt;
   *  The consequetive number of errors allowed for validation
   *  testing before the netwrok terminates.
   *  (Value should be &gt; 0, Default = 20).</pre>
   *
   * <pre> -G
   *  GUI will be opened.
   *  (Use this to bring up a GUI).</pre>
   *
   * <pre> -A
   *  Autocreation of the network connections will NOT be done.
* (This will be ignored if -G is NOT set)</pre> * * <pre> -B * A NominalToBinary filter will NOT automatically be used. * (Set this to not use a NominalToBinary filter).</pre> * * <pre> -H &lt;comma seperated numbers for nodes on each layer&gt; * The hidden layers to be created for the network. * (Value should be a list of comma separated Natural * numbers or the letters 'a' = (attribs + classes) / 2, * 'i' = attribs, 'o' = classes, 't' = attribs .+ classes) * for wildcard values, Default = a).</pre> * * <pre> -C * Normalizing a numeric class will NOT be done. * (Set this to not normalize the class if it's numeric).</pre> * * <pre> -I * Normalizing the attributes will NOT be done. * (Set this to not normalize the attributes).</pre> * * <pre> -R * Reseting the network will NOT be allowed. * (Set this to not allow the network to reset).</pre> * * <pre> -D * Learning rate decay will occur. * (Set this to cause the learning rate to decay).</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { //the defaults can be found here!!!! 
String learningString = Utils.getOption('L', options); if (learningString.length() != 0) { setLearningRate((new Double(learningString)).doubleValue()); } else { setLearningRate(0.3); } String momentumString = Utils.getOption('M', options); if (momentumString.length() != 0) { setMomentum((new Double(momentumString)).doubleValue()); } else { setMomentum(0.2); } String epochsString = Utils.getOption('N', options); if (epochsString.length() != 0) { setTrainingTime(Integer.parseInt(epochsString)); } else { setTrainingTime(500); } String valSizeString = Utils.getOption('V', options); if (valSizeString.length() != 0) { setValidationSetSize(Integer.parseInt(valSizeString)); } else { setValidationSetSize(0); } String seedString = Utils.getOption('S', options); if (seedString.length() != 0) { setSeed(Integer.parseInt(seedString)); } else { setSeed(0); } String thresholdString = Utils.getOption('E', options); if (thresholdString.length() != 0) { setValidationThreshold(Integer.parseInt(thresholdString)); } else { setValidationThreshold(20); } String hiddenLayers = Utils.getOption('H', options); if (hiddenLayers.length() != 0) { setHiddenLayers(hiddenLayers); } else { setHiddenLayers("a"); } if (Utils.getFlag('G', options)) { setGUI(true); } else { setGUI(false); } //small note. 
since the gui is the only option that can change the other //options this should be set first to allow the other options to set //properly if (Utils.getFlag('A', options)) { setAutoBuild(false); } else { setAutoBuild(true); } if (Utils.getFlag('B', options)) { setNominalToBinaryFilter(false); } else { setNominalToBinaryFilter(true); } if (Utils.getFlag('C', options)) { setNormalizeNumericClass(false); } else { setNormalizeNumericClass(true); } if (Utils.getFlag('I', options)) { setNormalizeAttributes(false); } else { setNormalizeAttributes(true); } if (Utils.getFlag('R', options)) { setReset(false); } else { setReset(true); } if (Utils.getFlag('D', options)) { setDecay(true); } else { setDecay(false); } Utils.checkForRemainingOptions(options); } /** * Gets the current settings of NeuralNet. * * @return an array of strings suitable for passing to setOptions() */ public String [] getOptions() { String [] options = new String [21]; int current = 0; options[current++] = "-L"; options[current++] = "" + getLearningRate(); options[current++] = "-M"; options[current++] = "" + getMomentum(); options[current++] = "-N"; options[current++] = "" + getTrainingTime(); options[current++] = "-V"; options[current++] = "" +getValidationSetSize(); options[current++] = "-S"; options[current++] = "" + getSeed(); options[current++] = "-E"; options[current++] =""+getValidationThreshold(); options[current++] = "-H"; options[current++] = getHiddenLayers(); if (getGUI()) { options[current++] = "-G"; } if (!getAutoBuild()) { options[current++] = "-A"; } if (!getNominalToBinaryFilter()) { options[current++] = "-B"; } if (!getNormalizeNumericClass()) { options[current++] = "-C"; } if (!getNormalizeAttributes()) { options[current++] = "-I"; } if (!getReset()) { options[current++] = "-R"; } if (getDecay()) { options[current++] = "-D"; } while (current < options.length) { options[current++] = ""; } return options; } /** * @return string describing the model. 
*/ public String toString() { // only ZeroR model? if (m_useDefaultModel) { StringBuffer buf = new StringBuffer(); buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n"); buf.append(m_ZeroR.toString()); return buf.toString(); } StringBuffer model = new StringBuffer(m_neuralNodes.length * 100); //just a rough size guess NeuralNode con; double[] weights; NeuralConnection[] inputs; for (int noa = 0; noa < m_neuralNodes.length; noa++) { con = (NeuralNode) m_neuralNodes[noa]; //this would need a change //for items other than nodes!!! weights = con.getWeights(); inputs = con.getInputs(); if (con.getMethod() instanceof SigmoidUnit) { model.append("Sigmoid "); } else if (con.getMethod() instanceof LinearUnit) { model.append("Linear "); } model.append("Node " + con.getId() + "\n Inputs Weights\n"); model.append(" Threshold " + weights[0] + "\n"); for (int nob = 1; nob < con.getNumInputs() + 1; nob++) { if ((inputs[nob - 1].getType() & NeuralConnection.PURE_INPUT) == NeuralConnection.PURE_INPUT) { model.append(" Attrib " + m_instances.attribute(((NeuralEnd)inputs[nob-1]). getLink()).name() + " " + weights[nob] + "\n"); } else { model.append(" Node " + inputs[nob-1].getId() + " " + weights[nob] + "\n"); } } } //now put in the ends for (int noa = 0; noa < m_outputs.length; noa++) { inputs = m_outputs[noa].getInputs(); model.append("Class " + m_instances.classAttribute(). value(m_outputs[noa].getLink()) + "\n Input\n"); for (int nob = 0; nob < m_outputs[noa].getNumInputs(); nob++) { if ((inputs[nob].getType() & NeuralConnection.PURE_INPUT) == NeuralConnection.PURE_INPUT) { model.append(" Attrib " + m_instances.attribute(((NeuralEnd)inputs[nob]). 
getLink()).name() + "\n"); } else { model.append(" Node " + inputs[nob].getId() + "\n"); } } } return model.toString(); } /** * This will return a string describing the classifier. * @return The string. */ public String globalInfo() { return "A Classifier that uses backpropagation to classify instances.\n" + "This network can be built by hand, created by an algorithm or both. " + "The network can also be monitored and modified during training time. " + "The nodes in this network are all sigmoid (except for when the class " + "is numeric in which case the the output nodes become unthresholded " + "linear units)."; } /** * @return a string to describe the learning rate option. */ public String learningRateTipText() { return "The amount the" + " weights are updated."; } /** * @return a string to describe the momentum option. */ public String momentumTipText() { return "Momentum applied to the weights during updating."; } /** * @return a string to describe the AutoBuild option. */ public String autoBuildTipText() { return "Adds and connects up hidden layers in the network."; } /** * @return a string to describe the random seed option. */ public String seedTipText() { return "Seed used to initialise the random number generator." + "Random numbers are used for setting the initial weights of the" + " connections betweem nodes, and also for shuffling the training data."; } /** * @return a string to describe the validation threshold option. */ public String validationThresholdTipText() { return "Used to terminate validation testing." + "The value here dictates how many times in a row the validation set" + " error can get worse before training is terminated."; } /** * @return a string to describe the GUI option. */ public String GUITipText() { return "Brings up a gui interface." 
+ " This will allow the pausing and altering of the nueral network" + " during training.\n\n" + "* To add a node left click (this node will be automatically selected," + " ensure no other nodes were selected).\n" + "* To select a node left click on it either while no other node is" + " selected or while holding down the control key (this toggles that" + " node as being selected and not selected.\n" + "* To connect a node, first have the start node(s) selected, then click"+ " either the end node or on an empty space (this will create a new node"+ " that is connected with the selected nodes). The selection status of" + " nodes will stay the same after the connection. (Note these are" + " directed connections, also a connection between two nodes will not" + " be established more than once and certain connections that are" + " deemed to be invalid will not be made).\n" + "* To remove a connection select one of the connected node(s) in the" + " connection and then right click the other node (it does not matter" + " whether the node is the start or end the connection will be removed" + ").\n" + "* To remove a node right click it while no other nodes (including it)" + " are selected. (This will also remove all connections to it)\n." + "* To deselect a node either left click it while holding down control," + " or right click on empty space.\n" + "* The raw inputs are provided from the labels on the left.\n" + "* The red nodes are hidden layers.\n" + "* The orange nodes are the output nodes.\n" + "* The labels on the right show the class the output node represents." 
+ " Note that with a numeric class the output node will automatically be" + " made into an unthresholded linear unit.\n\n" + "Alterations to the neural network can only be done while the network" + " is not running, This also applies to the learning rate and other" + " fields on the control panel.\n\n" + "* You can accept the network as being finished at any time.\n" + "* The network is automatically paused at the beginning.\n" + "* There is a running indication of what epoch the network is up to" + " and what the (rough) error for that epoch was (or for" + " the validation if that is being used). Note that this error value" + " is based on a network that changes as the value is computed." + " (also depending on whether" + " the class is normalized will effect the error reported for numeric" + " classes.\n" + "* Once the network is done it will pause again and either wait to be" + " accepted or trained more.\n\n" + "Note that if the gui is not set the network will not require any" + " interaction.\n"; } /** * @return a string to describe the validation size option. */ public String validationSetSizeTipText() { return "The percentage size of the validation set." + "(The training will continue until it is observed that" + " the error on the validation set has been consistently getting" + " worse, or if the training time is reached).\n" + "If This is set to zero no validation set will be used and instead" + " the network will train for the specified number of epochs."; } /** * @return a string to describe the learning rate option. */ public String trainingTimeTipText() { return "The number of epochs to train through." + " If the validation set is non-zero then it can terminate the network" + " early"; } /** * @return a string to describe the nominal to binary option. */ public String nominalToBinaryFilterTipText() { return "This will preprocess the instances with the filter." 
+ " This could help improve performance if there are nominal attributes" + " in the data."; } /** * @return a string to describe the hidden layers in the network. */ public String hiddenLayersTipText() { return "This defines the hidden layers of the neural network." + " This is a list of positive whole numbers. 1 for each hidden layer." + " Comma seperated. To have no hidden layers put a single 0 here." + " This will only be used if autobuild is set. There are also wildcard" + " values 'a' = (attribs + classes) / 2, 'i' = attribs, 'o' = classes" + " , 't' = attribs + classes."; } /** * @return a string to describe the nominal to binary option. */ public String normalizeNumericClassTipText() { return "This will normalize the class if it's numeric." + " This could help improve performance of the network, It normalizes" + " the class to be between -1 and 1. Note that this is only internally" + ", the output will be scaled back to the original range."; } /** * @return a string to describe the nominal to binary option. */ public String normalizeAttributesTipText() { return "This will normalize the attributes." + " This could help improve performance of the network." + " This is not reliant on the class being numeric. This will also" + " normalize nominal attributes as well (after they have been run" + " through the nominal to binary filter if that is in use) so that the" + " nominal values are between -1 and 1"; } /** * @return a string to describe the Reset option. */ public String resetTipText() { return "This will allow the network to reset with a lower learning rate." + " If the network diverges from the answer this will automatically" + " reset the network with a lower learning rate and begin training" + " again. This option is only available if the gui is not set. Note" + " that if the network diverges but isn't allowed to reset it will" + " fail the training process and return an error message."; } /** * @return a string to describe the Decay option. 
*/ public String decayTipText() { return "This will cause the learning rate to decrease." + " This will divide the starting learning rate by the epoch number, to" + " determine what the current learning rate should be. This may help" + " to stop the network from diverging from the target output, as well" + " as improve general performance. Note that the decaying learning" + " rate will not be shown in the gui, only the original learning rate" + ". If the learning rate is changed in the gui, this is treated as the" + " starting learning rate."; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9444 $"); } }
82,568
29.592442
290
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/PLSClassifier.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * PLSClassifier.java * Copyright (C) 2006 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.functions; import weka.classifiers.Classifier; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.filters.Filter; import weka.filters.supervised.attribute.PLSFilter; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * A wrapper classifier for the PLSFilter, utilizing the PLSFilter's ability to perform predictions. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -filter &lt;filter specification&gt; * The PLS filter to use. Full classname of filter to include, followed by scheme options. 
* (default: weka.filters.supervised.attribute.PLSFilter)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> * Options specific to filter weka.filters.supervised.attribute.PLSFilter ('-filter'): * </pre> * * <pre> -D * Turns on output of debugging information.</pre> * * <pre> -C &lt;num&gt; * The number of components to compute. * (default: 20)</pre> * * <pre> -U * Updates the class attribute as well. * (default: off)</pre> * * <pre> -M * Turns replacing of missing values on. * (default: off)</pre> * * <pre> -A &lt;SIMPLS|PLS1&gt; * The algorithm to use. * (default: PLS1)</pre> * * <pre> -P &lt;none|center|standardize&gt; * The type of preprocessing that is applied to the data. * (default: center)</pre> * <!-- options-end --> * * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision: 1.4 $ */ public class PLSClassifier extends AbstractClassifier { /** for serialization */ private static final long serialVersionUID = 4819775160590973256L; /** the PLS filter */ protected PLSFilter m_Filter = new PLSFilter(); /** the actual filter to use */ protected PLSFilter m_ActualFilter = null; /** * Returns a string describing classifier * * @return a description suitable for displaying in the * explorer/experimenter gui */ public String globalInfo() { return "A wrapper classifier for the PLSFilter, utilizing the PLSFilter's " + "ability to perform predictions."; } /** * Gets an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions(){ Vector result; Enumeration en; result = new Vector(); result.addElement(new Option( "\tThe PLS filter to use. 
Full classname of filter to include, " + "\tfollowed by scheme options.\n" + "\t(default: weka.filters.supervised.attribute.PLSFilter)", "filter", 1, "-filter <filter specification>")); en = super.listOptions(); while (en.hasMoreElements()) result.addElement(en.nextElement()); if (getFilter() instanceof OptionHandler) { result.addElement(new Option( "", "", 0, "\nOptions specific to filter " + getFilter().getClass().getName() + " ('-filter'):")); en = ((OptionHandler) getFilter()).listOptions(); while (en.hasMoreElements()) result.addElement(en.nextElement()); } return result.elements(); } /** * returns the options of the current setup * * @return the current options */ public String[] getOptions(){ int i; Vector result; String[] options; result = new Vector(); result.add("-filter"); if (getFilter() instanceof OptionHandler) result.add( getFilter().getClass().getName() + " " + Utils.joinOptions(((OptionHandler) getFilter()).getOptions())); else result.add( getFilter().getClass().getName()); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); return (String[]) result.toArray(new String[result.size()]); } /** * Parses the options for this object. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -filter &lt;filter specification&gt; * The PLS filter to use. Full classname of filter to include, followed by scheme options. * (default: weka.filters.supervised.attribute.PLSFilter)</pre> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * * <pre> * Options specific to filter weka.filters.supervised.attribute.PLSFilter ('-filter'): * </pre> * * <pre> -D * Turns on output of debugging information.</pre> * * <pre> -C &lt;num&gt; * The number of components to compute. * (default: 20)</pre> * * <pre> -U * Updates the class attribute as well. * (default: off)</pre> * * <pre> -M * Turns replacing of missing values on. 
* (default: off)</pre> * * <pre> -A &lt;SIMPLS|PLS1&gt; * The algorithm to use. * (default: PLS1)</pre> * * <pre> -P &lt;none|center|standardize&gt; * The type of preprocessing that is applied to the data. * (default: center)</pre> * <!-- options-end --> * * @param options the options to use * @throws Exception if setting of options fails */ public void setOptions(String[] options) throws Exception { String tmpStr; String[] tmpOptions; super.setOptions(options); tmpStr = Utils.getOption("filter", options); tmpOptions = Utils.splitOptions(tmpStr); if (tmpOptions.length != 0) { tmpStr = tmpOptions[0]; tmpOptions[0] = ""; setFilter((Filter) Utils.forName(Filter.class, tmpStr, tmpOptions)); } } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String filterTipText() { return "The PLS filter to be used (only used for setup)."; } /** * Set the PLS filter (only used for setup). * * @param value the kernel filter. * @throws Exception if not PLSFilter */ public void setFilter(Filter value) throws Exception { if (!(value instanceof PLSFilter)) throw new Exception("Filter has to be PLSFilter!"); else m_Filter = (PLSFilter) value; } /** * Get the PLS filter. * * @return the PLS filter */ public Filter getFilter() { return m_Filter; } /** * Returns default capabilities of the classifier. * * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = getFilter().getCapabilities(); // class result.enable(Capability.MISSING_CLASS_VALUES); // other result.setMinimumNumberInstances(1); return result; } /** * builds the classifier * * @param data the training instances * @throws Exception if something goes wrong */ public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? 
getCapabilities().testWithFail(data); // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); // initialize filter m_ActualFilter = (PLSFilter) Filter.makeCopy(m_Filter); m_ActualFilter.setPerformPrediction(false); m_ActualFilter.setInputFormat(data); Filter.useFilter(data, m_ActualFilter); m_ActualFilter.setPerformPrediction(true); } /** * Classifies the given test instance. The instance has to belong to a * dataset when it's being classified. * * @param instance the instance to be classified * @return the predicted most likely class for the instance or * Instance.missingValue() if no prediction is made * @throws Exception if an error occurred during the prediction */ public double classifyInstance(Instance instance) throws Exception { double result; Instance pred; m_ActualFilter.input(instance); m_ActualFilter.batchFinished(); pred = m_ActualFilter.output(); result = pred.classValue(); return result; } /** * returns a string representation of the classifier * * @return a string representation of the classifier */ public String toString() { String result; result = this.getClass().getName() + "\n" + this.getClass().getName().replaceAll(".", "=") + "\n\n"; result += "# Components..........: " + m_Filter.getNumComponents() + "\n"; result += "Algorithm.............: " + m_Filter.getAlgorithm().getSelectedTag().getReadable() + "\n"; result += "Replace missing values: " + (m_Filter.getReplaceMissing() ? "yes" : "no") + "\n"; result += "Preprocessing.........: " + m_Filter.getPreprocessing().getSelectedTag().getReadable() + "\n"; return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.4 $"); } /** * Main method for running this classifier from commandline. * * @param args the options */ public static void main(String[] args) { runClassifier(new PLSClassifier(), args); } }
10,231
26.956284
109
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/PaceRegression.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * PaceRegression.java * Copyright (C) 2002 University of Waikato, Hamilton, New Zealand */ package weka.classifiers.functions; import weka.classifiers.Classifier; import weka.classifiers.functions.pace.ChisqMixture; import weka.classifiers.functions.pace.MixtureDistribution; import weka.classifiers.functions.pace.NormalMixture; import weka.classifiers.functions.pace.PaceMatrix; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.NoSupportForMissingValuesException; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.core.WekaException; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.matrix.DoubleVector; import weka.core.matrix.IntVector; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * Class for building pace regression linear models and using them for prediction. 
<br/> * <br/> * Under regularity conditions, pace regression is provably optimal when the number of coefficients tends to infinity. It consists of a group of estimators that are either overall optimal or optimal under certain conditions.<br/> * <br/> * The current work of the pace regression theory, and therefore also this implementation, do not handle: <br/> * <br/> * - missing values <br/> * - non-binary nominal attributes <br/> * - the case that n - k is small where n is the number of instances and k is the number of coefficients (the threshold used in this implmentation is 20)<br/> * <br/> * For more information see:<br/> * <br/> * Wang, Y (2000). A new approach to fitting linear models in high dimensional spaces. Hamilton, New Zealand.<br/> * <br/> * Wang, Y., Witten, I. H.: Modeling for optimal probability prediction. In: Proceedings of the Nineteenth International Conference in Machine Learning, Sydney, Australia, 650-657, 2002. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;phdthesis{Wang2000, * address = {Hamilton, New Zealand}, * author = {Wang, Y}, * school = {Department of Computer Science, University of Waikato}, * title = {A new approach to fitting linear models in high dimensional spaces}, * year = {2000} * } * * &#64;inproceedings{Wang2002, * address = {Sydney, Australia}, * author = {Wang, Y. and Witten, I. H.}, * booktitle = {Proceedings of the Nineteenth International Conference in Machine Learning}, * pages = {650-657}, * title = {Modeling for optimal probability prediction}, * year = {2002} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Produce debugging output. 
* (default no debugging output)</pre> * * <pre> -E &lt;estimator&gt; * The estimator can be one of the following: * eb -- Empirical Bayes estimator for noraml mixture (default) * nested -- Optimal nested model selector for normal mixture * subset -- Optimal subset selector for normal mixture * pace2 -- PACE2 for Chi-square mixture * pace4 -- PACE4 for Chi-square mixture * pace6 -- PACE6 for Chi-square mixture * * ols -- Ordinary least squares estimator * aic -- AIC estimator * bic -- BIC estimator * ric -- RIC estimator * olsc -- Ordinary least squares subset selector with a threshold</pre> * * <pre> -S &lt;threshold value&gt; * Threshold value for the OLSC estimator</pre> * <!-- options-end --> * * @author Yong Wang (yongwang@cs.waikato.ac.nz) * @author Gabi Schmidberger (gabi@cs.waikato.ac.nz) * @version $Revision: 5523 $ */ public class PaceRegression extends AbstractClassifier implements OptionHandler, WeightedInstancesHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = 7230266976059115435L; /** The model used */ Instances m_Model = null; /** Array for storing coefficients of linear regression. 
*/ private double[] m_Coefficients; /** The index of the class attribute */ private int m_ClassIndex; /** True if debug output will be printed */ private boolean m_Debug; /** estimator type: Ordinary least squares */ private static final int olsEstimator = 0; /** estimator type: Empirical Bayes */ private static final int ebEstimator = 1; /** estimator type: Nested model selector */ private static final int nestedEstimator = 2; /** estimator type: Subset selector */ private static final int subsetEstimator = 3; /** estimator type:PACE2 */ private static final int pace2Estimator = 4; /** estimator type: PACE4 */ private static final int pace4Estimator = 5; /** estimator type: PACE6 */ private static final int pace6Estimator = 6; /** estimator type: Ordinary least squares selection */ private static final int olscEstimator = 7; /** estimator type: AIC */ private static final int aicEstimator = 8; /** estimator type: BIC */ private static final int bicEstimator = 9; /** estimator type: RIC */ private static final int ricEstimator = 10; /** estimator types */ public static final Tag [] TAGS_ESTIMATOR = { new Tag(olsEstimator, "Ordinary least squares"), new Tag(ebEstimator, "Empirical Bayes"), new Tag(nestedEstimator, "Nested model selector"), new Tag(subsetEstimator, "Subset selector"), new Tag(pace2Estimator, "PACE2"), new Tag(pace4Estimator, "PACE4"), new Tag(pace6Estimator, "PACE6"), new Tag(olscEstimator, "Ordinary least squares selection"), new Tag(aicEstimator, "AIC"), new Tag(bicEstimator, "BIC"), new Tag(ricEstimator, "RIC") }; /** the estimator */ private int paceEstimator = ebEstimator; private double olscThreshold = 2; // AIC /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class for building pace regression linear models and using them for " +"prediction. 
\n\n" +"Under regularity conditions, pace regression is provably optimal when " +"the number of coefficients tends to infinity. It consists of a group of " +"estimators that are either overall optimal or optimal under certain " +"conditions.\n\n" +"The current work of the pace regression theory, and therefore also this " +"implementation, do not handle: \n\n" +"- missing values \n" +"- non-binary nominal attributes \n" +"- the case that n - k is small where n is the number of instances and k is " +"the number of coefficients (the threshold used in this implmentation is 20)\n\n" +"For more information see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.PHDTHESIS); result.setValue(Field.AUTHOR, "Wang, Y"); result.setValue(Field.YEAR, "2000"); result.setValue(Field.TITLE, "A new approach to fitting linear models in high dimensional spaces"); result.setValue(Field.SCHOOL, "Department of Computer Science, University of Waikato"); result.setValue(Field.ADDRESS, "Hamilton, New Zealand"); additional = result.add(Type.INPROCEEDINGS); additional.setValue(Field.AUTHOR, "Wang, Y. and Witten, I. H."); additional.setValue(Field.YEAR, "2002"); additional.setValue(Field.TITLE, "Modeling for optimal probability prediction"); additional.setValue(Field.BOOKTITLE, "Proceedings of the Nineteenth International Conference in Machine Learning"); additional.setValue(Field.YEAR, "2002"); additional.setValue(Field.PAGES, "650-657"); additional.setValue(Field.ADDRESS, "Sydney, Australia"); return result; } /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.BINARY_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); // class result.enable(Capability.NUMERIC_CLASS); result.enable(Capability.DATE_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); return result; } /** * Builds a pace regression model for the given data. * * @param data the training data to be used for generating the * linear regression function * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances data) throws Exception { // can classifier handle the data? Capabilities cap = getCapabilities(); cap.setMinimumNumberInstances(20 + data.numAttributes()); cap.testWithFail(data); // remove instances with missing class data = new Instances(data); data.deleteWithMissingClass(); /* * initialize the following */ m_Model = new Instances(data, 0); m_ClassIndex = data.classIndex(); double[][] transformedDataMatrix = getTransformedDataMatrix(data, m_ClassIndex); double[] classValueVector = data.attributeToDoubleArray(m_ClassIndex); m_Coefficients = null; /* * Perform pace regression */ m_Coefficients = pace(transformedDataMatrix, classValueVector); } /** * pace regression * * @param matrix_X matrix with observations * @param vector_Y vektor with class values * @return vector with coefficients */ private double [] pace(double[][] matrix_X, double [] vector_Y) { PaceMatrix X = new PaceMatrix( matrix_X ); PaceMatrix Y = new PaceMatrix( vector_Y, vector_Y.length ); IntVector pvt = IntVector.seq(0, X.getColumnDimension()-1); int n = X.getRowDimension(); int kr = X.getColumnDimension(); X.lsqrSelection( Y, pvt, 1 ); X.positiveDiagonal( Y, pvt ); PaceMatrix sol = (PaceMatrix) Y.clone(); X.rsolve( sol, pvt, pvt.size() ); DoubleVector r = Y.getColumn( pvt.size(), n-1, 0); double sde = Math.sqrt(r.sum2() / r.size()); 
DoubleVector aHat = Y.getColumn( 0, pvt.size()-1, 0).times( 1./sde ); DoubleVector aTilde = null; switch( paceEstimator) { case ebEstimator: case nestedEstimator: case subsetEstimator: NormalMixture d = new NormalMixture(); d.fit( aHat, MixtureDistribution.NNMMethod ); if( paceEstimator == ebEstimator ) aTilde = d.empiricalBayesEstimate( aHat ); else if( paceEstimator == ebEstimator ) aTilde = d.subsetEstimate( aHat ); else aTilde = d.nestedEstimate( aHat ); break; case pace2Estimator: case pace4Estimator: case pace6Estimator: DoubleVector AHat = aHat.square(); ChisqMixture dc = new ChisqMixture(); dc.fit( AHat, MixtureDistribution.NNMMethod ); DoubleVector ATilde; if( paceEstimator == pace6Estimator ) ATilde = dc.pace6( AHat ); else if( paceEstimator == pace2Estimator ) ATilde = dc.pace2( AHat ); else ATilde = dc.pace4( AHat ); aTilde = ATilde.sqrt().times( aHat.sign() ); break; case olsEstimator: aTilde = aHat.copy(); break; case aicEstimator: case bicEstimator: case ricEstimator: case olscEstimator: if(paceEstimator == aicEstimator) olscThreshold = 2; else if(paceEstimator == bicEstimator) olscThreshold = Math.log( n ); else if(paceEstimator == ricEstimator) olscThreshold = 2*Math.log( kr ); aTilde = aHat.copy(); for( int i = 0; i < aTilde.size(); i++ ) if( Math.abs(aTilde.get(i)) < Math.sqrt(olscThreshold) ) aTilde.set(i, 0); } PaceMatrix YTilde = new PaceMatrix((new PaceMatrix(aTilde)).times( sde )); X.rsolve( YTilde, pvt, pvt.size() ); DoubleVector betaTilde = YTilde.getColumn(0).unpivoting( pvt, kr ); return betaTilde.getArrayCopy(); } /** * Checks if an instance has a missing value. * @param instance the instance * @param model the data * @return true if missing value is present */ public boolean checkForMissing(Instance instance, Instances model) { for (int j = 0; j < instance.numAttributes(); j++) { if (j != model.classIndex()) { if (instance.isMissing(j)) { return true; } } } return false; } /** * Transforms dataset into a two-dimensional array. 
* * @param data dataset * @param classIndex index of the class attribute * @return the transformed data */ private double [][] getTransformedDataMatrix(Instances data, int classIndex) { int numInstances = data.numInstances(); int numAttributes = data.numAttributes(); int middle = classIndex; if (middle < 0) { middle = numAttributes; } double[][] result = new double[numInstances] [numAttributes]; for (int i = 0; i < numInstances; i++) { Instance inst = data.instance(i); result[i][0] = 1.0; // the class value (lies on index middle) is left out for (int j = 0; j < middle; j++) { result[i][j + 1] = inst.value(j); } for (int j = middle + 1; j < numAttributes; j++) { result[i][j] = inst.value(j); } } return result; } /** * Classifies the given instance using the linear regression function. * * @param instance the test instance * @return the classification * @throws Exception if classification can't be done successfully */ public double classifyInstance(Instance instance) throws Exception { if (m_Coefficients == null) { throw new Exception("Pace Regression: No model built yet."); } // check for missing data and throw exception if some are found if (checkForMissing(instance, m_Model)) { throw new NoSupportForMissingValuesException("Can't handle missing values!"); } // Calculate the dependent variable from the regression model return regressionPrediction(instance, m_Coefficients); } /** * Outputs the linear regression model as a string. 
* * @return the model as string */ public String toString() { if (m_Coefficients == null) { return "Pace Regression: No model built yet."; } // try { StringBuffer text = new StringBuffer(); text.append("\nPace Regression Model\n\n"); text.append(m_Model.classAttribute().name()+" =\n\n"); int index = 0; text.append(Utils.doubleToString(m_Coefficients[0], 12, 4) ); for (int i = 1; i < m_Coefficients.length; i++) { // jump over the class attribute if (index == m_ClassIndex) index++; if (m_Coefficients[i] != 0.0) { // output a coefficient if unequal zero text.append(" +\n"); text.append(Utils.doubleToString(m_Coefficients[i], 12, 4) + " * "); text.append(m_Model.attribute(index).name()); } index ++; } return text.toString(); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector newVector = new Vector(2); newVector.addElement(new Option("\tProduce debugging output.\n" + "\t(default no debugging output)", "D", 0, "-D")); newVector.addElement(new Option("\tThe estimator can be one of the following:\n" + "\t\teb -- Empirical Bayes estimator for noraml mixture (default)\n" + "\t\tnested -- Optimal nested model selector for normal mixture\n" + "\t\tsubset -- Optimal subset selector for normal mixture\n" + "\t\tpace2 -- PACE2 for Chi-square mixture\n" + "\t\tpace4 -- PACE4 for Chi-square mixture\n" + "\t\tpace6 -- PACE6 for Chi-square mixture\n\n" + "\t\tols -- Ordinary least squares estimator\n" + "\t\taic -- AIC estimator\n" + "\t\tbic -- BIC estimator\n" + "\t\tric -- RIC estimator\n" + "\t\tolsc -- Ordinary least squares subset selector with a threshold", "E", 0, "-E <estimator>")); newVector.addElement(new Option("\tThreshold value for the OLSC estimator", "S", 0, "-S <threshold value>")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Produce debugging output. 
* (default no debugging output)</pre> * * <pre> -E &lt;estimator&gt; * The estimator can be one of the following: * eb -- Empirical Bayes estimator for noraml mixture (default) * nested -- Optimal nested model selector for normal mixture * subset -- Optimal subset selector for normal mixture * pace2 -- PACE2 for Chi-square mixture * pace4 -- PACE4 for Chi-square mixture * pace6 -- PACE6 for Chi-square mixture * * ols -- Ordinary least squares estimator * aic -- AIC estimator * bic -- BIC estimator * ric -- RIC estimator * olsc -- Ordinary least squares subset selector with a threshold</pre> * * <pre> -S &lt;threshold value&gt; * Threshold value for the OLSC estimator</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setDebug(Utils.getFlag('D', options)); String estimator = Utils.getOption('E', options); if ( estimator.equals("ols") ) paceEstimator = olsEstimator; else if ( estimator.equals("olsc") ) paceEstimator = olscEstimator; else if( estimator.equals("eb") || estimator.equals("") ) paceEstimator = ebEstimator; else if ( estimator.equals("nested") ) paceEstimator = nestedEstimator; else if ( estimator.equals("subset") ) paceEstimator = subsetEstimator; else if ( estimator.equals("pace2") ) paceEstimator = pace2Estimator; else if ( estimator.equals("pace4") ) paceEstimator = pace4Estimator; else if ( estimator.equals("pace6") ) paceEstimator = pace6Estimator; else if ( estimator.equals("aic") ) paceEstimator = aicEstimator; else if ( estimator.equals("bic") ) paceEstimator = bicEstimator; else if ( estimator.equals("ric") ) paceEstimator = ricEstimator; else throw new WekaException("unknown estimator " + estimator + " for -E option" ); String string = Utils.getOption('S', options); if( ! string.equals("") ) olscThreshold = Double.parseDouble( string ); } /** * Returns the coefficients for this linear model. 
* * @return the coefficients for this linear model */ public double[] coefficients() { double[] coefficients = new double[m_Coefficients.length]; for (int i = 0; i < coefficients.length; i++) { coefficients[i] = m_Coefficients[i]; } return coefficients; } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] options = new String [6]; int current = 0; if (getDebug()) { options[current++] = "-D"; } options[current++] = "-E"; switch (paceEstimator) { case olsEstimator: options[current++] = "ols"; break; case olscEstimator: options[current++] = "olsc"; options[current++] = "-S"; options[current++] = "" + olscThreshold; break; case ebEstimator: options[current++] = "eb"; break; case nestedEstimator: options[current++] = "nested"; break; case subsetEstimator: options[current++] = "subset"; break; case pace2Estimator: options[current++] = "pace2"; break; case pace4Estimator: options[current++] = "pace4"; break; case pace6Estimator: options[current++] = "pace6"; break; case aicEstimator: options[current++] = "aic"; break; case bicEstimator: options[current++] = "bic"; break; case ricEstimator: options[current++] = "ric"; break; } while (current < options.length) { options[current++] = ""; } return options; } /** * Get the number of coefficients used in the model * * @return the number of coefficients */ public int numParameters() { return m_Coefficients.length-1; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String debugTipText() { return "Output debug information to the console."; } /** * Controls whether debugging output will be printed * * @param debug true if debugging output should be printed */ public void setDebug(boolean debug) { m_Debug = debug; } /** * Controls whether debugging output will be printed * * @return true if debugging output should be 
printed */ public boolean getDebug() { return m_Debug; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String estimatorTipText() { return "The estimator to use.\n\n" +"eb -- Empirical Bayes estimator for noraml mixture (default)\n" +"nested -- Optimal nested model selector for normal mixture\n" +"subset -- Optimal subset selector for normal mixture\n" +"pace2 -- PACE2 for Chi-square mixture\n" +"pace4 -- PACE4 for Chi-square mixture\n" +"pace6 -- PACE6 for Chi-square mixture\n" +"ols -- Ordinary least squares estimator\n" +"aic -- AIC estimator\n" +"bic -- BIC estimator\n" +"ric -- RIC estimator\n" +"olsc -- Ordinary least squares subset selector with a threshold"; } /** * Gets the estimator * * @return the estimator */ public SelectedTag getEstimator() { return new SelectedTag(paceEstimator, TAGS_ESTIMATOR); } /** * Sets the estimator. * * @param estimator the new estimator */ public void setEstimator(SelectedTag estimator) { if (estimator.getTags() == TAGS_ESTIMATOR) { paceEstimator = estimator.getSelectedTag().getID(); } } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String thresholdTipText() { return "Threshold for the olsc estimator."; } /** * Set threshold for the olsc estimator * * @param newThreshold the threshold for the olsc estimator */ public void setThreshold(double newThreshold) { olscThreshold = newThreshold; } /** * Gets the threshold for olsc estimator * * @return the threshold */ public double getThreshold() { return olscThreshold; } /** * Calculate the dependent value for a given instance for a * given regression model. * * @param transformedInstance the input instance * @param coefficients an array of coefficients for the regression * model * @return the regression value for the instance. 
* @throws Exception if the class attribute of the input instance
* is not assigned
*/
private double regressionPrediction(Instance transformedInstance,
                                    double [] coefficients)
  throws Exception {

  // coefficients[0] is the intercept; subsequent entries line up with the
  // non-class attributes of the instance in order.
  int column = 0;
  double result = coefficients[column];
  for (int j = 0; j < transformedInstance.numAttributes(); j++) {
    if (m_ClassIndex != j) {
      // Skip the class attribute itself; advance to the next coefficient.
      column++;
      result += coefficients[column] * transformedInstance.value(j);
    }
  }

  return result;
}

/**
 * Returns the revision string.
 *
 * @return the revision
 */
public String getRevision() {
  return RevisionUtils.extract("$Revision: 5523 $");
}

/**
 * Generates a linear regression function predictor.
 *
 * @param argv the options
 */
public static void main(String argv[]) {
  runClassifier(new PaceRegression(), argv);
}
}
25,497
31.275949
229
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/RBFNetwork.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * RBFNetwork.java * Copyright (C) 2004 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import weka.classifiers.Classifier; import weka.clusterers.MakeDensityBasedClusterer; import weka.clusterers.SimpleKMeans; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.ClusterMembership; import weka.filters.unsupervised.attribute.Standardize; import java.util.Enumeration; import java.util.Vector; import weka.classifiers.AbstractClassifier; /** <!-- globalinfo-start --> * Class that implements a normalized Gaussian radial basisbasis function network.<br/> * It uses the k-means clustering algorithm to provide the basis functions and learns either a logistic regression (discrete class problems) or linear regression (numeric class problems) on top of that. Symmetric multivariate Gaussians are fit to the data from each cluster. If the class is nominal it uses the given number of clusters per class.It standardizes all numeric attributes to zero mean and unit variance. 
* <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -B &lt;number&gt; * Set the number of clusters (basis functions) to generate. (default = 2).</pre> * * <pre> -S &lt;seed&gt; * Set the random seed to be used by K-means. (default = 1).</pre> * * <pre> -R &lt;ridge&gt; * Set the ridge value for the logistic or linear regression.</pre> * * <pre> -M &lt;number&gt; * Set the maximum number of iterations for the logistic regression. (default -1, until convergence).</pre> * * <pre> -W &lt;number&gt; * Set the minimum standard deviation for the clusters. (default 0.1).</pre> * <!-- options-end --> * * @author Mark Hall * @author Eibe Frank * @version $Revision: 1.10 $ */ public class RBFNetwork extends AbstractClassifier implements OptionHandler { /** for serialization */ static final long serialVersionUID = -3669814959712675720L; /** The logistic regression for classification problems */ private Logistic m_logistic; /** The linear regression for numeric problems */ private LinearRegression m_linear; /** The filter for producing the meta data */ private ClusterMembership m_basisFilter; /** Filter used for normalizing the data */ private Standardize m_standardize; /** The number of clusters (basis functions to generate) */ private int m_numClusters = 2; /** The ridge parameter for the logistic regression. */ protected double m_ridge = 1e-8; /** The maximum number of iterations for logistic regression. 
*/ private int m_maxIts = -1; /** The seed to pass on to K-means */ private int m_clusteringSeed = 1; /** The minimum standard deviation */ private double m_minStdDev = 0.1; /** a ZeroR model in case no model can be built from the data */ private Classifier m_ZeroR; /** * Returns a string describing this classifier * @return a description of the classifier suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Class that implements a normalized Gaussian radial basis" + "basis function network.\n" + "It uses the k-means clustering algorithm to provide the basis " + "functions and learns either a logistic regression (discrete " + "class problems) or linear regression (numeric class problems) " + "on top of that. Symmetric multivariate Gaussians are fit to " + "the data from each cluster. If the class is " + "nominal it uses the given number of clusters per class." + "It standardizes all numeric " + "attributes to zero mean and unit variance." ; } /** * Returns default capabilities of the classifier, i.e., and "or" of * Logistic and LinearRegression. * * @return the capabilities of this classifier * @see Logistic * @see LinearRegression */ public Capabilities getCapabilities() { Capabilities result = new Logistic().getCapabilities(); result.or(new LinearRegression().getCapabilities()); Capabilities classes = result.getClassCapabilities(); result.and(new SimpleKMeans().getCapabilities()); result.or(classes); return result; } /** * Builds the classifier * * @param instances the training data * @throws Exception if the classifier could not be built successfully */ public void buildClassifier(Instances instances) throws Exception { // can classifier handle the data? getCapabilities().testWithFail(instances); // remove instances with missing class instances = new Instances(instances); instances.deleteWithMissingClass(); // only class? 
-> build ZeroR model if (instances.numAttributes() == 1) { System.err.println( "Cannot build model (only class attribute present in data!), " + "using ZeroR model instead!"); m_ZeroR = new weka.classifiers.rules.ZeroR(); m_ZeroR.buildClassifier(instances); return; } else { m_ZeroR = null; } m_standardize = new Standardize(); m_standardize.setInputFormat(instances); instances = Filter.useFilter(instances, m_standardize); SimpleKMeans sk = new SimpleKMeans(); sk.setNumClusters(m_numClusters); sk.setSeed(m_clusteringSeed); MakeDensityBasedClusterer dc = new MakeDensityBasedClusterer(); dc.setClusterer(sk); dc.setMinStdDev(m_minStdDev); m_basisFilter = new ClusterMembership(); m_basisFilter.setDensityBasedClusterer(dc); m_basisFilter.setInputFormat(instances); Instances transformed = Filter.useFilter(instances, m_basisFilter); if (instances.classAttribute().isNominal()) { m_linear = null; m_logistic = new Logistic(); m_logistic.setRidge(m_ridge); m_logistic.setMaxIts(m_maxIts); m_logistic.buildClassifier(transformed); } else { m_logistic = null; m_linear = new LinearRegression(); m_linear.setAttributeSelectionMethod(new SelectedTag(LinearRegression.SELECTION_NONE, LinearRegression.TAGS_SELECTION)); m_linear.setRidge(m_ridge); m_linear.buildClassifier(transformed); } } /** * Computes the distribution for a given instance * * @param instance the instance for which distribution is computed * @return the distribution * @throws Exception if the distribution can't be computed successfully */ public double [] distributionForInstance(Instance instance) throws Exception { // default model? if (m_ZeroR != null) { return m_ZeroR.distributionForInstance(instance); } m_standardize.input(instance); m_basisFilter.input(m_standardize.output()); Instance transformed = m_basisFilter.output(); return ((instance.classAttribute().isNominal() ? 
m_logistic.distributionForInstance(transformed) : m_linear.distributionForInstance(transformed))); } /** * Returns a description of this classifier as a String * * @return a description of this classifier */ public String toString() { // only ZeroR model? if (m_ZeroR != null) { StringBuffer buf = new StringBuffer(); buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n"); buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n"); buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n"); buf.append(m_ZeroR.toString()); return buf.toString(); } if (m_basisFilter == null) { return "No classifier built yet!"; } StringBuffer sb = new StringBuffer(); sb.append("Radial basis function network\n"); sb.append((m_linear == null) ? "(Logistic regression " : "(Linear regression "); sb.append("applied to K-means clusters as basis functions):\n\n"); sb.append((m_linear == null) ? m_logistic.toString() : m_linear.toString()); return sb.toString(); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String maxItsTipText() { return "Maximum number of iterations for the logistic regression to perform. " +"Only applied to discrete class problems."; } /** * Get the value of MaxIts. * * @return Value of MaxIts. */ public int getMaxIts() { return m_maxIts; } /** * Set the value of MaxIts. * * @param newMaxIts Value to assign to MaxIts. */ public void setMaxIts(int newMaxIts) { m_maxIts = newMaxIts; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String ridgeTipText() { return "Set the Ridge value for the logistic or linear regression."; } /** * Sets the ridge value for logistic or linear regression. * * @param ridge the ridge */ public void setRidge(double ridge) { m_ridge = ridge; } /** * Gets the ridge value. 
* * @return the ridge */ public double getRidge() { return m_ridge; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String numClustersTipText() { return "The number of clusters for K-Means to generate."; } /** * Set the number of clusters for K-means to generate. * * @param numClusters the number of clusters to generate. */ public void setNumClusters(int numClusters) { if (numClusters > 0) { m_numClusters = numClusters; } } /** * Return the number of clusters to generate. * * @return the number of clusters to generate. */ public int getNumClusters() { return m_numClusters; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String clusteringSeedTipText() { return "The random seed to pass on to K-means."; } /** * Set the random seed to be passed on to K-means. * * @param seed a seed value. */ public void setClusteringSeed(int seed) { m_clusteringSeed = seed; } /** * Get the random seed used by K-means. * * @return the seed value. */ public int getClusteringSeed() { return m_clusteringSeed; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String minStdDevTipText() { return "Sets the minimum standard deviation for the clusters."; } /** * Get the MinStdDev value. * @return the MinStdDev value. */ public double getMinStdDev() { return m_minStdDev; } /** * Set the MinStdDev value. * @param newMinStdDev The new MinStdDev value. 
*/ public void setMinStdDev(double newMinStdDev) { m_minStdDev = newMinStdDev; } /** * Returns an enumeration describing the available options * * @return an enumeration of all the available options */ public Enumeration listOptions() { Vector newVector = new Vector(4); newVector.addElement(new Option("\tSet the number of clusters (basis functions) " +"to generate. (default = 2).", "B", 1, "-B <number>")); newVector.addElement(new Option("\tSet the random seed to be used by K-means. " +"(default = 1).", "S", 1, "-S <seed>")); newVector.addElement(new Option("\tSet the ridge value for the logistic or " +"linear regression.", "R", 1, "-R <ridge>")); newVector.addElement(new Option("\tSet the maximum number of iterations " +"for the logistic regression." + " (default -1, until convergence).", "M", 1, "-M <number>")); newVector.addElement(new Option("\tSet the minimum standard " +"deviation for the clusters." + " (default 0.1).", "W", 1, "-W <number>")); return newVector.elements(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -B &lt;number&gt; * Set the number of clusters (basis functions) to generate. (default = 2).</pre> * * <pre> -S &lt;seed&gt; * Set the random seed to be used by K-means. (default = 1).</pre> * * <pre> -R &lt;ridge&gt; * Set the ridge value for the logistic or linear regression.</pre> * * <pre> -M &lt;number&gt; * Set the maximum number of iterations for the logistic regression. (default -1, until convergence).</pre> * * <pre> -W &lt;number&gt; * Set the minimum standard deviation for the clusters. 
(default 0.1).</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { setDebug(Utils.getFlag('D', options)); String ridgeString = Utils.getOption('R', options); if (ridgeString.length() != 0) { m_ridge = Double.parseDouble(ridgeString); } else { m_ridge = 1.0e-8; } String maxItsString = Utils.getOption('M', options); if (maxItsString.length() != 0) { m_maxIts = Integer.parseInt(maxItsString); } else { m_maxIts = -1; } String numClustersString = Utils.getOption('B', options); if (numClustersString.length() != 0) { setNumClusters(Integer.parseInt(numClustersString)); } String seedString = Utils.getOption('S', options); if (seedString.length() != 0) { setClusteringSeed(Integer.parseInt(seedString)); } String stdString = Utils.getOption('W', options); if (stdString.length() != 0) { setMinStdDev(Double.parseDouble(stdString)); } Utils.checkForRemainingOptions(options); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String [] getOptions() { String [] options = new String [10]; int current = 0; options[current++] = "-B"; options[current++] = "" + m_numClusters; options[current++] = "-S"; options[current++] = "" + m_clusteringSeed; options[current++] = "-R"; options[current++] = ""+m_ridge; options[current++] = "-M"; options[current++] = ""+m_maxIts; options[current++] = "-W"; options[current++] = ""+m_minStdDev; while (current < options.length) options[current++] = ""; return options; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.10 $"); } /** * Main method for testing this class. 
* * @param argv should contain the command line arguments to the * scheme (see Evaluation) */ public static void main(String [] argv) { runClassifier(new RBFNetwork(), argv); } }
16,083
29.233083
416
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/functions/SGD.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SGD.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.functions; import java.util.ArrayList; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.classifiers.RandomizableClassifier; import weka.classifiers.UpdateableClassifier; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Aggregateable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Normalize; import weka.filters.unsupervised.attribute.ReplaceMissingValues; /** <!-- globalinfo-start --> * Implements stochastic gradient descent for learning * various linear models (binary class SVM, binary class logistic regression, * squared loss, Huber loss and epsilon-insensitive loss linear regression). * Globally replaces all missing values and transforms nominal attributes into * binary ones. It also normalizes all attributes, so the coefficients in the * output are based on the normalized data.<br/> * For numeric class attributes, the squared, Huber or epsilon-insensitve loss * function must be used. 
Epsilon-insensitive and Huber loss may require a much * higher learning rate. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: * <p/> * * <pre> * -F * Set the loss function to minimize. 0 = hinge loss (SVM), 1 = log loss (logistic regression), * 2 = squared loss (regression). * (default = 0) * </pre> * * <pre> * -L * The learning rate. If normalization is * turned off (as it is automatically for streaming data), then the * default learning rate will need to be reduced (try 0.0001). * (default = 0.01). * </pre> * * <pre> * -R &lt;double&gt; * The lambda regularization constant (default = 0.0001) * </pre> * * <pre> * -E &lt;integer&gt; * The number of epochs to perform (batch learning only, default = 500) * </pre> * * <pre> * -C &lt;double&gt; * The epsilon threshold (epsilon-insenstive and Huber loss only, default = 1e-3) * </pre> * * <pre> * -N * Don't normalize the data * </pre> * * <pre> * -M * Don't replace missing values * </pre> * <!-- options-end --> * * @author Eibe Frank (eibe{[at]}cs{[dot]}waikato{[dot]}ac{[dot]}nz) * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: 9785 $ * */ public class SGD extends RandomizableClassifier implements UpdateableClassifier, OptionHandler, Aggregateable<SGD> { /** For serialization */ private static final long serialVersionUID = -3732968666673530290L; /** Replace missing values */ protected ReplaceMissingValues m_replaceMissing; /** * Convert nominal attributes to numerically coded binary ones. 
Uses * supervised NominalToBinary in the batch learning case */ protected Filter m_nominalToBinary; /** Normalize the training data */ protected Normalize m_normalize; /** The regularization parameter */ protected double m_lambda = 0.0001; /** The learning rate */ protected double m_learningRate = 0.01; /** Stores the weights (+ bias in the last element) */ protected double[] m_weights; /** The epsilon parameter for epsilon insensitive and Huber loss */ protected double m_epsilon = 1e-3; /** Holds the current iteration number */ protected double m_t; /** The number of training instances */ protected double m_numInstances; /** * The number of epochs to perform (batch learning). Total iterations is * m_epochs * num instances */ protected int m_epochs = 500; /** * Turn off normalization of the input data. This option gets forced for * incremental training. */ protected boolean m_dontNormalize = false; /** * Turn off global replacement of missing values. Missing values will be * ignored instead. This option gets forced for incremental training. */ protected boolean m_dontReplaceMissing = false; /** Holds the header of the training data */ protected Instances m_data; /** * Returns default capabilities of the classifier. 
* * @return the capabilities of this classifier */ @Override public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); // class if (m_loss == SQUAREDLOSS || m_loss == EPSILON_INSENSITIVE || m_loss == HUBER) result.enable(Capability.NUMERIC_CLASS); else result.enable(Capability.BINARY_CLASS); result.enable(Capability.MISSING_CLASS_VALUES); // instances result.setMinimumNumberInstances(0); return result; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String epsilonTipText() { return "The epsilon threshold for epsilon insensitive and Huber " + "loss. An error with absolute value less that this " + "threshold has loss of 0 for epsilon insensitive loss. " + "For Huber loss this is the boundary between the quadratic " + "and linear parts of the loss function."; } /** * Set the epsilon threshold on the error for epsilon insensitive and Huber * loss functions * * @param e the value of epsilon to use */ public void setEpsilon(double e) { m_epsilon = e; } /** * Get the epsilon threshold on the error for epsilon insensitive and Huber * loss functions * * @return the value of epsilon to use */ public double getEpsilon() { return m_epsilon; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String lambdaTipText() { return "The regularization constant. (default = 0.0001)"; } /** * Set the value of lambda to use * * @param lambda the value of lambda to use */ public void setLambda(double lambda) { m_lambda = lambda; } /** * Get the current value of lambda * * @return the current value of lambda */ public double getLambda() { return m_lambda; } /** * Set the learning rate. 
* * @param lr the learning rate to use. */ public void setLearningRate(double lr) { m_learningRate = lr; } /** * Get the learning rate. * * @return the learning rate */ public double getLearningRate() { return m_learningRate; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String learningRateTipText() { return "The learning rate. If normalization is turned off " + "(as it is automatically for streaming data), then" + "the default learning rate will need to be reduced (" + "try 0.0001)."; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String epochsTipText() { return "The number of epochs to perform (batch learning). " + "The total number of iterations is epochs * num" + " instances."; } /** * Set the number of epochs to use * * @param e the number of epochs to use */ public void setEpochs(int e) { m_epochs = e; } /** * Get current number of epochs * * @return the current number of epochs */ public int getEpochs() { return m_epochs; } /** * Turn normalization off/on. * * @param m true if normalization is to be disabled. */ public void setDontNormalize(boolean m) { m_dontNormalize = m; } /** * Get whether normalization has been turned off. * * @return true if normalization has been disabled. */ public boolean getDontNormalize() { return m_dontNormalize; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String dontNormalizeTipText() { return "Turn normalization off"; } /** * Turn global replacement of missing values off/on. If turned off, then * missing values are effectively ignored. * * @param m true if global replacement of missing values is to be turned off. 
*/ public void setDontReplaceMissing(boolean m) { m_dontReplaceMissing = m; } /** * Get whether global replacement of missing values has been disabled. * * @return true if global replacement of missing values has been turned off */ public boolean getDontReplaceMissing() { return m_dontReplaceMissing; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String dontReplaceMissingTipText() { return "Turn off global replacement of missing values"; } /** * Set the loss function to use. * * @param function the loss function to use. */ public void setLossFunction(SelectedTag function) { if (function.getTags() == TAGS_SELECTION) { m_loss = function.getSelectedTag().getID(); } } /** * Get the current loss function. * * @return the current loss function. */ public SelectedTag getLossFunction() { return new SelectedTag(m_loss, TAGS_SELECTION); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String lossFunctionTipText() { return "The loss function to use. Hinge loss (SVM), " + "log loss (logistic regression) or " + "squared loss (regression)."; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration<Option> listOptions() { Vector<Option> newVector = new Vector<Option>(); newVector.add(new Option("\tSet the loss function to minimize.\n\t0 = " + "hinge loss (SVM), 1 = log loss (logistic regression),\n\t" + "2 = squared loss (regression), 3 = epsilon insensitive loss (regression)," + "\n\t4 = Huber loss (regression).\n\t(default = 0)", "F", 1, "-F")); newVector .add(new Option( "\tThe learning rate. 
If normalization is\n" + "\tturned off (as it is automatically for streaming data), then the\n\t" + "default learning rate will need to be reduced " + "(try 0.0001).\n\t(default = 0.01).", "L", 1, "-L")); newVector.add(new Option("\tThe lambda regularization constant " + "(default = 0.0001)", "R", 1, "-R <double>")); newVector.add(new Option("\tThe number of epochs to perform (" + "batch learning only, default = 500)", "E", 1, "-E <integer>")); newVector.add(new Option("\tThe epsilon threshold (" + "epsilon-insenstive and Huber loss only, default = 1e-3)", "C", 1, "-C <double>")); newVector.add(new Option("\tDon't normalize the data", "N", 0, "-N")); newVector.add(new Option("\tDon't replace missing values", "M", 0, "-M")); return newVector.elements(); } /** * * Parses a given list of options. * <p/> * <!-- options-start --> * Valid options are: * <p/> * * <pre> * -F * Set the loss function to minimize. 0 = hinge loss (SVM), 1 = log loss (logistic regression), * 2 = squared loss (regression). * (default = 0) * </pre> * * <pre> * -L * The learning rate. If normalization is * turned off (as it is automatically for streaming data), then the * default learning rate will need to be reduced (try 0.0001). * (default = 0.01). 
* </pre> * * <pre> * -R &lt;double&gt; * The lambda regularization constant (default = 0.0001) * </pre> * * <pre> * -E &lt;integer&gt; * The number of epochs to perform (batch learning only, default = 500) * </pre> * * <pre> * -C &lt;double&gt; * The epsilon threshold (epsilon-insenstive and Huber loss only, default = 1e-3) * </pre> * * <pre> * -N * Don't normalize the data * </pre> * * <pre> * -M * Don't replace missing values * </pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { reset(); super.setOptions(options); String lossString = Utils.getOption('F', options); if (lossString.length() != 0) { setLossFunction(new SelectedTag(Integer.parseInt(lossString), TAGS_SELECTION)); } String lambdaString = Utils.getOption('R', options); if (lambdaString.length() > 0) { setLambda(Double.parseDouble(lambdaString)); } String learningRateString = Utils.getOption('L', options); if (learningRateString.length() > 0) { setLearningRate(Double.parseDouble(learningRateString)); } String epochsString = Utils.getOption("E", options); if (epochsString.length() > 0) { setEpochs(Integer.parseInt(epochsString)); } String epsilonString = Utils.getOption("C", options); if (epsilonString.length() > 0) { setEpsilon(Double.parseDouble(epsilonString)); } setDontNormalize(Utils.getFlag("N", options)); setDontReplaceMissing(Utils.getFlag('M', options)); } /** * Gets the current settings of the classifier. 
* * @return an array of strings suitable for passing to setOptions */ @Override public String[] getOptions() { ArrayList<String> options = new ArrayList<String>(); options.add("-F"); options.add("" + getLossFunction().getSelectedTag().getID()); options.add("-L"); options.add("" + getLearningRate()); options.add("-R"); options.add("" + getLambda()); options.add("-E"); options.add("" + getEpochs()); options.add("-C"); options.add("" + getEpsilon()); if (getDontNormalize()) { options.add("-N"); } if (getDontReplaceMissing()) { options.add("-M"); } return options.toArray(new String[1]); } /** * Returns a string describing classifier * * @return a description suitable for displaying in the explorer/experimenter * gui */ public String globalInfo() { return "Implements stochastic gradient descent for learning" + " various linear models (binary class SVM, binary class" + " logistic regression, squared loss, Huber loss and " + "epsilon-insensitive loss linear regression)." + " Globally replaces all missing values and transforms nominal" + " attributes into binary ones. It also normalizes all attributes," + " so the coefficients in the output are based on the normalized" + " data.\n" + "For numeric class attributes, the squared, Huber or " + "epsilon-insensitve loss function must be used. Epsilon-insensitive " + "and Huber loss may require a much higher learning rate."; } /** * Reset the classifier. */ public void reset() { m_t = 1; m_weights = null; } /** * Method for building the classifier. * * @param data the set of training instances. * @throws Exception if the classifier can't be built successfully. */ @Override public void buildClassifier(Instances data) throws Exception { reset(); // can classifier handle the data? 
getCapabilities().testWithFail(data); data = new Instances(data); data.deleteWithMissingClass(); if (data.numInstances() > 0 && !m_dontReplaceMissing) { m_replaceMissing = new ReplaceMissingValues(); m_replaceMissing.setInputFormat(data); data = Filter.useFilter(data, m_replaceMissing); } // check for only numeric attributes boolean onlyNumeric = true; for (int i = 0; i < data.numAttributes(); i++) { if (i != data.classIndex()) { if (!data.attribute(i).isNumeric()) { onlyNumeric = false; break; } } } if (!onlyNumeric) { if (data.numInstances() > 0) { m_nominalToBinary = new weka.filters.supervised.attribute.NominalToBinary(); } else { m_nominalToBinary = new weka.filters.unsupervised.attribute.NominalToBinary(); } m_nominalToBinary.setInputFormat(data); data = Filter.useFilter(data, m_nominalToBinary); } if (!m_dontNormalize && data.numInstances() > 0) { m_normalize = new Normalize(); m_normalize.setInputFormat(data); data = Filter.useFilter(data, m_normalize); } m_numInstances = data.numInstances(); m_weights = new double[data.numAttributes() + 1]; m_data = new Instances(data, 0); if (data.numInstances() > 0) { data.randomize(new Random(getSeed())); // randomize the data train(data); } } /** the hinge loss function. */ public static final int HINGE = 0; /** the log loss function. */ public static final int LOGLOSS = 1; /** the squared loss function. 
*/ public static final int SQUAREDLOSS = 2; /** The epsilon insensitive loss function */ public static final int EPSILON_INSENSITIVE = 3; /** The Huber loss function */ public static final int HUBER = 4; /** The current loss function to minimize */ protected int m_loss = HINGE; /** Loss functions to choose from */ public static final Tag[] TAGS_SELECTION = { new Tag(HINGE, "Hinge loss (SVM)"), new Tag(LOGLOSS, "Log loss (logistic regression)"), new Tag(SQUAREDLOSS, "Squared loss (regression)"), new Tag(EPSILON_INSENSITIVE, "Epsilon-insensitive loss (SVM regression)"), new Tag(HUBER, "Huber loss (robust regression)") }; protected double dloss(double z) { if (m_loss == HINGE) { return (z < 1) ? 1 : 0; } if (m_loss == LOGLOSS) { // log loss if (z < 0) { return 1.0 / (Math.exp(z) + 1.0); } else { double t = Math.exp(-z); return t / (t + 1); } } if (m_loss == EPSILON_INSENSITIVE) { if (z > m_epsilon) { return 1.0; } if (-z > m_epsilon) { return -1.0; } return 0; } if (m_loss == HUBER) { if (Math.abs(z) <= m_epsilon) { return z; } else if (z > 0.0) { return m_epsilon; } else { return -m_epsilon; } } // squared loss return z; } private void train(Instances data) throws Exception { for (int e = 0; e < m_epochs; e++) { for (int i = 0; i < data.numInstances(); i++) { updateClassifier(data.instance(i), false); } } } protected static double dotProd(Instance inst1, double[] weights, int classIndex) { double result = 0; int n1 = inst1.numValues(); int n2 = weights.length - 1; for (int p1 = 0, p2 = 0; p1 < n1 && p2 < n2;) { int ind1 = inst1.index(p1); int ind2 = p2; if (ind1 == ind2) { if (ind1 != classIndex && !inst1.isMissingSparse(p1)) { result += inst1.valueSparse(p1) * weights[p2]; } p1++; p2++; } else if (ind1 > ind2) { p2++; } else { p1++; } } return (result); } /** * Updates the classifier with the given instance. 
* * @param instance the new training instance to include in the model * @param filter true if the instance should pass through any of the filters * set up in buildClassifier(). When batch training buildClassifier() * already batch filters all training instances so don't need to * filter them again here. * @exception Exception if the instance could not be incorporated in the * model. */ protected void updateClassifier(Instance instance, boolean filter) throws Exception { if (!instance.classIsMissing()) { if (filter) { if (m_replaceMissing != null) { m_replaceMissing.input(instance); instance = m_replaceMissing.output(); } if (m_nominalToBinary != null) { m_nominalToBinary.input(instance); instance = m_nominalToBinary.output(); } if (m_normalize != null) { m_normalize.input(instance); instance = m_normalize.output(); } } double wx = dotProd(instance, m_weights, instance.classIndex()); double y; double z; if (instance.classAttribute().isNominal()) { y = (instance.classValue() == 0) ? -1 : 1; z = y * (wx + m_weights[m_weights.length - 1]); } else { y = instance.classValue(); z = y - (wx + m_weights[m_weights.length - 1]); y = 1; } // Compute multiplier for weight decay double multiplier = 1.0; if (m_numInstances == 0) { multiplier = 1.0 - (m_learningRate * m_lambda) / m_t; } else { multiplier = 1.0 - (m_learningRate * m_lambda) / m_numInstances; } for (int i = 0; i < m_weights.length - 1; i++) { m_weights[i] *= multiplier; } // Only need to do the following if the loss is non-zero // if (m_loss != HINGE || (z < 1)) { if (m_loss == SQUAREDLOSS || m_loss == LOGLOSS || m_loss == HUBER || (m_loss == HINGE && (z < 1)) || (m_loss == EPSILON_INSENSITIVE && Math.abs(z) > m_epsilon)) { // Compute Factor for updates double factor = m_learningRate * y * dloss(z); // Update coefficients for attributes int n1 = instance.numValues(); for (int p1 = 0; p1 < n1; p1++) { int indS = instance.index(p1); if (indS != instance.classIndex() && !instance.isMissingSparse(p1)) { m_weights[indS] 
+= factor * instance.valueSparse(p1); } } // update the bias m_weights[m_weights.length - 1] += factor; } m_t++; } } /** * Updates the classifier with the given instance. * * @param instance the new training instance to include in the model * @exception Exception if the instance could not be incorporated in the * model. */ @Override public void updateClassifier(Instance instance) throws Exception { updateClassifier(instance, true); } /** * Computes the distribution for a given instance * * @param instance the instance for which distribution is computed * @return the distribution * @throws Exception if the distribution can't be computed successfully */ @Override public double[] distributionForInstance(Instance inst) throws Exception { double[] result = (inst.classAttribute().isNominal()) ? new double[2] : new double[1]; if (m_replaceMissing != null) { m_replaceMissing.input(inst); inst = m_replaceMissing.output(); } if (m_nominalToBinary != null) { m_nominalToBinary.input(inst); inst = m_nominalToBinary.output(); } if (m_normalize != null) { m_normalize.input(inst); inst = m_normalize.output(); } double wx = dotProd(inst, m_weights, inst.classIndex());// * m_wScale; double z = (wx + m_weights[m_weights.length - 1]); if (inst.classAttribute().isNumeric()) { result[0] = z; return result; } if (z <= 0) { // z = 0; if (m_loss == LOGLOSS) { result[0] = 1.0 / (1.0 + Math.exp(z)); result[1] = 1.0 - result[0]; } else { result[0] = 1; } } else { if (m_loss == LOGLOSS) { result[1] = 1.0 / (1.0 + Math.exp(-z)); result[0] = 1.0 - result[1]; } else { result[1] = 1; } } return result; } public double[] getWeights() { return m_weights; } /** * Prints out the classifier. 
* * @return a description of the classifier as a string */ @Override public String toString() { if (m_weights == null) { return "SGD: No model built yet.\n"; } StringBuffer buff = new StringBuffer(); buff.append("Loss function: "); if (m_loss == HINGE) { buff.append("Hinge loss (SVM)\n\n"); } else if (m_loss == LOGLOSS) { buff.append("Log loss (logistic regression)\n\n"); } else { buff.append("Squared loss (linear regression)\n\n"); } buff.append(m_data.classAttribute().name() + " = \n\n"); int printed = 0; for (int i = 0; i < m_weights.length - 1; i++) { if (i != m_data.classIndex()) { if (printed > 0) { buff.append(" + "); } else { buff.append(" "); } buff.append(Utils.doubleToString(m_weights[i], 12, 4) + " " + ((m_normalize != null) ? "(normalized) " : "") + m_data.attribute(i).name() + "\n"); printed++; } } if (m_weights[m_weights.length - 1] > 0) { buff.append(" + " + Utils.doubleToString(m_weights[m_weights.length - 1], 12, 4)); } else { buff.append(" - " + Utils.doubleToString(-m_weights[m_weights.length - 1], 12, 4)); } return buff.toString(); } /** * Returns the revision string. 
* * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9785 $"); } protected int m_numModels = 0; /** * Aggregate an object with this one * * @param toAggregate the object to aggregate * @return the result of aggregation * @throws Exception if the supplied object can't be aggregated for some * reason */ @Override public SGD aggregate(SGD toAggregate) throws Exception { if (m_weights == null) { throw new Exception("No model built yet, can't aggregate"); } if (!m_data.equalHeaders(toAggregate.m_data)) { throw new Exception("Can't aggregate - data headers dont match: " + m_data.equalHeadersMsg(toAggregate.m_data)); } if (m_weights.length != toAggregate.getWeights().length) { throw new Exception( "Can't aggregate - SDG to aggregate has weight vector " + "that differs in length from ours."); } for (int i = 0; i < m_weights.length; i++) { m_weights[i] += toAggregate.getWeights()[i]; } m_numModels++; return this; } /** * Call to complete the aggregation process. Allows implementers to do any * final processing based on how many objects were aggregated. * * @throws Exception if the aggregation can't be finalized for some reason */ @Override public void finalizeAggregation() throws Exception { if (m_numModels == 0) { throw new Exception("Unable to finalize aggregation - " + "haven't seen any models to aggregate"); } for (int i = 0; i < m_weights.length; i++) { m_weights[i] /= (m_numModels + 1); // plus one for us } // aggregation complete m_numModels = 0; } /** * Main method for testing this class. */ public static void main(String[] args) { runClassifier(new SGD(), args); } }
28,423
26.812133
98
java