repo
stringlengths
1
191
file
stringlengths
23
351
code
stringlengths
0
5.32M
file_length
int64
0
5.32M
avg_line_length
float64
0
2.9k
max_line_length
int64
0
288k
extension_type
stringclasses
1 value
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/ClassifierSplitModel.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ClassifierSplitModel.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.j48;

import java.io.Serializable;

import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.Utils;

/**
 * Abstract class for classification models that can be used
 * recursively to split the data.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public abstract class ClassifierSplitModel
  implements Cloneable, Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 4280730118393457457L;

  /** Distribution of class values. */
  protected Distribution m_distribution;

  /** Number of created subsets. */
  protected int m_numSubsets;

  /**
   * Allows to clone a model (shallow copy).
   *
   * @return a shallow copy of this model, or null if cloning fails
   */
  public Object clone() {

    Object clone = null;

    try {
      clone = super.clone();
    } catch (CloneNotSupportedException e) {
      // cannot happen: this class implements Cloneable; null is returned
      // defensively to preserve the original contract
    }

    return clone;
  }

  /**
   * Builds the classifier split model for the given set of instances.
   *
   * @param instances the training instances to derive the split from
   * @exception Exception if something goes wrong
   */
  public abstract void buildClassifier(Instances instances) throws Exception;

  /**
   * Checks if generated model is valid.
   *
   * @return true if at least one subset was created by the split
   */
  public final boolean checkModel() {

    return m_numSubsets > 0;
  }

  /**
   * Classifies a given instance.
   *
   * @param instance the instance to classify
   * @return the index of the predicted class value
   * @exception Exception if something goes wrong
   */
  public final double classifyInstance(Instance instance) throws Exception {

    int theSubset = whichSubset(instance);

    if (theSubset > -1) {
      // instance falls into exactly one subset: majority class of that bag
      return (double) m_distribution.maxClass(theSubset);
    } else {
      // instance is split across subsets (e.g. missing value):
      // fall back to the overall majority class
      return (double) m_distribution.maxClass();
    }
  }

  /**
   * Gets class probability for instance.
   *
   * @param classIndex the index of the class value
   * @param instance the instance to compute the probability for
   * @param theSubset the subset the instance belongs to, or -1 if the
   * instance is split across subsets
   * @return the estimated class probability
   * @exception Exception if something goes wrong
   */
  public double classProb(int classIndex, Instance instance, int theSubset)
    throws Exception {

    if (theSubset > -1) {
      return m_distribution.prob(classIndex, theSubset);
    } else {
      double[] weights = weights(instance);
      if (weights == null) {
        return m_distribution.prob(classIndex);
      } else {
        // weighted average of the per-bag probabilities
        double prob = 0;
        for (int i = 0; i < weights.length; i++) {
          prob += weights[i] * m_distribution.prob(classIndex, i);
        }
        return prob;
      }
    }
  }

  /**
   * Gets class probability for instance, using a Laplace correction.
   *
   * @param classIndex the index of the class value
   * @param instance the instance to compute the probability for
   * @param theSubset the subset the instance belongs to, or -1 if the
   * instance is split across subsets
   * @return the Laplace-corrected class probability
   * @exception Exception if something goes wrong
   */
  public double classProbLaplace(int classIndex, Instance instance,
                                 int theSubset) throws Exception {

    if (theSubset > -1) {
      return m_distribution.laplaceProb(classIndex, theSubset);
    } else {
      double[] weights = weights(instance);
      if (weights == null) {
        return m_distribution.laplaceProb(classIndex);
      } else {
        // weighted average of the per-bag Laplace probabilities
        double prob = 0;
        for (int i = 0; i < weights.length; i++) {
          prob += weights[i] * m_distribution.laplaceProb(classIndex, i);
        }
        return prob;
      }
    }
  }

  /**
   * Returns coding costs of model. Returns 0 if not overwritten.
   *
   * @return the coding cost (default 0)
   */
  public double codingCost() {

    return 0;
  }

  /**
   * Returns the distribution of class values induced by the model.
   *
   * @return the class distribution
   */
  public final Distribution distribution() {

    return m_distribution;
  }

  /**
   * Prints left side of condition satisfied by instances.
   *
   * @param data the data.
   */
  public abstract String leftSide(Instances data);

  /**
   * Prints right side of condition satisfied by instances in subset index.
   *
   * @param index the subset index
   * @param data the data
   */
  public abstract String rightSide(int index, Instances data);

  /**
   * Prints label for subset index of instances (eg class).
   *
   * @param index the subset index
   * @param data the data (used for the class attribute's value names)
   * @return the label, e.g. "classValue (weight/errors)"
   * @exception Exception if something goes wrong
   */
  public final String dumpLabel(int index, Instances data) throws Exception {

    StringBuffer text;

    text = new StringBuffer();
    text.append(((Instances) data).classAttribute().
                value(m_distribution.maxClass(index)));
    text.append(" (" + Utils.roundDouble(m_distribution.perBag(index), 2));
    if (Utils.gr(m_distribution.numIncorrect(index), 0))
      text.append("/" + Utils.roundDouble(m_distribution.numIncorrect(index), 2));
    text.append(")");

    return text.toString();
  }

  /**
   * Returns the index of the majority class of the given subset as a string
   * (used when generating source code for the classifier).
   *
   * @param index the subset index
   * @param data the data (unused here, kept for interface consistency)
   * @return the majority class index of the subset, as a string
   * @exception Exception if something goes wrong
   */
  public final String sourceClass(int index, Instances data) throws Exception {

    System.err.println("sourceClass"); // debug trace retained from original
    // BUG FIX: the original code did
    //   new StringBuffer(m_distribution.maxClass(index)).toString()
    // which invokes the StringBuffer(int capacity) constructor and therefore
    // always produced the EMPTY string. Return the class index itself.
    return Integer.toString(m_distribution.maxClass(index));
  }

  /**
   * Returns a source-code expression for the condition of the given subset.
   *
   * @param index the subset index, or -1 for the default branch
   * @param data the data
   */
  public abstract String sourceExpression(int index, Instances data);

  /**
   * Prints the split model.
   *
   * @param data the data
   * @return a textual dump: one "leftSide rightSide: label" line per subset
   * @exception Exception if something goes wrong
   */
  public final String dumpModel(Instances data) throws Exception {

    StringBuffer text = new StringBuffer();

    for (int i = 0; i < m_numSubsets; i++) {
      text.append(leftSide(data) + rightSide(i, data) + ": ");
      text.append(dumpLabel(i, data) + "\n");
    }

    return text.toString();
  }

  /**
   * Returns the number of created subsets for the split.
   *
   * @return the number of subsets
   */
  public final int numSubsets() {

    return m_numSubsets;
  }

  /**
   * Sets distribution associated with model.
   *
   * @param data the data to recompute the distribution from
   * @throws Exception if something goes wrong
   */
  public void resetDistribution(Instances data) throws Exception {

    m_distribution = new Distribution(data, this);
  }

  /**
   * Splits the given set of instances into subsets.
   *
   * @param data the instances to split
   * @return one Instances object per subset; instances assigned to several
   * subsets (e.g. via missing values) are added to each with reduced weight
   * @exception Exception if something goes wrong
   */
  public final Instances[] split(Instances data) throws Exception {

    Instances[] instances = new Instances[m_numSubsets];

    for (int j = 0; j < m_numSubsets; j++)
      instances[j] = new Instances((Instances) data, data.numInstances());

    for (int i = 0; i < data.numInstances(); i++) {
      Instance instance = ((Instances) data).instance(i);
      double[] weights = weights(instance);
      int subset = whichSubset(instance);
      if (subset > -1) {
        instances[subset].add(instance);
      } else {
        // instance is distributed across all subsets with positive weight;
        // per the weights()/whichSubset() contract, weights is non-null here
        for (int j = 0; j < m_numSubsets; j++)
          if (Utils.gr(weights[j], 0)) {
            double newWeight = weights[j] * instance.weight();
            instances[j].add(instance);
            instances[j].lastInstance().setWeight(newWeight);
          }
      }
    }

    for (int j = 0; j < m_numSubsets; j++)
      instances[j].compactify();

    return instances;
  }

  /**
   * Returns weights if instance is assigned to more than one subset.
   * Returns null if instance is only assigned to one subset.
   */
  public abstract double[] weights(Instance instance);

  /**
   * Returns index of subset instance is assigned to.
   * Returns -1 if instance is assigned to more than one subset.
   *
   * @exception Exception if something goes wrong
   */
  public abstract int whichSubset(Instance instance) throws Exception;
}
7,450
24.430034
79
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/ClassifierTree.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ClassifierTree.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.j48;

import java.io.Serializable;

import weka.core.Capabilities;
import weka.core.CapabilitiesHandler;
import weka.core.Drawable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

import java.util.Queue;
import java.util.LinkedList;

/**
 * Class for handling a tree structure used for
 * classification.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 9117 $
 */
public class ClassifierTree
  implements Drawable, Serializable, CapabilitiesHandler, RevisionHandler {

  /** for serialization */
  static final long serialVersionUID = -8722249377542734193L;

  /** The model selection method. */
  protected ModelSelection m_toSelectModel;

  /** Local model at node. */
  protected ClassifierSplitModel m_localModel;

  /** References to sons. */
  protected ClassifierTree [] m_sons;

  /** True if node is leaf. */
  protected boolean m_isLeaf;

  /** True if node is empty. */
  protected boolean m_isEmpty;

  /** The training instances. */
  protected Instances m_train;

  /** The pruning instances. */
  protected Distribution m_test;

  /** The id for the node. */
  protected int m_id;

  /**
   * For getting a unique ID when outputting the tree (hashcode isn't
   * guaranteed unique)
   */
  private static long PRINTED_NODES = 0;

  /**
   * Gets the next unique node ID.
   *
   * NOTE(review): static, unsynchronized counter — not safe if trees are
   * printed from multiple threads concurrently.
   *
   * @return the next unique node ID.
   */
  protected static long nextID() {

    return PRINTED_NODES++;
  }

  /**
   * Resets the unique node ID counter (e.g.
   * between repeated separate print types)
   */
  protected static void resetID() {

    PRINTED_NODES = 0;
  }

  /**
   * Constructor.
   */
  public ClassifierTree(ModelSelection toSelectLocModel) {

    m_toSelectModel = toSelectLocModel;
  }

  /**
   * Returns default capabilities of the classifier tree.
   *
   * @return the capabilities of this classifier tree
   */
  public Capabilities getCapabilities() {
    Capabilities result = new Capabilities(this);
    result.enableAll();

    return result;
  }

  /**
   * Method for building a classifier tree.
   *
   * @param data the data to build the tree from
   * @throws Exception if something goes wrong
   */
  public void buildClassifier(Instances data) throws Exception {

    // can classifier tree handle the data?
    getCapabilities().testWithFail(data);

    // remove instances with missing class
    data = new Instances(data);
    data.deleteWithMissingClass();

    buildTree(data, false);
  }

  /**
   * Builds the tree structure.
   *
   * @param data the data for which the tree structure is to be
   * generated.
   * @param keepData is training data to be kept?
   * @throws Exception if something goes wrong
   */
  public void buildTree(Instances data, boolean keepData) throws Exception {

    Instances [] localInstances;

    if (keepData) {
      m_train = data;
    }
    m_test = null;
    m_isLeaf = false;
    m_isEmpty = false;
    m_sons = null;
    m_localModel = m_toSelectModel.selectModel(data);
    if (m_localModel.numSubsets() > 1) {
      // split produced multiple subsets: recurse into one son per subset,
      // releasing references eagerly to limit peak memory use
      localInstances = m_localModel.split(data);
      data = null;
      m_sons = new ClassifierTree [m_localModel.numSubsets()];
      for (int i = 0; i < m_sons.length; i++) {
        m_sons[i] = getNewTree(localInstances[i]);
        localInstances[i] = null;
      }
    }else{
      m_isLeaf = true;
      if (Utils.eq(data.sumOfWeights(), 0))
        m_isEmpty = true;
      data = null;
    }
  }

  /**
   * Builds the tree structure with hold out set
   *
   * @param train the data for which the tree structure is to be
   * generated.
   * @param test the test data for potential pruning
   * @param keepData is training Data to be kept?
   * @throws Exception if something goes wrong
   */
  public void buildTree(Instances train, Instances test, boolean keepData)
       throws Exception {

    Instances [] localTrain, localTest;
    int i;

    if (keepData) {
      m_train = train;
    }
    m_isLeaf = false;
    m_isEmpty = false;
    m_sons = null;
    m_localModel = m_toSelectModel.selectModel(train, test);
    // hold-out distribution at this node, used later for pruning
    m_test = new Distribution(test, m_localModel);
    if (m_localModel.numSubsets() > 1) {
      localTrain = m_localModel.split(train);
      localTest = m_localModel.split(test);
      train = test = null;
      m_sons = new ClassifierTree [m_localModel.numSubsets()];
      for (i=0;i<m_sons.length;i++) {
        m_sons[i] = getNewTree(localTrain[i], localTest[i]);
        localTrain[i] = null;
        localTest[i] = null;
      }
    }else{
      m_isLeaf = true;
      if (Utils.eq(train.sumOfWeights(), 0))
        m_isEmpty = true;
      train = test = null;
    }
  }

  /**
   * Classifies an instance.
   *
   * @param instance the instance to classify
   * @return the classification (index of the class with maximum probability)
   * @throws Exception if something goes wrong
   */
  public double classifyInstance(Instance instance)
       throws Exception {

    double maxProb = -1;
    double currentProb;
    int maxIndex = 0;
    int j;

    for (j = 0; j < instance.numClasses(); j++) {
      // weight 1: probabilities for the whole (unweighted) instance
      currentProb = getProbs(j, instance, 1);
      if (Utils.gr(currentProb,maxProb)) {
        maxIndex = j;
        maxProb = currentProb;
      }
    }

    return (double)maxIndex;
  }

  /**
   * Cleanup in order to save memory.
   *
   * @param justHeaderInfo instances object with only header information,
   * which replaces the stored training data at every node
   */
  public final void cleanup(Instances justHeaderInfo) {

    m_train = justHeaderInfo;
    m_test = null;
    if (!m_isLeaf)
      for (int i = 0; i < m_sons.length; i++)
        m_sons[i].cleanup(justHeaderInfo);
  }

  /**
   * Returns class probabilities for a weighted instance.
   *
   * @param instance the instance to get the distribution for
   * @param useLaplace whether to use laplace or not
   * @return the distribution
   * @throws Exception if something goes wrong
   */
  public final double [] distributionForInstance(Instance instance,
						 boolean useLaplace)
       throws Exception {

    double [] doubles = new double[instance.numClasses()];

    for (int i = 0; i < doubles.length; i++) {
      if (!useLaplace) {
	doubles[i] = getProbs(i, instance, 1);
      } else {
	doubles[i] = getProbsLaplace(i, instance, 1);
      }
    }

    return doubles;
  }

  /**
   * Assigns a uniqe id to every node in the tree.
   *
   * @param lastID the last ID that was assign
   * @return the new current ID
   */
  public int assignIDs(int lastID) {

    int currLastID = lastID + 1;

    m_id = currLastID;
    if (m_sons != null) {
      for (int i = 0; i < m_sons.length; i++) {
	currLastID = m_sons[i].assignIDs(currLastID);
      }
    }
    return currLastID;
  }

  /**
   * Returns the type of graph this classifier
   * represents.
   * @return Drawable.TREE
   */
  public int graphType() {
      return Drawable.TREE;
  }

  /**
   * Returns graph describing the tree (GraphViz dot format).
   *
   * @throws Exception if something goes wrong
   * @return the tree as graph
   */
  public String graph() throws Exception {

    StringBuffer text = new StringBuffer();

    assignIDs(-1);
    text.append("digraph J48Tree {\n");
    if (m_isLeaf) {
      text.append("N" + m_id
		  + " [label=\"" +
		  m_localModel.dumpLabel(0,m_train) + "\" " +
		  "shape=box style=filled ");
      if (m_train != null && m_train.numInstances() > 0) {
	text.append("data =\n" + m_train + "\n");
	text.append(",\n");
      }
      text.append("]\n");
    }else {
      text.append("N" + m_id
		  + " [label=\"" +
		  m_localModel.leftSide(m_train) + "\" ");
      if (m_train != null && m_train.numInstances() > 0) {
	text.append("data =\n" + m_train + "\n");
	text.append(",\n");
      }
      text.append("]\n");
      graphTree(text);
    }

    return text.toString() +"}\n";
  }

  /**
   * Returns tree in prefix order.
   *
   * @throws Exception if something goes wrong
   * @return the prefix order
   */
  public String prefix() throws Exception {

    StringBuffer text;

    text = new StringBuffer();
    if (m_isLeaf) {
      text.append("["+m_localModel.dumpLabel(0,m_train)+"]");
    }else {
      prefixTree(text);
    }

    return text.toString();
  }

  /**
   * Returns source code for the tree as an if-then statement. The
   * class is assigned to variable "p", and assumes the tested
   * instance is named "i". The results are returned as two stringbuffers:
   * a section of code for assignment of the class, and a section of
   * code containing support code (eg: other support methods).
   *
   * @param className the classname that this static classifier has
   * @return an array containing two stringbuffers, the first string containing
   * assignment code, and the second containing source for support code.
   * @throws Exception if something goes wrong
   */
  public StringBuffer [] toSource(String className) throws Exception {

    StringBuffer [] result = new StringBuffer [2];
    if (m_isLeaf) {
      result[0] = new StringBuffer(" p = "
				   + m_localModel.distribution().maxClass(0)
				   + ";\n");
      result[1] = new StringBuffer("");
    } else {
      StringBuffer text = new StringBuffer();
      StringBuffer atEnd = new StringBuffer();

      // node methods are named by local-model hashcode plus a unique counter,
      // since hashcodes alone are not guaranteed unique
      long printID = ClassifierTree.nextID();

      text.append(" static double N")
	.append(Integer.toHexString(m_localModel.hashCode()) + printID)
	.append("(Object []i) {\n")
	.append(" double p = Double.NaN;\n");

      text.append(" if (")
	.append(m_localModel.sourceExpression(-1, m_train))
	.append(") {\n");
      text.append(" p = ")
	.append(m_localModel.distribution().maxClass(0))
	.append(";\n");
      text.append(" } ");
      for (int i = 0; i < m_sons.length; i++) {
	text.append("else if (" + m_localModel.sourceExpression(i, m_train)
		    + ") {\n");
	if (m_sons[i].m_isLeaf) {
	  text.append(" p = "
		      + m_localModel.distribution().maxClass(i) + ";\n");
	} else {
	  StringBuffer [] sub = m_sons[i].toSource(className);
	  text.append(sub[0]);
	  atEnd.append(sub[1]);
	}
	text.append(" } ");
	if (i == m_sons.length - 1) {
	  text.append('\n');
	}
      }

      text.append(" return p;\n }\n");

      result[0] = new StringBuffer(" p = " + className + ".N");
      result[0].append(Integer.toHexString(m_localModel.hashCode()) + printID)
	.append("(i);\n");
      result[1] = text.append(atEnd);
    }
    return result;
  }

  /**
   * Returns number of leaves in tree structure.
   *
   * @return the number of leaves
   */
  public int numLeaves() {

    int num = 0;
    int i;

    if (m_isLeaf)
      return 1;
    else
      for (i=0;i<m_sons.length;i++)
	num = num+m_sons[i].numLeaves();

    return num;
  }

  /**
   * Returns number of nodes in tree structure.
   *
   * @return the number of nodes
   */
  public int numNodes() {

    int no = 1;
    int i;

    if (!m_isLeaf)
      for (i=0;i<m_sons.length;i++)
	no = no+m_sons[i].numNodes();

    return no;
  }

  /**
   * Prints tree structure.
   *
   * @return the tree structure
   */
  public String toString() {

    try {
      StringBuffer text = new StringBuffer();

      if (m_isLeaf) {
	text.append(": ");
	text.append(m_localModel.dumpLabel(0,m_train));
      }else
	dumpTree(0,text);
      text.append("\n\nNumber of Leaves : \t"+numLeaves()+"\n");
      text.append("\nSize of the tree : \t"+numNodes()+"\n");

      return text.toString();
    } catch (Exception e) {
      return "Can't print classification tree.";
    }
  }

  /**
   * Returns a newly created tree.
   *
   * @param data the training data
   * @return the generated tree
   * @throws Exception if something goes wrong
   */
  protected ClassifierTree getNewTree(Instances data) throws Exception {

    ClassifierTree newTree = new ClassifierTree(m_toSelectModel);
    newTree.buildTree(data, false);

    return newTree;
  }

  /**
   * Returns a newly created tree.
   *
   * @param train the training data
   * @param test the pruning data.
   * @return the generated tree
   * @throws Exception if something goes wrong
   */
  protected ClassifierTree getNewTree(Instances train, Instances test)
       throws Exception {

    ClassifierTree newTree = new ClassifierTree(m_toSelectModel);
    newTree.buildTree(train, test, false);

    return newTree;
  }

  /**
   * Help method for printing tree structure.
   *
   * @param depth the current depth
   * @param text for outputting the structure
   * @throws Exception if something goes wrong
   */
  private void dumpTree(int depth, StringBuffer text)
       throws Exception {

    int i,j;

    for (i=0;i<m_sons.length;i++) {
      text.append("\n");;
      for (j=0;j<depth;j++)
	text.append("| ");
      text.append(m_localModel.leftSide(m_train));
      text.append(m_localModel.rightSide(i, m_train));
      if (m_sons[i].m_isLeaf) {
	text.append(": ");
	text.append(m_localModel.dumpLabel(i,m_train));
      }else
	m_sons[i].dumpTree(depth+1,text);
    }
  }

  /**
   * Help method for printing tree structure as a graph.
   *
   * @param text for outputting the tree
   * @throws Exception if something goes wrong
   */
  private void graphTree(StringBuffer text) throws Exception {

    for (int i = 0; i < m_sons.length; i++) {
      text.append("N" + m_id
		  + "->" +
		  "N" + m_sons[i].m_id +
		  " [label=\"" + m_localModel.rightSide(i,m_train).trim() +
		  "\"]\n");
      if (m_sons[i].m_isLeaf) {
	text.append("N" + m_sons[i].m_id +
		    " [label=\""+m_localModel.dumpLabel(i,m_train)+"\" "+
		    "shape=box style=filled ");
	if (m_train != null && m_train.numInstances() > 0) {
	  text.append("data =\n" + m_sons[i].m_train + "\n");
	  text.append(",\n");
	}
	text.append("]\n");
      } else {
	// NOTE(review): the son's label is built against the parent's
	// m_train header — confirm intended (matches original code)
	text.append("N" + m_sons[i].m_id +
		    " [label=\""+m_sons[i].m_localModel.leftSide(m_train) +
		    "\" ");
	if (m_train != null && m_train.numInstances() > 0) {
	  text.append("data =\n" + m_sons[i].m_train + "\n");
	  text.append(",\n");
	}
	text.append("]\n");
	m_sons[i].graphTree(text);
      }
    }
  }

  /**
   * Prints the tree in prefix form
   *
   * @param text the buffer to output the prefix form to
   * @throws Exception if something goes wrong
   */
  private void prefixTree(StringBuffer text) throws Exception {

    text.append("[");
    text.append(m_localModel.leftSide(m_train)+":");
    for (int i = 0; i < m_sons.length; i++) {
      if (i > 0) {
	text.append(",\n");
      }
      text.append(m_localModel.rightSide(i, m_train));
    }
    for (int i = 0; i < m_sons.length; i++) {
      if (m_sons[i].m_isLeaf) {
	text.append("[");
	text.append(m_localModel.dumpLabel(i,m_train));
	text.append("]");
      } else {
	m_sons[i].prefixTree(text);
      }
    }
    text.append("]");
  }

  /**
   * Help method for computing class probabilities of
   * a given instance.
   *
   * @param classIndex the class index
   * @param instance the instance to compute the probabilities for
   * @param weight the weight to use
   * @return the laplace probs
   * @throws Exception if something goes wrong
   */
  private double getProbsLaplace(int classIndex, Instance instance,
				 double weight) throws Exception {

    double prob = 0;

    if (m_isLeaf) {
      return weight * localModel().classProbLaplace(classIndex, instance, -1);
    } else {
      int treeIndex = localModel().whichSubset(instance);
      if (treeIndex == -1) {
	// instance split across sons (e.g. missing split value):
	// recurse into every non-empty son with proportionally reduced weight
	double[] weights = localModel().weights(instance);
	for (int i = 0; i < m_sons.length; i++) {
	  if (!son(i).m_isEmpty) {
	    prob += son(i).getProbsLaplace(classIndex, instance,
					   weights[i] * weight);
	  }
	}
	return prob;
      } else {
	if (son(treeIndex).m_isEmpty) {
	  // empty son: use this node's distribution for that subset
	  return weight * localModel().classProbLaplace(classIndex, instance,
							treeIndex);
	} else {
	  return son(treeIndex).getProbsLaplace(classIndex, instance, weight);
	}
      }
    }
  }

  /**
   * Help method for computing class probabilities of
   * a given instance.
   *
   * @param classIndex the class index
   * @param instance the instance to compute the probabilities for
   * @param weight the weight to use
   * @return the probs
   * @throws Exception if something goes wrong
   */
  private double getProbs(int classIndex, Instance instance,
			  double weight) throws Exception {

    double prob = 0;

    if (m_isLeaf) {
      return weight * localModel().classProb(classIndex, instance, -1);
    } else {
      int treeIndex = localModel().whichSubset(instance);
      if (treeIndex == -1) {
	// instance split across sons: weighted sum over non-empty sons
	double[] weights = localModel().weights(instance);
	for (int i = 0; i < m_sons.length; i++) {
	  if (!son(i).m_isEmpty) {
	    prob += son(i).getProbs(classIndex, instance,
				    weights[i] * weight);
	  }
	}
	return prob;
      } else {
	if (son(treeIndex).m_isEmpty) {
	  return weight * localModel().classProb(classIndex, instance,
						 treeIndex);
	} else {
	  return son(treeIndex).getProbs(classIndex, instance, weight);
	}
      }
    }
  }

  /**
   * Method just exists to make program easier to read.
   */
  private ClassifierSplitModel localModel() {

    return (ClassifierSplitModel)m_localModel;
  }

  /**
   * Method just exists to make program easier to read.
   */
  private ClassifierTree son(int index) {

    return (ClassifierTree)m_sons[index];
  }

  /**
   * Computes a list that indicates node membership
   *
   * @param instance the instance to trace through the tree
   * @return one weight per node (breadth-first order), giving the fraction
   * of the instance's weight reaching that node
   * @throws Exception if something goes wrong
   */
  public double[] getMembershipValues(Instance instance) throws Exception {

    // Set up array for membership values
    double[] a = new double[numNodes()];

    // Initialize queues
    Queue<Double> queueOfWeights = new LinkedList<Double>();
    Queue<ClassifierTree> queueOfNodes = new LinkedList<ClassifierTree>();
    queueOfWeights.add(instance.weight());
    queueOfNodes.add(this);
    int index = 0;

    // While the queue is not empty
    while (!queueOfNodes.isEmpty()) {

      a[index++] = queueOfWeights.poll();
      ClassifierTree node = queueOfNodes.poll();

      // Is node a leaf?
      if (node.m_isLeaf) {
	continue;
      }

      // Which subset?
      int treeIndex = node.localModel().whichSubset(instance);

      // Space for weight distribution
      double[] weights = new double[node.m_sons.length];

      // Check for missing value
      if (treeIndex == -1) {
	weights = node.localModel().weights(instance);
      } else {
	weights[treeIndex] = 1.0;
      }
      for (int i = 0; i < node.m_sons.length; i++) {
	queueOfNodes.add(node.son(i));
	queueOfWeights.add(a[index - 1] * weights[i]);
      }
    }
    return a;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 9117 $");
  }
}
19,654
24.793963
83
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/Distribution.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Distribution.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.j48; import java.io.Serializable; import java.util.Enumeration; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; /** * Class for handling a distribution of class values. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class Distribution implements Cloneable, Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = 8526859638230806576L; /** Weight of instances per class per bag. */ private double m_perClassPerBag[][]; /** Weight of instances per bag. */ private double m_perBag[]; /** Weight of instances per class. */ private double m_perClass[]; /** Total weight of instances. */ private double totaL; /** * Creates and initializes a new distribution. */ public Distribution(int numBags,int numClasses) { int i; m_perClassPerBag = new double [numBags][0]; m_perBag = new double [numBags]; m_perClass = new double [numClasses]; for (i=0;i<numBags;i++) m_perClassPerBag[i] = new double [numClasses]; totaL = 0; } /** * Creates and initializes a new distribution using the given * array. WARNING: it just copies a reference to this array. 
*/ public Distribution(double [][] table) { int i, j; m_perClassPerBag = table; m_perBag = new double [table.length]; m_perClass = new double [table[0].length]; for (i = 0; i < table.length; i++) for (j = 0; j < table[i].length; j++) { m_perBag[i] += table[i][j]; m_perClass[j] += table[i][j]; totaL += table[i][j]; } } /** * Creates a distribution with only one bag according * to instances in source. * * @exception Exception if something goes wrong */ public Distribution(Instances source) throws Exception { m_perClassPerBag = new double [1][0]; m_perBag = new double [1]; totaL = 0; m_perClass = new double [source.numClasses()]; m_perClassPerBag[0] = new double [source.numClasses()]; Enumeration enu = source.enumerateInstances(); while (enu.hasMoreElements()) add(0,(Instance) enu.nextElement()); } /** * Creates a distribution according to given instances and * split model. * * @exception Exception if something goes wrong */ public Distribution(Instances source, ClassifierSplitModel modelToUse) throws Exception { int index; Instance instance; double[] weights; m_perClassPerBag = new double [modelToUse.numSubsets()][0]; m_perBag = new double [modelToUse.numSubsets()]; totaL = 0; m_perClass = new double [source.numClasses()]; for (int i = 0; i < modelToUse.numSubsets(); i++) m_perClassPerBag[i] = new double [source.numClasses()]; Enumeration enu = source.enumerateInstances(); while (enu.hasMoreElements()) { instance = (Instance) enu.nextElement(); index = modelToUse.whichSubset(instance); if (index != -1) add(index, instance); else { weights = modelToUse.weights(instance); addWeights(instance, weights); } } } /** * Creates distribution with only one bag by merging all * bags of given distribution. 
*/ public Distribution(Distribution toMerge) { totaL = toMerge.totaL; m_perClass = new double [toMerge.numClasses()]; System.arraycopy(toMerge.m_perClass,0,m_perClass,0,toMerge.numClasses()); m_perClassPerBag = new double [1] [0]; m_perClassPerBag[0] = new double [toMerge.numClasses()]; System.arraycopy(toMerge.m_perClass,0,m_perClassPerBag[0],0, toMerge.numClasses()); m_perBag = new double [1]; m_perBag[0] = totaL; } /** * Creates distribution with two bags by merging all bags apart of * the indicated one. */ public Distribution(Distribution toMerge, int index) { int i; totaL = toMerge.totaL; m_perClass = new double [toMerge.numClasses()]; System.arraycopy(toMerge.m_perClass,0,m_perClass,0,toMerge.numClasses()); m_perClassPerBag = new double [2] [0]; m_perClassPerBag[0] = new double [toMerge.numClasses()]; System.arraycopy(toMerge.m_perClassPerBag[index],0,m_perClassPerBag[0],0, toMerge.numClasses()); m_perClassPerBag[1] = new double [toMerge.numClasses()]; for (i=0;i<toMerge.numClasses();i++) m_perClassPerBag[1][i] = toMerge.m_perClass[i]-m_perClassPerBag[0][i]; m_perBag = new double [2]; m_perBag[0] = toMerge.m_perBag[index]; m_perBag[1] = totaL-m_perBag[0]; } /** * Returns number of non-empty bags of distribution. */ public final int actualNumBags() { int returnValue = 0; int i; for (i=0;i<m_perBag.length;i++) if (Utils.gr(m_perBag[i],0)) returnValue++; return returnValue; } /** * Returns number of classes actually occuring in distribution. */ public final int actualNumClasses() { int returnValue = 0; int i; for (i=0;i<m_perClass.length;i++) if (Utils.gr(m_perClass[i],0)) returnValue++; return returnValue; } /** * Returns number of classes actually occuring in given bag. */ public final int actualNumClasses(int bagIndex) { int returnValue = 0; int i; for (i=0;i<m_perClass.length;i++) if (Utils.gr(m_perClassPerBag[bagIndex][i],0)) returnValue++; return returnValue; } /** * Adds given instance to given bag. 
* * @exception Exception if something goes wrong */ public final void add(int bagIndex,Instance instance) throws Exception { int classIndex; double weight; classIndex = (int)instance.classValue(); weight = instance.weight(); m_perClassPerBag[bagIndex][classIndex] = m_perClassPerBag[bagIndex][classIndex]+weight; m_perBag[bagIndex] = m_perBag[bagIndex]+weight; m_perClass[classIndex] = m_perClass[classIndex]+weight; totaL = totaL+weight; } /** * Subtracts given instance from given bag. * * @exception Exception if something goes wrong */ public final void sub(int bagIndex,Instance instance) throws Exception { int classIndex; double weight; classIndex = (int)instance.classValue(); weight = instance.weight(); m_perClassPerBag[bagIndex][classIndex] = m_perClassPerBag[bagIndex][classIndex]-weight; m_perBag[bagIndex] = m_perBag[bagIndex]-weight; m_perClass[classIndex] = m_perClass[classIndex]-weight; totaL = totaL-weight; } /** * Adds counts to given bag. */ public final void add(int bagIndex, double[] counts) { double sum = Utils.sum(counts); for (int i = 0; i < counts.length; i++) m_perClassPerBag[bagIndex][i] += counts[i]; m_perBag[bagIndex] = m_perBag[bagIndex]+sum; for (int i = 0; i < counts.length; i++) m_perClass[i] = m_perClass[i]+counts[i]; totaL = totaL+sum; } /** * Adds all instances with unknown values for given attribute, weighted * according to frequency of instances in each bag. 
* * @exception Exception if something goes wrong */ public final void addInstWithUnknown(Instances source, int attIndex) throws Exception { double [] probs; double weight,newWeight; int classIndex; Instance instance; int j; probs = new double [m_perBag.length]; for (j=0;j<m_perBag.length;j++) { if (Utils.eq(totaL, 0)) { probs[j] = 1.0 / probs.length; } else { probs[j] = m_perBag[j]/totaL; } } Enumeration enu = source.enumerateInstances(); while (enu.hasMoreElements()) { instance = (Instance) enu.nextElement(); if (instance.isMissing(attIndex)) { classIndex = (int)instance.classValue(); weight = instance.weight(); m_perClass[classIndex] = m_perClass[classIndex]+weight; totaL = totaL+weight; for (j = 0; j < m_perBag.length; j++) { newWeight = probs[j]*weight; m_perClassPerBag[j][classIndex] = m_perClassPerBag[j][classIndex]+ newWeight; m_perBag[j] = m_perBag[j]+newWeight; } } } } /** * Adds all instances in given range to given bag. * * @exception Exception if something goes wrong */ public final void addRange(int bagIndex,Instances source, int startIndex, int lastPlusOne) throws Exception { double sumOfWeights = 0; int classIndex; Instance instance; int i; for (i = startIndex; i < lastPlusOne; i++) { instance = (Instance) source.instance(i); classIndex = (int)instance.classValue(); sumOfWeights = sumOfWeights+instance.weight(); m_perClassPerBag[bagIndex][classIndex] += instance.weight(); m_perClass[classIndex] += instance.weight(); } m_perBag[bagIndex] += sumOfWeights; totaL += sumOfWeights; } /** * Adds given instance to all bags weighting it according to given weights. 
* * @exception Exception if something goes wrong */ public final void addWeights(Instance instance, double [] weights) throws Exception { int classIndex; int i; classIndex = (int)instance.classValue(); for (i=0;i<m_perBag.length;i++) { double weight = instance.weight() * weights[i]; m_perClassPerBag[i][classIndex] = m_perClassPerBag[i][classIndex] + weight; m_perBag[i] = m_perBag[i] + weight; m_perClass[classIndex] = m_perClass[classIndex] + weight; totaL = totaL + weight; } } /** * Checks if at least two bags contain a minimum number of instances. */ public final boolean check(double minNoObj) { int counter = 0; int i; for (i=0;i<m_perBag.length;i++) if (Utils.grOrEq(m_perBag[i],minNoObj)) counter++; if (counter > 1) return true; else return false; } /** * Clones distribution (Deep copy of distribution). */ public final Object clone() { int i,j; Distribution newDistribution = new Distribution (m_perBag.length, m_perClass.length); for (i=0;i<m_perBag.length;i++) { newDistribution.m_perBag[i] = m_perBag[i]; for (j=0;j<m_perClass.length;j++) newDistribution.m_perClassPerBag[i][j] = m_perClassPerBag[i][j]; } for (j=0;j<m_perClass.length;j++) newDistribution.m_perClass[j] = m_perClass[j]; newDistribution.totaL = totaL; return newDistribution; } /** * Deletes given instance from given bag. * * @exception Exception if something goes wrong */ public final void del(int bagIndex,Instance instance) throws Exception { int classIndex; double weight; classIndex = (int)instance.classValue(); weight = instance.weight(); m_perClassPerBag[bagIndex][classIndex] = m_perClassPerBag[bagIndex][classIndex]-weight; m_perBag[bagIndex] = m_perBag[bagIndex]-weight; m_perClass[classIndex] = m_perClass[classIndex]-weight; totaL = totaL-weight; } /** * Deletes all instances in given range from given bag. 
* * @exception Exception if something goes wrong */ public final void delRange(int bagIndex,Instances source, int startIndex, int lastPlusOne) throws Exception { double sumOfWeights = 0; int classIndex; Instance instance; int i; for (i = startIndex; i < lastPlusOne; i++) { instance = (Instance) source.instance(i); classIndex = (int)instance.classValue(); sumOfWeights = sumOfWeights+instance.weight(); m_perClassPerBag[bagIndex][classIndex] -= instance.weight(); m_perClass[classIndex] -= instance.weight(); } m_perBag[bagIndex] -= sumOfWeights; totaL -= sumOfWeights; } /** * Prints distribution. */ public final String dumpDistribution() { StringBuffer text; int i,j; text = new StringBuffer(); for (i=0;i<m_perBag.length;i++) { text.append("Bag num "+i+"\n"); for (j=0;j<m_perClass.length;j++) text.append("Class num "+j+" "+m_perClassPerBag[i][j]+"\n"); } return text.toString(); } /** * Sets all counts to zero. */ public final void initialize() { for (int i = 0; i < m_perClass.length; i++) m_perClass[i] = 0; for (int i = 0; i < m_perBag.length; i++) m_perBag[i] = 0; for (int i = 0; i < m_perBag.length; i++) for (int j = 0; j < m_perClass.length; j++) m_perClassPerBag[i][j] = 0; totaL = 0; } /** * Returns matrix with distribution of class values. */ public final double[][] matrix() { return m_perClassPerBag; } /** * Returns index of bag containing maximum number of instances. */ public final int maxBag() { double max; int maxIndex; int i; max = 0; maxIndex = -1; for (i=0;i<m_perBag.length;i++) if (Utils.grOrEq(m_perBag[i],max)) { max = m_perBag[i]; maxIndex = i; } return maxIndex; } /** * Returns class with highest frequency over all bags. */ public final int maxClass() { double maxCount = 0; int maxIndex = 0; int i; for (i=0;i<m_perClass.length;i++) if (Utils.gr(m_perClass[i],maxCount)) { maxCount = m_perClass[i]; maxIndex = i; } return maxIndex; } /** * Returns class with highest frequency for given bag. 
*/ public final int maxClass(int index) { double maxCount = 0; int maxIndex = 0; int i; if (Utils.gr(m_perBag[index],0)) { for (i=0;i<m_perClass.length;i++) if (Utils.gr(m_perClassPerBag[index][i],maxCount)) { maxCount = m_perClassPerBag[index][i]; maxIndex = i; } return maxIndex; }else return maxClass(); } /** * Returns number of bags. */ public final int numBags() { return m_perBag.length; } /** * Returns number of classes. */ public final int numClasses() { return m_perClass.length; } /** * Returns perClass(maxClass()). */ public final double numCorrect() { return m_perClass[maxClass()]; } /** * Returns perClassPerBag(index,maxClass(index)). */ public final double numCorrect(int index) { return m_perClassPerBag[index][maxClass(index)]; } /** * Returns total-numCorrect(). */ public final double numIncorrect() { return totaL-numCorrect(); } /** * Returns perBag(index)-numCorrect(index). */ public final double numIncorrect(int index) { return m_perBag[index]-numCorrect(index); } /** * Returns number of (possibly fractional) instances of given class in * given bag. */ public final double perClassPerBag(int bagIndex, int classIndex) { return m_perClassPerBag[bagIndex][classIndex]; } /** * Returns number of (possibly fractional) instances in given bag. */ public final double perBag(int bagIndex) { return m_perBag[bagIndex]; } /** * Returns number of (possibly fractional) instances of given class. */ public final double perClass(int classIndex) { return m_perClass[classIndex]; } /** * Returns relative frequency of class over all bags with * Laplace correction. */ public final double laplaceProb(int classIndex) { return (m_perClass[classIndex] + 1) / (totaL + (double) m_perClass.length); } /** * Returns relative frequency of class for given bag. 
*/ public final double laplaceProb(int classIndex, int intIndex) { if (Utils.gr(m_perBag[intIndex],0)) return (m_perClassPerBag[intIndex][classIndex] + 1.0) / (m_perBag[intIndex] + (double) m_perClass.length); else return laplaceProb(classIndex); } /** * Returns relative frequency of class over all bags. */ public final double prob(int classIndex) { if (!Utils.eq(totaL, 0)) { return m_perClass[classIndex]/totaL; } else { return 0; } } /** * Returns relative frequency of class for given bag. */ public final double prob(int classIndex,int intIndex) { if (Utils.gr(m_perBag[intIndex],0)) return m_perClassPerBag[intIndex][classIndex]/m_perBag[intIndex]; else return prob(classIndex); } /** * Subtracts the given distribution from this one. The results * has only one bag. */ public final Distribution subtract(Distribution toSubstract) { Distribution newDist = new Distribution(1,m_perClass.length); newDist.m_perBag[0] = totaL-toSubstract.totaL; newDist.totaL = newDist.m_perBag[0]; for (int i = 0; i < m_perClass.length; i++) { newDist.m_perClassPerBag[0][i] = m_perClass[i] - toSubstract.m_perClass[i]; newDist.m_perClass[i] = newDist.m_perClassPerBag[0][i]; } return newDist; } /** * Returns total number of (possibly fractional) instances. */ public final double total() { return totaL; } /** * Shifts given instance from one bag to another one. * * @exception Exception if something goes wrong */ public final void shift(int from,int to,Instance instance) throws Exception { int classIndex; double weight; classIndex = (int)instance.classValue(); weight = instance.weight(); m_perClassPerBag[from][classIndex] -= weight; m_perClassPerBag[to][classIndex] += weight; m_perBag[from] -= weight; m_perBag[to] += weight; } /** * Shifts all instances in given range from one bag to another one. 
* * @exception Exception if something goes wrong */ public final void shiftRange(int from,int to,Instances source, int startIndex,int lastPlusOne) throws Exception { int classIndex; double weight; Instance instance; int i; for (i = startIndex; i < lastPlusOne; i++) { instance = (Instance) source.instance(i); classIndex = (int)instance.classValue(); weight = instance.weight(); m_perClassPerBag[from][classIndex] -= weight; m_perClassPerBag[to][classIndex] += weight; m_perBag[from] -= weight; m_perBag[to] += weight; } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
19,012
24.350667
81
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/EntropyBasedSplitCrit.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * EntropyBasedSplitCrit.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.j48; /** * "Abstract" class for computing splitting criteria * based on the entropy of a class distribution. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public abstract class EntropyBasedSplitCrit extends SplitCriterion { /** for serialization */ private static final long serialVersionUID = -2618691439791653056L; /** The log of 2. */ protected static double log2 = Math.log(2); /** * Help method for computing entropy. */ public final double logFunc(double num) { // Constant hard coded for efficiency reasons if (num < 1e-6) return 0; else return num*Math.log(num)/log2; } /** * Computes entropy of distribution before splitting. */ public final double oldEnt(Distribution bags) { double returnValue = 0; int j; for (j=0;j<bags.numClasses();j++) returnValue = returnValue+logFunc(bags.perClass(j)); return logFunc(bags.total())-returnValue; } /** * Computes entropy of distribution after splitting. 
*/ public final double newEnt(Distribution bags) { double returnValue = 0; int i,j; for (i=0;i<bags.numBags();i++){ for (j=0;j<bags.numClasses();j++) returnValue = returnValue+logFunc(bags.perClassPerBag(i,j)); returnValue = returnValue-logFunc(bags.perBag(i)); } return -returnValue; } /** * Computes entropy after splitting without considering the * class values. */ public final double splitEnt(Distribution bags) { double returnValue = 0; int i; for (i=0;i<bags.numBags();i++) returnValue = returnValue+logFunc(bags.perBag(i)); return logFunc(bags.total())-returnValue; } }
2,527
25.333333
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/EntropySplitCrit.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * EntropySplitCrit.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.j48; import weka.core.RevisionUtils; import weka.core.Utils; /** * Class for computing the entropy for a given distribution. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public final class EntropySplitCrit extends EntropyBasedSplitCrit { /** for serialization */ private static final long serialVersionUID = 5986252682266803935L; /** * Computes entropy for given distribution. */ public final double splitCritValue(Distribution bags) { return newEnt(bags); } /** * Computes entropy of test distribution with respect to training distribution. 
*/ public final double splitCritValue(Distribution train, Distribution test) { double result = 0; int numClasses = 0; int i, j; // Find out relevant number of classes for (j = 0; j < test.numClasses(); j++) if (Utils.gr(train.perClass(j), 0) || Utils.gr(test.perClass(j), 0)) numClasses++; // Compute entropy of test data with respect to training data for (i = 0; i < test.numBags(); i++) if (Utils.gr(test.perBag(i),0)) { for (j = 0; j < test.numClasses(); j++) if (Utils.gr(test.perClassPerBag(i, j), 0)) result -= test.perClassPerBag(i, j)* Math.log(train.perClassPerBag(i, j) + 1); result += test.perBag(i) * Math.log(train.perBag(i) + numClasses); } return result / log2; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
2,376
27.638554
81
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/GainRatioSplitCrit.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * GainRatioSplitCrit.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.j48; import weka.core.RevisionUtils; import weka.core.Utils; /** * Class for computing the gain ratio for a given distribution. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public final class GainRatioSplitCrit extends EntropyBasedSplitCrit{ /** for serialization */ private static final long serialVersionUID = -433336694718670930L; /** * This method is a straightforward implementation of the gain * ratio criterion for the given distribution. */ public final double splitCritValue(Distribution bags) { double numerator; double denumerator; numerator = oldEnt(bags)-newEnt(bags); // Splits with no gain are useless. if (Utils.eq(numerator,0)) return Double.MAX_VALUE; denumerator = splitEnt(bags); // Test if split is trivial. if (Utils.eq(denumerator,0)) return Double.MAX_VALUE; // We take the reciprocal value because we want to minimize the // splitting criterion's value. return denumerator/numerator; } /** * This method computes the gain ratio in the same way C4.5 does. 
* * @param bags the distribution * @param totalnoInst the weight of ALL instances * @param numerator the info gain */ public final double splitCritValue(Distribution bags, double totalnoInst, double numerator){ double denumerator; double noUnknown; double unknownRate; int i; // Compute split info. denumerator = splitEnt(bags,totalnoInst); // Test if split is trivial. if (Utils.eq(denumerator,0)) return 0; denumerator = denumerator/totalnoInst; return numerator/denumerator; } /** * Help method for computing the split entropy. */ private final double splitEnt(Distribution bags,double totalnoInst){ double returnValue = 0; double noUnknown; int i; noUnknown = totalnoInst-bags.total(); if (Utils.gr(bags.total(),0)){ for (i=0;i<bags.numBags();i++) returnValue = returnValue-logFunc(bags.perBag(i)); returnValue = returnValue-logFunc(noUnknown); returnValue = returnValue+logFunc(totalnoInst); } return returnValue; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
3,209
26.20339
75
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/GraftSplit.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * GraftSplit.java
 * Copyright (C) 2007 Geoff Webb & Janice Boughton
 * a split object for nodes added to a tree during grafting.
 * (used in classifier J48g).
 */

package weka.classifiers.trees.j48;

import weka.core.*;

/**
 * Class implementing a split for nodes added to a tree during grafting.
 * Each graft split is a binary test on a single attribute; the branch
 * selected by the test type leads to the grafted leaf.
 *
 * @author Janice Boughton (jrbought@infotech.monash.edu.au)
 * @version $Revision 1.0 $
 */
public class GraftSplit extends ClassifierSplitModel implements Comparable {

  /** for serialization. */
  private static final long serialVersionUID = 722773260393182051L;

  /** the distribution for graft values, from cases in atbop */
  private Distribution m_graftdistro;

  /** the attribute we are splitting on */
  private int m_attIndex;

  /** value of split point (if numeric attribute) */
  private double m_splitPoint;

  /** dominant class of the subset specified by m_testType */
  private int m_maxClass;

  /** dominant class of the subset not specified by m_testType */
  private int m_otherLeafMaxClass;

  /** laplace value of the subset specified by m_testType for m_maxClass */
  private double m_laplace;

  /** leaf for the subset specified by m_testType */
  private Distribution m_leafdistro;

  /**
   * type of test:
   *  0: <= test
   *  1: > test
   *  2: = test
   *  3: != test
   */
  private int m_testType;

  /**
   * constructor
   *
   * @param a the attribute to split on
   * @param v the value of a where split occurs
   * @param t the test type (0 is <=, 1 is >, 2 is =, 3 is !=)
   * @param c the class to label the leaf node pointed to by test as.
   * @param l the laplace value (needed when sorting GraftSplits)
   */
  public GraftSplit(int a, double v, int t, double c, double l) {
    m_attIndex = a;
    m_splitPoint = v;
    m_testType = t;
    m_maxClass = (int)c;
    m_laplace = l;
  }

  /**
   * constructor
   *
   * @param a the attribute to split on
   * @param v the value of a where split occurs
   * @param t the test type (0 is <=, 1 is >, 2 is =, 3 is !=)
   * @param oC the class to label the leaf node not pointed to by test as.
   * @param counts the distribution for this split
   */
  public GraftSplit(int a, double v, int t, double oC, double [][] counts)
       throws Exception {
    m_attIndex = a;
    m_splitPoint = v;
    m_testType = t;
    m_otherLeafMaxClass = (int)oC;

    // only deal with binary cuts (<= and >; = and !=)
    m_numSubsets = 2;

    // which subset are we looking at for the graft?
    int subset = subsetOfInterest();  // this is the subset for m_leaf

    // create graft distribution, based on counts
    m_distribution = new Distribution(counts);

    // create a distribution object for m_leaf (a single-bag copy of the
    // counts of the subset of interest)
    double [][] lcounts = new double[1][m_distribution.numClasses()];
    for(int c = 0; c < lcounts[0].length; c++) {
      lcounts[0][c] = counts[subset][c];
    }
    m_leafdistro = new Distribution(lcounts);

    // set the max class
    m_maxClass = m_distribution.maxClass(subset);

    // set the laplace value (assumes binary class) for subset of interest
    m_laplace = (m_distribution.perClassPerBag(subset, m_maxClass) + 1.0)
              / (m_distribution.perBag(subset) + 2.0);
  }

  /**
   * deletes the cases in data that belong to leaf pointed to by
   * the test (i.e. the subset of interest).  this is useful so
   * the instances belonging to that leaf aren't passed down the
   * other branch.
   *
   * @param data the instances to delete from
   */
  public void deleteGraftedCases(Instances data) {
    int subOfInterest = subsetOfInterest();
    for(int x = 0; x < data.numInstances(); x++) {
      if(whichSubset(data.instance(x)) == subOfInterest) {
        // x-- compensates for the index shift caused by delete
        data.delete(x--);
      }
    }
  }

  /**
   * builds m_graftdistro using the passed data
   *
   * @param data the instances to use when creating the distribution
   */
  public void buildClassifier(Instances data) throws Exception {

    // distribution for the graft, not counting cases in atbop, only orig leaf
    m_graftdistro = new Distribution(2, data.numClasses());

    // which subset are we looking at for the graft?
    int subset = subsetOfInterest();  // this is the subset for m_leaf

    double thisNodeCount = 0;
    double knownCases = 0;
    boolean allKnown = true;

    // populate distribution from instances with a known split-attribute value
    for(int x = 0; x < data.numInstances(); x++) {
      Instance instance = data.instance(x);
      if(instance.isMissing(m_attIndex)) {
        allKnown = false;
        continue;
      }
      knownCases += instance.weight();
      int subst = whichSubset(instance);
      if(subst == -1)
        continue;
      m_graftdistro.add(subst, instance);
      if(subst == subset) {  // instance belongs at m_leaf
        thisNodeCount += instance.weight();
      }
    }

    // fraction of known-value weight that reached the subset of interest;
    // falls back to an even 1/2 split when no case had a known value
    double factor = (knownCases == 0) ? (1.0 / (double)2.0)
                                      : (thisNodeCount / knownCases);

    // distribute missing-value instances proportionally, down-weighted by
    // factor (NOTE: mutates the instance weights in data in place)
    if(!allKnown) {
      for(int x = 0; x < data.numInstances(); x++) {
        if(data.instance(x).isMissing(m_attIndex)) {
          Instance instance = data.instance(x);
          int subst = whichSubset(instance);
          if(subst == -1)
            continue;
          instance.setWeight(instance.weight() * factor);
          m_graftdistro.add(subst, instance);
        }
      }
    }

    // if there are no cases at the leaf, make sure the desired
    // class is chosen, by setting counts to 0.01
    if(m_graftdistro.perBag(subset) == 0) {
      double [] counts = new double[data.numClasses()];
      counts[m_maxClass] = 0.01;
      m_graftdistro.add(subset, counts);
    }
    if(m_graftdistro.perBag((subset == 0) ? 1 : 0) == 0) {
      double [] counts = new double[data.numClasses()];
      counts[(int)m_otherLeafMaxClass] = 0.01;
      m_graftdistro.add((subset == 0) ? 1 : 0, counts);
    }
  }

  /**
   * @return the NoSplit object for the leaf pointed to by m_testType branch
   */
  public NoSplit getLeaf() {
    return new NoSplit(m_leafdistro);
  }

  /**
   * @return the NoSplit object for the leaf not pointed to by m_testType branch
   */
  public NoSplit getOtherLeaf() {
    // the bag (subset) that isn't pointed to by m_testType branch
    int bag = (subsetOfInterest() == 0) ? 1 : 0;

    double [][] counts = new double[1][m_graftdistro.numClasses()];
    double totals = 0;
    for(int c = 0; c < counts[0].length; c++) {
      counts[0][c] = m_graftdistro.perClassPerBag(bag, c);
      totals += counts[0][c];
    }
    // if empty, make sure proper class gets chosen
    if(totals == 0) {
      counts[0][m_otherLeafMaxClass] += 0.01;
    }
    return new NoSplit(new Distribution(counts));
  }

  /**
   * Prints label for subset index of instances (eg class).
   *
   * @param index the bag to dump label for
   * @param data to get attribute names and such
   * @return the label as a string
   * @exception Exception if something goes wrong
   */
  public final String dumpLabelG(int index, Instances data) throws Exception {
    StringBuffer text;

    text = new StringBuffer();
    text.append(((Instances)data).classAttribute().
       value((index==subsetOfInterest()) ? m_maxClass : m_otherLeafMaxClass));
    text.append(" ("+Utils.roundDouble(m_graftdistro.perBag(index),1));
    if(Utils.gr(m_graftdistro.numIncorrect(index),0))
      text.append("/"
                 +Utils.roundDouble(m_graftdistro.numIncorrect(index),2));

    // show the graft values, only if this is subsetOfInterest()
    if(index == subsetOfInterest()) {
      text.append("|"+Utils.roundDouble(m_distribution.perBag(index),2));
      if(Utils.gr(m_distribution.numIncorrect(index),0))
        text.append("/"
              +Utils.roundDouble(m_distribution.numIncorrect(index),2));
    }
    text.append(")");
    return text.toString();
  }

  /**
   * @return the subset that is specified by the test type
   */
  public int subsetOfInterest() {
    // test types 2 (=) and 3 (!=) map to bags 0 and 1; for types 0/1 the
    // test type doubles as the bag index
    if(m_testType == 2)
      return 0;
    if(m_testType == 3)
      return 1;
    return m_testType;
  }

  /**
   * @return the number of positive cases in the subset of interest
   */
  public double positivesForSubsetOfInterest() {
    return (m_distribution.perClassPerBag(subsetOfInterest(), m_maxClass));
  }

  /**
   * @param subset the subset to get the positives for
   * @return the number of positive cases in the specified subset
   */
  public double positives(int subset) {
    return (m_distribution.perClassPerBag(subset,
                                          m_distribution.maxClass(subset)));
  }

  /**
   * @return the number of instances in the subset of interest
   */
  public double totalForSubsetOfInterest() {
    return (m_distribution.perBag(subsetOfInterest()));
  }

  /**
   * @param subset the index of the bag to get the total for
   * @return the number of instances in the subset
   */
  public double totalForSubset(int subset) {
    return (m_distribution.perBag(subset));
  }

  /**
   * Prints left side of condition satisfied by instances.
   *
   * @param data the data.
   * @return the name of the split attribute
   */
  public String leftSide(Instances data) {
    return data.attribute(m_attIndex).name();
  }

  /**
   * @return the index of the attribute to split on
   */
  public int attribute() {
    return m_attIndex;
  }

  /**
   * Prints condition satisfied by instances in subset index.
   *
   * @param index the subset (0 or 1)
   * @param data used to resolve nominal attribute values
   * @return the condition text, e.g. " <= 3.5" or " != red"
   */
  public final String rightSide(int index, Instances data) {
    StringBuffer text;

    text = new StringBuffer();
    if(data.attribute(m_attIndex).isNominal())
      if(index == 0)
        text.append(" = "+
                    data.attribute(m_attIndex).value((int)m_splitPoint));
      else
        text.append(" != "+
                    data.attribute(m_attIndex).value((int)m_splitPoint));
    else
      if(index == 0)
        text.append(" <= "+
                    Utils.doubleToString(m_splitPoint,6));
      else
        text.append(" > "+
                    Utils.doubleToString(m_splitPoint,6));
    return text.toString();
  }

  /**
   * Returns a string containing java source code equivalent to the test
   * made at this node. The instance being tested is called "i".
   *
   * @param index index of the nominal value tested
   * @param data the data containing instance structure info
   * @return a value of type 'String'
   */
  public final String sourceExpression(int index, Instances data) {
    StringBuffer expr = null;

    if(index < 0) {
      return "i[" + m_attIndex + "] == null";
    }
    if(data.attribute(m_attIndex).isNominal()) {
      if(index == 0)
        expr = new StringBuffer("i[");
      else
        expr = new StringBuffer("!i[");
      expr.append(m_attIndex).append("]");
      expr.append(".equals(\"").append(data.attribute(m_attIndex)
                  .value((int)m_splitPoint)).append("\")");
    } else {
      expr = new StringBuffer("((Double) i[");
      expr.append(m_attIndex).append("])");
      if(index == 0) {
        expr.append(".doubleValue() <= ").append(m_splitPoint);
      } else {
        expr.append(".doubleValue() > ").append(m_splitPoint);
      }
    }
    return expr.toString();
  }

  /**
   * @param instance the instance to produce the weights for
   * @return a double array of weights (bag proportions from
   *         m_graftdistro), or null if the instance only belongs
   *         to one subset (split value is not missing)
   */
  public double [] weights(Instance instance) {
    double [] weights;
    int i;

    if(instance.isMissing(m_attIndex)) {
      weights = new double [m_numSubsets];
      for(i=0;i<m_numSubsets;i++) {
        weights [i] = m_graftdistro.perBag(i)/m_graftdistro.total();
      }
      return weights;
    } else {
      return null;
    }
  }

  /**
   * @param instance the instance for which to determine the subset
   * @return an int indicating the subset this instance belongs to,
   *         or -1 if the split attribute value is missing
   */
  public int whichSubset(Instance instance) {

    if(instance.isMissing(m_attIndex))
      return -1;

    if(instance.attribute(m_attIndex).isNominal()) {
      // in the case of nominal, m_splitPoint is the = value, all else is !=
      if(instance.value(m_attIndex) == m_splitPoint)
        return 0;
      else
        return 1;
    } else {
      if(Utils.smOrEq(instance.value(m_attIndex), m_splitPoint))
        return 0;
      else
        return 1;
    }
  }

  /**
   * @return the value of the split point
   */
  public double splitPoint() {
    return m_splitPoint;
  }

  /**
   * @return the dominate class for the subset of interest
   */
  public int maxClassForSubsetOfInterest() {
    return m_maxClass;
  }

  /**
   * @return the laplace value for maxClass of subset of interest
   */
  public double laplaceForSubsetOfInterest() {
    return m_laplace;
  }

  /**
   * returns the test type
   * @return value of testtype
   */
  public int testType() {
    return m_testType;
  }

  /**
   * method needed for sorting a collection of GraftSplits by laplace value
   * @param g the graft split to compare to this one
   * @return -1, 0, or 1 if this GraftSplit laplace is <, = or > than that of g
   */
  public int compareTo(Object g) {

    if(m_laplace > ((GraftSplit)g).laplaceForSubsetOfInterest())
      return 1;
    if(m_laplace < ((GraftSplit)g).laplaceForSubsetOfInterest())
      return -1;
    return 0;
  }

  /**
   * returns the probability for instance for the specified class
   *
   * @param classIndex the index of the class
   * @param instance the instance to get the probability for
   * @param theSubset the subset (a negative value means "unknown": the
   *        probability is then averaged over bags using weights())
   */
  public final double classProb(int classIndex, Instance instance,
                                int theSubset) throws Exception {

    if (theSubset <= -1) {
      double [] weights = weights(instance);
      if (weights == null) {
        return m_distribution.prob(classIndex);
      } else {
        double prob = 0;
        for (int i = 0; i < weights.length; i++) {
          prob += weights[i] * m_distribution.prob(classIndex, i);
        }
        return prob;
      }
    } else {
      if (Utils.gr(m_distribution.perBag(theSubset), 0)) {
        return m_distribution.prob(classIndex, theSubset);
      } else {
        return m_distribution.prob(classIndex);
      }
    }
  }

  /**
   * method for returning information about this GraftSplit
   * @param data instances for determining names of attributes and values
   * @return a string showing this GraftSplit's information
   */
  public String toString(Instances data) {

    String theTest;
    if(m_testType == 0)
      theTest = " <= ";
    else if(m_testType == 1)
      theTest = " > ";
    else if(m_testType == 2)
      theTest = " = ";
    else
      theTest = " != ";

    if(data.attribute(m_attIndex).isNominal())
      theTest += data.attribute(m_attIndex).value((int)m_splitPoint);
    else
      theTest += Double.toString(m_splitPoint);

    return data.attribute(m_attIndex).name() + theTest
         + " (" + Double.toString(m_laplace) + ") --> "
         + data.attribute(data.classIndex()).value(m_maxClass);
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 1.2 $");
  }
}
16,015
27.857658
80
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/InfoGainSplitCrit.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * InfoGainSplitCrit.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.j48; import weka.core.RevisionUtils; import weka.core.Utils; /** * Class for computing the information gain for a given distribution. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public final class InfoGainSplitCrit extends EntropyBasedSplitCrit{ /** for serialization */ private static final long serialVersionUID = 4892105020180728499L; /** * This method is a straightforward implementation of the information * gain criterion for the given distribution. */ public final double splitCritValue(Distribution bags) { double numerator; numerator = oldEnt(bags)-newEnt(bags); // Splits with no gain are useless. if (Utils.eq(numerator,0)) return Double.MAX_VALUE; // We take the reciprocal value because we want to minimize the // splitting criterion's value. return bags.total()/numerator; } /** * This method computes the information gain in the same way * C4.5 does. * * @param bags the distribution * @param totalNoInst weight of ALL instances (including the * ones with missing values). 
*/ public final double splitCritValue(Distribution bags, double totalNoInst) { double numerator; double noUnknown; double unknownRate; int i; noUnknown = totalNoInst-bags.total(); unknownRate = noUnknown/totalNoInst; numerator = (oldEnt(bags)-newEnt(bags)); numerator = (1-unknownRate)*numerator; // Splits with no gain are useless. if (Utils.eq(numerator,0)) return 0; return numerator/bags.total(); } /** * This method computes the information gain in the same way * C4.5 does. * * @param bags the distribution * @param totalNoInst weight of ALL instances * @param oldEnt entropy with respect to "no-split"-model. */ public final double splitCritValue(Distribution bags,double totalNoInst, double oldEnt) { double numerator; double noUnknown; double unknownRate; int i; noUnknown = totalNoInst-bags.total(); unknownRate = noUnknown/totalNoInst; numerator = (oldEnt-newEnt(bags)); numerator = (1-unknownRate)*numerator; // Splits with no gain are useless. if (Utils.eq(numerator,0)) return 0; return numerator/bags.total(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
3,365
26.590164
77
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/ModelSelection.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    ModelSelection.java
 *    Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.j48;

import java.io.Serializable;

import weka.core.Instances;
import weka.core.RevisionHandler;

/**
 * Abstract class for model selection criteria. Subclasses decide, for a
 * given dataset, which {@link ClassifierSplitModel} (split or leaf) to
 * place at a tree node.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public abstract class ModelSelection implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -4850147125096133642L;

  /**
   * Selects a model for the given dataset.
   *
   * @param data the data to select a split model for
   * @return the chosen split model
   * @exception Exception if model can't be selected
   */
  public abstract ClassifierSplitModel selectModel(Instances data) throws Exception;

  /**
   * Selects a model for the given train data using the given test data.
   * The default implementation simply fails; subclasses that support
   * separate pruning data override it.
   *
   * @param train the training data
   * @param test the test data
   * @return the chosen split model
   * @exception Exception if model can't be selected
   */
  public ClassifierSplitModel selectModel(Instances train, Instances test)
       throws Exception {

    throw new Exception("Model selection method not implemented");
  }
}
1,750
28.677966
84
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/NBTreeClassifierTree.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    NBTreeClassifierTree.java
 *    Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.j48;

import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instances;
import weka.core.RevisionUtils;

/**
 * Class for handling a naive bayes tree structure used for
 * classification: an ordinary decision-tree skeleton whose leaves carry
 * naive Bayes models instead of plain class distributions.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public class NBTreeClassifierTree extends ClassifierTree {

  /** for serialization */
  private static final long serialVersionUID = -4472639447877404786L;

  /**
   * Constructs a tree that uses the given model-selection method at
   * every node.
   *
   * @param toSelectLocModel selection method for local splitting model
   */
  public NBTreeClassifierTree(ModelSelection toSelectLocModel) {
    super(toSelectLocModel);
  }

  /**
   * Returns default capabilities of the classifier tree.
   *
   * @return the capabilities of this classifier tree
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    // instances
    result.setMinimumNumberInstances(0);

    return result;
  }

  /**
   * Method for building a naive bayes classifier tree.
   * Builds via the superclass, drops the stored training data, then
   * numbers the nodes so leaf models can be referenced by id.
   *
   * @param data the training data
   * @exception Exception if something goes wrong
   */
  public void buildClassifier(Instances data) throws Exception {
    super.buildClassifier(data);
    cleanup(new Instances(data, 0));
    assignIDs(-1);
  }

  /**
   * Assigns a uniqe id to every node in the tree.
   * (Commented out — the inherited implementation is used instead.)
   *
  public int assignIDs(int lastID) {

    int currLastID = lastID + 1;

    m_id = currLastID;
    if (m_sons != null) {
      for (int i = 0; i < m_sons.length; i++) {
	currLastID = m_sons[i].assignIDs(currLastID);
      }
    }
    return currLastID;
  } */

  /**
   * Returns a newly created tree.
   *
   * @param data the training data
   * @exception Exception if something goes wrong
   */
  protected ClassifierTree getNewTree(Instances data) throws Exception {

    ClassifierTree newTree = new NBTreeClassifierTree(m_toSelectModel);
    newTree.buildTree(data, false);

    return newTree;
  }

  /**
   * Returns a newly created tree.
   *
   * @param train the training data
   * @param test the pruning data.
   * @exception Exception if something goes wrong
   */
  protected ClassifierTree getNewTree(Instances train, Instances test)
       throws Exception {

    ClassifierTree newTree = new NBTreeClassifierTree(m_toSelectModel);
    newTree.buildTree(train, test, false);

    return newTree;
  }

  /**
   * Print the models at the leaves. Recurses depth-first so leaves are
   * listed in the same order they appear in the tree dump.
   *
   * @return textual description of the leaf models
   */
  public String printLeafModels() {
    StringBuffer text = new StringBuffer();

    if (m_isLeaf) {
      text.append("\nLeaf number: " + m_id+" ");
      text.append(m_localModel.toString());
      text.append("\n");
    } else {
      for (int i=0;i<m_sons.length;i++) {
	text.append(((NBTreeClassifierTree)m_sons[i]).printLeafModels());
      }
    }
    return text.toString();
  }

  /**
   * Prints tree structure, followed by the naive Bayes model at each
   * leaf and the leaf/node counts.
   */
  public String toString() {

    try {
      StringBuffer text = new StringBuffer();

      if (m_isLeaf) {
	// Degenerate tree: a single naive Bayes leaf.
	text.append(": NB");
	text.append(m_id);
      }else
	dumpTreeNB(0,text);
      text.append("\n"+printLeafModels());
      text.append("\n\nNumber of Leaves : \t"+numLeaves()+"\n");
      text.append("\nSize of the tree : \t"+numNodes()+"\n");

      return text.toString();
    } catch (Exception e) {
      e.printStackTrace();
      return "Can't print nb tree.";
    }
  }

  /**
   * Help method for printing tree structure. Appends one line per
   * branch at this depth, recursing into non-leaf children.
   *
   * @param depth current depth (controls the "|   " indentation)
   * @param text buffer the description is appended to
   * @exception Exception if something goes wrong
   */
  private void dumpTreeNB(int depth,StringBuffer text)
       throws Exception {

    int i,j;

    for (i=0;i<m_sons.length;i++) {
      text.append("\n");;
      for (j=0;j<depth;j++)
	text.append("|   ");
      text.append(m_localModel.leftSide(m_train));
      text.append(m_localModel.rightSide(i, m_train));
      if (m_sons[i].m_isLeaf) {
	text.append(": NB ");
	text.append(m_sons[i].m_id);
      }else
	((NBTreeClassifierTree)m_sons[i]).dumpTreeNB(depth+1,text);
    }
  }

  /**
   * Returns graph describing the tree in GraphViz "dot" format.
   *
   * @exception Exception if something goes wrong
   */
  public String graph() throws Exception {

    StringBuffer text = new StringBuffer();

    text.append("digraph J48Tree {\n");
    if (m_isLeaf) {
      text.append("N" + m_id
		  + " [label=\"" +
		  "NB model" + "\" " +
		  "shape=box style=filled ");
      if (m_train != null && m_train.numInstances() > 0) {
	text.append("data =\n" + m_train + "\n");
	text.append(",\n");
      }
      text.append("]\n");
    }else {
      text.append("N" + m_id
		  + " [label=\"" +
		  m_localModel.leftSide(m_train) + "\" ");
      if (m_train != null && m_train.numInstances() > 0) {
	text.append("data =\n" + m_train + "\n");
	text.append(",\n");
      }
      text.append("]\n");
      graphTree(text);
    }

    return text.toString() +"}\n";
  }

  /**
   * Help method for printing tree structure as a graph: emits the edges
   * from this node and the node declarations for its children.
   *
   * @param text buffer the dot description is appended to
   * @exception Exception if something goes wrong
   */
  private void graphTree(StringBuffer text) throws Exception {

    for (int i = 0; i < m_sons.length; i++) {
      text.append("N" + m_id
		  + "->" +
		  "N" + m_sons[i].m_id +
		  " [label=\"" + m_localModel.rightSide(i,m_train).trim() +
		  "\"]\n");
      if (m_sons[i].m_isLeaf) {
	text.append("N" + m_sons[i].m_id + " [label=\""+"NB Model"+"\" "+
		    "shape=box style=filled ");
	if (m_train != null && m_train.numInstances() > 0) {
	  text.append("data =\n" + m_sons[i].m_train + "\n");
	  text.append(",\n");
	}
	text.append("]\n");
      } else {
	text.append("N" + m_sons[i].m_id +
		    " [label=\""+m_sons[i].m_localModel.leftSide(m_train) +
		    "\" ");
	if (m_train != null && m_train.numInstances() > 0) {
	  text.append("data =\n" + m_sons[i].m_train + "\n");
	  text.append(",\n");
	}
	text.append("]\n");
	((NBTreeClassifierTree)m_sons[i]).graphTree(text);
      }
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
7,138
24.679856
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/NBTreeModelSelection.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    NBTreeModelSelection.java
 *    Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.j48;

import java.util.Enumeration;

import weka.core.Attribute;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * Class for selecting a NB tree split: compares the cross-validated
 * error of a single naive Bayes leaf against the error of splitting on
 * each attribute, and keeps the split only if it is clearly better.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public class NBTreeModelSelection extends ModelSelection {

  /** for serialization */
  private static final long serialVersionUID = 990097748931976704L;

  /** Minimum number of objects in interval. */
  private int m_minNoObj;

  /** All the training data */
  private Instances m_allData; //

  /**
   * Initializes the split selection method with the given parameters.
   *
   * @param minNoObj minimum number of instances that have to occur in at least two
   * subsets induced by split
   * @param allData FULL training dataset (necessary for
   * selection of split points).
   */
  public NBTreeModelSelection(int minNoObj, Instances allData) {
    m_minNoObj = minNoObj;
    m_allData = allData;
  }

  /**
   * Sets reference to training data to null.
   */
  public void cleanup() {

    m_allData = null;
  }

  /**
   * Selects NBTree-type split for the given dataset.
   * Returns the best attribute split if it reduces the global naive
   * Bayes error by at least 5%, otherwise a no-split (leaf) model;
   * returns null only if an unexpected exception occurs.
   */
  public final ClassifierSplitModel selectModel(Instances data){

    double globalErrors = 0;

    double minResult;
    double currentResult; // NOTE(review): never assigned or read — dead local.
    NBTreeSplit [] currentModel;
    NBTreeSplit bestModel = null;
    NBTreeNoSplit noSplitModel = null;
    int validModels = 0;
    boolean multiVal = true; // NOTE(review): computed below but never used afterwards.
    Distribution checkDistribution;
    Attribute attribute;
    double sumOfWeights;
    int i;

    try{
      // build the global model at this node
      noSplitModel = new NBTreeNoSplit();
      noSplitModel.buildClassifier(data);

      // Too little data to consider splitting at all.
      if (data.numInstances() < 5) {
	return noSplitModel;
      }

      // evaluate it
      globalErrors = noSplitModel.getErrors();
      // A perfect leaf cannot be improved by splitting.
      if (globalErrors == 0) {
	return noSplitModel;
      }

      // Check if all Instances belong to one class or if not
      // enough Instances to split.
      checkDistribution = new Distribution(data);
      if (Utils.sm(checkDistribution.total(), m_minNoObj) ||
	  Utils.eq(checkDistribution.total(),
		   checkDistribution.perClass(checkDistribution.maxClass()))) {
	return noSplitModel;
      }

      // Check if all attributes are nominal and have a
      // lot of values.
      if (m_allData != null) {
	Enumeration enu = data.enumerateAttributes();
	while (enu.hasMoreElements()) {
	  attribute = (Attribute) enu.nextElement();
	  if ((attribute.isNumeric()) ||
	      (Utils.sm((double)attribute.numValues(),
			(0.3*(double)m_allData.numInstances())))){
	    multiVal = false;
	    break;
	  }
	}
      }

      currentModel = new NBTreeSplit[data.numAttributes()];
      sumOfWeights = data.sumOfWeights();

      // For each attribute.
      for (i = 0; i < data.numAttributes(); i++){

	// Apart from class attribute.
	if (i != (data).classIndex()){

	  // Get models for current attribute.
	  currentModel[i] = new NBTreeSplit(i,m_minNoObj,sumOfWeights);
	  currentModel[i].setGlobalModel(noSplitModel);
	  currentModel[i].buildClassifier(data);

	  // Check if useful split for current attribute
	  // exists and check for enumerated attributes with
	  // a lot of values.
	  if (currentModel[i].checkModel()){
	    validModels++;
	  }
	} else {
	  currentModel[i] = null;
	}
      }

      // Check if any useful split was found.
      if (validModels == 0) {
	return noSplitModel;
      }

      // Find "best" attribute to split on.
      minResult = globalErrors;
      for (i=0;i<data.numAttributes();i++){
	if ((i != (data).classIndex()) &&
	    (currentModel[i].checkModel())) {
	  /*	  System.err.println("Errors for "+data.attribute(i).name()+" "+
		  currentModel[i].getErrors()); */
	  if (currentModel[i].getErrors() < minResult) {
	    bestModel = currentModel[i];
	    minResult = currentModel[i].getErrors();
	  }
	}
      }
      //      System.exit(1);

      // Check if useful split was found: require at least a 5%
      // relative error reduction over the single naive Bayes leaf.
      if (((globalErrors - minResult) / globalErrors) < 0.05) {
	return noSplitModel;
      }

      /*      if (bestModel == null) {
	      System.err.println("This shouldn't happen! glob : "+globalErrors+
	      " minRes : "+minResult);
	      System.exit(1);
	      } */
      // Set the global model for the best split
      //      bestModel.setGlobalModel(noSplitModel);

      return bestModel;
    }catch(Exception e){
      e.printStackTrace();
    }
    return null;
  }

  /**
   * Selects NBTree-type split for the given dataset.
   * The pruning set is ignored; selection uses the training data only.
   */
  public final ClassifierSplitModel selectModel(Instances train, Instances test) {

    return selectModel(train);
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
5,681
26.057143
83
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/NBTreeNoSplit.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    NBTreeNoSplit.java
 *    Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.j48;

import java.util.Random;

import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.bayes.NaiveBayesUpdateable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.filters.Filter;
import weka.filters.supervised.attribute.Discretize;

/**
 * Class implementing a "no-split"-split (leaf node) for naive bayes
 * trees. The leaf holds a naive Bayes model trained on discretized
 * data, plus its cross-validated error for use by the split selector.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public final class NBTreeNoSplit extends ClassifierSplitModel {

  /** for serialization */
  private static final long serialVersionUID = 7824804381545259618L;

  /** the naive bayes classifier */
  private NaiveBayesUpdateable m_nb;

  /** the discretizer used */
  private Discretize m_disc;

  /** errors on the training data at this node */
  private double m_errors;

  public NBTreeNoSplit() {
    m_numSubsets = 1;
  }

  /**
   * Build the no-split node: discretize the data, train naive Bayes on
   * it, and (when there are at least 5 instances) estimate its error by
   * 5-fold cross-validation. With fewer instances m_errors stays 0.
   *
   * @param instances an <code>Instances</code> value
   * @exception Exception if an error occurs
   */
  public final void buildClassifier(Instances instances) throws Exception {
    m_nb = new NaiveBayesUpdateable();
    m_disc = new Discretize();
    m_disc.setInputFormat(instances);
    Instances temp = Filter.useFilter(instances, m_disc);
    m_nb.buildClassifier(temp);
    if (temp.numInstances() >= 5) {
      m_errors = crossValidate(m_nb, temp, new Random(1));
    }
    m_numSubsets = 1;
  }

  /**
   * Return the errors made by the naive bayes model at this node
   *
   * @return the number of errors made
   */
  public double getErrors() {
    return m_errors;
  }

  /**
   * Return the discretizer used at this node
   *
   * @return a <code>Discretize</code> value
   */
  public Discretize getDiscretizer() {
    return m_disc;
  }

  /**
   * Get the naive bayes model at this node
   *
   * @return a <code>NaiveBayesUpdateable</code> value
   */
  public NaiveBayesUpdateable getNaiveBayesModel() {
    return m_nb;
  }

  /**
   * Always returns 0 because only there is only one subset.
   */
  public final int whichSubset(Instance instance){

    return 0;
  }

  /**
   * Always returns null because there is only one subset.
   */
  public final double [] weights(Instance instance){

    return null;
  }

  /**
   * Does nothing because no condition has to be satisfied.
   */
  public final String leftSide(Instances instances){

    return "";
  }

  /**
   * Does nothing because no condition has to be satisfied.
   */
  public final String rightSide(int index, Instances instances){

    return "";
  }

  /**
   * Returns a string containing java source code equivalent to the test
   * made at this node. The instance being tested is called "i".
   *
   * @param index index of the nominal value tested
   * @param data the data containing instance structure info
   * @return a value of type 'String'
   */
  public final String sourceExpression(int index, Instances data) {
    return "true";  // or should this be false??
  }

  /**
   * Return the probability for a class value: the instance is passed
   * through this node's discretizer before the naive Bayes lookup.
   *
   * @param classIndex the index of the class value
   * @param instance the instance to generate a probability for
   * @param theSubset the subset to consider
   * @return a probability
   * @exception Exception if an error occurs
   */
  public double classProb(int classIndex, Instance instance, int theSubset)
    throws Exception {

    m_disc.input(instance);
    Instance temp = m_disc.output();
    return m_nb.distributionForInstance(temp)[classIndex];
  }

  /**
   * Return a textual description of the node
   *
   * @return a <code>String</code> value
   */
  public String toString() {

    return m_nb.toString();
  }

  /**
   * Utility method for fast 5-fold cross validation of a naive bayes
   * model. Instead of retraining per fold, it copies the full model and
   * "unlearns" each fold's test instances via negative-weight updates.
   *
   * @param fullModel a <code>NaiveBayesUpdateable</code> value
   * @param trainingSet an <code>Instances</code> value
   * @param r a <code>Random</code> value
   * @return a <code>double</code> value
   * @exception Exception if an error occurs
   */
  public static double crossValidate(NaiveBayesUpdateable fullModel,
				     Instances trainingSet,
				     Random r) throws Exception {
    // make some copies for fast evaluation of 5-fold xval
    Classifier [] copies = AbstractClassifier.makeCopies(fullModel, 5);
    Evaluation eval = new Evaluation(trainingSet);
    // make some splits
    for (int j = 0; j < 5; j++) {
      Instances test = trainingSet.testCV(5, j);
      // unlearn these test instances: a negative weight makes the
      // incremental update subtract the instance from the copy
      for (int k = 0; k < test.numInstances(); k++) {
	test.instance(k).setWeight(-test.instance(k).weight());
	((NaiveBayesUpdateable)copies[j]).updateClassifier(test.instance(k));
	// reset the weight back to its original value
	test.instance(k).setWeight(-test.instance(k).weight());
      }
      eval.evaluateModel(copies[j], test);
    }
    return eval.incorrect();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
5,957
26.456221
76
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/NBTreeSplit.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    NBTreeSplit.java
 *    Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.j48;

import java.util.Random;

import weka.classifiers.bayes.NaiveBayesUpdateable;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.filters.Filter;
import weka.filters.supervised.attribute.Discretize;

/**
 * Class implementing a NBTree split on an attribute. The branch
 * structure is delegated to a C45Split; this class estimates, per
 * branch, the cross-validated error of a naive Bayes model trained on
 * that branch's instances.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public class NBTreeSplit extends ClassifierSplitModel{

  /** for serialization */
  private static final long serialVersionUID = 8922627123884975070L;

  /** Desired number of branches. */
  private int m_complexityIndex;

  /** Attribute to split on. */
  private int m_attIndex;

  /** Minimum number of objects in a split. */
  private int m_minNoObj;

  /** Value of split point.
      NOTE(review): initialized to Double.MAX_VALUE in buildClassifier
      and apparently never updated here — the numeric split point seems
      to live in the wrapped C45Split; confirm before relying on it. */
  private double m_splitPoint;

  /** The sum of the weights of the instances. */
  private double m_sumOfWeights;

  /** The weight of the instances incorrectly classified
      by the naive bayes models arising from this split*/
  private double m_errors;

  /** The underlying C4.5-style split that defines the branches. */
  private C45Split m_c45S;

  /** The global naive bayes model for this node */
  NBTreeNoSplit m_globalNB;

  /**
   * Initializes the split model.
   *
   * @param attIndex index of the attribute to split on
   * @param minNoObj minimum number of objects in a split
   * @param sumOfWeights total weight of the training instances
   */
  public NBTreeSplit(int attIndex, int minNoObj, double sumOfWeights) {

    // Get index of attribute to split on.
    m_attIndex = attIndex;

    // Set minimum number of objects.
    m_minNoObj = minNoObj;

    // Set the sum of the weights
    m_sumOfWeights = sumOfWeights;
  }

  /**
   * Creates a NBTree-type split on the given data. Assumes that none of
   * the class values is missing.
   *
   * @exception Exception if something goes wrong
   */
  public void buildClassifier(Instances trainInstances) throws Exception {

    // Initialize the remaining instance variables.
    m_numSubsets = 0;
    m_splitPoint = Double.MAX_VALUE;
    m_errors = 0;
    // Start from the parent's global error; the handlers below reset
    // m_errors once a usable C4.5 split exists.
    if (m_globalNB != null) {
      m_errors = m_globalNB.getErrors();
    }

    // Different treatment for enumerated and numeric
    // attributes.
    if (trainInstances.attribute(m_attIndex).isNominal()) {
      m_complexityIndex = trainInstances.attribute(m_attIndex).numValues();
      handleEnumeratedAttribute(trainInstances);
    }else{
      m_complexityIndex = 2;
      trainInstances.sort(trainInstances.attribute(m_attIndex));
      handleNumericAttribute(trainInstances);
    }
  }

  /**
   * Returns index of attribute for which split was generated.
   */
  public final int attIndex() {

    return m_attIndex;
  }

  /**
   * Creates split on enumerated attribute: partitions the data by
   * attribute value (distributing missing-value instances by weight),
   * then cross-validates a naive Bayes model per branch and sums the
   * branch errors into m_errors.
   *
   * @exception Exception if something goes wrong
   */
  private void handleEnumeratedAttribute(Instances trainInstances)
    throws Exception {

    m_c45S = new C45Split(m_attIndex, 2, m_sumOfWeights, true);
    m_c45S.buildClassifier(trainInstances);
    if (m_c45S.numSubsets() == 0) {
      return;
    }
    m_errors = 0;
    Instance instance;

    Instances [] trainingSets = new Instances [m_complexityIndex];
    for (int i = 0; i < m_complexityIndex; i++) {
      trainingSets[i] = new Instances(trainInstances, 0);
    }
    /*    m_distribution = new Distribution(m_complexityIndex,
	  trainInstances.numClasses()); */
    int subset;
    for (int i = 0; i < trainInstances.numInstances(); i++) {
      instance = trainInstances.instance(i);
      subset = m_c45S.whichSubset(instance);
      if (subset > -1) {
	trainingSets[subset].add((Instance)instance.copy());
      } else {
	// Missing split-attribute value: spread the instance across all
	// branches, scaled by the branch weights.
	double [] weights = m_c45S.weights(instance);
	for (int j = 0; j < m_complexityIndex; j++) {
	  try {
	    Instance temp = (Instance) instance.copy();
	    if (weights.length == m_complexityIndex) {
	      temp.setWeight(temp.weight() * weights[j]);
	    } else {
	      temp.setWeight(temp.weight() / m_complexityIndex);
	    }
	    trainingSets[j].add(temp);
	  } catch (Exception ex) {
	    ex.printStackTrace();
	    System.err.println("*** "+m_complexityIndex);
	    System.err.println(weights.length);
	    System.exit(1);
	  }
	}
      }
    }

    /*    // compute weights (weights of instances per subset
	  m_weights = new double [m_complexityIndex];
	  for (int i = 0; i < m_complexityIndex; i++) {
	  m_weights[i] = trainingSets[i].sumOfWeights();
	  }
	  Utils.normalize(m_weights); */

    /*
    // Only Instances with known values are relevant.
    Enumeration enu = trainInstances.enumerateInstances();
    while (enu.hasMoreElements()) {
      instance = (Instance) enu.nextElement();
      if (!instance.isMissing(m_attIndex)) {
	//	m_distribution.add((int)instance.value(m_attIndex),instance);
	trainingSets[(int)instances.value(m_attIndex)].add(instance);
      } else {
	// add these to the error count
	m_errors += instance.weight();
      }
      } */

    Random r = new Random(1);
    int minNumCount = 0;
    for (int i = 0; i < m_complexityIndex; i++) {
      if (trainingSets[i].numInstances() >= 5) {
	minNumCount++;
	// Discretize the sets
	Discretize disc = new Discretize();
	disc.setInputFormat(trainingSets[i]);
	trainingSets[i] = Filter.useFilter(trainingSets[i], disc);

	trainingSets[i].randomize(r);
	trainingSets[i].stratify(5);
	NaiveBayesUpdateable fullModel = new NaiveBayesUpdateable();
	fullModel.buildClassifier(trainingSets[i]);

	// add the errors for this branch of the split
	m_errors += NBTreeNoSplit.crossValidate(fullModel, trainingSets[i], r);
      } else {
	// if fewer than min obj then just count them as errors
	for (int j = 0; j < trainingSets[i].numInstances(); j++) {
	  m_errors += trainingSets[i].instance(j).weight();
	}
      }
    }

    // Check if there are at least five instances in at least two of the subsets
    // subsets.
    if (minNumCount > 1) {
      m_numSubsets = m_complexityIndex;
    }
  }

  /**
   * Creates split on numeric attribute: a binary C4.5 split, then a
   * per-branch cross-validated naive Bayes error, as for the
   * enumerated case.
   * NOTE(review): this handler requires strictly more than 5 instances
   * per branch, while the enumerated handler uses ">= 5" — confirm
   * whether the asymmetry is intentional.
   *
   * @exception Exception if something goes wrong
   */
  private void handleNumericAttribute(Instances trainInstances)
    throws Exception {

    m_c45S = new C45Split(m_attIndex, 2, m_sumOfWeights, true);
    m_c45S.buildClassifier(trainInstances);
    if (m_c45S.numSubsets() == 0) {
      return;
    }
    m_errors = 0;

    Instances [] trainingSets = new Instances [m_complexityIndex];
    trainingSets[0] = new Instances(trainInstances, 0);
    trainingSets[1] = new Instances(trainInstances, 0);
    int subset = -1;

    // populate the subsets
    for (int i = 0; i < trainInstances.numInstances(); i++) {
      Instance instance = trainInstances.instance(i);
      subset = m_c45S.whichSubset(instance);
      if (subset != -1) {
	trainingSets[subset].add((Instance)instance.copy());
      } else {
	// Missing value: distribute across both branches by weight.
	double [] weights = m_c45S.weights(instance);
	for (int j = 0; j < m_complexityIndex; j++) {
	  Instance temp = (Instance)instance.copy();
	  if (weights.length == m_complexityIndex) {
	    temp.setWeight(temp.weight() * weights[j]);
	  } else {
	    temp.setWeight(temp.weight() / m_complexityIndex);
	  }
	  trainingSets[j].add(temp);
	}
      }
    }

    /*    // compute weights (weights of instances per subset
	  m_weights = new double [m_complexityIndex];
	  for (int i = 0; i < m_complexityIndex; i++) {
	  m_weights[i] = trainingSets[i].sumOfWeights();
	  }
	  Utils.normalize(m_weights); */

    Random r = new Random(1);
    int minNumCount = 0;
    for (int i = 0; i < m_complexityIndex; i++) {
      if (trainingSets[i].numInstances() > 5) {
	minNumCount++;
	// Discretize the sets
	Discretize disc = new Discretize();
	disc.setInputFormat(trainingSets[i]);
	trainingSets[i] = Filter.useFilter(trainingSets[i], disc);

	trainingSets[i].randomize(r);
	trainingSets[i].stratify(5);
	NaiveBayesUpdateable fullModel = new NaiveBayesUpdateable();
	fullModel.buildClassifier(trainingSets[i]);

	// add the errors for this branch of the split
	m_errors += NBTreeNoSplit.crossValidate(fullModel, trainingSets[i], r);
      } else {
	for (int j = 0; j < trainingSets[i].numInstances(); j++) {
	  m_errors += trainingSets[i].instance(j).weight();
	}
      }
    }

    // Check if minimum number of Instances in at least two
    // subsets.
    if (minNumCount > 1) {
      m_numSubsets = m_complexityIndex;
    }
  }

  /**
   * Returns index of subset instance is assigned to.
   * Returns -1 if instance is assigned to more than one subset.
   *
   * @exception Exception if something goes wrong
   */
  public final int whichSubset(Instance instance)
    throws Exception {

    return m_c45S.whichSubset(instance);
  }

  /**
   * Returns weights if instance is assigned to more than one subset.
   * Returns null if instance is only assigned to one subset.
   */
  public final double [] weights(Instance instance) {
    return m_c45S.weights(instance);
    //    return m_weights;
  }

  /**
   * Returns a string containing java source code equivalent to the test
   * made at this node. The instance being tested is called "i".
   *
   * @param index index of the nominal value tested
   * @param data the data containing instance structure info
   * @return a value of type 'String'
   */
  public final String sourceExpression(int index, Instances data) {
    return m_c45S.sourceExpression(index, data);
  }

  /**
   * Prints the condition satisfied by instances in a subset.
   *
   * @param index of subset
   * @param data training set.
   */
  public final String rightSide(int index,Instances data) {
    return m_c45S.rightSide(index, data);
  }

  /**
   * Prints left side of condition..
   *
   * @param data training set.
   */
  public final String leftSide(Instances data) {

    return m_c45S.leftSide(data);
  }

  /**
   * Return the probability for a class value. Always answered by the
   * global naive Bayes model attached to this node.
   *
   * @param classIndex the index of the class value
   * @param instance the instance to generate a probability for
   * @param theSubset the subset to consider
   * @return a probability
   * @exception Exception if an error occurs
   */
  public double classProb(int classIndex, Instance instance, int theSubset)
    throws Exception {
    // use the global naive bayes model
    if (theSubset > -1) {
      return m_globalNB.classProb(classIndex, instance, theSubset);
    } else {
      throw new Exception("This shouldn't happen!!!");
    }
  }

  /**
   * Return the global naive bayes model for this node
   *
   * @return a <code>NBTreeNoSplit</code> value
   */
  public NBTreeNoSplit getGlobalModel() {
    return m_globalNB;
  }

  /**
   * Set the global naive bayes model for this node
   *
   * @param global a <code>NBTreeNoSplit</code> value
   */
  public void setGlobalModel(NBTreeNoSplit global) {
    m_globalNB = global;
  }

  /**
   * Return the errors made by the naive bayes models arising
   * from this split.
   *
   * @return a <code>double</code> value
   */
  public double getErrors() {
    return m_errors;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
11,825
27.703883
80
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/NoSplit.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * NoSplit.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.j48; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; /** * Class implementing a "no-split"-split. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public final class NoSplit extends ClassifierSplitModel{ /** for serialization */ private static final long serialVersionUID = -1292620749331337546L; /** * Creates "no-split"-split for given distribution. */ public NoSplit(Distribution distribution){ m_distribution = new Distribution(distribution); m_numSubsets = 1; } /** * Creates a "no-split"-split for a given set of instances. * * @exception Exception if split can't be built successfully */ public final void buildClassifier(Instances instances) throws Exception { m_distribution = new Distribution(instances); m_numSubsets = 1; } /** * Always returns 0 because only there is only one subset. */ public final int whichSubset(Instance instance){ return 0; } /** * Always returns null because there is only one subset. */ public final double [] weights(Instance instance){ return null; } /** * Does nothing because no condition has to be satisfied. 
*/ public final String leftSide(Instances instances){ return ""; } /** * Does nothing because no condition has to be satisfied. */ public final String rightSide(int index, Instances instances){ return ""; } /** * Returns a string containing java source code equivalent to the test * made at this node. The instance being tested is called "i". * * @param index index of the nominal value tested * @param data the data containing instance structure info * @return a value of type 'String' */ public final String sourceExpression(int index, Instances data) { return "true"; // or should this be false?? } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
2,901
24.234783
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/PruneableClassifierTree.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * PruneableClassifierTree.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.j48;

import java.util.Random;

import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * Class for handling a tree structure that can
 * be pruned using a pruning set: the data is split into numSets
 * stratified subsets, the last one is held out for reduced-error
 * pruning and the rest are used to grow the tree.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 8984 $
 */
public class PruneableClassifierTree
  extends ClassifierTree {

  /** for serialization */
  static final long serialVersionUID = -555775736857600201L;

  /** True if the tree is to be pruned. */
  private boolean pruneTheTree = false;

  /** How many subsets of equal size? One used for pruning, the rest for training. */
  private int numSets = 3;

  /** Cleanup after the tree has been built. */
  private boolean m_cleanup = true;

  /** The random number seed. */
  private int m_seed = 1;

  /**
   * Constructor for pruneable tree structure. Stores reference
   * to associated training data at each node.
   *
   * @param toSelectLocModel selection method for local splitting model
   * @param pruneTree true if the tree is to be pruned
   * @param num number of subsets of equal size
   * @param cleanup whether to discard training data after building (saves memory)
   * @param seed the seed value to use
   * @throws Exception if something goes wrong
   */
  public PruneableClassifierTree(ModelSelection toSelectLocModel,
				 boolean pruneTree, int num, boolean cleanup,
				 int seed)
       throws Exception {

    super(toSelectLocModel);

    pruneTheTree = pruneTree;
    numSets = num;
    m_cleanup = cleanup;
    m_seed = seed;
  }

  /**
   * Returns default capabilities of the classifier tree.
   *
   * @return      the capabilities of this classifier tree
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    // class
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    // instances
    result.setMinimumNumberInstances(0);

    return result;
  }

  /**
   * Method for building a pruneable classifier tree.
   * Grows the tree on the training folds, optionally prunes it on the
   * held-out fold, and optionally frees the stored training data.
   *
   * @param data the data to build the tree from
   * @throws Exception if tree can't be built successfully
   */
  public void buildClassifier(Instances data)
       throws Exception {

    // can classifier tree handle the data?
    getCapabilities().testWithFail(data);

    // remove instances with missing class
    data = new Instances(data);
    data.deleteWithMissingClass();

    Random random = new Random(m_seed);
    data.stratify(numSets);
    // Grow on all folds except the last; keep the last fold as the pruning
    // set (passed to buildTree as the "test" data).
    buildTree(data.trainCV(numSets, numSets - 1, random),
	      data.testCV(numSets, numSets - 1), !m_cleanup);
    if (pruneTheTree) {
      prune();
    }
    if (m_cleanup) {
      cleanup(new Instances(data, 0));
    }
  }

  /**
   * Prunes a tree bottom-up: a subtree is replaced by a leaf whenever the
   * leaf makes no more errors on the pruning set than the subtree does.
   *
   * @throws Exception if tree can't be pruned successfully
   */
  public void prune() throws Exception {

    if (!m_isLeaf) {

      // Prune all subtrees.
      for (int i = 0; i < m_sons.length; i++)
	son(i).prune();

      // Decide if leaf is best choice.
      if (Utils.smOrEq(errorsForLeaf(),errorsForTree())) {

	// Free son Trees
	m_sons = null;
	m_isLeaf = true;

	// Get NoSplit Model for node.
	m_localModel = new NoSplit(localModel().distribution());
      }
    }
  }

  /**
   * Returns a newly created tree.
   *
   * @param train the training data
   * @param test the test data
   * @return the generated tree
   * @throws Exception if something goes wrong
   */
  protected ClassifierTree getNewTree(Instances train, Instances test)
       throws Exception {

    PruneableClassifierTree newTree =
      new PruneableClassifierTree(m_toSelectModel, pruneTheTree, numSets, m_cleanup,
				  m_seed);
    newTree.buildTree(train, test, !m_cleanup);
    return newTree;
  }

  /**
   * Computes estimated errors for tree on the pruning set.
   *
   * @return the estimated errors
   * @throws Exception if error estimate can't be computed
   */
  private double errorsForTree() throws Exception {

    double errors = 0;

    if (m_isLeaf)
      return errorsForLeaf();
    else{
      for (int i = 0; i < m_sons.length; i++)
	if (Utils.eq(localModel().distribution().perBag(i), 0)) {
	  // Empty training bag: charge all pruning-set instances in this bag
	  // that don't have the majority class as errors.
	  errors += m_test.perBag(i)-
	    m_test.perClassPerBag(i,localModel().
				distribution().maxClass());
	} else
	  errors += son(i).errorsForTree();

      return errors;
    }
  }

  /**
   * Computes estimated errors for leaf on the pruning set
   * (pruning-set instances not of the training majority class).
   *
   * @return the estimated errors
   * @throws Exception if error estimate can't be computed
   */
  private double errorsForLeaf() throws Exception {

    return m_test.total()-
      m_test.perClass(localModel().distribution().maxClass());
  }

  /**
   * Method just exists to make program easier to read.
   */
  private ClassifierSplitModel localModel() {

    return (ClassifierSplitModel)m_localModel;
  }

  /**
   * Method just exists to make program easier to read.
   */
  private PruneableClassifierTree son(int index) {

    return (PruneableClassifierTree)m_sons[index];
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8984 $");
  }
}
6,224
24.9375
85
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/SplitCriterion.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * SplitCriterion.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.j48;

import java.io.Serializable;

import weka.core.RevisionHandler;

/**
 * Base class for splitting criteria computed from class-value
 * distributions. Every overload returns 0 by default, so a concrete
 * criterion only needs to override the variants it actually supports.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public abstract class SplitCriterion implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 5490996638027101259L;

  /**
   * Computes result of splitting criterion for given distribution.
   *
   * @param bags the class distribution over the split's subsets
   * @return value of splitting criterion. 0 by default
   */
  public double splitCritValue(Distribution bags) {
    return 0;
  }

  /**
   * Computes result of splitting criterion for given training and
   * test distributions.
   *
   * @param train the training distribution
   * @param test the test distribution
   * @return value of splitting criterion. 0 by default
   */
  public double splitCritValue(Distribution train, Distribution test) {
    return 0;
  }

  /**
   * Computes result of splitting criterion for given training and
   * test distributions and given number of classes.
   *
   * @param train the training distribution
   * @param test the test distribution
   * @param noClassesDefault the number of classes to assume by default
   * @return value of splitting criterion. 0 by default
   */
  public double splitCritValue(Distribution train, Distribution test,
                               int noClassesDefault) {
    return 0;
  }

  /**
   * Computes result of splitting criterion for given training and
   * test distributions and given default distribution.
   *
   * @param train the training distribution
   * @param test the test distribution
   * @param defC the default distribution
   * @return value of splitting criterion. 0 by default
   */
  public double splitCritValue(Distribution train, Distribution test,
                               Distribution defC) {
    return 0;
  }
}
2,359
25.818182
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/j48/Stats.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Stats.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.j48; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Statistics; /** * Class implementing a statistical routine needed by J48 to * compute its error estimate. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class Stats implements RevisionHandler { /** * Computes estimated extra error for given total number of instances * and error using normal approximation to binomial distribution * (and continuity correction). * * @param N number of instances * @param e observed error * @param CF confidence value */ public static double addErrs(double N, double e, float CF){ // Ignore stupid values for CF if (CF > 0.5) { System.err.println("WARNING: confidence value for pruning " + " too high. Error estimate not modified."); return 0; } // Check for extreme cases at the low end because the // normal approximation won't work if (e < 1) { // Base case (i.e. e == 0) from documenta Geigy Scientific // Tables, 6th edition, page 185 double base = N * (1 - Math.pow(CF, 1 / N)); if (e == 0) { return base; } // Use linear interpolation between 0 and 1 like C4.5 does return base + e * (addErrs(N, 1, CF) - base); } // Use linear interpolation at the high end (i.e. 
between N - 0.5 // and N) because of the continuity correction if (e + 0.5 >= N) { // Make sure that we never return anything smaller than zero return Math.max(N - e, 0); } // Get z-score corresponding to CF double z = Statistics.normalInverse(1 - CF); // Compute upper limit of confidence interval double f = (e + 0.5) / N; double r = (f + (z * z) / (2 * N) + z * Math.sqrt((f / N) - (f * f / N) + (z * z / (4 * N * N)))) / (1 + (z * z) / N); return (r * N) - e; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
2,907
27.509804
74
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/lmt/LMTNode.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * LMTNode.java
 * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.lmt;

import java.util.Collections;
import java.util.Comparator;
import java.util.Vector;

import weka.classifiers.Evaluation;
import weka.classifiers.functions.SimpleLinearRegression;
import weka.classifiers.trees.j48.ClassifierSplitModel;
import weka.classifiers.trees.j48.ModelSelection;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.filters.Filter;
import weka.filters.supervised.attribute.NominalToBinary;

/**
 * Auxiliary class for list of LMTNodes: orders nodes by their
 * cost-complexity alpha value (ascending), so Collections.min picks the
 * node that should be pruned first.
 */
class CompareNode
  implements Comparator, RevisionHandler {

  /**
   * Compares its two arguments for order.
   *
   * @param o1 first object
   * @param o2 second object
   * @return a negative integer, zero, or a positive integer as the first
   *         argument is less than, equal to, or greater than the second.
   */
  public int compare(Object o1, Object o2) {
    if ( ((LMTNode)o1).m_alpha < ((LMTNode)o2).m_alpha) return -1;
    if ( ((LMTNode)o1).m_alpha > ((LMTNode)o2).m_alpha) return 1;
    return 0;
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}

/**
 * Class for logistic model tree structure.
 *
 *
 * @author Niels Landwehr
 * @author Marc Sumner
 * @version $Revision: 8034 $
 */
public class LMTNode
  extends LogisticBase {

  /** for serialization */
  static final long serialVersionUID = 1862737145870398755L;

  /** Total number of training instances. */
  protected double m_totalInstanceWeight;

  /** Node id*/
  protected int m_id;

  /** ID of logistic model at leaf*/
  protected int m_leafModelNum;

  /** Alpha-value (for pruning) at the node*/
  public double m_alpha;

  /** Weighted number of training examples currently misclassified by the
      logistic model at the node*/
  public double m_numIncorrectModel;

  /** Weighted number of training examples currently misclassified by the
      subtree rooted at the node*/
  public double m_numIncorrectTree;

  /**minimum number of instances at which a node is considered for splitting*/
  protected int m_minNumInstances;

  /**ModelSelection object (for splitting)*/
  protected ModelSelection m_modelSelection;

  /**Filter to convert nominal attributes to binary*/
  protected NominalToBinary m_nominalToBinary;

  /**Simple regression functions fit by LogitBoost at higher levels in the tree*/
  protected SimpleLinearRegression[][] m_higherRegressions;

  /**Number of simple regression functions fit by LogitBoost at higher levels in the tree*/
  protected int m_numHigherRegressions = 0;

  /**Number of folds for CART pruning*/
  protected static int m_numFoldsPruning = 5;

  /**Use heuristic that determines the number of LogitBoost iterations only
     once in the beginning?*/
  protected boolean m_fastRegression;

  /**Number of instances at the node*/
  protected int m_numInstances;

  /**The ClassifierSplitModel (for splitting)*/
  protected ClassifierSplitModel m_localModel;

  /**Array of children of the node*/
  protected LMTNode[] m_sons;

  /**True if node is leaf*/
  protected boolean m_isLeaf;

  /**
   * Constructor for logistic model tree node.
   *
   * @param modelSelection selection method for local splitting model
   * @param numBoostingIterations sets the numBoostingIterations parameter
   * @param fastRegression sets the fastRegression parameter
   * @param errorOnProbabilities Use error on probabilities for stopping criterion of LogitBoost?
   * @param minNumInstances minimum number of instances at which a node is considered for splitting
   * @param weightTrimBeta weight-trimming threshold passed on to LogitBoost
   * @param useAIC whether to use the AIC to choose the number of LogitBoost iterations
   */
  public LMTNode(ModelSelection modelSelection, int numBoostingIterations,
		 boolean fastRegression,
		 boolean errorOnProbabilities, int minNumInstances,
		 double weightTrimBeta, boolean useAIC) {
    m_modelSelection = modelSelection;
    m_fixedNumIterations = numBoostingIterations;
    m_fastRegression = fastRegression;
    m_errorOnProbabilities = errorOnProbabilities;
    m_minNumInstances = minNumInstances;
    m_maxIterations = 200;
    setWeightTrimBeta(weightTrimBeta);
    setUseAIC(useAIC);
  }

  /**
   * Method for building a logistic model tree (only called for the root node).
   * Grows an initial logistic model tree and prunes it back using the CART pruning scheme.
   *
   * @param data the data to train with
   * @throws Exception if something goes wrong
   */
  public void buildClassifier(Instances data) throws Exception{

    //heuristic to avoid cross-validating the number of LogitBoost iterations
    //at every node: build standalone logistic model and take its optimum number
    //of iteration everywhere in the tree.
    if (m_fastRegression && (m_fixedNumIterations < 0)) m_fixedNumIterations = tryLogistic(data);

    //Need to cross-validate alpha-parameter for CART-pruning
    Instances cvData = new Instances(data);
    cvData.stratify(m_numFoldsPruning);

    double[][] alphas = new double[m_numFoldsPruning][];
    double[][] errors = new double[m_numFoldsPruning][];

    for (int i = 0; i < m_numFoldsPruning; i++) {
      //for every fold, grow tree on training set...
      Instances train = cvData.trainCV(m_numFoldsPruning, i);
      Instances test = cvData.testCV(m_numFoldsPruning, i);

      buildTree(train, null, train.numInstances() , 0);

      int numNodes = getNumInnerNodes();
      alphas[i] = new double[numNodes + 2];
      errors[i] = new double[numNodes + 2];

      //... then prune back and log alpha-values and errors on test set
      prune(alphas[i], errors[i], test);
    }

    //build tree using all the data
    buildTree(data, null, data.numInstances(), 0);
    int numNodes = getNumInnerNodes();

    double[] treeAlphas = new double[numNodes + 2];

    //prune back and log alpha-values
    int iterations = prune(treeAlphas, null, null);

    double[] treeErrors = new double[numNodes + 2];

    for (int i = 0; i <= iterations; i++){
      //compute midpoint alphas
      double alpha = Math.sqrt(treeAlphas[i] * treeAlphas[i+1]);
      double error = 0;

      //compute error estimate for final trees from the midpoint-alphas and the error estimates gotten in
      //the cross-validation
      for (int k = 0; k < m_numFoldsPruning; k++) {
	int l = 0;
	while (alphas[k][l] <= alpha) l++;
	error += errors[k][l - 1];
      }

      treeErrors[i] = error;
    }

    //find best alpha
    int best = -1;
    double bestError = Double.MAX_VALUE;
    for (int i = iterations; i >= 0; i--) {
      if (treeErrors[i] < bestError) {
	bestError = treeErrors[i];
	best = i;
      }
    }

    double bestAlpha = Math.sqrt(treeAlphas[best] * treeAlphas[best + 1]);

    //"unprune" final tree (faster than regrowing it)
    unprune();

    //CART-prune it with best alpha
    prune(bestAlpha);
    cleanup();
  }

  /**
   * Method for building the tree structure.
   * Builds a logistic model, splits the node and recursively builds tree for child nodes.
   * @param data the training data passed on to this node
   * @param higherRegressions An array of regression functions produced by LogitBoost at higher
   * levels in the tree. They represent a logistic regression model that is refined locally
   * at this node.
   * @param totalInstanceWeight the total number of training examples
   * @param higherNumParameters effective number of parameters in the logistic regression model built
   * in parent nodes
   * @throws Exception if something goes wrong
   */
  public void buildTree(Instances data, SimpleLinearRegression[][] higherRegressions,
			double totalInstanceWeight, double higherNumParameters) throws Exception{

    //save some stuff
    m_totalInstanceWeight = totalInstanceWeight;
    m_train = new Instances(data);

    m_isLeaf = true;
    m_sons = null;

    m_numInstances = m_train.numInstances();
    m_numClasses = m_train.numClasses();

    //init
    m_numericData = getNumericData(m_train);
    m_numericDataHeader = new Instances(m_numericData, 0);

    m_regressions = initRegressions();
    m_numRegressions = 0;

    if (higherRegressions != null) m_higherRegressions = higherRegressions;
    else m_higherRegressions = new SimpleLinearRegression[m_numClasses][0];

    m_numHigherRegressions = m_higherRegressions[0].length;

    m_numParameters = higherNumParameters;

    //build logistic model
    if (m_numInstances >= m_numFoldsBoosting) {
      if (m_fixedNumIterations > 0){
	performBoosting(m_fixedNumIterations);
      } else if (getUseAIC()) {
	performBoostingInfCriterion();
      } else {
	performBoostingCV();
      }
    }

    m_numParameters += m_numRegressions;

    //only keep the simple regression functions that correspond to the selected number of LogitBoost iterations
    m_regressions = selectRegressions(m_regressions);

    boolean grow;

    //split node if more than minNumInstances...
    if (m_numInstances > m_minNumInstances) {
      //split node: either splitting on class value (a la C4.5) or splitting on residuals
      if (m_modelSelection instanceof ResidualModelSelection) {
	//need ps/Ys/Zs/weights
	double[][] probs = getProbs(getFs(m_numericData));
	double[][] trainYs = getYs(m_train);
	double[][] dataZs = getZs(probs, trainYs);
	double[][] dataWs = getWs(probs, trainYs);
	m_localModel = ((ResidualModelSelection)m_modelSelection).selectModel(m_train, dataZs, dataWs);
      } else {
	m_localModel = m_modelSelection.selectModel(m_train);
      }
      //... and valid split found
      grow = (m_localModel.numSubsets() > 1);
    } else {
      grow = false;
    }

    if (grow) {
      //create and build children of node
      m_isLeaf = false;
      Instances[] localInstances = m_localModel.split(m_train);
      m_sons = new LMTNode[m_localModel.numSubsets()];
      for (int i = 0; i < m_sons.length; i++) {
	m_sons[i] = new LMTNode(m_modelSelection, m_fixedNumIterations,
				m_fastRegression,
				m_errorOnProbabilities,m_minNumInstances,
				getWeightTrimBeta(), getUseAIC());
	//the "higherRegressions" (partial logistic model fit at higher levels in the tree) passed
	//on to the children are the "higherRegressions" at this node plus the regressions added
	//at this node (m_regressions).
	m_sons[i].buildTree(localInstances[i],
			    mergeArrays(m_regressions, m_higherRegressions),
			    m_totalInstanceWeight, m_numParameters);
	localInstances[i] = null;
      }
    }
  }

  /**
   * Prunes a logistic model tree using the CART pruning scheme, given a
   * cost-complexity parameter alpha.
   *
   * @param alpha the cost-complexity measure
   * @throws Exception if something goes wrong
   */
  public void prune(double alpha) throws Exception {

    Vector nodeList;
    CompareNode comparator = new CompareNode();

    //determine training error of logistic models and subtrees, and calculate alpha-values from them
    modelErrors();
    treeErrors();
    calculateAlphas();

    //get list of all inner nodes in the tree
    nodeList = getNodes();

    boolean prune = (nodeList.size() > 0);

    while (prune) {

      //select node with minimum alpha
      LMTNode nodeToPrune = (LMTNode)Collections.min(nodeList,comparator);

      //want to prune if its alpha is smaller than alpha
      if (nodeToPrune.m_alpha > alpha) break;

      nodeToPrune.m_isLeaf = true;
      nodeToPrune.m_sons = null;

      //update tree errors and alphas
      treeErrors();
      calculateAlphas();

      nodeList = getNodes();
      prune = (nodeList.size() > 0);
    }
  }

  /**
   * Method for performing one fold in the cross-validation of the cost-complexity parameter.
   * Generates a sequence of alpha-values with error estimates for the corresponding (partially pruned)
   * trees, given the test set of that fold.
   * @param alphas array to hold the generated alpha-values
   * @param errors array to hold the corresponding error estimates
   * @param test test set of that fold (to obtain error estimates)
   * @return the number of pruning steps performed
   * @throws Exception if something goes wrong
   */
  public int prune(double[] alphas, double[] errors, Instances test) throws Exception {

    Vector nodeList;

    CompareNode comparator = new CompareNode();

    //determine training error of logistic models and subtrees, and calculate alpha-values from them
    modelErrors();
    treeErrors();
    calculateAlphas();

    //get list of all inner nodes in the tree
    nodeList = getNodes();

    boolean prune = (nodeList.size() > 0);

    //alpha_0 is always zero (unpruned tree)
    alphas[0] = 0;

    Evaluation eval;

    //error of unpruned tree
    if (errors != null) {
      eval = new Evaluation(test);
      eval.evaluateModel(this, test);
      errors[0] = eval.errorRate();
    }

    int iteration = 0;
    while (prune) {

      iteration++;

      //get node with minimum alpha
      LMTNode nodeToPrune = (LMTNode)Collections.min(nodeList,comparator);

      nodeToPrune.m_isLeaf = true;
      //Do not set m_sons null, want to unprune

      //get alpha-value of node
      alphas[iteration] = nodeToPrune.m_alpha;

      //log error
      if (errors != null) {
	eval = new Evaluation(test);
	eval.evaluateModel(this, test);
	errors[iteration] = eval.errorRate();
      }

      //update errors/alphas
      treeErrors();
      calculateAlphas();

      nodeList = getNodes();
      prune = (nodeList.size() > 0);
    }

    //set last alpha 1 to indicate end
    alphas[iteration + 1] = 1.0;
    return iteration;
  }

  /**
   *Method to "unprune" a logistic model tree.
   *Sets all leaf-fields to false.
   *Faster than re-growing the tree because the logistic models do not have to be fit again.
   */
  protected void unprune() {
    if (m_sons != null) {
      m_isLeaf = false;
      for (int i = 0; i < m_sons.length; i++) m_sons[i].unprune();
    }
  }

  /**
   *Determines the optimum number of LogitBoost iterations to perform by building a standalone logistic
   *regression function on the training data. Used for the heuristic that avoids cross-validating this
   *number again at every node.
   *@param data training instances for the logistic model
   *@return the best number of LogitBoost iterations found
   *@throws Exception if something goes wrong
   */
  protected int tryLogistic(Instances data) throws Exception{

    //convert nominal attributes
    Instances filteredData = new Instances(data);
    NominalToBinary nominalToBinary = new NominalToBinary();
    nominalToBinary.setInputFormat(filteredData);
    filteredData = Filter.useFilter(filteredData, nominalToBinary);

    LogisticBase logistic = new LogisticBase(0,true,m_errorOnProbabilities);

    //limit LogitBoost to 200 iterations (speed)
    logistic.setMaxIterations(200);
    logistic.setWeightTrimBeta(getWeightTrimBeta()); // Not in Marc's code. Added by Eibe.
    logistic.setUseAIC(getUseAIC());
    logistic.buildClassifier(filteredData);

    //return best number of iterations
    return logistic.getNumRegressions();
  }

  /**
   * Method to count the number of inner nodes in the tree
   * @return the number of inner nodes
   */
  public int getNumInnerNodes(){
    if (m_isLeaf) return 0;
    int numNodes = 1;
    for (int i = 0; i < m_sons.length; i++) numNodes += m_sons[i].getNumInnerNodes();
    return numNodes;
  }

  /**
   * Returns the number of leaves in the tree.
   * Leaves are only counted if their logistic model has changed compared to the one of the parent node.
   * @return the number of leaves
   */
  public int getNumLeaves(){
    int numLeaves;
    if (!m_isLeaf) {
      numLeaves = 0;
      int numEmptyLeaves = 0;
      for (int i = 0; i < m_sons.length; i++) {
	numLeaves += m_sons[i].getNumLeaves();
	if (m_sons[i].m_isLeaf && !m_sons[i].hasModels()) numEmptyLeaves++;
      }
      //several empty leaves under one parent count as a single leaf
      if (numEmptyLeaves > 1) {
	numLeaves -= (numEmptyLeaves - 1);
      }
    } else {
      numLeaves = 1;
    }
    return numLeaves;
  }

  /**
   *Updates the numIncorrectModel field for all nodes. This is needed for calculating the alpha-values.
   *
   * @throws Exception if something goes wrong
   */
  public void modelErrors() throws Exception{
    Evaluation eval = new Evaluation(m_train);

    if (!m_isLeaf) {
      //temporarily mark the node as a leaf so evaluation uses the local
      //logistic model instead of the subtree
      m_isLeaf = true;
      eval.evaluateModel(this, m_train);
      m_isLeaf = false;
      m_numIncorrectModel = eval.incorrect();
      for (int i = 0; i < m_sons.length; i++) m_sons[i].modelErrors();
    } else {
      eval.evaluateModel(this, m_train);
      m_numIncorrectModel = eval.incorrect();
    }
  }

  /**
   *Updates the numIncorrectTree field for all nodes. This is needed for calculating the alpha-values.
   */
  public void treeErrors(){
    if (m_isLeaf) {
      m_numIncorrectTree = m_numIncorrectModel;
    } else {
      m_numIncorrectTree = 0;
      for (int i = 0; i < m_sons.length; i++) {
	m_sons[i].treeErrors();
	m_numIncorrectTree += m_sons[i].m_numIncorrectTree;
      }
    }
  }

  /**
   *Updates the alpha field for all nodes.
   *
   * @throws Exception if something goes wrong
   */
  public void calculateAlphas() throws Exception {

    if (!m_isLeaf) {
      double errorDiff = m_numIncorrectModel - m_numIncorrectTree;

      if (errorDiff <= 0) {
	//split increases training error (should not normally happen).
	//prune it instantly.
	m_isLeaf = true;
	m_sons = null;
	m_alpha = Double.MAX_VALUE;
      } else {
	//compute alpha
	errorDiff /= m_totalInstanceWeight;
	m_alpha = errorDiff / (double)(getNumLeaves() - 1);
	for (int i = 0; i < m_sons.length; i++) m_sons[i].calculateAlphas();
      }
    } else {
      //alpha = infinite for leaves (do not want to prune)
      m_alpha = Double.MAX_VALUE;
    }
  }

  /**
   * Merges two arrays of regression functions into one
   * @param a1 one array
   * @param a2 the other array
   *
   * @return an array that contains all entries from both input arrays
   */
  protected SimpleLinearRegression[][] mergeArrays(SimpleLinearRegression[][] a1,
						   SimpleLinearRegression[][] a2){

    int numModels1 = a1[0].length;
    int numModels2 = a2[0].length;

    SimpleLinearRegression[][] result =
      new SimpleLinearRegression[m_numClasses][numModels1 + numModels2];

    for (int i = 0; i < m_numClasses; i++)
      for (int j = 0; j < numModels1; j++) {
	result[i][j] = a1[i][j];
      }
    for (int i = 0; i < m_numClasses; i++)
      for (int j = 0; j < numModels2; j++) result[i][j+numModels1] = a2[i][j];
    return result;
  }

  /**
   * Return a list of all inner nodes in the tree
   * @return the list of nodes
   */
  public Vector getNodes(){
    Vector nodeList = new Vector();
    getNodes(nodeList);
    return nodeList;
  }

  /**
   * Fills a list with all inner nodes in the tree
   *
   * @param nodeList the list to be filled
   */
  public void getNodes(Vector nodeList) {
    if (!m_isLeaf) {
      nodeList.add(this);
      for (int i = 0; i < m_sons.length; i++) m_sons[i].getNodes(nodeList);
    }
  }

  /**
   * Returns a numeric version of a set of instances.
   * All nominal attributes are replaced by binary ones, and the class variable is replaced
   * by a pseudo-class variable that is used by LogitBoost.
   *
   * @param train the instances to convert
   * @return the converted instances
   * @throws Exception if something goes wrong
   */
  protected Instances getNumericData(Instances train) throws Exception{

    Instances filteredData = new Instances(train);
    m_nominalToBinary = new NominalToBinary();
    m_nominalToBinary.setInputFormat(filteredData);
    filteredData = Filter.useFilter(filteredData, m_nominalToBinary);

    return super.getNumericData(filteredData);
  }

  /**
   * Computes the F-values of LogitBoost for an instance from the current logistic model at the node
   * Note that this also takes into account the (partial) logistic model fit at higher levels in
   * the tree.
   * @param instance the instance
   * @return the array of F-values
   * @throws Exception if something goes wrong
   */
  protected double[] getFs(Instance instance) throws Exception{

    double [] pred = new double [m_numClasses];

    //Need to take into account partial model fit at higher levels in the tree (m_higherRegressions)
    //and the part of the model fit at this node (m_regressions).

    //Fs from m_regressions (use method of LogisticBase)
    double [] instanceFs = super.getFs(instance);

    //Fs from m_higherRegressions
    for (int i = 0; i < m_numHigherRegressions; i++) {
      double predSum = 0;
      for (int j = 0; j < m_numClasses; j++) {
	pred[j] = m_higherRegressions[j][i].classifyInstance(instance);
	predSum += pred[j];
      }
      predSum /= m_numClasses;
      for (int j = 0; j < m_numClasses; j++) {
	instanceFs[j] += (pred[j] - predSum) * (m_numClasses - 1)
	  / m_numClasses;
      }
    }
    return instanceFs;
  }

  /**
   *Returns true if the logistic regression model at this node has changed compared to the
   *one at the parent node.
   *@return whether it has changed
   */
  public boolean hasModels() {
    return (m_numRegressions > 0);
  }

  /**
   * Returns the class probabilities for an instance according to the logistic model at the node.
   * @param instance the instance
   * @return the array of probabilities
   * @throws Exception if something goes wrong
   */
  public double[] modelDistributionForInstance(Instance instance) throws Exception {

    //make copy and convert nominal attributes
    instance = (Instance)instance.copy();
    m_nominalToBinary.input(instance);
    instance = m_nominalToBinary.output();

    //set numeric pseudo-class
    instance.setDataset(m_numericDataHeader);

    return probs(getFs(instance));
  }

  /**
   * Returns the class probabilities for an instance given by the logistic model tree.
   * @param instance the instance
   * @return the array of probabilities
   * @throws Exception if something goes wrong
   */
  public double[] distributionForInstance(Instance instance) throws Exception {

    double[] probs;

    if (m_isLeaf) {
      //leaf: use logistic model
      probs = modelDistributionForInstance(instance);
    } else {
      //sort into appropriate child node
      int branch = m_localModel.whichSubset(instance);
      probs = m_sons[branch].distributionForInstance(instance);
    }
    return probs;
  }

  /**
   * Returns the number of leaves (normal count).
   * @return the number of leaves
   */
  public int numLeaves() {
    if (m_isLeaf) return 1;
    int numLeaves = 0;
    for (int i = 0; i < m_sons.length; i++) numLeaves += m_sons[i].numLeaves();
    return numLeaves;
  }

  /**
   * Returns the number of nodes.
   * @return the number of nodes
   */
  public int numNodes() {
    if (m_isLeaf) return 1;
    int numNodes = 1;
    for (int i = 0; i < m_sons.length; i++) numNodes += m_sons[i].numNodes();
    return numNodes;
  }

  /**
   * Returns a description of the logistic model tree (tree structure and logistic models)
   * @return describing string
   */
  public String toString(){

    //assign numbers to logistic regression functions at leaves
    assignLeafModelNumbers(0);

    try{
      StringBuffer text = new StringBuffer();

      if (m_isLeaf) {
	text.append(": ");
	text.append("LM_"+m_leafModelNum+":"+getModelParameters());
      } else {
	dumpTree(0,text);
      }
      text.append("\n\nNumber of Leaves : \t"+numLeaves()+"\n");
      text.append("\nSize of the Tree : \t"+numNodes()+"\n");

      //This prints logistic models after the tree, comment out if only tree should be printed
      text.append(modelsToString());
      return text.toString();
    } catch (Exception e){
      return "Can't print logistic model tree";
    }
  }

  /**
   * Returns a string describing the number of LogitBoost iterations performed at this node, the total number
   * of LogitBoost iterations performed (including iterations at higher levels in the tree), and the number
   * of training examples at this node.
   * @return the describing string
   */
  public String getModelParameters(){

    StringBuffer text = new StringBuffer();
    int numModels = m_numRegressions+m_numHigherRegressions;
    text.append(m_numRegressions+"/"+numModels+" ("+m_numInstances+")");
    return text.toString();
  }

  /**
   * Help method for printing tree structure.
   *
   * @param depth the current indentation depth
   * @param text the buffer the description is appended to
   * @throws Exception if something goes wrong
   */
  protected void dumpTree(int depth,StringBuffer text)
    throws Exception {

    for (int i = 0; i < m_sons.length; i++) {
      text.append("\n");
      for (int j = 0; j < depth; j++)
	text.append("| ");
      text.append(m_localModel.leftSide(m_train));
      text.append(m_localModel.rightSide(i, m_train));
      if (m_sons[i].m_isLeaf) {
	text.append(": ");
	text.append("LM_"+m_sons[i].m_leafModelNum+":"+m_sons[i].getModelParameters());
      }else
	m_sons[i].dumpTree(depth+1,text);
    }
  }

  /**
   * Assigns unique IDs to all nodes in the tree
   *
   * @param lastID the last ID assigned so far
   * @return the last ID assigned after processing this subtree
   */
  public int assignIDs(int lastID) {

    int currLastID = lastID + 1;

    m_id = currLastID;

    if (m_sons != null) {
      for (int i = 0; i < m_sons.length; i++) {
	currLastID = m_sons[i].assignIDs(currLastID);
      }
    }
    return currLastID;
  }

  /**
   * Assigns numbers to the logistic regression models at the leaves of the tree
   *
   * @param leafCounter the number of leaves numbered so far
   * @return the updated leaf counter
   */
  public int assignLeafModelNumbers(int leafCounter) {
    if (!m_isLeaf) {
      m_leafModelNum = 0;
      for (int i = 0; i < m_sons.length; i++){
	leafCounter = m_sons[i].assignLeafModelNumbers(leafCounter);
      }
    } else {
      leafCounter++;
      m_leafModelNum = leafCounter;
    }
    return leafCounter;
  }

  /**
   * Returns an array containing the coefficients of the logistic regression function at this node.
   * @return the array of coefficients, first dimension is the class, second the attribute.
   */
  protected double[][] getCoefficients(){

    //Need to take into account partial model fit at higher levels in the tree (m_higherRegressions)
    //and the part of the model fit at this node (m_regressions).

    //get coefficients from m_regressions: use method of LogisticBase
    double[][] coefficients = super.getCoefficients();
    //get coefficients from m_higherRegressions:
    double constFactor = (double)(m_numClasses - 1) / (double)m_numClasses; // (J - 1)/J
    for (int j = 0; j < m_numClasses; j++) {
      for (int i = 0; i < m_numHigherRegressions; i++) {
	double slope = m_higherRegressions[j][i].getSlope();
	double intercept = m_higherRegressions[j][i].getIntercept();
	int attribute = m_higherRegressions[j][i].getAttributeIndex();
	coefficients[j][0] += constFactor * intercept;
	coefficients[j][attribute + 1] += constFactor * slope;
      }
    }

    return coefficients;
  }

  /**
   * Returns a string describing the logistic regression function at the node.
   *
   * @return the describing string
   */
  public String modelsToString(){

    StringBuffer text = new StringBuffer();
    if (m_isLeaf) {
      text.append("LM_"+m_leafModelNum+":"+super.toString());
    } else {
      for (int i = 0; i < m_sons.length; i++) {
	text.append("\n"+m_sons[i].modelsToString());
      }
    }
    return text.toString();
  }

  /**
   * Returns graph describing the tree.
   *
   * @return the graph in dot format
   * @throws Exception if something goes wrong
   */
  public String graph() throws Exception {

    StringBuffer text = new StringBuffer();

    assignIDs(-1);
    assignLeafModelNumbers(0);
    text.append("digraph LMTree {\n");
    if (m_isLeaf) {
      text.append("N" + m_id + " [label=\"LM_"+m_leafModelNum+":"+getModelParameters()+"\" " +
		  "shape=box style=filled");
      text.append("]\n");
    }else {
      text.append("N" + m_id
		  + " [label=\"" +
		  m_localModel.leftSide(m_train) + "\" ");
      text.append("]\n");
      graphTree(text);
    }

    return text.toString() +"}\n";
  }

  /**
   * Helper function for graph description of tree
   *
   * @param text the buffer the dot description is appended to
   * @throws Exception if something goes wrong
   */
  private void graphTree(StringBuffer text) throws Exception {

    for (int i = 0; i < m_sons.length; i++) {
      text.append("N" + m_id
		  + "->" +
		  "N" + m_sons[i].m_id +
		  " [label=\"" + m_localModel.rightSide(i,m_train).trim() +
		  "\"]\n");
      if (m_sons[i].m_isLeaf) {
	text.append("N" +m_sons[i].m_id + " [label=\"LM_"+m_sons[i].m_leafModelNum+":"+
		    m_sons[i].getModelParameters()+"\" " + "shape=box style=filled");
	text.append("]\n");
      } else {
	text.append("N" + m_sons[i].m_id +
		    " [label=\""+m_sons[i].m_localModel.leftSide(m_train) +
		    "\" ");
	text.append("]\n");
	m_sons[i].graphTree(text);
      }
    }
  }

  /**
   * Cleanup in order to save memory.
   */
  public void cleanup() {
    super.cleanup();
    if (!m_isLeaf) {
      for (int i = 0; i < m_sons.length; i++) m_sons[i].cleanup();
    }
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
29,859
30.136601
111
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/lmt/LogisticBase.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * LogisticBase.java * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.lmt; import weka.classifiers.AbstractClassifier; import weka.classifiers.Evaluation; import weka.classifiers.functions.SimpleLinearRegression; import weka.core.Attribute; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * Base/helper class for building logistic regression models with the LogitBoost algorithm. * Used for building logistic model trees (weka.classifiers.trees.lmt.LMT) * and standalone logistic regression (weka.classifiers.functions.SimpleLogistic). * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * If set, classifier is run in debug mode and * may output additional info to the console</pre> * <!-- options-end --> * * @author Niels Landwehr * @author Marc Sumner * @version $Revision: 8034 $ */ public class LogisticBase extends AbstractClassifier implements WeightedInstancesHandler { /** for serialization */ static final long serialVersionUID = 168765678097825064L; /** Header-only version of the numeric version of the training data*/ protected Instances m_numericDataHeader; /** * Numeric version of the training data. Original class is replaced by a numeric pseudo-class. 
*/ protected Instances m_numericData; /** Training data */ protected Instances m_train; /** Use cross-validation to determine best number of LogitBoost iterations ?*/ protected boolean m_useCrossValidation; /**Use error on probabilities for stopping criterion of LogitBoost? */ protected boolean m_errorOnProbabilities; /**Use fixed number of iterations for LogitBoost? (if negative, cross-validate number of iterations)*/ protected int m_fixedNumIterations; /**Use heuristic to stop performing LogitBoost iterations earlier? * If enabled, LogitBoost is stopped if the current (local) minimum of the error on a test set as * a function of the number of iterations has not changed for m_heuristicStop iterations. */ protected int m_heuristicStop = 50; /**The number of LogitBoost iterations performed.*/ protected int m_numRegressions = 0; /**The maximum number of LogitBoost iterations*/ protected int m_maxIterations; /**The number of different classes*/ protected int m_numClasses; /**Array holding the simple regression functions fit by LogitBoost*/ protected SimpleLinearRegression[][] m_regressions; /**Number of folds for cross-validating number of LogitBoost iterations*/ protected static int m_numFoldsBoosting = 5; /**Threshold on the Z-value for LogitBoost*/ protected static final double Z_MAX = 3; /** If true, the AIC is used to choose the best iteration*/ private boolean m_useAIC = false; /** Effective number of parameters used for AIC / BIC automatic stopping */ protected double m_numParameters = 0; /**Threshold for trimming weights. Instances with a weight lower than this (as a percentage * of total weights) are not included in the regression fit. **/ protected double m_weightTrimBeta = 0; /** * Constructor that creates LogisticBase object with standard options. 
*/ public LogisticBase(){ m_fixedNumIterations = -1; m_useCrossValidation = true; m_errorOnProbabilities = false; m_maxIterations = 500; m_useAIC = false; m_numParameters = 0; } /** * Constructor to create LogisticBase object. * @param numBoostingIterations fixed number of iterations for LogitBoost (if negative, use cross-validation or * stopping criterion on the training data). * @param useCrossValidation cross-validate number of LogitBoost iterations (if false, use stopping * criterion on the training data). * @param errorOnProbabilities if true, use error on probabilities * instead of misclassification for stopping criterion of LogitBoost */ public LogisticBase(int numBoostingIterations, boolean useCrossValidation, boolean errorOnProbabilities){ m_fixedNumIterations = numBoostingIterations; m_useCrossValidation = useCrossValidation; m_errorOnProbabilities = errorOnProbabilities; m_maxIterations = 500; m_useAIC = false; m_numParameters = 0; } /** * Builds the logistic regression model usiing LogitBoost. * * @param data the training data * @throws Exception if something goes wrong */ public void buildClassifier(Instances data) throws Exception { m_train = new Instances(data); m_numClasses = m_train.numClasses(); //init the array of simple regression functions m_regressions = initRegressions(); m_numRegressions = 0; //get numeric version of the training data (class variable replaced by numeric pseudo-class) m_numericData = getNumericData(m_train); //save header info m_numericDataHeader = new Instances(m_numericData, 0); if (m_fixedNumIterations > 0) { //run LogitBoost for fixed number of iterations performBoosting(m_fixedNumIterations); } else if (m_useAIC) { // Marc had this after the test for m_useCrossValidation. Changed by Eibe. 
//run LogitBoost using information criterion for stopping performBoostingInfCriterion(); } else if (m_useCrossValidation) { //cross-validate number of LogitBoost iterations performBoostingCV(); } else { //run LogitBoost with number of iterations that minimizes error on the training set performBoosting(); } //only keep the simple regression functions that correspond to the selected number of LogitBoost iterations m_regressions = selectRegressions(m_regressions); } /** * Runs LogitBoost, determining the best number of iterations by cross-validation. * * @throws Exception if something goes wrong */ protected void performBoostingCV() throws Exception{ //completed iteration keeps track of the number of iterations that have been //performed in every fold (some might stop earlier than others). //Best iteration is selected only from these. int completedIterations = m_maxIterations; Instances allData = new Instances(m_train); allData.stratify(m_numFoldsBoosting); double[] error = new double[m_maxIterations + 1]; for (int i = 0; i < m_numFoldsBoosting; i++) { //split into training/test data in fold Instances train = allData.trainCV(m_numFoldsBoosting,i); Instances test = allData.testCV(m_numFoldsBoosting,i); //initialize LogitBoost m_numRegressions = 0; m_regressions = initRegressions(); //run LogitBoost iterations int iterations = performBoosting(train,test,error,completedIterations); if (iterations < completedIterations) completedIterations = iterations; } //determine iteration with minimum error over the folds int bestIteration = getBestIteration(error,completedIterations); //rebuild model on all of the training data m_numRegressions = 0; performBoosting(bestIteration); } /** * Runs LogitBoost, determining the best number of iterations by an information criterion (currently AIC). 
*/ protected void performBoostingInfCriterion() throws Exception{ double criterion = 0.0; double bestCriterion = Double.MAX_VALUE; int bestIteration = 0; int noMin = 0; // Variable to keep track of criterion values (AIC) double criterionValue = Double.MAX_VALUE; // initialize Ys/Fs/ps double[][] trainYs = getYs(m_train); double[][] trainFs = getFs(m_numericData); double[][] probs = getProbs(trainFs); // Array with true/false if the attribute is included in the model or not boolean[][] attributes = new boolean[m_numClasses][m_numericDataHeader.numAttributes()]; int iteration = 0; while (iteration < m_maxIterations) { //perform single LogitBoost iteration boolean foundAttribute = performIteration(iteration, trainYs, trainFs, probs, m_numericData); if (foundAttribute) { iteration++; m_numRegressions = iteration; } else { //could not fit simple linear regression: stop LogitBoost break; } double numberOfAttributes = m_numParameters + iteration; // Fill criterion array values criterionValue = 2.0 * negativeLogLikelihood(trainYs, probs) + 2.0 * numberOfAttributes; //heuristic: stop LogitBoost if the current minimum has not changed for <m_heuristicStop> iterations if (noMin > m_heuristicStop) break; if (criterionValue < bestCriterion) { bestCriterion = criterionValue; bestIteration = iteration; noMin = 0; } else { noMin++; } } m_numRegressions = 0; performBoosting(bestIteration); } /** * Runs LogitBoost on a training set and monitors the error on a test set. * Used for running one fold when cross-validating the number of LogitBoost iterations. * @param train the training set * @param test the test set * @param error array to hold the logged error values * @param maxIterations the maximum number of LogitBoost iterations to run * @return the number of completed LogitBoost iterations (can be smaller than maxIterations * if the heuristic for early stopping is active or there is a problem while fitting the regressions * in LogitBoost). 
* @throws Exception if something goes wrong */ protected int performBoosting(Instances train, Instances test, double[] error, int maxIterations) throws Exception{ //get numeric version of the (sub)set of training instances Instances numericTrain = getNumericData(train); //initialize Ys/Fs/ps double[][] trainYs = getYs(train); double[][] trainFs = getFs(numericTrain); double[][] probs = getProbs(trainFs); int iteration = 0; int noMin = 0; double lastMin = Double.MAX_VALUE; if (m_errorOnProbabilities) error[0] += getMeanAbsoluteError(test); else error[0] += getErrorRate(test); while (iteration < maxIterations) { //perform single LogitBoost iteration boolean foundAttribute = performIteration(iteration, trainYs, trainFs, probs, numericTrain); if (foundAttribute) { iteration++; m_numRegressions = iteration; } else { //could not fit simple linear regression: stop LogitBoost break; } if (m_errorOnProbabilities) error[iteration] += getMeanAbsoluteError(test); else error[iteration] += getErrorRate(test); //heuristic: stop LogitBoost if the current minimum has not changed for <m_heuristicStop> iterations if (noMin > m_heuristicStop) break; if (error[iteration] < lastMin) { lastMin = error[iteration]; noMin = 0; } else { noMin++; } } return iteration; } /** * Runs LogitBoost with a fixed number of iterations. * @param numIterations the number of iterations to run * @throws Exception if something goes wrong */ protected void performBoosting(int numIterations) throws Exception{ //initialize Ys/Fs/ps double[][] trainYs = getYs(m_train); double[][] trainFs = getFs(m_numericData); double[][] probs = getProbs(trainFs); int iteration = 0; //run iterations while (iteration < numIterations) { boolean foundAttribute = performIteration(iteration, trainYs, trainFs, probs, m_numericData); if (foundAttribute) iteration++; else break; } m_numRegressions = iteration; } /** * Runs LogitBoost using the stopping criterion on the training set. 
* The number of iterations is used that gives the lowest error on the training set, either misclassification * or error on probabilities (depending on the errorOnProbabilities option). * @throws Exception if something goes wrong */ protected void performBoosting() throws Exception{ //initialize Ys/Fs/ps double[][] trainYs = getYs(m_train); double[][] trainFs = getFs(m_numericData); double[][] probs = getProbs(trainFs); int iteration = 0; double[] trainErrors = new double[m_maxIterations+1]; trainErrors[0] = getErrorRate(m_train); int noMin = 0; double lastMin = Double.MAX_VALUE; while (iteration < m_maxIterations) { boolean foundAttribute = performIteration(iteration, trainYs, trainFs, probs, m_numericData); if (foundAttribute) { iteration++; m_numRegressions = iteration; } else { //could not fit simple regression break; } trainErrors[iteration] = getErrorRate(m_train); //heuristic: stop LogitBoost if the current minimum has not changed for <m_heuristicStop> iterations if (noMin > m_heuristicStop) break; if (trainErrors[iteration] < lastMin) { lastMin = trainErrors[iteration]; noMin = 0; } else { noMin++; } } //find iteration with best error m_numRegressions = getBestIteration(trainErrors, iteration); } /** * Returns the misclassification error of the current model on a set of instances. * @param data the set of instances * @return the error rate * @throws Exception if something goes wrong */ protected double getErrorRate(Instances data) throws Exception { Evaluation eval = new Evaluation(data); eval.evaluateModel(this,data); return eval.errorRate(); } /** * Returns the error of the probability estimates for the current model on a set of instances. 
* @param data the set of instances * @return the error * @throws Exception if something goes wrong */ protected double getMeanAbsoluteError(Instances data) throws Exception { Evaluation eval = new Evaluation(data); eval.evaluateModel(this,data); return eval.meanAbsoluteError(); } /** * Helper function to find the minimum in an array of error values. * * @param errors an array containing errors * @param maxIteration the maximum of iterations * @return the minimum */ protected int getBestIteration(double[] errors, int maxIteration) { double bestError = errors[0]; int bestIteration = 0; for (int i = 1; i <= maxIteration; i++) { if (errors[i] < bestError) { bestError = errors[i]; bestIteration = i; } } return bestIteration; } /** * Performs a single iteration of LogitBoost, and updates the model accordingly. * A simple regression function is fit to the response and added to the m_regressions array. * @param iteration the current iteration * @param trainYs the y-values (see description of LogitBoost) for the model trained so far * @param trainFs the F-values (see description of LogitBoost) for the model trained so far * @param probs the p-values (see description of LogitBoost) for the model trained so far * @param trainNumeric numeric version of the training data * @return returns true if iteration performed successfully, false if no simple regression function * could be fitted. 
* @throws Exception if something goes wrong */ protected boolean performIteration(int iteration, double[][] trainYs, double[][] trainFs, double[][] probs, Instances trainNumeric) throws Exception { for (int j = 0; j < m_numClasses; j++) { // Keep track of sum of weights double[] weights = new double[trainNumeric.numInstances()]; double weightSum = 0.0; //make copy of data (need to save the weights) Instances boostData = new Instances(trainNumeric); for (int i = 0; i < trainNumeric.numInstances(); i++) { //compute response and weight double p = probs[i][j]; double actual = trainYs[i][j]; double z = getZ(actual, p); double w = (actual - p) / z; //set values for instance Instance current = boostData.instance(i); current.setValue(boostData.classIndex(), z); current.setWeight(current.weight() * w); weights[i] = current.weight(); weightSum += current.weight(); } Instances instancesCopy = new Instances(boostData); if (weightSum > 0) { // Only the (1-beta)th quantile of instances are sent to the base classifier if (m_weightTrimBeta > 0) { double weightPercentage = 0.0; int[] weightsOrder = new int[trainNumeric.numInstances()]; weightsOrder = Utils.sort(weights); instancesCopy.delete(); for (int i = weightsOrder.length-1; (i >= 0) && (weightPercentage < (1-m_weightTrimBeta)); i--) { instancesCopy.add(boostData.instance(weightsOrder[i])); weightPercentage += (weights[weightsOrder[i]] / weightSum); } } //Scale the weights weightSum = instancesCopy.sumOfWeights(); for (int i = 0; i < instancesCopy.numInstances(); i++) { Instance current = instancesCopy.instance(i); current.setWeight(current.weight() * (double)instancesCopy.numInstances() / weightSum); } } //fit simple regression function m_regressions[j][iteration].buildClassifier(instancesCopy); boolean foundAttribute = m_regressions[j][iteration].foundUsefulAttribute(); if (!foundAttribute) { //could not fit simple regression function return false; } } // Evaluate / increment trainFs from the classifier for (int i = 0; i < 
trainFs.length; i++) { double [] pred = new double [m_numClasses]; double predSum = 0; for (int j = 0; j < m_numClasses; j++) { pred[j] = m_regressions[j][iteration] .classifyInstance(trainNumeric.instance(i)); predSum += pred[j]; } predSum /= m_numClasses; for (int j = 0; j < m_numClasses; j++) { trainFs[i][j] += (pred[j] - predSum) * (m_numClasses - 1) / m_numClasses; } } // Compute the current probability estimates for (int i = 0; i < trainYs.length; i++) { probs[i] = probs(trainFs[i]); } return true; } /** * Helper function to initialize m_regressions. * * @return the generated classifiers */ protected SimpleLinearRegression[][] initRegressions(){ SimpleLinearRegression[][] classifiers = new SimpleLinearRegression[m_numClasses][m_maxIterations]; for (int j = 0; j < m_numClasses; j++) { for (int i = 0; i < m_maxIterations; i++) { classifiers[j][i] = new SimpleLinearRegression(); classifiers[j][i].setSuppressErrorMessage(true); } } return classifiers; } /** * Converts training data to numeric version. The class variable is replaced by a pseudo-class * used by LogitBoost. * * @param data the data to convert * @return the converted data * @throws Exception if something goes wrong */ protected Instances getNumericData(Instances data) throws Exception{ Instances numericData = new Instances(data); int classIndex = numericData.classIndex(); numericData.setClassIndex(-1); numericData.deleteAttributeAt(classIndex); numericData.insertAttributeAt(new Attribute("'pseudo class'"), classIndex); numericData.setClassIndex(classIndex); return numericData; } /** * Helper function for cutting back m_regressions to the set of classifiers * (corresponsing to the number of LogitBoost iterations) that gave the * smallest error. 
* * @param classifiers the original set of classifiers * @return the cut back set of classifiers */ protected SimpleLinearRegression[][] selectRegressions(SimpleLinearRegression[][] classifiers){ SimpleLinearRegression[][] goodClassifiers = new SimpleLinearRegression[m_numClasses][m_numRegressions]; for (int j = 0; j < m_numClasses; j++) { for (int i = 0; i < m_numRegressions; i++) { goodClassifiers[j][i] = classifiers[j][i]; } } return goodClassifiers; } /** * Computes the LogitBoost response variable from y/p values * (actual/estimated class probabilities). * * @param actual the actual class probability * @param p the estimated class probability * @return the LogitBoost response */ protected double getZ(double actual, double p) { double z; if (actual == 1) { z = 1.0 / p; if (z > Z_MAX) { // threshold z = Z_MAX; } } else { z = -1.0 / (1.0 - p); if (z < -Z_MAX) { // threshold z = -Z_MAX; } } return z; } /** * Computes the LogitBoost response for an array of y/p values * (actual/estimated class probabilities). * * @param dataYs the actual class probabilities * @param probs the estimated class probabilities * @return the LogitBoost response */ protected double[][] getZs(double[][] probs, double[][] dataYs) { double[][] dataZs = new double[probs.length][m_numClasses]; for (int j = 0; j < m_numClasses; j++) for (int i = 0; i < probs.length; i++) dataZs[i][j] = getZ(dataYs[i][j], probs[i][j]); return dataZs; } /** * Computes the LogitBoost weights from an array of y/p values * (actual/estimated class probabilities). 
* * @param dataYs the actual class probabilities * @param probs the estimated class probabilities * @return the LogitBoost weights */ protected double[][] getWs(double[][] probs, double[][] dataYs) { double[][] dataWs = new double[probs.length][m_numClasses]; for (int j = 0; j < m_numClasses; j++) for (int i = 0; i < probs.length; i++){ double z = getZ(dataYs[i][j], probs[i][j]); dataWs[i][j] = (dataYs[i][j] - probs[i][j]) / z; } return dataWs; } /** * Computes the p-values (probabilities for the classes) from the F-values * of the logistic model. * * @param Fs the F-values * @return the p-values */ protected double[] probs(double[] Fs) { double maxF = -Double.MAX_VALUE; for (int i = 0; i < Fs.length; i++) { if (Fs[i] > maxF) { maxF = Fs[i]; } } double sum = 0; double[] probs = new double[Fs.length]; for (int i = 0; i < Fs.length; i++) { probs[i] = Math.exp(Fs[i] - maxF); sum += probs[i]; } Utils.normalize(probs, sum); return probs; } /** * Computes the Y-values (actual class probabilities) for a set of instances. * * @param data the data to compute the Y-values from * @return the Y-values */ protected double[][] getYs(Instances data){ double [][] dataYs = new double [data.numInstances()][m_numClasses]; for (int j = 0; j < m_numClasses; j++) { for (int k = 0; k < data.numInstances(); k++) { dataYs[k][j] = (data.instance(k).classValue() == j) ? 1.0: 0.0; } } return dataYs; } /** * Computes the F-values for a single instance. 
* * @param instance the instance to compute the F-values for * @return the F-values * @throws Exception if something goes wrong */ protected double[] getFs(Instance instance) throws Exception{ double [] pred = new double [m_numClasses]; double [] instanceFs = new double [m_numClasses]; //add up the predictions from the simple regression functions for (int i = 0; i < m_numRegressions; i++) { double predSum = 0; for (int j = 0; j < m_numClasses; j++) { pred[j] = m_regressions[j][i].classifyInstance(instance); predSum += pred[j]; } predSum /= m_numClasses; for (int j = 0; j < m_numClasses; j++) { instanceFs[j] += (pred[j] - predSum) * (m_numClasses - 1) / m_numClasses; } } return instanceFs; } /** * Computes the F-values for a set of instances. * * @param data the data to work on * @return the F-values * @throws Exception if something goes wrong */ protected double[][] getFs(Instances data) throws Exception{ double[][] dataFs = new double[data.numInstances()][]; for (int k = 0; k < data.numInstances(); k++) { dataFs[k] = getFs(data.instance(k)); } return dataFs; } /** * Computes the p-values (probabilities for the different classes) from * the F-values for a set of instances. * * @param dataFs the F-values * @return the p-values */ protected double[][] getProbs(double[][] dataFs){ int numInstances = dataFs.length; double[][] probs = new double[numInstances][]; for (int k = 0; k < numInstances; k++) { probs[k] = probs(dataFs[k]); } return probs; } /** * Returns the negative loglikelihood of the Y-values (actual class probabilities) given the * p-values (current probability estimates). 
* * @param dataYs the Y-values * @param probs the p-values * @return the likelihood */ protected double negativeLogLikelihood(double[][] dataYs, double[][] probs) { double logLikelihood = 0; for (int i = 0; i < dataYs.length; i++) { for (int j = 0; j < m_numClasses; j++) { if (dataYs[i][j] == 1.0) { logLikelihood -= Math.log(probs[i][j]); } } } return logLikelihood;// / (double)dataYs.length; } /** * Returns an array of the indices of the attributes used in the logistic model. * The first dimension is the class, the second dimension holds a list of attribute indices. * Attribute indices start at zero. * @return the array of attribute indices */ public int[][] getUsedAttributes(){ int[][] usedAttributes = new int[m_numClasses][]; //first extract coefficients double[][] coefficients = getCoefficients(); for (int j = 0; j < m_numClasses; j++){ //boolean array indicating if attribute used boolean[] attributes = new boolean[m_numericDataHeader.numAttributes()]; for (int i = 0; i < attributes.length; i++) { //attribute used if coefficient > 0 if (!Utils.eq(coefficients[j][i + 1],0)) attributes[i] = true; } int numAttributes = 0; for (int i = 0; i < m_numericDataHeader.numAttributes(); i++) if (attributes[i]) numAttributes++; //"collect" all attributes into array of indices int[] usedAttributesClass = new int[numAttributes]; int count = 0; for (int i = 0; i < m_numericDataHeader.numAttributes(); i++) { if (attributes[i]) { usedAttributesClass[count] = i; count++; } } usedAttributes[j] = usedAttributesClass; } return usedAttributes; } /** * The number of LogitBoost iterations performed (= the number of simple * regression functions fit). * * @return the number of LogitBoost iterations performed */ public int getNumRegressions() { return m_numRegressions; } /** * Get the value of weightTrimBeta. * * @return Value of weightTrimBeta. */ public double getWeightTrimBeta(){ return m_weightTrimBeta; } /** * Get the value of useAIC. * * @return Value of useAIC. 
*/ public boolean getUseAIC(){ return m_useAIC; } /** * Sets the parameter "maxIterations". * * @param maxIterations the maximum iterations */ public void setMaxIterations(int maxIterations) { m_maxIterations = maxIterations; } /** * Sets the option "heuristicStop". * * @param heuristicStop the heuristic stop to use */ public void setHeuristicStop(int heuristicStop){ m_heuristicStop = heuristicStop; } /** * Sets the option "weightTrimBeta". */ public void setWeightTrimBeta(double w){ m_weightTrimBeta = w; } /** * Set the value of useAIC. * * @param c Value to assign to useAIC. */ public void setUseAIC(boolean c){ m_useAIC = c; } /** * Returns the maxIterations parameter. * * @return the maximum iteration */ public int getMaxIterations(){ return m_maxIterations; } /** * Returns an array holding the coefficients of the logistic model. * First dimension is the class, the second one holds a list of coefficients. * At position zero, the constant term of the model is stored, then, the coefficients for * the attributes in ascending order. * @return the array of coefficients */ protected double[][] getCoefficients(){ double[][] coefficients = new double[m_numClasses][m_numericDataHeader.numAttributes() + 1]; for (int j = 0; j < m_numClasses; j++) { //go through simple regression functions and add their coefficient to the coefficient of //the attribute they are built on. 
for (int i = 0; i < m_numRegressions; i++) { double slope = m_regressions[j][i].getSlope(); double intercept = m_regressions[j][i].getIntercept(); int attribute = m_regressions[j][i].getAttributeIndex(); coefficients[j][0] += intercept; coefficients[j][attribute + 1] += slope; } } // Need to multiply all coefficients by (J-1) / J for (int j = 0; j < coefficients.length; j++) { for (int i = 0; i < coefficients[0].length; i++) { coefficients[j][i] *= (double)(m_numClasses - 1) / (double)m_numClasses; } } return coefficients; } /** * Returns the fraction of all attributes in the data that are used in the * logistic model (in percent). * An attribute is used in the model if it is used in any of the models for * the different classes. * * @return the fraction of all attributes that are used */ public double percentAttributesUsed(){ boolean[] attributes = new boolean[m_numericDataHeader.numAttributes()]; double[][] coefficients = getCoefficients(); for (int j = 0; j < m_numClasses; j++){ for (int i = 1; i < m_numericDataHeader.numAttributes() + 1; i++) { //attribute used if it is used in any class, note coefficients are shifted by one (because //of constant term). if (!Utils.eq(coefficients[j][i],0)) attributes[i - 1] = true; } } //count number of used attributes (without the class attribute) double count = 0; for (int i = 0; i < attributes.length; i++) if (attributes[i]) count++; return count / (double)(m_numericDataHeader.numAttributes() - 1) * 100.0; } /** * Returns a description of the logistic model (i.e., attributes and * coefficients). 
* * @return the description of the model */ public String toString(){ StringBuffer s = new StringBuffer(); //get used attributes int[][] attributes = getUsedAttributes(); //get coefficients double[][] coefficients = getCoefficients(); for (int j = 0; j < m_numClasses; j++) { s.append("\nClass "+j+" :\n"); //constant term s.append(Utils.doubleToString(coefficients[j][0],4,2)+" + \n"); for (int i = 0; i < attributes[j].length; i++) { //attribute/coefficient pairs s.append("["+m_numericDataHeader.attribute(attributes[j][i]).name()+"]"); s.append(" * " + Utils.doubleToString(coefficients[j][attributes[j][i]+1],4,2)); if (i != attributes[j].length - 1) s.append(" +"); s.append("\n"); } } return new String(s); } /** * Returns class probabilities for an instance. * * @param instance the instance to compute the distribution for * @return the class probabilities * @throws Exception if distribution can't be computed successfully */ public double[] distributionForInstance(Instance instance) throws Exception { instance = (Instance)instance.copy(); //set to numeric pseudo-class instance.setDataset(m_numericDataHeader); //calculate probs via Fs return probs(getFs(instance)); } /** * Cleanup in order to save memory. */ public void cleanup() { //save just header info m_train = new Instances(m_train,0); m_numericData = null; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
33,629
31.398844
117
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/lmt/ResidualModelSelection.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ResidualModelSelection.java * Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.lmt; import weka.classifiers.trees.j48.ClassifierSplitModel; import weka.classifiers.trees.j48.Distribution; import weka.classifiers.trees.j48.ModelSelection; import weka.classifiers.trees.j48.NoSplit; import weka.core.Instances; import weka.core.RevisionUtils; /** * Helper class for logistic model trees (weka.classifiers.trees.lmt.LMT) to implement the * splitting criterion based on residuals. * * @author Niels Landwehr * @version $Revision: 8034 $ */ public class ResidualModelSelection extends ModelSelection { /** for serialization */ private static final long serialVersionUID = -293098783159385148L; /** Minimum number of instances for leaves*/ protected int m_minNumInstances; /** Minimum information gain for split*/ protected double m_minInfoGain; /** * Constructor to create ResidualModelSelection object. * @param minNumInstances minimum number of instances for leaves */ public ResidualModelSelection(int minNumInstances) { m_minNumInstances = minNumInstances; m_minInfoGain = 1.0E-4; } /**Method not in use*/ public void cleanup() { //method not in use } /** * Selects split based on residuals for the given dataset. 
*/ public final ClassifierSplitModel selectModel(Instances data, double[][] dataZs, double[][] dataWs) throws Exception{ int numAttributes = data.numAttributes(); if (numAttributes < 2) throw new Exception("Can't select Model without non-class attribute"); if (data.numInstances() < m_minNumInstances) return new NoSplit(new Distribution(data)); double bestGain = -Double.MAX_VALUE; int bestAttribute = -1; //try split on every attribute for (int i = 0; i < numAttributes; i++) { if (i != data.classIndex()) { //build split ResidualSplit split = new ResidualSplit(i); split.buildClassifier(data, dataZs, dataWs); if (split.checkModel(m_minNumInstances)){ //evaluate split double gain = split.entropyGain(); if (gain > bestGain) { bestGain = gain; bestAttribute = i; } } } } if (bestGain >= m_minInfoGain){ //return best split ResidualSplit split = new ResidualSplit(bestAttribute); split.buildClassifier(data, dataZs, dataWs); return split; } else { //could not find any split with enough information gain return new NoSplit(new Distribution(data)); } } /**Method not in use*/ public final ClassifierSplitModel selectModel(Instances train) { //method not in use return null; } /**Method not in use*/ public final ClassifierSplitModel selectModel(Instances train, Instances test) { //method not in use return null; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
3,732
27.496183
97
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/lmt/ResidualSplit.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    ResidualSplit.java
 *    Copyright (C) 2003-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.lmt;

import weka.classifiers.trees.j48.ClassifierSplitModel;
import weka.classifiers.trees.j48.Distribution;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * Helper class for logistic model trees (weka.classifiers.trees.lmt.LMT) to implement the
 * splitting criterion based on residuals of the LogitBoost algorithm.
 *
 * @author Niels Landwehr
 * @version $Revision: 8034 $
 */
public class ResidualSplit extends ClassifierSplitModel{

  /** for serialization */
  private static final long serialVersionUID = -5055883734183713525L;

  /** The attribute selected for the split */
  protected Attribute m_attribute;

  /** The index of the attribute selected for the split */
  protected int m_attIndex;

  /** Number of instances in the set */
  protected int m_numInstances;

  /** Number of classes */
  protected int m_numClasses;

  /** The set of instances */
  protected Instances m_data;

  /** The Z-values (LogitBoost response) for the set of instances */
  protected double[][] m_dataZs;

  /** The LogitBoost-weights for the set of instances */
  protected double[][] m_dataWs;

  /** The split point (for numeric attributes) */
  protected double m_splitPoint;

  /**
   * Creates a split object.
   * @param attIndex the index of the attribute to split on
   */
  public ResidualSplit(int attIndex) {
    m_attIndex = attIndex;
  }

  /**
   * Builds the split.
   * Needs the Z/W values of LogitBoost for the set of instances.
   * Nominal attributes get one subset per value; numeric attributes get a
   * binary split whose threshold is chosen by {@link #getSplitPoint()}.
   *
   * @param data the instances to split
   * @param dataZs the Z-values (LogitBoost response), one row per instance
   * @param dataWs the LogitBoost weights, one row per instance
   * @throws Exception if the set of instances is empty
   */
  public void buildClassifier(Instances data, double[][] dataZs, double[][] dataWs)
    throws Exception {

    m_numClasses = data.numClasses();
    m_numInstances = data.numInstances();

    if (m_numInstances == 0) throw new Exception("Can't build split on 0 instances");

    //save data/Zs/Ws
    m_data = data;
    m_dataZs = dataZs;
    m_dataWs = dataWs;
    m_attribute = data.attribute(m_attIndex);

    //determine number of subsets and split point for numeric attributes
    if (m_attribute.isNominal()) {
      m_splitPoint = 0.0;
      m_numSubsets = m_attribute.numValues();
    } else {
      // NOTE(review): the boolean return of getSplitPoint() is ignored here;
      // if no split point exists (all values equal) m_splitPoint keeps its
      // previous value and checkModel() is expected to reject the split later.
      getSplitPoint();
      m_numSubsets = 2;
    }

    //create distribution for data
    m_distribution = new Distribution(data, this);
  }

  /**
   * Selects the split point for a numeric attribute by exhaustively trying
   * every midpoint between consecutive distinct attribute values (after
   * sorting) and keeping the one with the highest entropy gain.
   *
   * @return true if a split point was found, false if the attribute has no
   *         two distinct values
   * @throws Exception if the entropy-gain computation fails
   */
  protected boolean getSplitPoint() throws Exception{

    //compute possible split points: midpoints between distinct sorted values
    double[] splitPoints = new double[m_numInstances];
    int numSplitPoints = 0;

    Instances sortedData = new Instances(m_data);
    sortedData.sort(sortedData.attribute(m_attIndex));
    double last, current;

    last = sortedData.instance(0).value(m_attIndex);
    for (int i = 0; i < m_numInstances - 1; i++) {
      current = sortedData.instance(i+1).value(m_attIndex);
      if (!Utils.eq(current, last)){
        splitPoints[numSplitPoints++] = (last + current) / 2.0;
      }
      last = current;
    }

    //compute entropy gain for all split points
    //(entropyGain() reads m_splitPoint, so it is set temporarily per candidate)
    double[] entropyGain = new double[numSplitPoints];
    for (int i = 0; i < numSplitPoints; i++) {
      m_splitPoint = splitPoints[i];
      entropyGain[i] = entropyGain();
    }

    //get best entropy gain
    int bestSplit = -1;
    double bestGain = -Double.MAX_VALUE;
    for (int i = 0; i < numSplitPoints; i++) {
      if (entropyGain[i] > bestGain) {
        bestGain = entropyGain[i];
        bestSplit = i;
      }
    }

    if (bestSplit < 0) return false;

    m_splitPoint = splitPoints[bestSplit];
    return true;
  }

  /**
   * Computes the entropy gain for the current split: the weighted residual
   * entropy of the whole set minus the sum of the entropies of the subsets.
   * Splits on missing values are not supported.
   *
   * @return the entropy gain
   * @throws Exception if an instance has a missing value for the split attribute
   */
  public double entropyGain() throws Exception{

    int numSubsets;
    if (m_attribute.isNominal()) {
      numSubsets = m_attribute.numValues();
    } else {
      numSubsets = 2;
    }

    double[][][] splitDataZs = new double[numSubsets][][];
    double[][][] splitDataWs = new double[numSubsets][][];

    //determine size of the subsets
    int[] subsetSize = new int[numSubsets];
    for (int i = 0; i < m_numInstances; i++) {
      int subset = whichSubset(m_data.instance(i));
      if (subset < 0) throw new Exception("ResidualSplit: no support for splits on missing values");
      subsetSize[subset]++;
    }

    for (int i = 0; i < numSubsets; i++) {
      splitDataZs[i] = new double[subsetSize[i]][];
      splitDataWs[i] = new double[subsetSize[i]][];
    }

    int[] subsetCount = new int[numSubsets];

    //sort Zs/Ws into subsets (rows are shared, not copied)
    for (int i = 0; i < m_numInstances; i++) {
      int subset = whichSubset(m_data.instance(i));
      splitDataZs[subset][subsetCount[subset]] = m_dataZs[i];
      splitDataWs[subset][subsetCount[subset]] = m_dataWs[i];
      subsetCount[subset]++;
    }

    //calculate entropy gain
    double entropyOrig = entropy(m_dataZs, m_dataWs);

    double entropySplit = 0.0;
    for (int i = 0; i < numSubsets; i++) {
      entropySplit += entropy(splitDataZs[i], splitDataWs[i]);
    }

    return entropyOrig - entropySplit;
  }

  /**
   * Helper function to compute entropy from Z/W values.
   * For each class the weighted mean of the Z-values is computed; the
   * "entropy" is the weighted sum of squared deviations from that mean.
   *
   * @param dataZs the Z-values of the subset
   * @param dataWs the weights of the subset
   * @return the entropy multiplied by the sum of weights
   */
  protected double entropy(double[][] dataZs, double[][] dataWs){
    //method returns entropy * sumOfWeights
    double entropy = 0.0;
    int numInstances = dataZs.length;

    for (int j = 0; j < m_numClasses; j++) {

      //compute weighted mean for class
      double m = 0.0;
      double sum = 0.0;
      for (int i = 0; i < numInstances; i++) {
        m += dataZs[i][j] * dataWs[i][j];
        sum += dataWs[i][j];
      }
      // NOTE(review): if the weight sum is zero this division yields NaN —
      // presumably LogitBoost never passes all-zero weights; verify upstream.
      m /= sum;

      //sum up entropy for class
      for (int i = 0; i < numInstances; i++) {
        entropy += dataWs[i][j] * Math.pow(dataZs[i][j] - m,2);
      }
    }

    return entropy;
  }

  /**
   * Checks if there are at least 2 subsets that contain >= minNumInstances.
   *
   * @param minNumInstances the minimum bag size
   * @return true if the split is acceptable
   */
  public boolean checkModel(int minNumInstances){
    //checks if there are at least 2 subsets that contain >= minNumInstances
    int count = 0;
    for (int i = 0; i < m_distribution.numBags(); i++) {
      if (m_distribution.perBag(i) >= minNumInstances) count++;
    }
    return (count >= 2);
  }

  /**
   * Returns name of splitting attribute (left side of condition).
   */
  public final String leftSide(Instances data) {
    return data.attribute(m_attIndex).name();
  }

  /**
   * Prints the condition satisfied by instances in a subset.
   * Nominal: "= value"; numeric: "<= splitPoint" for subset 0, "> splitPoint" otherwise.
   */
  public final String rightSide(int index,Instances data) {
    StringBuffer text;

    text = new StringBuffer();
    if (data.attribute(m_attIndex).isNominal())
      text.append(" = "+ data.attribute(m_attIndex).value(index));
    else
      if (index == 0)
        text.append(" <= "+ Utils.doubleToString(m_splitPoint,6));
      else
        text.append(" > "+ Utils.doubleToString(m_splitPoint,6));
    return text.toString();
  }

  /**
   * Returns the index of the subset the given instance falls into,
   * or -1 if the split attribute is missing.
   */
  public final int whichSubset(Instance instance) throws Exception {
    if (instance.isMissing(m_attIndex)) return -1;
    else{
      if (instance.attribute(m_attIndex).isNominal()) return (int)instance.value(m_attIndex);
      else
        if (Utils.smOrEq(instance.value(m_attIndex),m_splitPoint)) return 0; else return 1;
    }
  }

  /** Method not in use */
  public void buildClassifier(Instances data) {
    //method not in use
  }

  /** Method not in use */
  public final double [] weights(Instance instance){
    //method not in use
    return null;
  }

  /** Method not in use */
  public final String sourceExpression(int index, Instances data) {
    //method not in use
    return "";
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
8,455
25.507837
100
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/m5/CorrelationSplitInfo.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    CorrelationSplitInfo.java
 *    Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.m5;

import java.io.Serializable;

import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.core.PairedStats;

/**
 * Finds split points using correlation.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public final class CorrelationSplitInfo
  implements Cloneable, Serializable, SplitEvaluate, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 4212734895125452770L;

  /**
   * the first instance
   */
  private int m_first;

  /**
   * the last instance
   */
  private int m_last;

  /** the index of the best split found; -1 if no split was found */
  private int m_position;

  /**
   * the maximum impurity reduction
   */
  private double m_maxImpurity;

  /**
   * the attribute being tested
   */
  private int m_splitAttr;

  /**
   * the best value on which to split
   */
  private double m_splitValue;

  /**
   * the number of instances
   */
  private int m_number;

  /**
   * Constructs an object which contains the split information
   *
   * @param low the index of the first instance
   * @param high the index of the last instance
   * @param attr an attribute
   */
  public CorrelationSplitInfo(int low, int high, int attr) {
    initialize(low, high, attr);
  }

  /**
   * Makes a copy of this CorrelationSplitInfo object
   */
  public final SplitEvaluate copy() throws Exception {
    CorrelationSplitInfo s = (CorrelationSplitInfo) this.clone();

    return s;
  }

  /**
   * Resets the object of split information
   *
   * @param low the index of the first instance
   * @param high the index of the last instance
   * @param attr the attribute
   */
  public final void initialize(int low, int high, int attr) {
    m_number = high - low + 1;
    m_first = low;
    m_last = high;
    m_position = -1;
    m_maxImpurity = -Double.MAX_VALUE;
    m_splitAttr = attr;
    m_splitValue = 0.0;
  }

  /**
   * Finds the best splitting point for an attribute in the instances.
   * The instances are assumed to be sorted on the candidate attribute.
   * The method primes a left/right pair of incremental paired statistics
   * with the first/remaining fifth of the data, then slides the boundary
   * one instance at a time, scoring each distinct-value boundary by the
   * reduction in (root-)variance of the class values.
   *
   * @param attr the splitting attribute
   * @param inst the instances
   * @exception Exception if something goes wrong
   */
  public final void attrSplit(int attr, Instances inst) throws Exception {
    int i;
    int len;
    int part;
    // NOTE(review): low/high are hard-wired to cover the whole instance set;
    // the priming loops below (i < len, i < inst.numInstances()) only work
    // because low == 0 — confirm before ever generalizing the bounds.
    int low = 0;
    int high = inst.numInstances() - 1;
    PairedStats full = new PairedStats(0.01);
    PairedStats leftSubset = new PairedStats(0.01);
    PairedStats rightSubset = new PairedStats(0.01);
    int classIndex = inst.classIndex();
    double leftCorr, rightCorr;
    double leftVar, rightVar, allVar;
    double order = 2.0;

    initialize(low, high, attr);

    // too few instances to form two meaningful subsets
    if (m_number < 4) {
      return;
    }

    // smallest subset size considered: one fifth of the range (min 1)
    len = ((high - low + 1) < 5) ? 1 : (high - low + 1) / 5;
    m_position = low;
    part = low + len - 1; // NOTE(review): 'part' is computed but never used

    // prime the subsets: first 'len' instances go left, the rest go right
    for (i = low; i < len; i++) {
      full.add(inst.instance(i).value(attr),
               inst.instance(i).value(classIndex));
      leftSubset.add(inst.instance(i).value(attr),
                     inst.instance(i).value(classIndex));
    }

    for (i = len; i < inst.numInstances(); i++) {
      full.add(inst.instance(i).value(attr),
               inst.instance(i).value(classIndex));
      rightSubset.add(inst.instance(i).value(attr),
                      inst.instance(i).value(classIndex));
    }

    full.calculateDerived();

    // overall class spread: order-th root of the variance (order = 2 => std dev)
    allVar = (full.yStats.stdDev * full.yStats.stdDev);
    allVar = Math.abs(allVar);
    allVar = Math.pow(allVar, (1.0 / order));

    // slide the boundary; only evaluate where consecutive attribute values differ
    for (i = low + len; i < high - len - 1; i++) {
      rightSubset.subtract(inst.instance(i).value(attr),
                           inst.instance(i).value(classIndex));
      leftSubset.add(inst.instance(i).value(attr),
                     inst.instance(i).value(classIndex));

      if (!Utils.eq(inst.instance(i + 1).value(attr),
                    inst.instance(i).value(attr))) {
        leftSubset.calculateDerived();
        rightSubset.calculateDerived();

        leftCorr = Math.abs(leftSubset.correlation);
        rightCorr = Math.abs(rightSubset.correlation);
        leftVar = (leftSubset.yStats.stdDev * leftSubset.yStats.stdDev);
        leftVar = Math.abs(leftVar);
        leftVar = Math.pow(leftVar, (1.0 / order));
        rightVar = (rightSubset.yStats.stdDev * rightSubset.yStats.stdDev);
        rightVar = Math.abs(rightVar);
        rightVar = Math.pow(rightVar, (1.0 / order));

        // impurity reduction: overall spread minus size-weighted subset spreads
        double score = allVar - ((leftSubset.count / full.count) * leftVar) -
          ((rightSubset.count / full.count) * rightVar);

        // score /= allVar;
        leftCorr = (leftSubset.count / full.count) * leftCorr;
        rightCorr = (rightSubset.count / full.count) * rightCorr;

        // NOTE(review): c_score (and the weighted correlations feeding it) is
        // dead code — an abandoned correlation-based criterion kept for
        // reference; only 'score' drives the split selection below.
        double c_score = (leftCorr + rightCorr) - Math.abs(full.correlation);

        // c_score += score;
        if (!Utils.eq(score, 0.0)) {
          if (score > m_maxImpurity) {
            m_maxImpurity = score;
            // split value: midpoint between the two distinct attribute values
            m_splitValue = (inst.instance(i).value(attr) + inst.instance(i + 1)
              .value(attr)) * 0.5;
            m_position = i;
          }
        }
      }
    }
  }

  /**
   * Returns the impurity of this split
   *
   * @return the impurity of this split
   */
  public double maxImpurity() {
    return m_maxImpurity;
  }

  /**
   * Returns the attribute used in this split
   *
   * @return the attribute used in this split
   */
  public int splitAttr() {
    return m_splitAttr;
  }

  /**
   * Returns the position of the split in the sorted values. -1 indicates that
   * a split could not be found.
   *
   * @return an <code>int</code> value
   */
  public int position() {
    return m_position;
  }

  /**
   * Returns the split value
   *
   * @return the split value
   */
  public double splitValue() {
    return m_splitValue;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
6,551
24.897233
78
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/m5/Impurity.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Impurity.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.m5; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * Class for handling the impurity values when spliting the instances * @author Yong Wang (yongwang@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public final class Impurity implements RevisionHandler { double n; // number of total instances int attr; // splitting attribute double nl; // number of instances in the left group double nr; // number of instances in the right group double sl; // sum of the left group double sr; // sum of the right group double s2l; // squared sum of the left group double s2r; // squared sum of the right group double sdl; // standard deviation of the left group double sdr; // standard deviation of the right group double vl; // variance of the left group double vr; // variance of the right group double sd; // overall standard deviation double va; // overall variance double impurity; // impurity value; int order; // order = 1, variance; order = 2, standard deviation; order = 3, the cubic root of the variance; // order = k, the k-th order root of the variance /** * Constructs an Impurity object containing the impurity values of partitioning the instances using an attribute * @param 
partition the index of the last instance in the left subset * @param attribute the attribute used in partitioning * @param inst instances * @param k the order of the impurity; =1, the variance; =2, the stardard deviation; =k, the k-th order root of the variance */ public Impurity(int partition,int attribute,Instances inst,int k){ Values values = new Values(0,inst.numInstances()-1,inst.classIndex(),inst); attr = attribute; n = inst.numInstances(); sd = values.sd; va = values.va; values = new Values(0,partition,inst.classIndex(),inst); nl = partition + 1; sl = values.sum; s2l = values.sqrSum; values = new Values(partition+1,inst.numInstances()-1,inst.classIndex(),inst); nr = inst.numInstances() - partition -1; sr = values.sum; s2r = values.sqrSum; order = k; this.incremental(0,0); } /** * Converts an Impurity object to a string * @return the converted string */ public final String toString() { StringBuffer text = new StringBuffer(); text.append("Print impurity values:\n"); text.append(" Number of total instances:\t" + n + "\n"); text.append(" Splitting attribute:\t\t" + attr + "\n"); text.append(" Number of the instances in the left:\t" + nl + "\n"); text.append(" Number of the instances in the right:\t" + nr + "\n"); text.append(" Sum of the left:\t\t\t" + sl + "\n"); text.append(" Sum of the right:\t\t\t" + sr + "\n"); text.append(" Squared sum of the left:\t\t" + s2l + "\n"); text.append(" Squared sum of the right:\t\t" + s2r + "\n"); text.append(" Standard deviation of the left:\t" + sdl + "\n"); text.append(" Standard deviation of the right:\t" + sdr + "\n"); text.append(" Variance of the left:\t\t" + vr + "\n"); text.append(" Variance of the right:\t\t" + vr + "\n"); text.append(" Overall standard deviation:\t\t" + sd + "\n"); text.append(" Overall variance:\t\t\t" + va + "\n"); text.append(" Impurity (order " + order + "):\t\t" + impurity + "\n"); return text.toString(); } /** * Incrementally computes the impurirty values * @param value the incremental value * 
@param type if type=1, value will be added to the left subset; type=-1, to the right subset; type=0, initializes */ public final void incremental(double value,int type){ double y=0.,yl=0.,yr=0.; switch(type){ case 1: nl += 1; nr -= 1; sl += value; sr -= value; s2l += value*value; s2r -= value*value; break; case -1: nl -= 1; nr += 1; sl -= value; sr += value; s2l -= value*value; s2r += value*value; break; case 0: break; default: System.err.println("wrong type in Impurity.incremental()."); } if(nl<=0.0){ vl=0.0; sdl=0.0; } else { vl = (nl*s2l-sl*sl)/((double)nl*((double)nl)); vl = Math.abs(vl); sdl = Math.sqrt(vl); } if(nr<=0.0){ vr=0.0; sdr=0.0; } else { vr = (nr*s2r-sr*sr)/((double)nr*((double)nr)); vr = Math.abs(vr); sdr = Math.sqrt(vr); } if(order <= 0)System.err.println("Impurity order less than zero in Impurity.incremental()"); else if(order == 1) { y = va; yl = vl; yr = vr; } else { y = Math.pow(va,1./order); yl = Math.pow(vl,1./order); yr = Math.pow(vr,1./order); } if(nl<=0.0 || nr<=0.0) impurity = 0.0; else { impurity = y - ((double)nl/(double)n)*yl - ((double)nr/(double)n)*yr; } } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
6,223
32.462366
129
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/m5/M5Base.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    M5Base.java
 *    Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.m5;

import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.classifiers.AbstractClassifier;
import weka.classifiers.functions.LinearRegression;
import weka.core.AdditionalMeasureProducer;
import weka.core.Capabilities;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.supervised.attribute.NominalToBinary;
import weka.filters.unsupervised.attribute.RemoveUseless;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;

/**
 * M5Base. Implements base routines
 * for generating M5 Model trees and rules. <p>
 *
 * The original algorithm M5 was invented by Quinlan: <br/>
 *
 * Quinlan J. R. (1992). Learning with continuous classes. Proceedings of
 * the Australian Joint Conference on Artificial Intelligence. 343--348.
 * World Scientific, Singapore. <p/>
 *
 * Yong Wang made improvements and created M5': <br/>
 *
 * Wang, Y and Witten, I. H. (1997). Induction of model trees for
 * predicting continuous classes. Proceedings of the poster papers of the
 * European Conference on Machine Learning. University of Economics,
 * Faculty of Informatics and Statistics, Prague. <p/>
 *
 * Valid options are:<p>
 *
 * -U <br>
 * Use unsmoothed predictions. <p>
 *
 * -R <br>
 * Build regression tree/rule rather than model tree/rule
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public abstract class M5Base
  extends AbstractClassifier
  implements AdditionalMeasureProducer,
             TechnicalInformationHandler {

  /** for serialization */
  private static final long serialVersionUID = -4022221950191647679L;

  /**
   * the instances covered by the tree/rules
   */
  private Instances m_instances;

  /**
   * the rule set (elements are of type Rule; a single tree is stored as one Rule)
   */
  protected FastVector m_ruleSet;

  /**
   * generate a decision list instead of a single tree.
   */
  private boolean m_generateRules;

  /**
   * use unsmoothed predictions
   */
  private boolean m_unsmoothedPredictions;

  /**
   * filter to fill in missing values
   */
  private ReplaceMissingValues m_replaceMissing;

  /**
   * filter to convert nominal attributes to binary
   */
  private NominalToBinary m_nominalToBinary;

  /**
   * for removing useless attributes
   */
  private RemoveUseless m_removeUseless;

  /**
   * Save instances at each node in an M5 tree for visualization purposes.
   */
  protected boolean m_saveInstances = false;

  /**
   * Make a regression tree/rule instead of a model tree/rule
   */
  protected boolean m_regressionTree;

  /**
   * Do not prune tree/rules
   */
  protected boolean m_useUnpruned = false;

  /**
   * The minimum number of instances to allow at a leaf node
   */
  protected double m_minNumInstances = 4;

  /**
   * Constructor
   */
  public M5Base() {
    m_generateRules = false;
    m_unsmoothedPredictions = false;
    m_useUnpruned = false;
    m_minNumInstances = 4;
  }

  /**
   * returns information about the classifier
   * @return a description suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "M5Base. Implements base routines for generating M5 Model trees and "
      + "rules\n"
      + "The original algorithm M5 was invented by R. Quinlan and Yong Wang "
      + "made improvements.\n\n"
      + "For more information see:\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;
    TechnicalInformation additional;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "Ross J. Quinlan");
    result.setValue(Field.TITLE, "Learning with Continuous Classes");
    result.setValue(Field.BOOKTITLE, "5th Australian Joint Conference on Artificial Intelligence");
    result.setValue(Field.YEAR, "1992");
    result.setValue(Field.PAGES, "343-348");
    result.setValue(Field.PUBLISHER, "World Scientific");
    result.setValue(Field.ADDRESS, "Singapore");

    additional = result.add(Type.INPROCEEDINGS);
    additional.setValue(Field.AUTHOR, "Y. Wang and I. H. Witten");
    additional.setValue(Field.TITLE, "Induction of model trees for predicting continuous classes");
    additional.setValue(Field.BOOKTITLE, "Poster papers of the 9th European Conference on Machine Learning");
    additional.setValue(Field.YEAR, "1997");
    additional.setValue(Field.PUBLISHER, "Springer");

    return result;
  }

  /**
   * Returns an enumeration describing the available options
   *
   * @return an enumeration of all the available options
   */
  public Enumeration listOptions() {
    Vector newVector = new Vector(4);

    newVector.addElement(new Option("\tUse unpruned tree/rules", "N", 0, "-N"));

    newVector.addElement(new Option("\tUse unsmoothed predictions",
                                    "U", 0, "-U"));

    newVector.addElement(new Option("\tBuild regression tree/rule rather "
                                    +"than a model tree/rule",
                                    "R", 0, "-R"));

    newVector.addElement(new Option("\tSet minimum number of instances "
                                    +"per leaf\n\t(default 4)",
                                    "M",1,"-M <minimum number of instances>"));
    return newVector.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   * Valid options are:<p>
   *
   * -U <br>
   * Use unsmoothed predictions. <p>
   *
   * -R <br>
   * Build a regression tree rather than a model tree. <p>
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    setUnpruned(Utils.getFlag('N', options));
    setUseUnsmoothed(Utils.getFlag('U', options));
    setBuildRegressionTree(Utils.getFlag('R', options));
    String optionString = Utils.getOption('M', options);
    if (optionString.length() != 0) {
      setMinNumInstances((new Double(optionString)).doubleValue());
    }
    Utils.checkForRemainingOptions(options);
  }

  /**
   * Gets the current settings of the classifier.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    String[] options = new String[5];
    int current = 0;

    if (getUnpruned()) {
      options[current++] = "-N";
    }

    if (getUseUnsmoothed()) {
      options[current++] = "-U";
    }

    if (getBuildRegressionTree()) {
      options[current++] = "-R";
    }

    options[current++] = "-M";
    options[current++] = ""+getMinNumInstances();

    // pad any remaining slots with empty strings
    while (current < options.length) {
      options[current++] = "";
    }
    return options;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String unprunedTipText() {
    return "Whether unpruned tree/rules are to be generated.";
  }

  /**
   * Use unpruned tree/rules
   *
   * @param unpruned true if unpruned tree/rules are to be generated
   */
  public void setUnpruned(boolean unpruned) {
    m_useUnpruned = unpruned;
  }

  /**
   * Get whether unpruned tree/rules are being generated
   *
   * @return true if unpruned tree/rules are to be generated
   */
  public boolean getUnpruned() {
    return m_useUnpruned;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String generateRulesTipText() {
    return "Whether to generate rules (decision list) rather than a tree.";
  }

  /**
   * Generate rules (decision list) rather than a tree
   *
   * @param u true if rules are to be generated
   */
  protected void setGenerateRules(boolean u) {
    m_generateRules = u;
  }

  /**
   * get whether rules are being generated rather than a tree
   *
   * @return true if rules are to be generated
   */
  protected boolean getGenerateRules() {
    return m_generateRules;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String useUnsmoothedTipText() {
    return "Whether to use unsmoothed predictions.";
  }

  /**
   * Use unsmoothed predictions
   *
   * @param s true if unsmoothed predictions are to be used
   */
  public void setUseUnsmoothed(boolean s) {
    m_unsmoothedPredictions = s;
  }

  /**
   * Get whether or not smoothing is being used
   *
   * @return true if unsmoothed predictions are to be used
   */
  public boolean getUseUnsmoothed() {
    return m_unsmoothedPredictions;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String buildRegressionTreeTipText() {
    return "Whether to generate a regression tree/rule instead of a model tree/rule.";
  }

  /**
   * Get the value of regressionTree.
   *
   * @return Value of regressionTree.
   */
  public boolean getBuildRegressionTree() {
    return m_regressionTree;
  }

  /**
   * Set the value of regressionTree.
   *
   * @param newregressionTree Value to assign to regressionTree.
   */
  public void setBuildRegressionTree(boolean newregressionTree) {
    m_regressionTree = newregressionTree;
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String minNumInstancesTipText() {
    return "The minimum number of instances to allow at a leaf node.";
  }

  /**
   * Set the minimum number of instances to allow at a leaf node
   *
   * @param minNum the minimum number of instances
   */
  public void setMinNumInstances(double minNum) {
    m_minNumInstances = minNum;
  }

  /**
   * Get the minimum number of instances to allow at a leaf node
   *
   * @return a <code>double</code> value
   */
  public double getMinNumInstances() {
    return m_minNumInstances;
  }

  /**
   * Returns default capabilities of the classifier, i.e., of LinearRegression.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    return new LinearRegression().getCapabilities();
  }

  /**
   * Generates the classifier.
   *
   * Preprocessing pipeline (order matters, and classifyInstance must apply
   * the same filters in the same order): replace missing values, convert
   * nominal attributes to binary, remove useless attributes. Then either a
   * decision list of rules is grown (each rule built on the instances not
   * covered by the previous rules) or a single model tree is built.
   *
   * @param data set of instances serving as training data
   * @throws Exception if the classifier has not been generated
   * successfully
   */
  public void buildClassifier(Instances data) throws Exception {
    // can classifier handle the data?
    getCapabilities().testWithFail(data);

    // remove instances with missing class
    data = new Instances(data);
    data.deleteWithMissingClass();

    m_instances = new Instances(data);

    m_replaceMissing = new ReplaceMissingValues();
    m_replaceMissing.setInputFormat(m_instances);
    m_instances = Filter.useFilter(m_instances, m_replaceMissing);

    m_nominalToBinary = new NominalToBinary();
    m_nominalToBinary.setInputFormat(m_instances);
    m_instances = Filter.useFilter(m_instances, m_nominalToBinary);

    m_removeUseless = new RemoveUseless();
    m_removeUseless.setInputFormat(m_instances);
    m_instances = Filter.useFilter(m_instances, m_removeUseless);

    // fixed seed keeps training deterministic across runs
    m_instances.randomize(new Random(1));

    m_ruleSet = new FastVector();

    Rule tempRule;

    if (m_generateRules) {
      Instances tempInst = m_instances;

      // grow rules until every instance is covered by some rule
      do {
        tempRule = new Rule();
        tempRule.setSmoothing(!m_unsmoothedPredictions);
        tempRule.setRegressionTree(m_regressionTree);
        tempRule.setUnpruned(m_useUnpruned);
        tempRule.setSaveInstances(false);
        tempRule.setMinNumInstances(m_minNumInstances);
        tempRule.buildClassifier(tempInst);
        m_ruleSet.addElement(tempRule);
        // System.err.println("Built rule : "+tempRule.toString());
        tempInst = tempRule.notCoveredInstances();
        tempRule.freeNotCoveredInstances();
      } while (tempInst.numInstances() > 0);
    } else {
      // just build a single tree
      tempRule = new Rule();

      tempRule.setUseTree(true);
      //      tempRule.setGrowFullTree(true);
      tempRule.setSmoothing(!m_unsmoothedPredictions);
      tempRule.setSaveInstances(m_saveInstances);
      tempRule.setRegressionTree(m_regressionTree);
      tempRule.setUnpruned(m_useUnpruned);
      tempRule.setMinNumInstances(m_minNumInstances);

      Instances temp_train;

      temp_train = m_instances;

      tempRule.buildClassifier(temp_train);

      m_ruleSet.addElement(tempRule);

      // System.err.print(tempRule.m_topOfTree.treeToString(0));
    }

    // save space: keep only the (filtered) header, not the training data
    m_instances = new Instances(m_instances, 0);
  }

  /**
   * Calculates a prediction for an instance using a set of rules
   * or an M5 model tree
   *
   * @param inst the instance whose class value is to be predicted
   * @return the prediction
   * @throws Exception if a prediction can't be made.
   */
  public double classifyInstance(Instance inst) throws Exception {
    Rule temp;
    double prediction = 0;
    boolean success = false;

    // push the instance through the same filter chain used in buildClassifier
    m_replaceMissing.input(inst);
    inst = m_replaceMissing.output();
    m_nominalToBinary.input(inst);
    inst = m_nominalToBinary.output();
    m_removeUseless.input(inst);
    inst = m_removeUseless.output();

    if (m_ruleSet == null) {
      throw new Exception("Classifier has not been built yet!");
    }

    if (!m_generateRules) {
      temp = (Rule) m_ruleSet.elementAt(0);
      return temp.classifyInstance(inst);
    }

    // decision list: use the first rule that produces a prediction without error
    boolean cont;
    int i;

    for (i = 0; i < m_ruleSet.size(); i++) {
      cont = false;
      temp = (Rule) m_ruleSet.elementAt(i);

      try {
        prediction = temp.classifyInstance(inst);
        success = true;
      } catch (Exception e) {
        cont = true;
      }

      if (!cont) {
        break;
      }
    }

    if (!success) {
      // NOTE(review): falls through and returns 0 after printing to stdout;
      // no rule could classify the instance
      System.out.println("Error in predicting (DecList)");
    }
    return prediction;
  }

  /**
   * Returns a description of the classifier
   *
   * @return a description of the classifier as a String
   */
  public String toString() {
    StringBuffer text = new StringBuffer();
    Rule temp;

    if (m_ruleSet == null) {
      return "Classifier hasn't been built yet!";
    }

    if (m_generateRules) {
      text.append("M5 "
                  + ((m_useUnpruned == true)
                     ? "unpruned "
                     : "pruned ")
                  + ((m_regressionTree == true)
                     ? "regression "
                     : "model ")
                  + "rules ");

      if (!m_unsmoothedPredictions) {
        text.append("\n(using smoothed linear models) ");
      }
      text.append(":\n");

      text.append("Number of Rules : " + m_ruleSet.size() + "\n\n");

      for (int j = 0; j < m_ruleSet.size(); j++) {
        temp = (Rule) m_ruleSet.elementAt(j);

        text.append("Rule: " + (j + 1) + "\n");
        text.append(temp.toString());
      }
    } else {
      temp = (Rule) m_ruleSet.elementAt(0);
      text.append(temp.toString());
    }
    return text.toString();
  }

  /**
   * Returns an enumeration of the additional measure names
   * @return an enumeration of the measure names
   */
  public Enumeration enumerateMeasures() {
    Vector newVector = new Vector(1);

    newVector.addElement("measureNumRules");
    return newVector.elements();
  }

  /**
   * Returns the value of the named measure
   * @param additionalMeasureName the name of the measure to query for its value
   * @return the value of the named measure
   * @throws IllegalArgumentException if the named measure is not supported
   */
  public double getMeasure(String additionalMeasureName) {
    if (additionalMeasureName.compareToIgnoreCase("measureNumRules") == 0) {
      return measureNumRules();
    } else {
      throw new IllegalArgumentException(additionalMeasureName
                                         + " not supported (M5)");
    }
  }

  /**
   * return the number of rules
   * @return the number of rules (same as # linear models &
   * # leaves in the tree)
   */
  public double measureNumRules() {
    if (m_generateRules) {
      return m_ruleSet.size();
    }
    return ((Rule)m_ruleSet.elementAt(0)).m_topOfTree.numberOfLinearModels();
  }

  /**
   * Returns the root node of the M5 tree (only meaningful when a single
   * tree, rather than a rule set, was built).
   */
  public RuleNode getM5RootNode() {
    Rule temp = (Rule) m_ruleSet.elementAt(0);

    return temp.getM5RootNode();
  }
}
17,600
26.458658
109
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/m5/PreConstructedLinearModel.java
/*
 *    This program is free software: you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation, either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    PreConstructedLinearModel.java
 *    Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.m5;

import java.io.Serializable;

import weka.classifiers.AbstractClassifier;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * This class encapsulates a linear regression function. It is a classifier
 * but does not learn the function itself, instead it is constructed with
 * coefficients and intercept obtained elsewhere. The buildClassifier method
 * must still be called however as this stores a copy of the training data's
 * header for use in printing the model to the console.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public class PreConstructedLinearModel extends AbstractClassifier
  implements Serializable {

  /** for serialization */
  static final long serialVersionUID = 2030974097051713247L;

  /** The coefficients */
  private double [] m_coefficients;

  /** The intercept */
  private double m_intercept;

  /** Holds the instances header for printing the model */
  private Instances m_instancesHeader;

  /** number of coefficients in the model */
  private int m_numParameters;

  /**
   * Constructor
   *
   * @param coeffs an array of coefficients
   * @param intercept the intercept
   */
  public PreConstructedLinearModel(double [] coeffs, double intercept) {
    m_coefficients = coeffs;
    m_intercept = intercept;

    // the parameter count is the number of non-zero coefficients
    int nonZero = 0;
    for (double coeff : coeffs) {
      if (coeff != 0) {
        nonZero++;
      }
    }
    m_numParameters = nonZero;
  }

  /**
   * Builds the classifier. In this case all that is done is that a
   * copy of the training instances header is saved.
   *
   * @param instances an <code>Instances</code> value
   * @exception Exception if an error occurs
   */
  public void buildClassifier(Instances instances) throws Exception {
    m_instancesHeader = new Instances(instances, 0);
  }

  /**
   * Predicts the class of the supplied instance using the linear model.
   * Attributes whose value is missing, and the class attribute itself,
   * contribute nothing to the weighted sum.
   *
   * @param inst the instance to make a prediction for
   * @return the prediction
   * @exception Exception if an error occurs
   */
  public double classifyInstance(Instance inst) throws Exception {
    double weightedSum = 0;

    for (int att = 0; att < m_coefficients.length; att++) {
      if (att != inst.classIndex() && !inst.isMissing(att)) {
        weightedSum += m_coefficients[att] * inst.value(att);
      }
    }

    weightedSum += m_intercept;
    return weightedSum;
  }

  /**
   * Return the number of parameters (coefficients) in the linear model
   *
   * @return the number of parameters
   */
  public int numParameters() {
    return m_numParameters;
  }

  /**
   * Return the array of coefficients
   *
   * @return the coefficients
   */
  public double [] coefficients() {
    return m_coefficients;
  }

  /**
   * Return the intercept
   *
   * @return the intercept
   */
  public double intercept() {
    return m_intercept;
  }

  /**
   * Returns a textual description of this linear model
   *
   * @return String containing a description of this linear model
   */
  public String toString() {
    StringBuilder description = new StringBuilder();
    description.append("\n" + m_instancesHeader.classAttribute().name()
        + " = ");

    boolean firstTermPrinted = false;
    for (int att = 0; att < m_coefficients.length; att++) {
      double coeff = m_coefficients[att];
      if (coeff == 0.0) {
        continue;
      }
      if (!firstTermPrinted) {
        // the leading term carries its own sign
        description.append("\n\t" + Utils.doubleToString(coeff, 12, 4).trim()
            + " * " + m_instancesHeader.attribute(att).name() + " ");
        firstTermPrinted = true;
      } else {
        // subsequent terms are written as "+ |c|" or "- |c|"
        description.append("\n\t"
            + ((coeff < 0)
               ? "- " + Utils.doubleToString(Math.abs(coeff), 12, 4).trim()
               : "+ " + Utils.doubleToString(Math.abs(coeff), 12, 4).trim())
            + " * " + m_instancesHeader.attribute(att).name() + " ");
      }
    }

    description.append("\n\t" + ((m_intercept < 0) ? "- " : "+ ")
        + Utils.doubleToString(Math.abs(m_intercept), 12, 4).trim());

    return description.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
4,981
26.988764
76
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/m5/Rule.java
/*
 *    This program is free software: you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation, either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    Rule.java
 *    Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.m5;

import java.io.Serializable;

import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * Generates a single m5 tree or rule
 *
 * @author Mark Hall
 * @version $Revision: 8034 $
 */
public class Rule implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -4458627451682483204L;

  // Relational-operator codes for rule conditions. Declared final: these
  // are constants ("<=" and ">") and must never be reassigned.
  protected static final int LEFT = 0;
  protected static final int RIGHT = 1;

  /**
   * the instances covered by this rule
   */
  private Instances m_instances;

  /**
   * the class index
   */
  private int m_classIndex;

  /**
   * the number of attributes
   */
  private int m_numAttributes;

  /**
   * the number of instances in the dataset
   */
  private int m_numInstances;

  /**
   * the indexes of the attributes used to split on for this rule
   */
  private int[] m_splitAtts;

  /**
   * the corresponding values of the split points
   */
  private double[] m_splitVals;

  /**
   * the corresponding internal nodes. Used for smoothing rules.
   */
  private RuleNode[] m_internalNodes;

  /**
   * the corresponding relational operators (0 = "&lt;=", 1 = "&gt;")
   */
  private int[] m_relOps;

  /**
   * the leaf encapsulating the linear model for this rule
   */
  private RuleNode m_ruleModel;

  /**
   * the top of the m5 tree for this rule
   */
  protected RuleNode m_topOfTree;

  /**
   * the standard deviation of the class for all the instances
   */
  private double m_globalStdDev;

  /**
   * the absolute deviation of the class for all the instances
   */
  private double m_globalAbsDev;

  /**
   * the instances covered by this rule
   */
  private Instances m_covered;

  /**
   * the number of instances covered by this rule
   */
  private int m_numCovered;

  /**
   * the instances not covered by this rule
   */
  private Instances m_notCovered;

  /**
   * use a pruned m5 tree rather than make a rule
   */
  private boolean m_useTree;

  /**
   * use the original m5 smoothing procedure
   */
  private boolean m_smoothPredictions;

  /**
   * Save instances at each node in an M5 tree for visualization purposes.
   */
  private boolean m_saveInstances;

  /**
   * Make a regression tree instead of a model tree
   */
  private boolean m_regressionTree;

  /**
   * Build unpruned tree/rule
   */
  private boolean m_useUnpruned;

  /**
   * The minimum number of instances to allow at a leaf node
   */
  private double m_minNumInstances;

  /**
   * Constructor declaration
   *
   */
  public Rule() {
    m_useTree = false;
    m_smoothPredictions = false;
    m_useUnpruned = false;
    m_minNumInstances = 4;
  }

  /**
   * Generates a single rule or m5 model tree.
   *
   * @param data set of instances serving as training data
   * @exception Exception if the rule has not been generated
   * successfully
   */
  public void buildClassifier(Instances data) throws Exception {
    // reset all model state so the rule can be rebuilt from scratch
    m_instances = null;
    m_topOfTree = null;
    m_covered = null;
    m_notCovered = null;
    m_ruleModel = null;
    m_splitAtts = null;
    m_splitVals = null;
    m_relOps = null;
    m_internalNodes = null;
    m_instances = data;
    m_classIndex = m_instances.classIndex();
    m_numAttributes = m_instances.numAttributes();
    m_numInstances = m_instances.numInstances();

    // first calculate global deviation of class attribute
    m_globalStdDev = Rule.stdDev(m_classIndex, m_instances);
    m_globalAbsDev = Rule.absDev(m_classIndex, m_instances);

    m_topOfTree = new RuleNode(m_globalStdDev, m_globalAbsDev, null);
    m_topOfTree.setSaveInstances(m_saveInstances);
    m_topOfTree.setRegressionTree(m_regressionTree);
    m_topOfTree.setMinNumInstances(m_minNumInstances);
    m_topOfTree.buildClassifier(m_instances);

    if (!m_useUnpruned) {
      m_topOfTree.prune();
    } else {
      // unpruned trees still need linear models installed at the leaves
      m_topOfTree.installLinearModels();
    }

    if (m_smoothPredictions) {
      m_topOfTree.installSmoothedModels();
    }
    // m_topOfTree.printAllModels();
    m_topOfTree.numLeaves(0);

    if (!m_useTree) {
      // rule mode: extract the single best rule from the pruned tree
      makeRule();
      // save space
      // m_topOfTree = null;
    }

    // save space: keep only the dataset header
    m_instances = new Instances(m_instances, 0);
  }

  /**
   * Calculates a prediction for an instance using this rule
   * or M5 model tree. In rule mode, an instance that fails any of the
   * rule's conditions is signalled by throwing an exception (decision-list
   * semantics used by the caller).
   *
   * @param instance the instance whos class value is to be predicted
   * @return the prediction
   * @exception Exception if a prediction can't be made.
   */
  public double classifyInstance(Instance instance) throws Exception {
    if (m_useTree) {
      return m_topOfTree.classifyInstance(instance);
    }

    // does the instance pass the rule's conditions?
    if (m_splitAtts.length > 0) {
      for (int i = 0; i < m_relOps.length; i++) {
        if (m_relOps[i] == LEFT) // left
        {
          if (instance.value(m_splitAtts[i]) > m_splitVals[i]) {
            throw new Exception("Rule does not classify instance");
          }
        } else {
          if (instance.value(m_splitAtts[i]) <= m_splitVals[i]) {
            throw new Exception("Rule does not classify instance");
          }
        }
      }
    }

    // the linear model's prediction for this rule
    return m_ruleModel.classifyInstance(instance);
  }

  /**
   * Returns the top of the tree.
   */
  public RuleNode topOfTree() {
    return m_topOfTree;
  }

  /**
   * Make the single best rule from a pruned m5 model tree
   *
   * @exception Exception if something goes wrong.
   */
  private void makeRule() throws Exception {
    RuleNode[] best_leaf = new RuleNode[1];
    double[] best_cov = new double[1];
    RuleNode temp;

    m_notCovered = new Instances(m_instances, 0);
    m_covered = new Instances(m_instances, 0);
    best_cov[0] = -1;
    best_leaf[0] = null;

    m_topOfTree.findBestLeaf(best_cov, best_leaf);

    temp = best_leaf[0];

    if (temp == null) {
      throw new Exception("Unable to generate rule!");
    }

    // save the linear model for this rule
    m_ruleModel = temp;

    // count the conditions on the path from the best leaf to the root
    int count = 0;

    while (temp.parentNode() != null) {
      count++;
      temp = temp.parentNode();
    }

    temp = best_leaf[0];
    m_relOps = new int[count];
    m_splitAtts = new int[count];
    m_splitVals = new double[count];

    if (m_smoothPredictions) {
      m_internalNodes = new RuleNode[count];
    }

    // trace back to the root, recording each condition and detaching the
    // sibling subtree that the rule does not use
    int i = 0;

    while (temp.parentNode() != null) {
      m_splitAtts[i] = temp.parentNode().splitAtt();
      m_splitVals[i] = temp.parentNode().splitVal();

      if (temp.parentNode().leftNode() == temp) {
        m_relOps[i] = LEFT;
        temp.parentNode().m_right = null;
      } else {
        m_relOps[i] = RIGHT;
        temp.parentNode().m_left = null;
      }

      if (m_smoothPredictions) {
        m_internalNodes[i] = temp.parentNode();
      }

      temp = temp.parentNode();
      i++;
    }

    // now assemble the covered and uncovered instances
    boolean ok;

    for (i = 0; i < m_numInstances; i++) {
      ok = true;

      for (int j = 0; j < m_relOps.length; j++) {
        if (m_relOps[j] == LEFT) {
          if (m_instances.instance(i).value(m_splitAtts[j]) > m_splitVals[j]) {
            m_notCovered.add(m_instances.instance(i));
            ok = false;
            break;
          }
        } else {
          if (m_instances.instance(i).value(m_splitAtts[j])
              <= m_splitVals[j]) {
            m_notCovered.add(m_instances.instance(i));
            ok = false;
            break;
          }
        }
      }

      if (ok) {
        m_numCovered++;
        // m_covered.add(m_instances.instance(i));
      }
    }
  }

  /**
   * Return a description of the m5 tree or rule
   *
   * @return a description of the m5 tree or rule as a String
   */
  public String toString() {
    if (m_useTree) {
      return treeToString();
    } else {
      return ruleToString();
    }
  }

  /**
   * Return a description of the m5 tree
   *
   * @return a description of the m5 tree as a String
   */
  private String treeToString() {
    StringBuffer text = new StringBuffer();

    if (m_topOfTree == null) {
      return "Tree/Rule has not been built yet!";
    }

    text.append("M5 " + ((m_useUnpruned) ? "unpruned " : "pruned ")
        + ((m_regressionTree) ? "regression " : "model ") + "tree:\n");

    if (m_smoothPredictions == true) {
      text.append("(using smoothed linear models)\n");
    }

    text.append(m_topOfTree.treeToString(0));
    text.append(m_topOfTree.printLeafModels());
    text.append("\nNumber of Rules : "
        + m_topOfTree.numberOfLinearModels());

    return text.toString();
  }

  /**
   * Return a description of the rule
   *
   * @return a description of the rule as a String
   */
  private String ruleToString() {
    StringBuffer text = new StringBuffer();

    if (m_splitAtts.length > 0) {
      text.append("IF\n");

      // conditions were recorded leaf-to-root, print them root-to-leaf
      for (int i = m_splitAtts.length - 1; i >= 0; i--) {
        text.append("\t" + m_covered.attribute(m_splitAtts[i]).name() + " ");

        if (m_relOps[i] == LEFT) {
          text.append("<= ");
        } else {
          text.append("> ");
        }

        text.append(Utils.doubleToString(m_splitVals[i], 1, 3) + "\n");
      }

      text.append("THEN\n");
    }

    if (m_ruleModel != null) {
      try {
        text.append(m_ruleModel.printNodeLinearModel());
        text.append(" [" + m_numCovered/*m_covered.numInstances()*/);

        if (m_globalAbsDev > 0.0) {
          text.append("/" + Utils.doubleToString((100 * m_ruleModel.
              rootMeanSquaredError() / m_globalStdDev), 1, 3) + "%]\n\n");
        } else {
          text.append("]\n\n");
        }
      } catch (Exception e) {
        return "Can't print rule";
      }
    }

    // System.out.println(m_instances);
    return text.toString();
  }

  /**
   * Use unpruned tree/rules
   *
   * @param unpruned true if unpruned tree/rules are to be generated
   */
  public void setUnpruned(boolean unpruned) {
    m_useUnpruned = unpruned;
  }

  /**
   * Get whether unpruned tree/rules are being generated
   *
   * @return true if unpruned tree/rules are to be generated
   */
  public boolean getUnpruned() {
    return m_useUnpruned;
  }

  /**
   * Use an m5 tree rather than generate rules
   *
   * @param u true if m5 tree is to be used
   */
  public void setUseTree(boolean u) {
    m_useTree = u;
  }

  /**
   * get whether an m5 tree is being used rather than rules
   *
   * @return true if an m5 tree is being used.
   */
  public boolean getUseTree() {
    return m_useTree;
  }

  /**
   * Smooth predictions
   *
   * @param s true if smoothing is to be used
   */
  public void setSmoothing(boolean s) {
    m_smoothPredictions = s;
  }

  /**
   * Get whether or not smoothing has been turned on
   *
   * @return true if smoothing is being used
   */
  public boolean getSmoothing() {
    return m_smoothPredictions;
  }

  /**
   * Get the instances not covered by this rule
   *
   * @return the instances not covered
   */
  public Instances notCoveredInstances() {
    return m_notCovered;
  }

  /**
   * Free up memory consumed by the set of instances
   * not covered by this rule.
   */
  public void freeNotCoveredInstances() {
    m_notCovered = null;
  }

  /**
   * Returns the standard deviation value of the supplied attribute index.
   *
   * @param attr an attribute index
   * @param inst the instances
   * @return the standard deviation value
   */
  protected static final double stdDev(int attr, Instances inst) {
    int i, count = 0;
    double sd, va, sum = 0.0, sqrSum = 0.0, value;

    for (i = 0; i <= inst.numInstances() - 1; i++) {
      count++;
      value = inst.instance(i).value(attr);
      sum += value;
      sqrSum += value * value;
    }

    if (count > 1) {
      // population variance; Math.abs guards against tiny negative
      // values from floating-point round-off
      va = (sqrSum - sum * sum / count) / count;
      va = Math.abs(va);
      sd = Math.sqrt(va);
    } else {
      sd = 0.0;
    }

    return sd;
  }

  /**
   * Returns the absolute deviation value of the supplied attribute index.
   *
   * @param attr an attribute index
   * @param inst the instances
   * @return the absolute deviation value
   */
  protected static final double absDev(int attr, Instances inst) {
    int i;
    double average = 0.0, absdiff = 0.0, absDev;

    for (i = 0; i <= inst.numInstances() - 1; i++) {
      average += inst.instance(i).value(attr);
    }

    if (inst.numInstances() > 1) {
      average /= (double) inst.numInstances();

      for (i = 0; i <= inst.numInstances() - 1; i++) {
        absdiff += Math.abs(inst.instance(i).value(attr) - average);
      }

      absDev = absdiff / (double) inst.numInstances();
    } else {
      absDev = 0.0;
    }

    return absDev;
  }

  /**
   * Sets whether instances at each node in an M5 tree should be saved
   * for visualization purposes. Default is to save memory.
   *
   * @param save a <code>boolean</code> value
   */
  protected void setSaveInstances(boolean save) {
    m_saveInstances = save;
  }

  /**
   * Get the value of regressionTree.
   *
   * @return Value of regressionTree.
   */
  public boolean getRegressionTree() {
    return m_regressionTree;
  }

  /**
   * Set the value of regressionTree.
   *
   * @param newregressionTree Value to assign to regressionTree.
   */
  public void setRegressionTree(boolean newregressionTree) {
    m_regressionTree = newregressionTree;
  }

  /**
   * Set the minimum number of instances to allow at a leaf node
   *
   * @param minNum the minimum number of instances
   */
  public void setMinNumInstances(double minNum) {
    m_minNumInstances = minNum;
  }

  /**
   * Get the minimum number of instances to allow at a leaf node
   *
   * @return a <code>double</code> value
   */
  public double getMinNumInstances() {
    return m_minNumInstances;
  }

  /**
   * Returns the root node of the M5 tree built for this rule.
   */
  public RuleNode getM5RootNode() {
    return m_topOfTree;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
14,867
21.979907
77
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/m5/RuleNode.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RuleNode.java * Copyright (C) 2000 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.m5; import weka.classifiers.AbstractClassifier; import weka.classifiers.Evaluation; import weka.classifiers.functions.LinearRegression; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionUtils; import weka.core.Utils; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Remove; /** * Constructs a node for use in an m5 tree or rule * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class RuleNode extends AbstractClassifier { /** for serialization */ static final long serialVersionUID = 1979807611124337144L; /** * instances reaching this node */ private Instances m_instances; /** * the class index */ private int m_classIndex; /** * the number of instances reaching this node */ protected int m_numInstances; /** * the number of attributes */ private int m_numAttributes; /** * Node is a leaf */ private boolean m_isLeaf; /** * attribute this node splits on */ private int m_splitAtt; /** * the value of the split attribute */ private double m_splitValue; /** * the linear model at this node */ private PreConstructedLinearModel m_nodeModel; /** * the number of paramters in the chosen model for this node---either * the 
subtree model or the linear model. * The constant term is counted as a paramter---this is for pruning * purposes */ public int m_numParameters; /** * the mean squared error of the model at this node (either linear or * subtree) */ private double m_rootMeanSquaredError; /** * left child node */ protected RuleNode m_left; /** * right child node */ protected RuleNode m_right; /** * the parent of this node */ private RuleNode m_parent; /** * a node will not be split if it contains less then m_splitNum instances */ private double m_splitNum = 4; /** * a node will not be split if its class standard deviation is less * than 5% of the class standard deviation of all the instances */ private double m_devFraction = 0.05; private double m_pruningMultiplier = 2; /** * the number assigned to the linear model if this node is a leaf. * = 0 if this node is not a leaf */ private int m_leafModelNum; /** * a node will not be split if the class deviation of its * instances is less than m_devFraction of the deviation of the * global class */ private double m_globalDeviation; /** * the absolute deviation of the global class */ private double m_globalAbsDeviation; /** * Indices of the attributes to be used in generating a linear model * at this node */ private int [] m_indices; /** * Constant used in original m5 smoothing calculation */ private static final double SMOOTHING_CONSTANT = 15.0; /** * Node id. */ private int m_id; /** * Save the instances at each node (for visualizing in the * Explorer's treevisualizer. */ private boolean m_saveInstances = false; /** * Make a regression tree instead of a model tree */ private boolean m_regressionTree; /** * Creates a new <code>RuleNode</code> instance. 
* * @param globalDev the global standard deviation of the class * @param globalAbsDev the global absolute deviation of the class * @param parent the parent of this node */ public RuleNode(double globalDev, double globalAbsDev, RuleNode parent) { m_nodeModel = null; m_right = null; m_left = null; m_parent = parent; m_globalDeviation = globalDev; m_globalAbsDeviation = globalAbsDev; } /** * Build this node (find an attribute and split point) * * @param data the instances on which to build this node * @throws Exception if an error occurs */ public void buildClassifier(Instances data) throws Exception { m_rootMeanSquaredError = Double.MAX_VALUE; // m_instances = new Instances(data); m_instances = data; m_classIndex = m_instances.classIndex(); m_numInstances = m_instances.numInstances(); m_numAttributes = m_instances.numAttributes(); m_nodeModel = null; m_right = null; m_left = null; if ((m_numInstances < m_splitNum) || (Rule.stdDev(m_classIndex, m_instances) < (m_globalDeviation * m_devFraction))) { m_isLeaf = true; } else { m_isLeaf = false; } split(); } /** * Classify an instance using this node. Recursively calls classifyInstance * on child nodes. 
* * @param inst the instance to classify * @return the prediction for this instance * @throws Exception if an error occurs */ public double classifyInstance(Instance inst) throws Exception { if (m_isLeaf) { if (m_nodeModel == null) { throw new Exception("Classifier has not been built correctly."); } return m_nodeModel.classifyInstance(inst); } if (inst.value(m_splitAtt) <= m_splitValue) { return m_left.classifyInstance(inst); } else { return m_right.classifyInstance(inst); } } /** * Applies the m5 smoothing procedure to a prediction * * @param n number of instances in selected child of this node * @param pred the prediction so far * @param supportPred the prediction of the linear model at this node * @return the current prediction smoothed with the prediction of the * linear model at this node * @throws Exception if an error occurs */ protected static double smoothingOriginal(double n, double pred, double supportPred) throws Exception { double smoothed; smoothed = ((n * pred) + (SMOOTHING_CONSTANT * supportPred)) / (n + SMOOTHING_CONSTANT); return smoothed; } /** * Finds an attribute and split point for this node * * @throws Exception if an error occurs */ public void split() throws Exception { int i; Instances leftSubset, rightSubset; SplitEvaluate bestSplit, currentSplit; boolean[] attsBelow; if (!m_isLeaf) { bestSplit = new YongSplitInfo(0, m_numInstances - 1, -1); currentSplit = new YongSplitInfo(0, m_numInstances - 1, -1); // find the best attribute to split on for (i = 0; i < m_numAttributes; i++) { if (i != m_classIndex) { // sort the instances by this attribute m_instances.sort(i); currentSplit.attrSplit(i, m_instances); if ((Math.abs(currentSplit.maxImpurity() - bestSplit.maxImpurity()) > 1.e-6) && (currentSplit.maxImpurity() > bestSplit.maxImpurity() + 1.e-6)) { bestSplit = currentSplit.copy(); } } } // cant find a good split or split point? 
if (bestSplit.splitAttr() < 0 || bestSplit.position() < 1 || bestSplit.position() > m_numInstances - 1) { m_isLeaf = true; } else { m_splitAtt = bestSplit.splitAttr(); m_splitValue = bestSplit.splitValue(); leftSubset = new Instances(m_instances, m_numInstances); rightSubset = new Instances(m_instances, m_numInstances); for (i = 0; i < m_numInstances; i++) { if (m_instances.instance(i).value(m_splitAtt) <= m_splitValue) { leftSubset.add(m_instances.instance(i)); } else { rightSubset.add(m_instances.instance(i)); } } leftSubset.compactify(); rightSubset.compactify(); // build left and right nodes m_left = new RuleNode(m_globalDeviation, m_globalAbsDeviation, this); m_left.setMinNumInstances(m_splitNum); m_left.setRegressionTree(m_regressionTree); m_left.setSaveInstances(m_saveInstances); m_left.buildClassifier(leftSubset); m_right = new RuleNode(m_globalDeviation, m_globalAbsDeviation, this); m_right.setMinNumInstances(m_splitNum); m_right.setRegressionTree(m_regressionTree); m_right.setSaveInstances(m_saveInstances); m_right.buildClassifier(rightSubset); // now find out what attributes are tested in the left and right // subtrees and use them to learn a linear model for this node if (!m_regressionTree) { attsBelow = attsTestedBelow(); attsBelow[m_classIndex] = true; int count = 0, j; for (j = 0; j < m_numAttributes; j++) { if (attsBelow[j]) { count++; } } int[] indices = new int[count]; count = 0; for (j = 0; j < m_numAttributes; j++) { if (attsBelow[j] && (j != m_classIndex)) { indices[count++] = j; } } indices[count] = m_classIndex; m_indices = indices; } else { m_indices = new int [1]; m_indices[0] = m_classIndex; m_numParameters = 1; } } } if (m_isLeaf) { int [] indices = new int [1]; indices[0] = m_classIndex; m_indices = indices; m_numParameters = 1; // need to evaluate the model here if want correct stats for unpruned // tree } } /** * Build a linear model for this node using those attributes * specified in indices. 
* * @param indices an array of attribute indices to include in the linear * model * @throws Exception if something goes wrong */ private void buildLinearModel(int [] indices) throws Exception { // copy the training instances and remove all but the tested // attributes Instances reducedInst = new Instances(m_instances); Remove attributeFilter = new Remove(); attributeFilter.setInvertSelection(true); attributeFilter.setAttributeIndicesArray(indices); attributeFilter.setInputFormat(reducedInst); reducedInst = Filter.useFilter(reducedInst, attributeFilter); // build a linear regression for the training data using the // tested attributes LinearRegression temp = new LinearRegression(); temp.buildClassifier(reducedInst); double [] lmCoeffs = temp.coefficients(); double [] coeffs = new double [m_instances.numAttributes()]; for (int i = 0; i < lmCoeffs.length - 1; i++) { if (indices[i] != m_classIndex) { coeffs[indices[i]] = lmCoeffs[i]; } } m_nodeModel = new PreConstructedLinearModel(coeffs, lmCoeffs[lmCoeffs.length - 1]); m_nodeModel.buildClassifier(m_instances); } /** * Returns an array containing the indexes of attributes used in tests * above this node * * @return an array of attribute indexes */ private boolean[] attsTestedAbove() { boolean[] atts = new boolean[m_numAttributes]; boolean[] attsAbove = null; if (m_parent != null) { attsAbove = m_parent.attsTestedAbove(); } if (attsAbove != null) { for (int i = 0; i < m_numAttributes; i++) { atts[i] = attsAbove[i]; } } atts[m_splitAtt] = true; return atts; } /** * Returns an array containing the indexes of attributes used in tests * below this node * * @return an array of attribute indexes */ private boolean[] attsTestedBelow() { boolean[] attsBelow = new boolean[m_numAttributes]; boolean[] attsBelowLeft = null; boolean[] attsBelowRight = null; if (m_right != null) { attsBelowRight = m_right.attsTestedBelow(); } if (m_left != null) { attsBelowLeft = m_left.attsTestedBelow(); } for (int i = 0; i < m_numAttributes; i++) { 
if (attsBelowLeft != null) { attsBelow[i] = (attsBelow[i] || attsBelowLeft[i]); } if (attsBelowRight != null) { attsBelow[i] = (attsBelow[i] || attsBelowRight[i]); } } if (!m_isLeaf) { attsBelow[m_splitAtt] = true; } return attsBelow; } /** * Sets the leaves' numbers * @param leafCounter the number of leaves counted * @return the number of the total leaves under the node */ public int numLeaves(int leafCounter) { if (!m_isLeaf) { // node m_leafModelNum = 0; if (m_left != null) { leafCounter = m_left.numLeaves(leafCounter); } if (m_right != null) { leafCounter = m_right.numLeaves(leafCounter); } } else { // leaf leafCounter++; m_leafModelNum = leafCounter; } return leafCounter; } /** * print the linear model at this node * * @return the linear model */ public String toString() { return printNodeLinearModel(); } /** * print the linear model at this node * * @return the linear model at this node */ public String printNodeLinearModel() { return m_nodeModel.toString(); } /** * print all leaf models * * @return the leaf models */ public String printLeafModels() { StringBuffer text = new StringBuffer(); if (m_isLeaf) { text.append("\nLM num: " + m_leafModelNum); text.append(m_nodeModel.toString()); text.append("\n"); } else { text.append(m_left.printLeafModels()); text.append(m_right.printLeafModels()); } return text.toString(); } /** * Returns a description of this node (debugging purposes) * * @return a string describing this node */ public String nodeToString() { StringBuffer text = new StringBuffer(); System.out.println("In to string"); text.append("Node:\n\tnum inst: " + m_numInstances); if (m_isLeaf) { text.append("\n\tleaf"); } else { text.append("\tnode"); } text.append("\n\tSplit att: " + m_instances.attribute(m_splitAtt).name()); text.append("\n\tSplit val: " + Utils.doubleToString(m_splitValue, 1, 3)); text.append("\n\tLM num: " + m_leafModelNum); text.append("\n\tLinear model\n" + m_nodeModel.toString()); text.append("\n\n"); if (m_left != null) { 
text.append(m_left.nodeToString()); } if (m_right != null) { text.append(m_right.nodeToString()); } return text.toString(); } /** * Recursively builds a textual description of the tree * * @param level the level of this node * @return string describing the tree */ public String treeToString(int level) { int i; StringBuffer text = new StringBuffer(); if (!m_isLeaf) { text.append("\n"); for (i = 1; i <= level; i++) { text.append("| "); } if (m_instances.attribute(m_splitAtt).name().charAt(0) != '[') { text.append(m_instances.attribute(m_splitAtt).name() + " <= " + Utils.doubleToString(m_splitValue, 1, 3) + " : "); } else { text.append(m_instances.attribute(m_splitAtt).name() + " false : "); } if (m_left != null) { text.append(m_left.treeToString(level + 1)); } else { text.append("NULL\n"); } for (i = 1; i <= level; i++) { text.append("| "); } if (m_instances.attribute(m_splitAtt).name().charAt(0) != '[') { text.append(m_instances.attribute(m_splitAtt).name() + " > " + Utils.doubleToString(m_splitValue, 1, 3) + " : "); } else { text.append(m_instances.attribute(m_splitAtt).name() + " true : "); } if (m_right != null) { text.append(m_right.treeToString(level + 1)); } else { text.append("NULL\n"); } } else { text.append("LM" + m_leafModelNum); if (m_globalDeviation > 0.0) { text .append(" (" + m_numInstances + "/" + Utils.doubleToString((100.0 * m_rootMeanSquaredError / m_globalDeviation), 1, 3) + "%)\n"); } else { text.append(" (" + m_numInstances + ")\n"); } } return text.toString(); } /** * Traverses the tree and installs linear models at each node. * This method must be called if pruning is not to be performed. 
* * @throws Exception if an error occurs */ public void installLinearModels() throws Exception { Evaluation nodeModelEval; if (m_isLeaf) { buildLinearModel(m_indices); } else { if (m_left != null) { m_left.installLinearModels(); } if (m_right != null) { m_right.installLinearModels(); } buildLinearModel(m_indices); } nodeModelEval = new Evaluation(m_instances); nodeModelEval.evaluateModel(m_nodeModel, m_instances); m_rootMeanSquaredError = nodeModelEval.rootMeanSquaredError(); // save space if (!m_saveInstances) { m_instances = new Instances(m_instances, 0); } } /** * * @throws Exception */ public void installSmoothedModels() throws Exception { if (m_isLeaf) { double [] coefficients = new double [m_numAttributes]; double intercept; double [] coeffsUsedByLinearModel = m_nodeModel.coefficients(); RuleNode current = this; // prime array with leaf node coefficients for (int i = 0; i < coeffsUsedByLinearModel.length; i++) { if (i != m_classIndex) { coefficients[i] = coeffsUsedByLinearModel[i]; } } // intercept intercept = m_nodeModel.intercept(); do { if (current.m_parent != null) { double n = current.m_numInstances; // contribution of the model below for (int i = 0; i < coefficients.length; i++) { coefficients[i] = ((coefficients[i] * n) / (n + SMOOTHING_CONSTANT)); } intercept = ((intercept * n) / (n + SMOOTHING_CONSTANT)); // contribution of this model coeffsUsedByLinearModel = current.m_parent.getModel().coefficients(); for (int i = 0; i < coeffsUsedByLinearModel.length; i++) { if (i != m_classIndex) { // smooth in these coefficients (at this node) coefficients[i] += ((SMOOTHING_CONSTANT * coeffsUsedByLinearModel[i]) / (n + SMOOTHING_CONSTANT)); } } // smooth in the intercept intercept += ((SMOOTHING_CONSTANT * current.m_parent.getModel().intercept()) / (n + SMOOTHING_CONSTANT)); current = current.m_parent; } } while (current.m_parent != null); m_nodeModel = new PreConstructedLinearModel(coefficients, intercept); m_nodeModel.buildClassifier(m_instances); } if (m_left 
!= null) { m_left.installSmoothedModels(); } if (m_right != null) { m_right.installSmoothedModels(); } } /** * Recursively prune the tree * * @throws Exception if an error occurs */ public void prune() throws Exception { Evaluation nodeModelEval = null; if (m_isLeaf) { buildLinearModel(m_indices); nodeModelEval = new Evaluation(m_instances); // count the constant term as a paramter for a leaf // Evaluate the model nodeModelEval.evaluateModel(m_nodeModel, m_instances); m_rootMeanSquaredError = nodeModelEval.rootMeanSquaredError(); } else { // Prune the left and right subtrees if (m_left != null) { m_left.prune(); } if (m_right != null) { m_right.prune(); } buildLinearModel(m_indices); nodeModelEval = new Evaluation(m_instances); double rmsModel; double adjustedErrorModel; nodeModelEval.evaluateModel(m_nodeModel, m_instances); rmsModel = nodeModelEval.rootMeanSquaredError(); adjustedErrorModel = rmsModel * pruningFactor(m_numInstances, m_nodeModel.numParameters() + 1); // Evaluate this node (ie its left and right subtrees) Evaluation nodeEval = new Evaluation(m_instances); double rmsSubTree; double adjustedErrorNode; int l_params = 0, r_params = 0; nodeEval.evaluateModel(this, m_instances); rmsSubTree = nodeEval.rootMeanSquaredError(); if (m_left != null) { l_params = m_left.numParameters(); } if (m_right != null) { r_params = m_right.numParameters(); } adjustedErrorNode = rmsSubTree * pruningFactor(m_numInstances, (l_params + r_params + 1)); if ((adjustedErrorModel <= adjustedErrorNode) || (adjustedErrorModel < (m_globalDeviation * 0.00001))) { // Choose linear model for this node rather than subtree model m_isLeaf = true; m_right = null; m_left = null; m_numParameters = m_nodeModel.numParameters() + 1; m_rootMeanSquaredError = rmsModel; } else { m_numParameters = (l_params + r_params + 1); m_rootMeanSquaredError = rmsSubTree; } } // save space if (!m_saveInstances) { m_instances = new Instances(m_instances, 0); } } /** * Compute the pruning factor * * @param 
num_instances number of instances * @param num_params number of parameters in the model * @return the pruning factor */ private double pruningFactor(int num_instances, int num_params) { if (num_instances <= num_params) { return 10.0; // Caution says Yong in his code } return ((double) (num_instances + m_pruningMultiplier * num_params) / (double) (num_instances - num_params)); } /** * Find the leaf with greatest coverage * * @param maxCoverage the greatest coverage found so far * @param bestLeaf the leaf with the greatest coverage */ public void findBestLeaf(double[] maxCoverage, RuleNode[] bestLeaf) { if (!m_isLeaf) { if (m_left != null) { m_left.findBestLeaf(maxCoverage, bestLeaf); } if (m_right != null) { m_right.findBestLeaf(maxCoverage, bestLeaf); } } else { if (m_numInstances > maxCoverage[0]) { maxCoverage[0] = m_numInstances; bestLeaf[0] = this; } } } /** * Return a list containing all the leaves in the tree * * @param v a single element array containing a vector of leaves */ public void returnLeaves(FastVector[] v) { if (m_isLeaf) { v[0].addElement(this); } else { if (m_left != null) { m_left.returnLeaves(v); } if (m_right != null) { m_right.returnLeaves(v); } } } /** * Get the parent of this node * * @return the parent of this node */ public RuleNode parentNode() { return m_parent; } /** * Get the left child of this node * * @return the left child of this node */ public RuleNode leftNode() { return m_left; } /** * Get the right child of this node * * @return the right child of this node */ public RuleNode rightNode() { return m_right; } /** * Get the index of the splitting attribute for this node * * @return the index of the splitting attribute */ public int splitAtt() { return m_splitAtt; } /** * Get the split point for this node * * @return the split point for this node */ public double splitVal() { return m_splitValue; } /** * Get the number of linear models in the tree * * @return the number of linear models */ public int numberOfLinearModels() { if 
(m_isLeaf) { return 1; } else { return m_left.numberOfLinearModels() + m_right.numberOfLinearModels(); } } /** * Return true if this node is a leaf * * @return true if this node is a leaf */ public boolean isLeaf() { return m_isLeaf; } /** * Get the root mean squared error at this node * * @return the root mean squared error */ protected double rootMeanSquaredError() { return m_rootMeanSquaredError; } /** * Get the linear model at this node * * @return the linear model at this node */ public PreConstructedLinearModel getModel() { return m_nodeModel; } /** * Return the number of instances that reach this node. * * @return the number of instances at this node. */ public int getNumInstances() { return m_numInstances; } /** * Get the number of parameters in the model at this node * * @return the number of parameters in the model at this node */ private int numParameters() { return m_numParameters; } /** * Get the value of regressionTree. * * @return Value of regressionTree. */ public boolean getRegressionTree() { return m_regressionTree; } /** * Set the minumum number of instances to allow at a leaf node * * @param minNum the minimum number of instances */ public void setMinNumInstances(double minNum) { m_splitNum = minNum; } /** * Get the minimum number of instances to allow at a leaf node * * @return a <code>double</code> value */ public double getMinNumInstances() { return m_splitNum; } /** * Set the value of regressionTree. * * @param newregressionTree Value to assign to regressionTree. 
*/ public void setRegressionTree(boolean newregressionTree) { m_regressionTree = newregressionTree; } /** * Print all the linear models at the learf (debugging purposes) */ public void printAllModels() { if (m_isLeaf) { System.out.println(m_nodeModel.toString()); } else { System.out.println(m_nodeModel.toString()); m_left.printAllModels(); m_right.printAllModels(); } } /** * Assigns a unique identifier to each node in the tree * * @param lastID last id number used * @return ID after processing child nodes */ protected int assignIDs(int lastID) { int currLastID = lastID + 1; m_id = currLastID; if (m_left != null) { currLastID = m_left.assignIDs(currLastID); } if (m_right != null) { currLastID = m_right.assignIDs(currLastID); } return currLastID; } /** * Assign a unique identifier to each node in the tree and then * calls graphTree * * @param text a <code>StringBuffer</code> value */ public void graph(StringBuffer text) { assignIDs(-1); graphTree(text); } /** * Return a dotty style string describing the tree * * @param text a <code>StringBuffer</code> value */ protected void graphTree(StringBuffer text) { text.append("N" + m_id + (m_isLeaf ? " [label=\"LM " + m_leafModelNum : " [label=\"" + m_instances.attribute(m_splitAtt).name()) + (m_isLeaf ? " (" + ((m_globalDeviation > 0.0) ? m_numInstances + "/" + Utils.doubleToString((100.0 * m_rootMeanSquaredError / m_globalDeviation), 1, 3) + "%)" : m_numInstances + ")") + "\" shape=box style=filled " : "\"") + (m_saveInstances ? "data=\n" + m_instances + "\n,\n" : "") + "]\n"); if (m_left != null) { text.append("N" + m_id + "->" + "N" + m_left.m_id + " [label=\"<=" + Utils.doubleToString(m_splitValue, 1, 3) + "\"]\n"); m_left.graphTree(text); } if (m_right != null) { text.append("N" + m_id + "->" + "N" + m_right.m_id + " [label=\">" + Utils.doubleToString(m_splitValue, 1, 3) + "\"]\n"); m_right.graphTree(text); } } /** * Set whether to save instances for visualization purposes. * Default is to save memory. 
* * @param save a <code>boolean</code> value */ protected void setSaveInstances(boolean save) { m_saveInstances = save; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
27,765
24.059567
87
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/m5/SplitEvaluate.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * SplitEvaluate.java
 * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.m5;

import weka.core.Instances;

/**
 * Interface for objects that determine a split point on an attribute.
 * An implementation evaluates one attribute at a time ({@link #attrSplit})
 * and afterwards exposes the chosen split via the accessor methods below.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public interface SplitEvaluate {

  /**
   * makes a copy of the SplitEvaluate object
   *
   * @return a copy of the object
   * @throws Exception if the copy cannot be created
   */
  SplitEvaluate copy () throws Exception;

  /**
   * Finds the best splitting point for an attribute in the instances.
   * The result is stored in the implementing object's state and queried
   * afterwards via {@link #position()}, {@link #splitAttr()},
   * {@link #splitValue()} and {@link #maxImpurity()}.
   *
   * @param attr the splitting attribute
   * @param inst the instances
   * @exception Exception if something goes wrong
   */
  void attrSplit (int attr, Instances inst) throws Exception;

  /**
   * Returns the impurity of this split
   *
   * @return the impurity of this split
   */
  double maxImpurity();

  /**
   * Returns the position of the split in the sorted values. -1 indicates that
   * a split could not be found.
   *
   * @return an <code>int</code> value
   */
  int position();

  /**
   * Returns the attribute used in this split
   *
   * @return the attribute used in this split
   */
  int splitAttr();

  /**
   * Returns the split value
   *
   * @return the split value
   */
  double splitValue();
}
1,996
24.602564
78
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/m5/Values.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Values.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.classifiers.trees.m5; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * Stores some statistics. * @author Yong Wang (yongwang@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public final class Values implements RevisionHandler { int numInstances; // number of the instances int missingInstances; // number of the instances with missing values int first; // index of the first instance int last; // index of the last instance int attr; // attribute double sum; // sum of the instances for attribute double sqrSum; // squared sum of the instances for attribute double va; // variance double sd; // standard deviation /** * Constructs an object which stores some statistics of the instances such * as sum, squared sum, variance, standard deviation * @param low the index of the first instance * @param high the index of the last instance * @param attribute the attribute * @param inst the instances */ public Values(int low,int high,int attribute,Instances inst){ int i,count=0; double value; numInstances = high-low+1; missingInstances = 0; first = low; last = high; attr = attribute; sum=0.0; sqrSum=0.0; for(i=first;i<=last;i++){ if(inst.instance(i).isMissing(attr)==false){ count++; value = 
inst.instance(i).value(attr); sum += value; sqrSum += value * value; } if(count >1){ va = (sqrSum - sum * sum/count)/count; va = Math.abs(va); sd = Math.sqrt(va); } else {va = 0.0; sd = 0.0;} } } /** * Converts the stats to a string * @return the converted string */ public final String toString(){ StringBuffer text = new StringBuffer(); text.append("Print statistic values of instances (" + first + "-" + last + "\n"); text.append(" Number of instances:\t" + numInstances + "\n"); text.append(" NUmber of instances with unknowns:\t" + missingInstances + "\n"); text.append(" Attribute:\t\t\t:" + attr + "\n"); text.append(" Sum:\t\t\t" + sum + "\n"); text.append(" Squared sum:\t\t" + sqrSum + "\n"); text.append(" Stanard Deviation:\t\t" + sd + "\n"); return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
3,353
28.165217
79
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/trees/m5/YongSplitInfo.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * YongSplitInfo.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees.m5;

import java.io.Serializable;

import weka.core.Instances;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

/**
 * Stores split information: for one attribute, the best split point found
 * over a sorted range of instances and the impurity reduction it achieves.
 *
 * @author Yong Wang (yongwang@cs.waikato.ac.nz)
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public final class YongSplitInfo
  implements Cloneable, Serializable, SplitEvaluate, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 1864267581079767881L;

  private int number;          // number of total instances
  private int first;           // first instance index
  private int last;            // last instance index
  private int position;        // position of maximum impurity reduction (-1 = none found)
  private double maxImpurity;  // maximum impurity reduction
  private double leftAve;      // left average class value
  private double rightAve;     // right average class value
  private int splitAttr;       // spliting attribute
  private double splitValue;   // splitting value

  /**
   * Constructs an object which contains the split information
   *
   * @param low the index of the first instance
   * @param high the index of the last instance
   * @param attr an attribute
   */
  public YongSplitInfo(int low, int high, int attr) {
    number = high - low + 1;
    first = low;
    last = high;
    position = -1;
    maxImpurity = -1.e20;    // sentinel: any real reduction beats this
    splitAttr = attr;        // attr < 0 is an empty object
    splitValue = 0.0;
    // NOTE(review): this mutates the global Utils.SMALL tolerance as a side
    // effect of constructing a split-info object, affecting every Utils.eq
    // comparison process-wide — confirm this is intentional.
    Utils.SMALL = 1e-10;
  }

  /**
   * Makes a copy of this SplitInfo object
   *
   * @return a shallow clone of this object
   * @throws Exception if cloning fails
   */
  public final SplitEvaluate copy() throws Exception {
    YongSplitInfo s = (YongSplitInfo) this.clone();
    return s;
  }

  /**
   * Resets the object of split information
   *
   * @param low the index of the first instance
   * @param high the index of the last instance
   * @param attr the attribute
   */
  public final void initialize(int low, int high, int attr) {
    number = high - low + 1;
    first = low;
    last = high;
    position = -1;
    maxImpurity = -1.e20;
    splitAttr = attr;
    splitValue = 0.0;
  }

  /**
   * Converts the spliting information to string
   *
   * @param inst the instances
   */
  public final String toString(Instances inst) {
    StringBuffer text = new StringBuffer();

    text.append("Print SplitInfo:\n");
    text.append(" Instances:\t\t" + number + " (" + first + "-" + position
                + "," + (position + 1) + "-" + last + ")\n");
    text.append(" Maximum Impurity Reduction:\t"
                + Utils.doubleToString(maxImpurity, 1, 4) + "\n");
    text.append(" Left average:\t" + leftAve + "\n");
    text.append(" Right average:\t" + rightAve + "\n");
    // maxImpurity <= 0 means no usable split point was found
    if (maxImpurity > 0.0)
      text.append(" Splitting function:\t" + inst.attribute(splitAttr).name()
                  + " = " + splitValue + "\n");
    else
      text.append(" Splitting function:\tnull\n");

    return text.toString();
  }

  /**
   * Finds the best splitting point for an attribute in the instances.
   * Assumes the instances are sorted on the attribute (split candidates are
   * taken between consecutive distinct values) — TODO confirm with callers.
   *
   * @param attr the splitting attribute
   * @param inst the instances
   * @exception Exception if something goes wrong
   */
  public final void attrSplit(int attr, Instances inst) throws Exception {
    int i, len, count, part;
    Impurity imp;
    int low = 0;
    int high = inst.numInstances() - 1;

    this.initialize(low, high, attr);
    // too few instances to leave anything on both sides of a split
    if (number < 4) {
      return;
    }
    // len = minimum number of instances kept on each side (1/5 of the range,
    // at least 1); candidate positions exclude the first and last len rows
    len = ((high - low + 1) < 5) ? 1 : (high - low + 1) / 5;
    position = low;
    part = low + len - 1;
    // Impurity is a project class; presumably it is primed with the first
    // `part` instances on the left and updated incrementally — confirm
    // against weka.classifiers.trees.m5.Impurity.
    imp = new Impurity(part, attr, inst, 5);
    count = 0;
    for (i = low + len; i <= high - len - 1; i++) {
      // shift instance i's class value into the left partition
      imp.incremental(inst.instance(i).classValue(), 1);
      // only consider a cut between two distinct attribute values
      if (Utils.eq(inst.instance(i + 1).value(attr),
                   inst.instance(i).value(attr)) == false) {
        count = i;  // NOTE(review): assigned but never read after the loop
        if (imp.impurity > maxImpurity) {
          maxImpurity = imp.impurity;
          // split halfway between the two distinct values
          splitValue = (inst.instance(i).value(attr)
                        + inst.instance(i + 1).value(attr)) * 0.5;
          leftAve = imp.sl / imp.nl;
          rightAve = imp.sr / imp.nr;
          position = i;
        }
      }
    }
  }

  /**
   * Returns the impurity of this split
   *
   * @return the impurity of this split
   */
  public double maxImpurity() {
    return maxImpurity;
  }

  /**
   * Returns the attribute used in this split
   *
   * @return the attribute used in this split
   */
  public int splitAttr() {
    return splitAttr;
  }

  /**
   * Returns the position of the split in the sorted values. -1 indicates that
   * a split could not be found.
   *
   * @return an <code>int</code> value
   */
  public int position() {
    return position;
  }

  /**
   * Returns the split value
   *
   * @return the split value
   */
  public double splitValue() {
    return splitValue;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
25.872642
78
java
tsml-java
tsml-java-master/src/main/java/weka/classifiers/xml/XMLClassifier.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * XMLClassifier.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.classifiers.xml;

import weka.core.RevisionUtils;
import weka.core.xml.XMLBasicSerialization;

/**
 * This class serializes and deserializes a Classifier instance to and
 * fro XML.<br>
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 */
public class XMLClassifier extends XMLBasicSerialization {

  /**
   * initializes the serialization
   *
   * @throws Exception if initialization fails
   */
  public XMLClassifier() throws Exception {
    super();
  }

  /**
   * generates internally a new XML document and clears also the IgnoreList and
   * the mappings for the Read/Write-Methods
   *
   * @throws Exception if the superclass reset fails
   */
  public void clear() throws Exception {
    super.clear();

    // allow the "debug" and "options" bean properties of Classifier to be
    // (de)serialized; everything else falls back to the superclass defaults
    m_Properties.addAllowed(weka.classifiers.Classifier.class, "debug");
    m_Properties.addAllowed(weka.classifiers.Classifier.class, "options");
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
1,865
27.272727
80
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/AbstractClusterer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * AbstractClusterer.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.clusterers; import java.io.Serializable; import weka.core.Capabilities; import weka.core.CapabilitiesHandler; import weka.core.Instance; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.SerializedObject; import weka.core.Utils; /** * Abstract clusterer. * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public abstract class AbstractClusterer implements Clusterer, Cloneable, Serializable, CapabilitiesHandler, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -6099962589663877632L; // =============== // Public methods. // =============== /** * Generates a clusterer. Has to initialize all fields of the clusterer * that are not being set via options. * * @param data set of instances serving as training data * @exception Exception if the clusterer has not been * generated successfully */ public abstract void buildClusterer(Instances data) throws Exception; /** * Classifies a given instance. Either this or distributionForInstance() * needs to be implemented by subclasses. 
* * @param instance the instance to be assigned to a cluster * @return the number of the assigned cluster as an integer * @exception Exception if instance could not be clustered * successfully */ public int clusterInstance(Instance instance) throws Exception { double [] dist = distributionForInstance(instance); if (dist == null) { throw new Exception("Null distribution predicted"); } if (Utils.sum(dist) <= 0) { throw new Exception("Unable to cluster instance"); } return Utils.maxIndex(dist); } /** * Predicts the cluster memberships for a given instance. Either * this or clusterInstance() needs to be implemented by subclasses. * * @param instance the instance to be assigned a cluster. * @return an array containing the estimated membership * probabilities of the test instance in each cluster (this * should sum to at most 1) * @exception Exception if distribution could not be * computed successfully */ public double[] distributionForInstance(Instance instance) throws Exception { double[] d = new double[numberOfClusters()]; d[clusterInstance(instance)] = 1.0; return d; } /** * Returns the number of clusters. * * @return the number of clusters generated for a training dataset. * @exception Exception if number of clusters could not be returned * successfully */ public abstract int numberOfClusters() throws Exception; /** * Creates a new instance of a clusterer given it's class name and * (optional) arguments to pass to it's setOptions method. If the * clusterer implements OptionHandler and the options parameter is * non-null, the clusterer will have it's options set. * * @param clustererName the fully qualified class name of the clusterer * @param options an array of options suitable for passing to setOptions. May * be null. * @return the newly created search object, ready for use. * @exception Exception if the clusterer class name is invalid, or the * options supplied are not acceptable to the clusterer. 
*/ public static Clusterer forName(String clustererName, String [] options) throws Exception { return (Clusterer)Utils.forName(Clusterer.class, clustererName, options); } /** * Creates a deep copy of the given clusterer using serialization. * * @param model the clusterer to copy * @return a deep copy of the clusterer * @exception Exception if an error occurs */ public static Clusterer makeCopy(Clusterer model) throws Exception { return (Clusterer) new SerializedObject(model).getObject(); } /** * Creates copies of the current clusterer. Note that this method * now uses Serialization to perform a deep copy, so the Clusterer * object must be fully Serializable. Any currently built model will * now be copied as well. * * @param model an example clusterer to copy * @param num the number of clusterer copies to create. * @return an array of clusterers. * @exception Exception if an error occurs */ public static Clusterer [] makeCopies(Clusterer model, int num) throws Exception { if (model == null) { throw new Exception("No model clusterer set"); } Clusterer [] clusterers = new Clusterer [num]; SerializedObject so = new SerializedObject(model); for(int i = 0; i < clusterers.length; i++) { clusterers[i] = (Clusterer) so.getObject(); } return clusterers; } /** * Returns the Capabilities of this clusterer. Derived classifiers have to * override this method to enable capabilities. * * @return the capabilities of this object * @see Capabilities */ public Capabilities getCapabilities() { Capabilities result; result = new Capabilities(this); result.enableAll(); // result.enable(Capability.NO_CLASS); return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * runs the clusterer instance with the given options. 
* * @param clusterer the clusterer to run * @param options the commandline options */ public static void runClusterer(Clusterer clusterer, String[] options) { try { System.out.println(ClusterEvaluation.evaluateClusterer(clusterer, options)); } catch (Exception e) { if ( (e.getMessage() == null) || ( (e.getMessage() != null) && (e.getMessage().indexOf("General options") == -1) ) ) e.printStackTrace(); else System.err.println(e.getMessage()); } } }
6,722
30.269767
87
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/AbstractDensityBasedClusterer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * AbstractDensityBasedClusterer.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.clusterers; import weka.core.Instance; import weka.core.SerializedObject; import weka.core.Utils; /** * Abstract clustering model that produces (for each test instance) * an estimate of the membership in each cluster * (ie. a probability distribution). * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public abstract class AbstractDensityBasedClusterer extends AbstractClusterer implements DensityBasedClusterer { /** for serialization. */ private static final long serialVersionUID = -5950728041704213845L; // =============== // Public methods. // =============== /** * Returns the prior probability of each cluster. * * @return the prior probability for each cluster * @exception Exception if priors could not be * returned successfully */ public abstract double[] clusterPriors() throws Exception; /** * Computes the log of the conditional density (per cluster) for a given instance. 
* * @param instance the instance to compute the density for * @return an array containing the estimated densities * @exception Exception if the density could not be computed * successfully */ public abstract double[] logDensityPerClusterForInstance(Instance instance) throws Exception; /** * Computes the density for a given instance. * * @param instance the instance to compute the density for * @return the density. * @exception Exception if the density could not be computed successfully */ public double logDensityForInstance(Instance instance) throws Exception { double[] a = logJointDensitiesForInstance(instance); double max = a[Utils.maxIndex(a)]; double sum = 0.0; for(int i = 0; i < a.length; i++) { sum += Math.exp(a[i] - max); } return max + Math.log(sum); } /** * Returns the cluster probability distribution for an instance. * * @param instance the instance to be clustered * @return the probability distribution * @throws Exception if computation fails */ public double[] distributionForInstance(Instance instance) throws Exception { return Utils.logs2probs(logJointDensitiesForInstance(instance)); } /** * Returns the logs of the joint densities for a given instance. * * @param inst the instance * @return the array of values * @exception Exception if values could not be computed */ public double[] logJointDensitiesForInstance(Instance inst) throws Exception { double[] weights = logDensityPerClusterForInstance(inst); double[] priors = clusterPriors(); for (int i = 0; i < weights.length; i++) { if (priors[i] > 0) { weights[i] += Math.log(priors[i]); } else { throw new IllegalArgumentException("Cluster empty!"); } } return weights; } /** * Creates copies of the current clusterer. Note that this method * now uses Serialization to perform a deep copy, so the Clusterer * object must be fully Serializable. Any currently built model will * now be copied as well. * * @param model an example clusterer to copy * @param num the number of clusterer copies to create. 
* @return an array of clusterers. * @exception Exception if an error occurs */ public static DensityBasedClusterer [] makeCopies(DensityBasedClusterer model, int num) throws Exception { if (model == null) { throw new Exception("No model clusterer set"); } DensityBasedClusterer [] clusterers = new DensityBasedClusterer [num]; SerializedObject so = new SerializedObject(model); for(int i = 0; i < clusterers.length; i++) { clusterers[i] = (DensityBasedClusterer) so.getObject(); } return clusterers; } }
4,614
30.394558
84
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/CLOPE.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Copyright (C) 2008 * & Alexander Smirnov (austellus@gmail.com) */ package weka.clusterers; import java.io.Serializable; import java.util.ArrayList; import java.util.Enumeration; import java.util.HashMap; import java.util.Vector; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SparseInstance; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; /** <!-- globalinfo-start --> * Yiling Yang, Xudong Guan, Jinyuan You: CLOPE: a fast and effective clustering algorithm for transactional data. In: Proceedings of the eighth ACM SIGKDD international conference on Knowledge discovery and data mining, 682-687, 2002. 
* <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Yang2002, * author = {Yiling Yang and Xudong Guan and Jinyuan You}, * booktitle = {Proceedings of the eighth ACM SIGKDD international conference on Knowledge discovery and data mining}, * pages = {682-687}, * publisher = {ACM New York, NY, USA}, * title = {CLOPE: a fast and effective clustering algorithm for transactional data}, * year = {2002} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -R &lt;num&gt; * Repulsion * (default 2.6)</pre> * <!-- options-end --> * * @author Alexander Smirnov (austellus@gmail.com) * @version $Revision: 5538 $ */ public class CLOPE extends AbstractClusterer implements OptionHandler, TechnicalInformationHandler { /** for serialization */ static final long serialVersionUID = -567567567567588L; /** * Inner class for cluster of CLOPE. * * @see Serializable */ private class CLOPECluster implements Serializable { /** * Number of transactions */ public int N = 0; //number of transactions /** * Number of distinct items (or width) */ public int W = 0; /** * Size of cluster */ public int S = 0; /** * Hash of <item, occurrence> pairs */ public HashMap occ = new HashMap(); /** * Add item to cluster */ public void AddItem(String Item) { int count; if (!this.occ.containsKey(Item)) { this.occ.put(Item, 1); } else { count = (Integer) this.occ.get(Item); count++; this.occ.remove(Item); this.occ.put(Item, count); } this.S++; } public void AddItem(Integer Item) { int count; if (!this.occ.containsKey(Item)) { this.occ.put(Item, 1); } else { count = (Integer) this.occ.get(Item); count++; this.occ.remove(Item); this.occ.put(Item, count); } this.S++; } /** * Delete item from cluster */ public void DeleteItem(String Item) { int count; count = (Integer) this.occ.get(Item); if (count == 1) { this.occ.remove(Item); } else { count--; this.occ.remove(Item); this.occ.put(Item, count); } this.S--; } 
    /**
     * Removes one occurrence of the given (sparse, Integer-keyed) item;
     * the key disappears from the map when its count reaches zero.
     */
    public void DeleteItem(Integer Item) {
      int count;
      count = (Integer) this.occ.get(Item);
      if (count == 1) {
        this.occ.remove(Item);
      } else {
        count--;
        this.occ.remove(Item);
        this.occ.put(Item, count);
      }
      this.S--;
    }

    /**
     * Calculate Delta: the change in this cluster's "profit"
     * (S*N / W^r in the CLOPE paper) if {@code inst} were added.
     * Sparse instances contribute Integer attribute-index keys,
     * dense instances contribute String (index+value) keys, so the
     * two branches must stay consistent with AddItem/DeleteItem.
     *
     * @param inst the candidate instance
     * @param r    the repulsion parameter
     * @return the profit gain of adding inst to this cluster
     */
    public double DeltaAdd(Instance inst, double r) {
      //System.out.println("DeltaAdd");
      int S_new;                 // cluster size after the hypothetical add
      int W_new;                 // cluster width (distinct items) after the add
      double profit;
      double profit_new;
      double deltaprofit;
      S_new = 0;
      W_new = occ.size();
      if (inst instanceof SparseInstance) {
        //System.out.println("DeltaAddSparceInstance");
        for (int i = 0; i < inst.numValues(); i++) {
          S_new++;
          // item not yet in this cluster -> width grows
          if ((Integer) this.occ.get(inst.index(i)) == null) {
            W_new++;
          }
        }
      } else {
        for (int i = 0; i < inst.numAttributes(); i++) {
          if (!inst.isMissing(i)) {
            S_new++;
            if ((Integer) this.occ.get(i + inst.toString(i)) == null) {
              W_new++;
            }
          }
        }
      }
      S_new += S;
      if (N == 0) {
        // empty cluster: the "gain" is simply the would-be profit itself
        deltaprofit = S_new / Math.pow(W_new, r);
      } else {
        profit = S * N / Math.pow(W, r);
        profit_new = S_new * (N + 1) / Math.pow(W_new, r);
        deltaprofit = profit_new - profit;
      }
      return deltaprofit;
    }

    /**
     * Add instance to cluster, updating item counts, width W and
     * transaction count N.
     */
    public void AddInstance(Instance inst) {
      if (inst instanceof SparseInstance) {
        // System.out.println("AddSparceInstance");
        for (int i = 0; i < inst.numValues(); i++) {
          AddItem(inst.index(i));
          // for(int i=0;i<inst.numAttributes();int++){
          // AddItem(inst.index(i)+inst.value(i));
        }
      } else {
        for (int i = 0; i < inst.numAttributes(); i++) {
          if (!inst.isMissing(i)) {
            AddItem(i + inst.toString(i));
          }
        }
      }
      this.W = this.occ.size();
      this.N++;
    }

    /**
     * Delete instance from cluster (exact inverse of AddInstance:
     * same key construction in both branches).
     */
    public void DeleteInstance(Instance inst) {
      if (inst instanceof SparseInstance) {
        // System.out.println("DeleteSparceInstance");
        for (int i = 0; i < inst.numValues(); i++) {
          DeleteItem(inst.index(i));
        }
      } else {
        for (int i = 0; i <= inst.numAttributes() - 1; i++) {
          if (!inst.isMissing(i)) {
            DeleteItem(i + inst.toString(i));
          }
        }
      }
      this.W = this.occ.size();
      this.N--;
    }
  }

  /**
   * Array of clusters
   */
  public ArrayList<CLOPECluster> clusters = new ArrayList<CLOPECluster>();

  /**
   * Specifies the repulsion default
   */
  protected double m_RepulsionDefault = 2.6;

  /**
   * Specifies the repulsion
   */
  protected double m_Repulsion = m_RepulsionDefault;

  /**
   * Number of clusters
   */
  protected int m_numberOfClusters = -1;

  /**
   * Counter for the processed instances
   */
  protected int m_processed_InstanceID;

  /**
   * Number of instances
   */
  protected int m_numberOfInstances;

  /**
   * Cluster index assigned to each training instance, in training order.
   */
  protected ArrayList<Integer> m_clusterAssignments = new ArrayList();

  /**
   * whether the number of clusters was already determined
   */
  protected boolean m_numberOfClustersDetermined = false;

  /**
   * Returns the number of clusters found (re-derived from the current
   * cluster list on every call).
   */
  public int numberOfClusters() {
    determineNumberOfClusters();
    return m_numberOfClusters;
  }

  /**
   * Caches the current cluster count in m_numberOfClusters.
   */
  protected void determineNumberOfClusters() {
    m_numberOfClusters = clusters.size();
    m_numberOfClustersDetermined = true;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector result = new Vector();

    result.addElement(new Option(
        "\tRepulsion\n" + "\t(default " + m_RepulsionDefault + ")",
        "R", 1, "-R <num>"));

    return result.elements();
  }

  /**
   * Parses a given list of options.
   * <p/>
   * <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -R &lt;num&gt;
   * Repulsion
   * (default 2.6)</pre>
   * <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String tmpStr;

    tmpStr = Utils.getOption('R', options);
    if (tmpStr.length() != 0) {
      setRepulsion(Double.parseDouble(tmpStr));
    } else {
      setRepulsion(m_RepulsionDefault);
    }
  }

  /**
   * Gets the current settings of CLOPE
   *
   * @return an array of strings suitable for passing to setOptions()
   */
  public String[] getOptions() {
    Vector result;

    result = new Vector();

    result.add("-R");
    result.add("" + getRepulsion());

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String repulsionTipText() {
    return "Repulsion to be used.";
  }

  /**
   * set the repulsion
   *
   * @param value the repulsion
   * @throws Exception if number of clusters is negative
   */
  public void setRepulsion(double value) {
    m_Repulsion = value;
  }

  /**
   * gets the repulsion
   *
   * @return the repulsion
   */
  public double getRepulsion() {
    return m_Repulsion;
  }

  /**
   * Returns default capabilities of the clusterer.
* * @return the capabilities of this clusterer */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); result.enable(Capability.NO_CLASS); // attributes result.enable(Capability.NOMINAL_ATTRIBUTES); // result.enable(Capability.NUMERIC_ATTRIBUTES); result.enable(Capability.MISSING_VALUES); return result; } /** * Generate Clustering via CLOPE * @param instances The instances that need to be clustered * @throws java.lang.Exception If clustering was not successful */ public void buildClusterer(Instances data) throws Exception { clusters.clear(); m_processed_InstanceID = 0; m_clusterAssignments.clear(); m_numberOfInstances = data.numInstances(); boolean moved; //Phase 1 for (int i = 0; i < data.numInstances(); i++) { int clusterid = AddInstanceToBestCluster(data.instance(i)); m_clusterAssignments.add(clusterid); } //Phase 2 do { moved = false; for (int i = 0; i < data.numInstances(); i++) { m_processed_InstanceID = i; int clusterid = MoveInstanceToBestCluster(data.instance(i)); if (clusterid != m_clusterAssignments.get(i)) { moved = true; m_clusterAssignments.set(i, clusterid); } } } while (!moved); m_processed_InstanceID = 0; } /** * the default constructor */ public CLOPE() { super(); } /** * Add instance to best cluster */ public int AddInstanceToBestCluster(Instance inst) { double delta; double deltamax; int clustermax = -1; if (clusters.size() > 0) { int tempS = 0; int tempW = 0; if (inst instanceof SparseInstance) { for (int i = 0; i < inst.numValues(); i++) { tempS++; tempW++; } } else { for (int i = 0; i < inst.numAttributes(); i++) { if (!inst.isMissing(i)) { tempS++; tempW++; } } } deltamax = tempS / Math.pow(tempW, m_Repulsion); for (int i = 0; i < clusters.size(); i++) { CLOPECluster tempcluster = clusters.get(i); delta = tempcluster.DeltaAdd(inst, m_Repulsion); // System.out.println("delta " + delta); if (delta > deltamax) { deltamax = delta; clustermax = i; } } } else { CLOPECluster newcluster = new 
CLOPECluster(); clusters.add(newcluster); newcluster.AddInstance(inst); return clusters.size() - 1; } if (clustermax == -1) { CLOPECluster newcluster = new CLOPECluster(); clusters.add(newcluster); newcluster.AddInstance(inst); return clusters.size() - 1; } clusters.get(clustermax).AddInstance(inst); return clustermax; } /** * Move instance to best cluster */ public int MoveInstanceToBestCluster(Instance inst) { clusters.get(m_clusterAssignments.get(m_processed_InstanceID)).DeleteInstance(inst); m_clusterAssignments.set(m_processed_InstanceID, -1); double delta; double deltamax; int clustermax = -1; int tempS = 0; int tempW = 0; if (inst instanceof SparseInstance) { for (int i = 0; i < inst.numValues(); i++) { tempS++; tempW++; } } else { for (int i = 0; i < inst.numAttributes(); i++) { if (!inst.isMissing(i)) { tempS++; tempW++; } } } deltamax = tempS / Math.pow(tempW, m_Repulsion); for (int i = 0; i < clusters.size(); i++) { CLOPECluster tempcluster = clusters.get(i); delta = tempcluster.DeltaAdd(inst, m_Repulsion); // System.out.println("delta " + delta); if (delta > deltamax) { deltamax = delta; clustermax = i; } } if (clustermax == -1) { CLOPECluster newcluster = new CLOPECluster(); clusters.add(newcluster); newcluster.AddInstance(inst); return clusters.size() - 1; } clusters.get(clustermax).AddInstance(inst); return clustermax; } /** * Classifies a given instance. 
* * @param instance The instance to be assigned to a cluster * @return int The number of the assigned cluster as an integer * @throws java.lang.Exception If instance could not be clustered * successfully */ public int clusterInstance(Instance instance) throws Exception { if (m_processed_InstanceID >= m_numberOfInstances) { m_processed_InstanceID = 0; } int i = m_clusterAssignments.get(m_processed_InstanceID); m_processed_InstanceID++; return i; } /** * return a string describing this clusterer * * @return a description of the clusterer as a string */ public String toString() { StringBuffer stringBuffer = new StringBuffer(); stringBuffer.append("CLOPE clustering results\n" + "========================================================================================\n\n"); stringBuffer.append("Clustered instances: " + m_clusterAssignments.size() + "\n"); return stringBuffer.toString() + "\n"; } /** * Returns a string describing this DataMining-Algorithm * @return String Information for the gui-explorer */ public String globalInfo() { return getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Yiling Yang and Xudong Guan and Jinyuan You"); result.setValue(Field.TITLE, "CLOPE: a fast and effective clustering algorithm for transactional data"); result.setValue(Field.BOOKTITLE, "Proceedings of the eighth ACM SIGKDD international conference on Knowledge discovery and data mining"); result.setValue(Field.YEAR, "2002"); result.setValue(Field.PAGES, "682-687"); result.setValue(Field.PUBLISHER, "ACM New York, NY, USA"); return result; } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5538 $"); } /** * Main method for testing this class. * * @param argv should contain the following arguments: <p> * -t training file [-R repulsion] */ public static void main(String[] argv) { runClusterer(new CLOPE(), argv); } }
15,601
23.765079
234
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/CheckClusterer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CheckClusterer.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.clusterers; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.core.CheckScheme; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.MultiInstanceCapabilitiesHandler; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SerializationHelper; import weka.core.TestInstances; import weka.core.Utils; import weka.core.WeightedInstancesHandler; /** * Class for examining the capabilities and finding problems with * clusterers. If you implement a clusterer using the WEKA.libraries, * you should run the checks on it to ensure robustness and correct * operation. Passing all the tests of this object does not mean * bugs in the clusterer don't exist, but this will help find some * common ones. 
<p/> * * Typical usage: <p/> * <code>java weka.clusterers.CheckClusterer -W clusterer_name * -- clusterer_options </code><p/> * * CheckClusterer reports on the following: * <ul> * <li> Clusterer abilities * <ul> * <li> Possible command line options to the clusterer </li> * <li> Whether the clusterer can predict nominal, numeric, string, * date or relational class attributes.</li> * <li> Whether the clusterer can handle numeric predictor attributes </li> * <li> Whether the clusterer can handle nominal predictor attributes </li> * <li> Whether the clusterer can handle string predictor attributes </li> * <li> Whether the clusterer can handle date predictor attributes </li> * <li> Whether the clusterer can handle relational predictor attributes </li> * <li> Whether the clusterer can handle multi-instance data </li> * <li> Whether the clusterer can handle missing predictor values </li> * <li> Whether the clusterer can handle instance weights </li> * </ul> * </li> * <li> Correct functioning * <ul> * <li> Correct initialisation during buildClusterer (i.e. no result * changes when buildClusterer called repeatedly) </li> * <li> Whether the clusterer alters the data pased to it * (number of instances, instance order, instance weights, etc) </li> * </ul> * </li> * <li> Degenerate cases * <ul> * <li> building clusterer with zero training instances </li> * <li> all but one predictor attribute values missing </li> * <li> all predictor attribute values missing </li> * <li> all but one class values missing </li> * <li> all class values missing </li> * </ul> * </li> * </ul> * Running CheckClusterer with the debug option set will output the * training dataset for any failed tests.<p/> * * The <code>weka.clusterers.AbstractClustererTest</code> uses this * class to test all the clusterers. Any changes here, have to be * checked in that abstract test class, too. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -S * Silent mode - prints nothing to stdout.</pre> * * <pre> -N &lt;num&gt; * The number of instances in the datasets (default 20).</pre> * * <pre> -nominal &lt;num&gt; * The number of nominal attributes (default 2).</pre> * * <pre> -nominal-values &lt;num&gt; * The number of values for nominal attributes (default 1).</pre> * * <pre> -numeric &lt;num&gt; * The number of numeric attributes (default 1).</pre> * * <pre> -string &lt;num&gt; * The number of string attributes (default 1).</pre> * * <pre> -date &lt;num&gt; * The number of date attributes (default 1).</pre> * * <pre> -relational &lt;num&gt; * The number of relational attributes (default 1).</pre> * * <pre> -num-instances-relational &lt;num&gt; * The number of instances in relational/bag attributes (default 10).</pre> * * <pre> -words &lt;comma-separated-list&gt; * The words to use in string attributes.</pre> * * <pre> -word-separators &lt;chars&gt; * The word separators to use in string attributes.</pre> * * <pre> -W * Full name of the clusterer analyzed. * eg: weka.clusterers.SimpleKMeans * (default weka.clusterers.SimpleKMeans)</pre> * * <pre> * Options specific to clusterer weka.clusterers.SimpleKMeans: * </pre> * * <pre> -N &lt;num&gt; * number of clusters. * (default 2).</pre> * * <pre> -V * Display std. deviations for centroids. * </pre> * * <pre> -M * Replace missing values with mean/mode. * </pre> * * <pre> -S &lt;num&gt; * Random number seed. 
* (default 10)</pre> * <!-- options-end --> * * Options after -- are passed to the designated clusterer.<p/> * * @author Len Trigg (trigg@cs.waikato.ac.nz) * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision: 8034 $ * @see TestInstances */ public class CheckClusterer extends CheckScheme { /* * Note about test methods: * - methods return array of booleans * - first index: success or not * - second index: acceptable or not (e.g., Exception is OK) * * FracPete (fracpete at waikato dot ac dot nz) */ /*** The clusterer to be examined */ protected Clusterer m_Clusterer = new SimpleKMeans(); /** * default constructor */ public CheckClusterer() { super(); setNumInstances(40); } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector result = new Vector(); Enumeration en = super.listOptions(); while (en.hasMoreElements()) result.addElement(en.nextElement()); result.addElement(new Option( "\tFull name of the clusterer analyzed.\n" +"\teg: weka.clusterers.SimpleKMeans\n" + "\t(default weka.clusterers.SimpleKMeans)", "W", 1, "-W")); if ((m_Clusterer != null) && (m_Clusterer instanceof OptionHandler)) { result.addElement(new Option("", "", 0, "\nOptions specific to clusterer " + m_Clusterer.getClass().getName() + ":")); Enumeration enu = ((OptionHandler)m_Clusterer).listOptions(); while (enu.hasMoreElements()) result.addElement(enu.nextElement()); } return result.elements(); } /** * Parses a given list of options. 
<p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Turn on debugging output.</pre> * * <pre> -S * Silent mode - prints nothing to stdout.</pre> * * <pre> -N &lt;num&gt; * The number of instances in the datasets (default 20).</pre> * * <pre> -nominal &lt;num&gt; * The number of nominal attributes (default 2).</pre> * * <pre> -nominal-values &lt;num&gt; * The number of values for nominal attributes (default 1).</pre> * * <pre> -numeric &lt;num&gt; * The number of numeric attributes (default 1).</pre> * * <pre> -string &lt;num&gt; * The number of string attributes (default 1).</pre> * * <pre> -date &lt;num&gt; * The number of date attributes (default 1).</pre> * * <pre> -relational &lt;num&gt; * The number of relational attributes (default 1).</pre> * * <pre> -num-instances-relational &lt;num&gt; * The number of instances in relational/bag attributes (default 10).</pre> * * <pre> -words &lt;comma-separated-list&gt; * The words to use in string attributes.</pre> * * <pre> -word-separators &lt;chars&gt; * The word separators to use in string attributes.</pre> * * <pre> -W * Full name of the clusterer analyzed. * eg: weka.clusterers.SimpleKMeans * (default weka.clusterers.SimpleKMeans)</pre> * * <pre> * Options specific to clusterer weka.clusterers.SimpleKMeans: * </pre> * * <pre> -N &lt;num&gt; * number of clusters. * (default 2).</pre> * * <pre> -V * Display std. deviations for centroids. * </pre> * * <pre> -M * Replace missing values with mean/mode. * </pre> * * <pre> -S &lt;num&gt; * Random number seed. 
* (default 10)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('N', options); super.setOptions(options); if (tmpStr.length() != 0) setNumInstances(Integer.parseInt(tmpStr)); else setNumInstances(40); tmpStr = Utils.getOption('W', options); if (tmpStr.length() == 0) tmpStr = weka.clusterers.SimpleKMeans.class.getName(); setClusterer( (Clusterer) forName( "weka.clusterers", Clusterer.class, tmpStr, Utils.partitionOptions(options))); } /** * Gets the current settings of the CheckClusterer. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector result; String[] options; int i; result = new Vector(); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); if (getClusterer() != null) { result.add("-W"); result.add(getClusterer().getClass().getName()); } if ((m_Clusterer != null) && (m_Clusterer instanceof OptionHandler)) options = ((OptionHandler) m_Clusterer).getOptions(); else options = new String[0]; if (options.length > 0) { result.add("--"); for (i = 0; i < options.length; i++) result.add(options[i]); } return (String[]) result.toArray(new String[result.size()]); } /** * Begin the tests, reporting results to System.out */ public void doTests() { if (getClusterer() == null) { println("\n=== No clusterer set ==="); return; } println("\n=== Check on Clusterer: " + getClusterer().getClass().getName() + " ===\n"); // Start tests println("--> Checking for interfaces"); canTakeOptions(); boolean updateable = updateableClusterer()[0]; boolean weightedInstancesHandler = weightedInstancesHandler()[0]; boolean multiInstanceHandler = multiInstanceHandler()[0]; println("--> Clusterer tests"); declaresSerialVersionUID(); runTests(weightedInstancesHandler, multiInstanceHandler, updateable); } /** * Set 
the clusterer for testing. * * @param newClusterer the Clusterer to use. */ public void setClusterer(Clusterer newClusterer) { m_Clusterer = newClusterer; } /** * Get the clusterer used as the clusterer * * @return the clusterer used as the clusterer */ public Clusterer getClusterer() { return m_Clusterer; } /** * Run a battery of tests * * @param weighted true if the clusterer says it handles weights * @param multiInstance true if the clusterer is a multi-instance clusterer * @param updateable true if the classifier is updateable */ protected void runTests(boolean weighted, boolean multiInstance, boolean updateable) { boolean PNom = canPredict(true, false, false, false, false, multiInstance)[0]; boolean PNum = canPredict(false, true, false, false, false, multiInstance)[0]; boolean PStr = canPredict(false, false, true, false, false, multiInstance)[0]; boolean PDat = canPredict(false, false, false, true, false, multiInstance)[0]; boolean PRel; if (!multiInstance) PRel = canPredict(false, false, false, false, true, multiInstance)[0]; else PRel = false; if (PNom || PNum || PStr || PDat || PRel) { if (weighted) instanceWeights(PNom, PNum, PStr, PDat, PRel, multiInstance); canHandleZeroTraining(PNom, PNum, PStr, PDat, PRel, multiInstance); boolean handleMissingPredictors = canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, true, 20)[0]; if (handleMissingPredictors) canHandleMissing(PNom, PNum, PStr, PDat, PRel, multiInstance, true, 100); correctBuildInitialisation(PNom, PNum, PStr, PDat, PRel, multiInstance); datasetIntegrity(PNom, PNum, PStr, PDat, PRel, multiInstance, handleMissingPredictors); if (updateable) updatingEquality(PNom, PNum, PStr, PDat, PRel, multiInstance); } } /** * Checks whether the scheme can take command line options. 
* * @return index 0 is true if the clusterer can take options */ protected boolean[] canTakeOptions() { boolean[] result = new boolean[2]; print("options..."); if (m_Clusterer instanceof OptionHandler) { println("yes"); if (m_Debug) { println("\n=== Full report ==="); Enumeration enu = ((OptionHandler)m_Clusterer).listOptions(); while (enu.hasMoreElements()) { Option option = (Option) enu.nextElement(); print(option.synopsis() + "\n" + option.description() + "\n"); } println("\n"); } result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * Checks whether the scheme can build models incrementally. * * @return index 0 is true if the clusterer can train incrementally */ protected boolean[] updateableClusterer() { boolean[] result = new boolean[2]; print("updateable clusterer..."); if (m_Clusterer instanceof UpdateableClusterer) { println("yes"); result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * Checks whether the scheme says it can handle instance weights. * * @return true if the clusterer handles instance weights */ protected boolean[] weightedInstancesHandler() { boolean[] result = new boolean[2]; print("weighted instances clusterer..."); if (m_Clusterer instanceof WeightedInstancesHandler) { println("yes"); result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * Checks whether the scheme handles multi-instance data. * * @return true if the clusterer handles multi-instance data */ protected boolean[] multiInstanceHandler() { boolean[] result = new boolean[2]; print("multi-instance clusterer..."); if (m_Clusterer instanceof MultiInstanceCapabilitiesHandler) { println("yes"); result[0] = true; } else { println("no"); result[0] = false; } return result; } /** * tests for a serialVersionUID. Fails in case the scheme doesn't declare * a UID. 
* * @return index 0 is true if the scheme declares a UID */ protected boolean[] declaresSerialVersionUID() { boolean[] result = new boolean[2]; print("serialVersionUID..."); result[0] = !SerializationHelper.needsUID(m_Clusterer.getClass()); if (result[0]) println("yes"); else println("no"); return result; } /** * Checks basic prediction of the scheme, for simple non-troublesome * datasets. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] canPredict( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance) { print("basic predict"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance); print("..."); FastVector accepts = new FastVector(); accepts.addElement("unary"); accepts.addElement("binary"); accepts.addElement("nominal"); accepts.addElement("numeric"); accepts.addElement("string"); accepts.addElement("date"); accepts.addElement("relational"); accepts.addElement("multi-instance"); accepts.addElement("not in classpath"); int numTrain = getNumInstances(), missingLevel = 0; boolean predictorMissing = false; return runBasicTest(nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, missingLevel, predictorMissing, numTrain, accepts); } /** * Checks whether the scheme can handle zero training instances. 
* * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] canHandleZeroTraining( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance) { print("handle zero training instances"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance); print("..."); FastVector accepts = new FastVector(); accepts.addElement("train"); accepts.addElement("value"); int numTrain = 0, missingLevel = 0; boolean predictorMissing = false; return runBasicTest( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance, missingLevel, predictorMissing, numTrain, accepts); } /** * Checks whether the scheme correctly initialises models when * buildClusterer is called. This test calls buildClusterer with * one training dataset. buildClusterer is then called on a training set * with different structure, and then again with the original training set. * If the equals method of the ClusterEvaluation class returns * false, this is noted as incorrect build initialisation. 
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @return index 0 is true if the test was passed
   */
  protected boolean[] correctBuildInitialisation(
      boolean nominalPredictor,
      boolean numericPredictor,
      boolean stringPredictor,
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance) {

    boolean[] result = new boolean[2];

    print("correct initialisation during buildClusterer");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance);
    print("...");
    int numTrain = getNumInstances(), missingLevel = 0;
    boolean predictorMissing = false;

    Instances train1 = null;
    Instances train2 = null;
    Clusterer clusterer = null;
    ClusterEvaluation evaluation1A = null;  // first build/eval on train1
    ClusterEvaluation evaluation1B = null;  // second build/eval on train1
    ClusterEvaluation evaluation2 = null;   // build/eval on train2
    boolean built = false;                  // whether the current build completed
    int stage = 0;                          // which step failed, for error reporting
    try {

      // Make two train sets with different numbers of attributes
      train1 = makeTestDataset(42, numTrain,
                               nominalPredictor    ? getNumNominal()    : 0,
                               numericPredictor    ? getNumNumeric()    : 0,
                               stringPredictor     ? getNumString()     : 0,
                               datePredictor       ? getNumDate()       : 0,
                               relationalPredictor ? getNumRelational() : 0,
                               multiInstance);
      train2 = makeTestDataset(84, numTrain,
                               nominalPredictor    ? getNumNominal() + 1 : 0,
                               numericPredictor    ? getNumNumeric() + 1 : 0,
                               stringPredictor     ? getNumString()      : 0,
                               datePredictor       ? getNumDate()        : 0,
                               relationalPredictor ? getNumRelational()  : 0,
                               multiInstance);
      if (nominalPredictor && !multiInstance) {
        train1.deleteAttributeAt(0);
        train2.deleteAttributeAt(0);
      }
      if (missingLevel > 0) {
        addMissing(train1, missingLevel, predictorMissing);
        addMissing(train2, missingLevel, predictorMissing);
      }
      clusterer = AbstractClusterer.makeCopies(getClusterer(), 1)[0];
      evaluation1A = new ClusterEvaluation();
      evaluation1B = new ClusterEvaluation();
      evaluation2 = new ClusterEvaluation();
    } catch (Exception ex) {
      throw new Error("Error setting up for tests: " + ex.getMessage());
    }
    try {
      // stage 0: first build + evaluation on train1
      stage = 0;
      clusterer.buildClusterer(train1);
      built = true;
      evaluation1A.setClusterer(clusterer);
      evaluation1A.evaluateClusterer(train1);

      // stage 1: build on the differently-structured train2
      stage = 1;
      built = false;
      clusterer.buildClusterer(train2);
      built = true;
      evaluation2.setClusterer(clusterer);
      evaluation2.evaluateClusterer(train2);

      // stage 2: rebuild on train1; must match the first build exactly
      stage = 2;
      built = false;
      clusterer.buildClusterer(train1);
      built = true;
      evaluation1B.setClusterer(clusterer);
      evaluation1B.evaluateClusterer(train1);

      stage = 3;
      if (!evaluation1A.equals(evaluation1B)) {
        if (m_Debug) {
          println("\n=== Full report ===\n");
          println("First buildClusterer()");
          println(evaluation1A.clusterResultsToString() + "\n\n");
          println("Second buildClusterer()");
          println(evaluation1B.clusterResultsToString() + "\n\n");
        }
        throw new Exception("Results differ between buildClusterer calls");
      }
      println("yes");
      result[0] = true;

      // deliberately disabled verbose dump; kept around for debugging
      if (false && m_Debug) {
        println("\n=== Full report ===\n");
        println("First buildClusterer()");
        println(evaluation1A.clusterResultsToString() + "\n\n");
        println("Second buildClusterer()");
        println(evaluation1B.clusterResultsToString() + "\n\n");
      }
    } catch (Exception ex) {
      println("no");
      result[0] = false;

      if (m_Debug) {
        println("\n=== Full Report ===");
        print("Problem during");
        if (built) {
          print(" testing");
        } else {
          print(" training");
        }
        // report which stage of the build/evaluate sequence failed
        switch (stage) {
          case 0:
            print(" of dataset 1");
            break;
          case 1:
            print(" of dataset 2");
            break;
          case 2:
            print(" of dataset 1 (2nd build)");
            break;
          case 3:
            print(", comparing results from builds of dataset 1");
            break;
        }
        println(": " + ex.getMessage() + "\n");
        println("here are the datasets:\n");
        println("=== Train1 Dataset ===\n" + train1.toString() + "\n");
        println("=== Train2 Dataset ===\n" + train2.toString() + "\n");
      }
    }

    return result;
  }

  /**
   * Checks basic missing value handling of the scheme. If the missing
   * values cause an exception to be thrown by the scheme, this will be
   * recorded.
   *
   * @param nominalPredictor if true use nominal predictor attributes
   * @param numericPredictor if true use numeric predictor attributes
   * @param stringPredictor if true use string predictor attributes
   * @param datePredictor if true use date predictor attributes
   * @param relationalPredictor if true use relational predictor attributes
   * @param multiInstance whether multi-instance is needed
   * @param predictorMissing true if the missing values may be in
   * the predictors
   * @param missingLevel the percentage of missing values
   * @return index 0 is true if the test was passed, index 1 is true if test
   * was acceptable
   */
  protected boolean[] canHandleMissing(
      boolean nominalPredictor,
      boolean numericPredictor,
      boolean stringPredictor,
      boolean datePredictor,
      boolean relationalPredictor,
      boolean multiInstance,
      boolean predictorMissing,
      int missingLevel) {

    if (missingLevel == 100)
      print("100% ");
    print("missing");
    if (predictorMissing) {
      print(" predictor");
    }
    print(" values");
    printAttributeSummary(
        nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance);
    print("...");
    FastVector accepts = new FastVector();
    accepts.addElement("missing");
    accepts.addElement("value");
    accepts.addElement("train");
    int numTrain = getNumInstances();

    return runBasicTest(nominalPredictor, numericPredictor, stringPredictor,
        datePredictor, relationalPredictor, multiInstance,
        missingLevel, predictorMissing,
        numTrain,
        accepts);
  }

  /**
   * Checks whether the clusterer can handle instance weights.
* This test compares the clusterer performance on two datasets * that are identical except for the training weights. If the * results change, then the clusterer must be using the weights. It * may be possible to get a false positive from this test if the * weight changes aren't significant enough to induce a change * in clusterer performance (but the weights are chosen to minimize * the likelihood of this). * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @return index 0 true if the test was passed */ protected boolean[] instanceWeights( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance) { print("clusterer uses instance weights"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance); print("..."); int numTrain = 2*getNumInstances(), missingLevel = 0; boolean predictorMissing = false; boolean[] result = new boolean[2]; Instances train = null; Clusterer [] clusterers = null; ClusterEvaluation evaluationB = null; ClusterEvaluation evaluationI = null; boolean built = false; boolean evalFail = false; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() + 1 : 0, numericPredictor ? getNumNumeric() + 1 : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? 
getNumRelational() : 0, multiInstance); if (nominalPredictor && !multiInstance) train.deleteAttributeAt(0); if (missingLevel > 0) addMissing(train, missingLevel, predictorMissing); clusterers = AbstractClusterer.makeCopies(getClusterer(), 2); evaluationB = new ClusterEvaluation(); evaluationI = new ClusterEvaluation(); clusterers[0].buildClusterer(train); evaluationB.setClusterer(clusterers[0]); } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { // Now modify instance weights and re-built/test for (int i = 0; i < train.numInstances(); i++) { train.instance(i).setWeight(0); } Random random = new Random(1); for (int i = 0; i < train.numInstances() / 2; i++) { int inst = Math.abs(random.nextInt()) % train.numInstances(); int weight = Math.abs(random.nextInt()) % 10 + 1; train.instance(inst).setWeight(weight); } clusterers[1].buildClusterer(train); built = true; evaluationI.setClusterer(clusterers[1]); if (evaluationB.equals(evaluationI)) { // println("no"); evalFail = true; throw new Exception("evalFail"); } println("yes"); result[0] = true; } catch (Exception ex) { println("no"); result[0] = false; if (m_Debug) { println("\n=== Full Report ==="); if (evalFail) { println("Results don't differ between non-weighted and " + "weighted instance models."); println("Here are the results:\n"); println("\nboth methods\n"); println(evaluationB.clusterResultsToString()); } else { print("Problem during"); if (built) { print(" testing"); } else { print(" training"); } println(": " + ex.getMessage() + "\n"); } println("Here is the dataset:\n"); println("=== Train Dataset ===\n" + train.toString() + "\n"); println("=== Train Weights ===\n"); for (int i = 0; i < train.numInstances(); i++) { println(" " + (i + 1) + " " + train.instance(i).weight()); } } } return result; } /** * Checks whether the scheme alters the training dataset during * training. 
If the scheme needs to modify the training * data it should take a copy of the training data. Currently checks * for changes to header structure, number of instances, order of * instances, instance weights. * * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param predictorMissing true if we know the clusterer can handle * (at least) moderate missing predictor values * @return index 0 is true if the test was passed */ protected boolean[] datasetIntegrity( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, boolean predictorMissing) { print("clusterer doesn't alter original datasets"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance); print("..."); int numTrain = getNumInstances(), missingLevel = 20; boolean[] result = new boolean[2]; Instances train = null; Clusterer clusterer = null; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? 
getNumRelational() : 0, multiInstance); if (nominalPredictor && !multiInstance) train.deleteAttributeAt(0); if (missingLevel > 0) addMissing(train, missingLevel, predictorMissing); clusterer = AbstractClusterer.makeCopies(getClusterer(), 1)[0]; } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { Instances trainCopy = new Instances(train); clusterer.buildClusterer(trainCopy); compareDatasets(train, trainCopy); println("yes"); result[0] = true; } catch (Exception ex) { println("no"); result[0] = false; if (m_Debug) { println("\n=== Full Report ==="); print("Problem during training"); println(": " + ex.getMessage() + "\n"); println("Here is the dataset:\n"); println("=== Train Dataset ===\n" + train.toString() + "\n"); } } return result; } /** * Checks whether an updateable scheme produces the same model when * trained incrementally as when batch trained. The model itself * cannot be compared, so we compare the evaluation on test data * for both models. It is possible to get a false positive on this * test (likelihood depends on the classifier). 
* * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @return index 0 is true if the test was passed */ protected boolean[] updatingEquality( boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance) { print("incremental training produces the same results" + " as batch training"); printAttributeSummary( nominalPredictor, numericPredictor, stringPredictor, datePredictor, relationalPredictor, multiInstance); print("..."); int numTrain = getNumInstances(), missingLevel = 0; boolean predictorMissing = false, classMissing = false; boolean[] result = new boolean[2]; Instances train = null; Clusterer[] clusterers = null; ClusterEvaluation evaluationB = null; ClusterEvaluation evaluationI = null; boolean built = false; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? 
getNumRelational() : 0, multiInstance); if (missingLevel > 0) addMissing(train, missingLevel, predictorMissing, classMissing); clusterers = AbstractClusterer.makeCopies(getClusterer(), 2); evaluationB = new ClusterEvaluation(); evaluationI = new ClusterEvaluation(); clusterers[0].buildClusterer(train); evaluationB.setClusterer(clusterers[0]); } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); } try { clusterers[1].buildClusterer(new Instances(train, 0)); for (int i = 0; i < train.numInstances(); i++) { ((UpdateableClusterer)clusterers[1]).updateClusterer( train.instance(i)); } built = true; evaluationI.setClusterer(clusterers[1]); if (!evaluationB.equals(evaluationI)) { println("no"); result[0] = false; if (m_Debug) { println("\n=== Full Report ==="); println("Results differ between batch and " + "incrementally built models.\n" + "Depending on the classifier, this may be OK"); println("Here are the results:\n"); println("\nbatch built results\n" + evaluationB.clusterResultsToString()); println("\nincrementally built results\n" + evaluationI.clusterResultsToString()); println("Here are the datasets:\n"); println("=== Train Dataset ===\n" + train.toString() + "\n"); } } else { println("yes"); result[0] = true; } } catch (Exception ex) { result[0] = false; print("Problem during"); if (built) print(" testing"); else print(" training"); println(": " + ex.getMessage() + "\n"); } return result; } /** * Runs a text on the datasets with the given characteristics. 
* * @param nominalPredictor if true use nominal predictor attributes * @param numericPredictor if true use numeric predictor attributes * @param stringPredictor if true use string predictor attributes * @param datePredictor if true use date predictor attributes * @param relationalPredictor if true use relational predictor attributes * @param multiInstance whether multi-instance is needed * @param missingLevel the percentage of missing values * @param predictorMissing true if the missing values may be in * the predictors * @param numTrain the number of instances in the training set * @param accepts the acceptable string in an exception * @return index 0 is true if the test was passed, index 1 is true if test * was acceptable */ protected boolean[] runBasicTest(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance, int missingLevel, boolean predictorMissing, int numTrain, FastVector accepts) { boolean[] result = new boolean[2]; Instances train = null; Clusterer clusterer = null; try { train = makeTestDataset(42, numTrain, nominalPredictor ? getNumNominal() : 0, numericPredictor ? getNumNumeric() : 0, stringPredictor ? getNumString() : 0, datePredictor ? getNumDate() : 0, relationalPredictor ? getNumRelational() : 0, multiInstance); if (nominalPredictor && !multiInstance) train.deleteAttributeAt(0); if (missingLevel > 0) addMissing(train, missingLevel, predictorMissing); clusterer = AbstractClusterer.makeCopies(getClusterer(), 1)[0]; } catch (Exception ex) { ex.printStackTrace(); throw new Error("Error setting up for tests: " + ex.getMessage()); } try { clusterer.buildClusterer(train); println("yes"); result[0] = true; } catch (Exception ex) { boolean acceptable = false; String msg = ex.getMessage().toLowerCase(); for (int i = 0; i < accepts.size(); i++) { if (msg.indexOf((String)accepts.elementAt(i)) >= 0) { acceptable = true; } } println("no" + (acceptable ? 
" (OK error message)" : "")); result[1] = acceptable; if (m_Debug) { println("\n=== Full Report ==="); print("Problem during training"); println(": " + ex.getMessage() + "\n"); if (!acceptable) { if (accepts.size() > 0) { print("Error message doesn't mention "); for (int i = 0; i < accepts.size(); i++) { if (i != 0) { print(" or "); } print('"' + (String)accepts.elementAt(i) + '"'); } } println("here is the dataset:\n"); println("=== Train Dataset ===\n" + train.toString() + "\n"); } } } return result; } /** * Add missing values to a dataset. * * @param data the instances to add missing values to * @param level the level of missing values to add (if positive, this * is the probability that a value will be set to missing, if negative * all but one value will be set to missing (not yet implemented)) * @param predictorMissing if true, predictor attributes will be modified */ protected void addMissing(Instances data, int level, boolean predictorMissing) { Random random = new Random(1); for (int i = 0; i < data.numInstances(); i++) { Instance current = data.instance(i); for (int j = 0; j < data.numAttributes(); j++) { if (predictorMissing) { if (Math.abs(random.nextInt()) % 100 < level) current.setMissing(j); } } } } /** * Make a simple set of instances with variable position of the class * attribute, which can later be modified for use in specific tests. 
* * @param seed the random number seed * @param numInstances the number of instances to generate * @param numNominal the number of nominal attributes * @param numNumeric the number of numeric attributes * @param numString the number of string attributes * @param numDate the number of date attributes * @param numRelational the number of relational attributes * @param multiInstance whether the dataset should a multi-instance dataset * @return the test dataset * @throws Exception if the dataset couldn't be generated * @see TestInstances#CLASS_IS_LAST */ protected Instances makeTestDataset(int seed, int numInstances, int numNominal, int numNumeric, int numString, int numDate, int numRelational, boolean multiInstance) throws Exception { TestInstances dataset = new TestInstances(); dataset.setSeed(seed); dataset.setNumInstances(numInstances); dataset.setNumNominal(numNominal); dataset.setNumNumeric(numNumeric); dataset.setNumString(numString); dataset.setNumDate(numDate); dataset.setNumRelational(numRelational); dataset.setClassIndex(TestInstances.NO_CLASS); dataset.setMultiInstance(multiInstance); return dataset.generate(); } /** * Print out a short summary string for the dataset characteristics * * @param nominalPredictor true if nominal predictor attributes are present * @param numericPredictor true if numeric predictor attributes are present * @param stringPredictor true if string predictor attributes are present * @param datePredictor true if date predictor attributes are present * @param relationalPredictor true if relational predictor attributes are present * @param multiInstance whether multi-instance is needed */ protected void printAttributeSummary(boolean nominalPredictor, boolean numericPredictor, boolean stringPredictor, boolean datePredictor, boolean relationalPredictor, boolean multiInstance) { String str = ""; if (numericPredictor) str += "numeric"; if (nominalPredictor) { if (str.length() > 0) str += " & "; str += "nominal"; } if (stringPredictor) { if 
(str.length() > 0) str += " & "; str += "string"; } if (datePredictor) { if (str.length() > 0) str += " & "; str += "date"; } if (relationalPredictor) { if (str.length() > 0) str += " & "; str += "relational"; } str = " (" + str + " predictors)"; print(str); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Test method for this class * * @param args the commandline options */ public static void main(String [] args) { runCheck(new CheckClusterer(), args); } }
45,970
33.002219
112
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/ClusterEvaluation.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ClusterEvaluation.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.clusterers; import java.beans.BeanInfo; import java.beans.Introspector; import java.beans.MethodDescriptor; import java.io.BufferedWriter; import java.io.FileWriter; import java.io.Serializable; import java.lang.reflect.Method; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.core.Drawable; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Range; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.converters.ConverterUtils.DataSource; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Remove; /** * Class for evaluating clustering models.<p/> * * Valid options are: <p/> * * -t name of the training file <br/> * Specify the training file. <p/> * * -T name of the test file <br/> * Specify the test file to apply clusterer to. <p/> * * -d name of file to save clustering model to <br/> * Specify output file. <p/> * * -l name of file to load clustering model from <br/> * Specifiy input file. <p/> * * -p attribute range <br/> * Output predictions. 
Predictions are for the training file if only the * training file is specified, otherwise they are for the test file. The range * specifies attribute values to be output with the predictions. * Use '-p 0' for none. <p/> * * -x num folds <br/> * Set the number of folds for a cross validation of the training data. * Cross validation can only be done for distribution clusterers and will * be performed if the test file is missing. <p/> * * -s num <br/> * Sets the seed for randomizing the data for cross-validation. <p/> * * -c class <br/> * Set the class attribute. If set, then class based evaluation of clustering * is performed. <p/> * * -g name of graph file <br/> * Outputs the graph representation of the clusterer to the file. Only for * clusterer that implemented the <code>weka.core.Drawable</code> interface. * <p/> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ * @see weka.core.Drawable */ public class ClusterEvaluation implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = -830188327319128005L; /** the clusterer */ private Clusterer m_Clusterer; /** holds a string describing the results of clustering the training data */ private StringBuffer m_clusteringResults; /** holds the number of clusters found by the clusterer */ private int m_numClusters; /** holds the assigments of instances to clusters for a particular testing dataset */ private double[] m_clusterAssignments; /** holds the average log likelihood for a particular testing dataset if the clusterer is a DensityBasedClusterer */ private double m_logL; /** will hold the mapping of classes to clusters (for class based evaluation) */ private int[] m_classToCluster = null; /** * set the clusterer * @param clusterer the clusterer to use */ public void setClusterer(Clusterer clusterer) { m_Clusterer = clusterer; } /** * return the results of clustering. 
* @return a string detailing the results of clustering a data set */ public String clusterResultsToString() { return m_clusteringResults.toString(); } /** * Return the number of clusters found for the most recent call to * evaluateClusterer * @return the number of clusters found */ public int getNumClusters() { return m_numClusters; } /** * Return an array of cluster assignments corresponding to the most * recent set of instances clustered. * @return an array of cluster assignments */ public double[] getClusterAssignments() { return m_clusterAssignments; } /** * Return the array (ordered by cluster number) of minimum error class to * cluster mappings * @return an array of class to cluster mappings */ public int[] getClassesToClusters() { return m_classToCluster; } /** * Return the log likelihood corresponding to the most recent * set of instances clustered. * * @return a <code>double</code> value */ public double getLogLikelihood() { return m_logL; } /** * Constructor. Sets defaults for each member variable. Default Clusterer * is EM. */ public ClusterEvaluation () { setClusterer(new SimpleKMeans()); m_clusteringResults = new StringBuffer(); m_clusterAssignments = null; } /** * Evaluate the clusterer on a set of instances. Calculates clustering * statistics and stores cluster assigments for the instances in * m_clusterAssignments * * @param test the set of instances to cluster * @throws Exception if something goes wrong */ public void evaluateClusterer(Instances test) throws Exception { evaluateClusterer(test, ""); } /** * Evaluate the clusterer on a set of instances. 
Calculates clustering * statistics and stores cluster assigments for the instances in * m_clusterAssignments * * @param test the set of instances to cluster * @param testFileName the name of the test file for incremental testing, * if "" or null then not used * * @throws Exception if something goes wrong */ public void evaluateClusterer(Instances test, String testFileName) throws Exception { evaluateClusterer(test, testFileName, true); } /** * Evaluate the clusterer on a set of instances. Calculates clustering * statistics and stores cluster assigments for the instances in * m_clusterAssignments * * @param test the set of instances to cluster * @param testFileName the name of the test file for incremental testing, * if "" or null then not used * @param outputModel true if the clustering model is to be output as well * as the stats * * @throws Exception if something goes wrong */ public void evaluateClusterer(Instances test, String testFileName, boolean outputModel) throws Exception { int i = 0; int cnum; double loglk = 0.0; int cc = m_Clusterer.numberOfClusters(); m_numClusters = cc; double[] instanceStats = new double[cc]; Instances testRaw = null; boolean hasClass = (test.classIndex() >= 0); int unclusteredInstances = 0; Vector<Double> clusterAssignments = new Vector<Double>(); Filter filter = null; DataSource source = null; Instance inst; if (testFileName == null) testFileName = ""; // load data if (testFileName.length() != 0) source = new DataSource(testFileName); else source = new DataSource(test); testRaw = source.getStructure(test.classIndex()); // If class is set then do class based evaluation as well if (hasClass) { if (testRaw.classAttribute().isNumeric()) throw new Exception("ClusterEvaluation: Class must be nominal!"); filter = new Remove(); ((Remove) filter).setAttributeIndices("" + (testRaw.classIndex() + 1)); ((Remove) filter).setInvertSelection(false); filter.setInputFormat(testRaw); } i = 0; while (source.hasMoreElements(testRaw)) { // next 
instance inst = source.nextElement(testRaw); if (filter != null) { filter.input(inst); filter.batchFinished(); inst = filter.output(); } cnum = -1; try { if (m_Clusterer instanceof DensityBasedClusterer) { loglk += ((DensityBasedClusterer)m_Clusterer). logDensityForInstance(inst); cnum = m_Clusterer.clusterInstance(inst); clusterAssignments.add((double) cnum); } else { cnum = m_Clusterer.clusterInstance(inst); clusterAssignments.add((double) cnum); } } catch (Exception e) { clusterAssignments.add(-1.0); unclusteredInstances++; } if (cnum != -1) { instanceStats[cnum]++; } } double sum = Utils.sum(instanceStats); loglk /= sum; m_logL = loglk; m_clusterAssignments = new double [clusterAssignments.size()]; for (i = 0; i < clusterAssignments.size(); i++) { m_clusterAssignments[i] = clusterAssignments.get(i); } int numInstFieldWidth = (int)((Math.log(clusterAssignments.size())/Math.log(10))+1); if (outputModel) { m_clusteringResults.append(m_Clusterer.toString()); } m_clusteringResults.append("Clustered Instances\n\n"); int clustFieldWidth = (int)((Math.log(cc)/Math.log(10))+1); for (i = 0; i < cc; i++) { if (instanceStats[i] > 0) m_clusteringResults.append(Utils.doubleToString((double)i, clustFieldWidth, 0) + " " + Utils.doubleToString(instanceStats[i], numInstFieldWidth, 0) + " (" + Utils.doubleToString((instanceStats[i] / sum * 100.0) , 3, 0) + "%)\n"); } if (unclusteredInstances > 0) m_clusteringResults.append("\nUnclustered instances : " +unclusteredInstances); if (m_Clusterer instanceof DensityBasedClusterer) m_clusteringResults.append("\n\nLog likelihood: " + Utils.doubleToString(loglk, 1, 5) + "\n"); if (hasClass) { evaluateClustersWithRespectToClass(test, testFileName); } } /** * Evaluates cluster assignments with respect to actual class labels. * Assumes that m_Clusterer has been trained and tested on * inst (minus the class). 
* * @param inst the instances (including class) to evaluate with respect to * @param fileName the name of the test file for incremental testing, * if "" or null then not used * @throws Exception if something goes wrong */ private void evaluateClustersWithRespectToClass(Instances inst, String fileName) throws Exception { int numClasses = inst.classAttribute().numValues(); int[][] counts = new int [m_numClusters][numClasses]; int[] clusterTotals = new int[m_numClusters]; double[] best = new double[m_numClusters+1]; double[] current = new double[m_numClusters+1]; DataSource source = null; Instances instances = null; Instance instance = null; int i; int numInstances; if (fileName == null) fileName = ""; if (fileName.length() != 0) { source = new DataSource(fileName); } else source = new DataSource(inst); instances = source.getStructure(inst.classIndex()); i = 0; while (source.hasMoreElements(instances)) { instance = source.nextElement(instances); if (m_clusterAssignments[i] >= 0) { counts[(int)m_clusterAssignments[i]][(int)instance.classValue()]++; clusterTotals[(int)m_clusterAssignments[i]]++; } i++; } numInstances = i; best[m_numClusters] = Double.MAX_VALUE; mapClasses(m_numClusters, 0, counts, clusterTotals, current, best, 0); m_clusteringResults.append("\n\nClass attribute: " +inst.classAttribute().name() +"\n"); m_clusteringResults.append("Classes to Clusters:\n"); String matrixString = toMatrixString(counts, clusterTotals, new Instances(inst, 0)); m_clusteringResults.append(matrixString).append("\n"); int Cwidth = 1 + (int)(Math.log(m_numClusters) / Math.log(10)); // add the minimum error assignment for (i = 0; i < m_numClusters; i++) { if (clusterTotals[i] > 0) { m_clusteringResults.append("Cluster " +Utils.doubleToString((double)i,Cwidth,0)); m_clusteringResults.append(" <-- "); if (best[i] < 0) { m_clusteringResults.append("No class\n"); } else { m_clusteringResults. 
append(inst.classAttribute().value((int)best[i])).append("\n"); } } } m_clusteringResults.append("\nIncorrectly clustered instances :\t" +best[m_numClusters]+"\t" +(Utils.doubleToString((best[m_numClusters] / numInstances * 100.0), 8, 4)) +" %\n"); // copy the class assignments m_classToCluster = new int [m_numClusters]; for (i = 0; i < m_numClusters; i++) { m_classToCluster[i] = (int)best[i]; } } /** * Returns a "confusion" style matrix of classes to clusters assignments * @param counts the counts of classes for each cluster * @param clusterTotals total number of examples in each cluster * @param inst the training instances (with class) * @return the "confusion" style matrix as string * @throws Exception if matrix can't be generated */ private String toMatrixString(int[][] counts, int[] clusterTotals, Instances inst) throws Exception { StringBuffer ms = new StringBuffer(); int maxval = 0; for (int i = 0; i < m_numClusters; i++) { for (int j = 0; j < counts[i].length; j++) { if (counts[i][j] > maxval) { maxval = counts[i][j]; } } } int Cwidth = 1 + Math.max((int)(Math.log(maxval) / Math.log(10)), (int)(Math.log(m_numClusters) / Math.log(10))); ms.append("\n"); for (int i = 0; i < m_numClusters; i++) { if (clusterTotals[i] > 0) { ms.append(" ").append(Utils.doubleToString((double)i, Cwidth, 0)); } } ms.append(" <-- assigned to cluster\n"); for (int i = 0; i< counts[0].length; i++) { for (int j = 0; j < m_numClusters; j++) { if (clusterTotals[j] > 0) { ms.append(" ").append(Utils.doubleToString((double)counts[j][i], Cwidth, 0)); } } ms.append(" | ").append(inst.classAttribute().value(i)).append("\n"); } return ms.toString(); } /** * Finds the minimum error mapping of classes to clusters. Recursively * considers all possible class to cluster assignments. 
* * @param numClusters the number of clusters * @param lev the cluster being processed * @param counts the counts of classes in clusters * @param clusterTotals the total number of examples in each cluster * @param current the current path through the class to cluster assignment * tree * @param best the best assignment path seen * @param error accumulates the error for a particular path */ public static void mapClasses(int numClusters, int lev, int[][] counts, int[] clusterTotals, double[] current, double[] best, int error) { // leaf if (lev == numClusters) { if (error < best[numClusters]) { best[numClusters] = error; for (int i = 0; i < numClusters; i++) { best[i] = current[i]; } } } else { // empty cluster -- ignore if (clusterTotals[lev] == 0) { current[lev] = -1; // cluster ignored mapClasses(numClusters, lev+1, counts, clusterTotals, current, best, error); } else { // first try no class assignment to this cluster current[lev] = -1; // cluster assigned no class (ie all errors) mapClasses(numClusters, lev+1, counts, clusterTotals, current, best, error+clusterTotals[lev]); // now loop through the classes in this cluster for (int i = 0; i < counts[0].length; i++) { if (counts[lev][i] > 0) { boolean ok = true; // check to see if this class has already been assigned for (int j = 0; j < lev; j++) { if ((int)current[j] == i) { ok = false; break; } } if (ok) { current[lev] = i; mapClasses(numClusters, lev+1, counts, clusterTotals, current, best, (error + (clusterTotals[lev] - counts[lev][i]))); } } } } } } /** * Evaluates a clusterer with the options given in an array of * strings. It takes the string indicated by "-t" as training file, the * string indicated by "-T" as test file. * If the test file is missing, a stratified ten-fold * cross-validation is performed (distribution clusterers only). * Using "-x" you can change the number of * folds to be used, and using "-s" the random seed. 
* If the "-p" option is present it outputs the classification for * each test instance. If you provide the name of an object file using * "-l", a clusterer will be loaded from the given file. If you provide the * name of an object file using "-d", the clusterer built from the * training data will be saved to the given file. * * @param clusterer machine learning clusterer * @param options the array of string containing the options * @throws Exception if model could not be evaluated successfully * @return a string describing the results */ public static String evaluateClusterer(Clusterer clusterer, String[] options) throws Exception { int seed = 1, folds = 10; boolean doXval = false; Instances train = null; Random random; String trainFileName, testFileName, seedString, foldsString; String objectInputFileName, objectOutputFileName, attributeRangeString; String graphFileName; String[] savedOptions = null; boolean printClusterAssignments = false; Range attributesToOutput = null; StringBuffer text = new StringBuffer(); int theClass = -1; // class based evaluation of clustering boolean updateable = (clusterer instanceof UpdateableClusterer); DataSource source = null; Instance inst; if (Utils.getFlag('h', options) || Utils.getFlag("help", options)) { // global info requested as well? boolean globalInfo = Utils.getFlag("synopsis", options) || Utils.getFlag("info", options); throw new Exception("Help requested." 
+ makeOptionString(clusterer, globalInfo)); } try { // Get basic options (options the same for all clusterers //printClusterAssignments = Utils.getFlag('p', options); objectInputFileName = Utils.getOption('l', options); objectOutputFileName = Utils.getOption('d', options); trainFileName = Utils.getOption('t', options); testFileName = Utils.getOption('T', options); graphFileName = Utils.getOption('g', options); // Check -p option try { attributeRangeString = Utils.getOption('p', options); } catch (Exception e) { throw new Exception(e.getMessage() + "\nNOTE: the -p option has changed. " + "It now expects a parameter specifying a range of attributes " + "to list with the predictions. Use '-p 0' for none."); } if (attributeRangeString.length() != 0) { printClusterAssignments = true; if (!attributeRangeString.equals("0")) attributesToOutput = new Range(attributeRangeString); } if (trainFileName.length() == 0) { if (objectInputFileName.length() == 0) { throw new Exception("No training file and no object " + "input file given."); } if (testFileName.length() == 0) { throw new Exception("No training file and no test file given."); } } else { if ((objectInputFileName.length() != 0) && (printClusterAssignments == false)) { throw new Exception("Can't use both train and model file " + "unless -p specified."); } } seedString = Utils.getOption('s', options); if (seedString.length() != 0) { seed = Integer.parseInt(seedString); } foldsString = Utils.getOption('x', options); if (foldsString.length() != 0) { folds = Integer.parseInt(foldsString); doXval = true; } } catch (Exception e) { throw new Exception('\n' + e.getMessage() + makeOptionString(clusterer, false)); } try { if (trainFileName.length() != 0) { source = new DataSource(trainFileName); train = source.getStructure(); String classString = Utils.getOption('c',options); if (classString.length() != 0) { if (classString.compareTo("last") == 0) theClass = train.numAttributes(); else if (classString.compareTo("first") == 0) 
theClass = 1; else theClass = Integer.parseInt(classString); if (theClass != -1) { if (doXval || testFileName.length() != 0) throw new Exception("Can only do class based evaluation on the " +"training data"); if (objectInputFileName.length() != 0) throw new Exception("Can't load a clusterer and do class based " +"evaluation"); if (objectOutputFileName.length() != 0) throw new Exception( "Can't do class based evaluation and save clusterer"); } } else { // if the dataset defines a class attribute, use it if (train.classIndex() != -1) { theClass = train.classIndex() + 1; System.err.println( "Note: using class attribute from dataset, i.e., attribute #" + theClass); } } if (theClass != -1) { if (theClass < 1 || theClass > train.numAttributes()) throw new Exception("Class is out of range!"); if (!train.attribute(theClass - 1).isNominal()) throw new Exception("Class must be nominal!"); train.setClassIndex(theClass - 1); } } } catch (Exception e) { throw new Exception("ClusterEvaluation: " + e.getMessage() + '.'); } // Save options if (options != null) { savedOptions = new String[options.length]; System.arraycopy(options, 0, savedOptions, 0, options.length); } if (objectInputFileName.length() != 0) Utils.checkForRemainingOptions(options); // Set options for clusterer if (clusterer instanceof OptionHandler) ((OptionHandler)clusterer).setOptions(options); Utils.checkForRemainingOptions(options); Instances trainHeader = train; if (objectInputFileName.length() != 0) { // Load the clusterer from file // clusterer = (Clusterer) SerializationHelper.read(objectInputFileName); java.io.ObjectInputStream ois = new java.io.ObjectInputStream( new java.io.BufferedInputStream( new java.io.FileInputStream(objectInputFileName))); clusterer = (Clusterer) ois.readObject(); // try and get the training header try { trainHeader = (Instances) ois.readObject(); } catch (Exception ex) { // don't moan if we cant } } else { // Build the clusterer if no object file provided if (theClass == -1) { if 
(updateable) { clusterer.buildClusterer(source.getStructure()); while (source.hasMoreElements(train)) { inst = source.nextElement(train); ((UpdateableClusterer) clusterer).updateClusterer(inst); } ((UpdateableClusterer) clusterer).updateFinished(); } else { clusterer.buildClusterer(source.getDataSet()); } } else { Remove removeClass = new Remove(); removeClass.setAttributeIndices("" + theClass); removeClass.setInvertSelection(false); removeClass.setInputFormat(train); if (updateable) { Instances clusterTrain = Filter.useFilter(train, removeClass); clusterer.buildClusterer(clusterTrain); trainHeader = clusterTrain; while (source.hasMoreElements(train)) { inst = source.nextElement(train); removeClass.input(inst); removeClass.batchFinished(); Instance clusterTrainInst = removeClass.output(); ((UpdateableClusterer) clusterer).updateClusterer(clusterTrainInst); } ((UpdateableClusterer) clusterer).updateFinished(); } else { Instances clusterTrain = Filter.useFilter(source.getDataSet(), removeClass); clusterer.buildClusterer(clusterTrain); trainHeader = clusterTrain; } ClusterEvaluation ce = new ClusterEvaluation(); ce.setClusterer(clusterer); ce.evaluateClusterer(train, trainFileName); return "\n\n=== Clustering stats for training data ===\n\n" + ce.clusterResultsToString(); } } /* Output cluster predictions only (for the test data if specified, otherwise for the training data */ if (printClusterAssignments) { return printClusterings(clusterer, trainFileName, testFileName, attributesToOutput); } text.append(clusterer.toString()); text.append("\n\n=== Clustering stats for training data ===\n\n" + printClusterStats(clusterer, trainFileName)); if (testFileName.length() != 0) { // check header compatibility DataSource test = new DataSource(testFileName); Instances testStructure = test.getStructure(); if (!trainHeader.equalHeaders(testStructure)) { throw new Exception("Training and testing data are not compatible\n" + trainHeader.equalHeadersMsg(testStructure)); } 
text.append("\n\n=== Clustering stats for testing data ===\n\n" + printClusterStats(clusterer, testFileName)); } if ((clusterer instanceof DensityBasedClusterer) && (doXval == true) && (testFileName.length() == 0) && (objectInputFileName.length() == 0)) { // cross validate the log likelihood on the training data random = new Random(seed); random.setSeed(seed); train = source.getDataSet(); train.randomize(random); text.append( crossValidateModel( clusterer.getClass().getName(), train, folds, savedOptions, random)); } // Save the clusterer if an object output file is provided if (objectOutputFileName.length() != 0) { //SerializationHelper.write(objectOutputFileName, clusterer); saveClusterer(objectOutputFileName, clusterer, trainHeader); } // If classifier is drawable output string describing graph if ((clusterer instanceof Drawable) && (graphFileName.length() != 0)) { BufferedWriter writer = new BufferedWriter(new FileWriter(graphFileName)); writer.write(((Drawable) clusterer).graph()); writer.newLine(); writer.flush(); writer.close(); } return text.toString(); } private static void saveClusterer(String fileName, Clusterer clusterer, Instances header) throws Exception { java.io.ObjectOutputStream oos = new java.io.ObjectOutputStream( new java.io.BufferedOutputStream( new java.io.FileOutputStream(fileName))); oos.writeObject(clusterer); if (header != null) { oos.writeObject(header); } oos.flush(); oos.close(); } /** * Perform a cross-validation for DensityBasedClusterer on a set of instances. 
* * @param clusterer the clusterer to use * @param data the training data * @param numFolds number of folds of cross validation to perform * @param random random number seed for cross-validation * @return the cross-validated log-likelihood * @throws Exception if an error occurs */ public static double crossValidateModel(DensityBasedClusterer clusterer, Instances data, int numFolds, Random random) throws Exception { Instances train, test; double foldAv = 0;; data = new Instances(data); data.randomize(random); // double sumOW = 0; for (int i = 0; i < numFolds; i++) { // Build and test clusterer train = data.trainCV(numFolds, i, random); clusterer.buildClusterer(train); test = data.testCV(numFolds, i); for (int j = 0; j < test.numInstances(); j++) { try { foldAv += ((DensityBasedClusterer)clusterer). logDensityForInstance(test.instance(j)); // sumOW += test.instance(j).weight(); // double temp = Utils.sum(tempDist); } catch (Exception ex) { // unclustered instances } } } // return foldAv / sumOW; return foldAv / data.numInstances(); } /** * Performs a cross-validation * for a DensityBasedClusterer clusterer on a set of instances. 
* * @param clustererString a string naming the class of the clusterer * @param data the data on which the cross-validation is to be * performed * @param numFolds the number of folds for the cross-validation * @param options the options to the clusterer * @param random a random number generator * @return a string containing the cross validated log likelihood * @throws Exception if a clusterer could not be generated */ public static String crossValidateModel (String clustererString, Instances data, int numFolds, String[] options, Random random) throws Exception { Clusterer clusterer = null; String[] savedOptions = null; double CvAv = 0.0; StringBuffer CvString = new StringBuffer(); if (options != null) { savedOptions = new String[options.length]; } data = new Instances(data); // create clusterer try { clusterer = (Clusterer)Class.forName(clustererString).newInstance(); } catch (Exception e) { throw new Exception("Can't find class with name " + clustererString + '.'); } if (!(clusterer instanceof DensityBasedClusterer)) { throw new Exception(clustererString + " must be a distrinbution " + "clusterer."); } // Save options if (options != null) { System.arraycopy(options, 0, savedOptions, 0, options.length); } // Parse options if (clusterer instanceof OptionHandler) { try { ((OptionHandler)clusterer).setOptions(savedOptions); Utils.checkForRemainingOptions(savedOptions); } catch (Exception e) { throw new Exception("Can't parse given options in " + "cross-validation!"); } } CvAv = crossValidateModel((DensityBasedClusterer)clusterer, data, numFolds, random); CvString.append("\n" + numFolds + " fold CV Log Likelihood: " + Utils.doubleToString(CvAv, 6, 4) + "\n"); return CvString.toString(); } // =============== // Private methods // =============== /** * Print the cluster statistics for either the training * or the testing data. * * @param clusterer the clusterer to use for generating statistics. 
 * @param fileName the file to load
 * @return a string containing cluster statistics.
 * @throws Exception if statistics can't be generated.
 */
  private static String printClusterStats (Clusterer clusterer, String fileName) throws Exception {
    StringBuffer text = new StringBuffer();
    int i = 0;
    int cnum;
    double loglk = 0.0;
    int cc = clusterer.numberOfClusters();
    double[] instanceStats = new double[cc];
    int unclusteredInstances = 0;

    if (fileName.length() != 0) {
      DataSource source = new DataSource(fileName);
      Instances structure = source.getStructure();
      Instance inst;
      // single incremental pass: tally per-cluster counts and, for density
      // based clusterers, accumulate the log likelihood
      while (source.hasMoreElements(structure)) {
        inst = source.nextElement(structure);
        try {
          cnum = clusterer.clusterInstance(inst);

          if (clusterer instanceof DensityBasedClusterer) {
            loglk += ((DensityBasedClusterer)clusterer).logDensityForInstance(inst);
            // temp = Utils.sum(dist);
          }
          instanceStats[cnum]++;
        } catch (Exception e) {
          // instance could not be assigned to any cluster
          unclusteredInstances++;
        }
        i++;
      }

      /*
      // count the actual number of used clusters
      int count = 0;
      for (i = 0; i < cc; i++) {
        if (instanceStats[i] > 0) {
          count++;
        }
      }
      if (count > 0) {
        double[] tempStats = new double [count];
        count=0;
        for (i=0;i<cc;i++) {
          if (instanceStats[i] > 0) {
            tempStats[count++] = instanceStats[i];
          }
        }
        instanceStats = tempStats;
        cc = instanceStats.length;
      } */

      // field widths for cluster numbers and per-cluster counts
      int clustFieldWidth = (int)((Math.log(cc)/Math.log(10))+1);
      int numInstFieldWidth = (int)((Math.log(i)/Math.log(10))+1);
      double sum = Utils.sum(instanceStats);
      // NOTE(review): if no instance could be clustered, sum is 0 and
      // loglk becomes NaN here -- confirm that is the intended behavior
      loglk /= sum;
      text.append("Clustered Instances\n");

      for (i = 0; i < cc; i++) {
        if (instanceStats[i] > 0) {
          text.append(Utils.doubleToString((double)i, clustFieldWidth, 0) + " " + Utils.doubleToString(instanceStats[i], numInstFieldWidth, 0) + " (" + Utils.doubleToString((instanceStats[i]/sum*100.0) , 3, 0) + "%)\n");
        }
      }

      if (unclusteredInstances > 0) {
        text.append("\nUnclustered Instances : "+unclusteredInstances);
      }

      if (clusterer instanceof DensityBasedClusterer) {
        text.append("\n\nLog likelihood: " + Utils.doubleToString(loglk, 1, 5) + "\n");
      }
    }

    return text.toString();
  }

  /**
   * Print the cluster assignments for either the training
   * or the testing data.
   *
   * @param clusterer the clusterer to use for cluster assignments
   * @param trainFileName the train file
   * @param testFileName an optional test file
   * @param attributesToOutput the attributes to print
   * @return a string containing the instance indexes and cluster assigns.
   * @throws Exception if cluster assignments can't be printed
   */
  private static String printClusterings (Clusterer clusterer, String trainFileName, String testFileName, Range attributesToOutput) throws Exception {
    StringBuffer text = new StringBuffer();
    int i = 0;
    int cnum;
    DataSource source = null;
    Instance inst;
    Instances structure;

    // predictions are for the test file when given, else the training file
    if (testFileName.length() != 0)
      source = new DataSource(testFileName);
    else
      source = new DataSource(trainFileName);

    structure = source.getStructure();
    while (source.hasMoreElements(structure)) {
      inst = source.nextElement(structure);
      try {
        cnum = clusterer.clusterInstance(inst);

        text.append(i + " " + cnum + " " + attributeValuesString(inst, attributesToOutput) + "\n");
      } catch (Exception e) {
        /* throw new Exception('\n' + "Unable to cluster instance\n" + e.getMessage()); */
        text.append(i + " Unclustered " + attributeValuesString(inst, attributesToOutput) + "\n");
      }
      i++;
    }

    return text.toString();
  }

  /**
   * Builds a string listing the attribute values in a specified range of indices,
   * separated by commas and enclosed in brackets.
 * @param instance the instance to print the values from
 * @param attRange the range of the attributes to list
 * @return a string listing values of the attributes in the range
 */
  private static String attributeValuesString(Instance instance, Range attRange) {
    StringBuffer text = new StringBuffer();
    if (attRange != null) {
      boolean firstOutput = true;
      attRange.setUpper(instance.numAttributes() - 1);
      for (int i=0; i<instance.numAttributes(); i++)
        if (attRange.isInRange(i)) {
          // open the bracket before the first value, comma-separate the rest
          if (firstOutput) text.append("(");
          else text.append(",");
          text.append(instance.toString(i));
          firstOutput = false;
        }
      // only close the bracket if at least one value was printed
      if (!firstOutput) text.append(")");
    }
    return text.toString();
  }

  /**
   * Make up the help string giving all the command line options
   *
   * @param clusterer the clusterer to include options for
   * @param globalInfo true if the clusterer's synopsis should be appended
   * @return a string detailing the valid command line options
   */
  private static String makeOptionString (Clusterer clusterer, boolean globalInfo) {
    StringBuffer optionsText = new StringBuffer("");
    // General options
    optionsText.append("\n\nGeneral options:\n\n");
    optionsText.append("-h or -help\n");
    optionsText.append("\tOutput help information.\n");
    optionsText.append("-synopsis or -info\n");
    optionsText.append("\tOutput synopsis for clusterer (use in conjunction " + " with -h)\n");
    optionsText.append("-t <name of training file>\n");
    optionsText.append("\tSets training file.\n");
    optionsText.append("-T <name of test file>\n");
    optionsText.append("\tSets test file.\n");
    optionsText.append("-l <name of input file>\n");
    optionsText.append("\tSets model input file.\n");
    optionsText.append("-d <name of output file>\n");
    optionsText.append("\tSets model output file.\n");
    optionsText.append("-p <attribute range>\n");
    optionsText.append("\tOutput predictions. Predictions are for " + "training file" + "\n\tif only training file is specified," + "\n\totherwise predictions are for the test file." + "\n\tThe range specifies attribute values to be output" + "\n\twith the predictions. Use '-p 0' for none.\n");
    optionsText.append("-x <number of folds>\n");
    optionsText.append("\tOnly Distribution Clusterers can be cross validated.\n");
    optionsText.append("-s <random number seed>\n");
    optionsText.append("\tSets the seed for randomizing the data in cross-validation\n");
    optionsText.append("-c <class index>\n");
    optionsText.append("\tSet class attribute. If supplied, class is ignored");
    optionsText.append("\n\tduring clustering but is used in a classes to");
    optionsText.append("\n\tclusters evaluation.\n");
    // the -g option only applies to clusterers that can draw themselves
    if (clusterer instanceof Drawable) {
      optionsText.append("-g <name of graph file>\n");
      optionsText.append("\tOutputs the graph representation of the clusterer to the file.\n");
    }

    // Get scheme-specific options
    if (clusterer instanceof OptionHandler) {
      optionsText.append("\nOptions specific to " + clusterer.getClass().getName() + ":\n\n");
      Enumeration enu = ((OptionHandler)clusterer).listOptions();
      while (enu.hasMoreElements()) {
        Option option = (Option)enu.nextElement();
        optionsText.append(option.synopsis() + '\n');
        optionsText.append(option.description() + "\n");
      }
    }

    // Get global information (if available)
    if (globalInfo) {
      try {
        String gi = getGlobalInfo(clusterer);
        optionsText.append(gi);
      } catch (Exception ex) {
        // quietly ignore
      }
    }

    return optionsText.toString();
  }

  /**
   * Return the global info (if it exists) for the supplied clusterer
   *
   * @param clusterer the clusterer to get the global info for
   * @return the global info (synopsis) for the clusterer
   * @throws Exception if there is a problem reflecting on the clusterer
   */
  protected static String getGlobalInfo(Clusterer clusterer) throws Exception {
    BeanInfo bi = Introspector.getBeanInfo(clusterer.getClass());
    MethodDescriptor[] methods;
    methods = bi.getMethodDescriptors();
    Object[] args = {};
    String result = "\nSynopsis for " + clusterer.getClass().getName() + ":\n\n";

    // look up the bean method named "globalInfo" and invoke it, if present
    for (int i = 0; i < methods.length; i++) {
      String name = methods[i].getDisplayName();
      Method meth = methods[i].getMethod();
      if (name.equals("globalInfo")) {
        String globalInfo = (String)(meth.invoke(clusterer, args));
        result += globalInfo;
        break;
      }
    }

    return result;
  }

  /**
   * Tests whether the current evaluation object is equal to another
   * evaluation object
   *
   * NOTE(review): equals is overridden without a matching hashCode;
   * instances of this class should not be used as hash keys.
   *
   * @param obj the object to compare against
   * @return true if the two objects are equal
   */
  public boolean equals(Object obj) {
    if ((obj == null) || !(obj.getClass().equals(this.getClass())))
      return false;

    ClusterEvaluation cmp = (ClusterEvaluation) obj;

    // element-wise comparison of the class-to-cluster mapping
    if ((m_classToCluster != null) != (cmp.m_classToCluster != null)) return false;
    if (m_classToCluster != null) {
      for (int i = 0; i < m_classToCluster.length; i++) {
        if (m_classToCluster[i] != cmp.m_classToCluster[i])
          return false;
      }
    }

    // element-wise comparison of the per-instance cluster assignments
    if ((m_clusterAssignments != null) != (cmp.m_clusterAssignments != null)) return false;
    if (m_clusterAssignments != null) {
      for (int i = 0; i < m_clusterAssignments.length; i++) {
        if (m_clusterAssignments[i] != cmp.m_clusterAssignments[i])
          return false;
      }
    }

    // NaN != NaN, so compare NaN-ness first, then the values
    if (Double.isNaN(m_logL) != Double.isNaN(cmp.m_logL)) return false;
    if (!Double.isNaN(m_logL)) {
      if (m_logL != cmp.m_logL) return false;
    }

    if (m_numClusters != cmp.m_numClusters) return false;

    // TODO: better comparison? via members?
    // textual comparison, ignoring timing lines which naturally differ
    String clusteringResults1 = m_clusteringResults.toString().replaceAll("Elapsed time.*", "");
    String clusteringResults2 = cmp.m_clusteringResults.toString().replaceAll("Elapsed time.*", "");
    if (!clusteringResults1.equals(clusteringResults2)) return false;

    return true;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Main method for testing this class.
* * @param args the options */ public static void main (String[] args) { try { if (args.length == 0) { throw new Exception("The first argument must be the name of a " + "clusterer"); } String ClustererString = args[0]; args[0] = ""; Clusterer newClusterer = AbstractClusterer.forName(ClustererString, null); System.out.println(evaluateClusterer(newClusterer, args)); } catch (Exception e) { System.out.println(e.getMessage()); } } }
41,312
30.951276
123
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/Clusterer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Clusterer.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.clusterers; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; /** * Interface for clusterers. Clients will typically extend either * AbstractClusterer or AbstractDensityBasedClusterer. * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public interface Clusterer { /** * Generates a clusterer. Has to initialize all fields of the clusterer * that are not being set via options. * * @param data set of instances serving as training data * @exception Exception if the clusterer has not been * generated successfully */ void buildClusterer(Instances data) throws Exception; /** * Classifies a given instance. Either this or distributionForInstance() * needs to be implemented by subclasses. * * @param instance the instance to be assigned to a cluster * @return the number of the assigned cluster as an integer * @exception Exception if instance could not be clustered * successfully */ int clusterInstance(Instance instance) throws Exception; /** * Predicts the cluster memberships for a given instance. Either * this or clusterInstance() needs to be implemented by subclasses. * * @param instance the instance to be assigned a cluster. 
* @return an array containing the estimated membership * probabilities of the test instance in each cluster (this * should sum to at most 1) * @exception Exception if distribution could not be * computed successfully */ public double[] distributionForInstance(Instance instance) throws Exception; /** * Returns the number of clusters. * * @return the number of clusters generated for a training dataset. * @exception Exception if number of clusters could not be returned * successfully */ int numberOfClusters() throws Exception; /** * Returns the Capabilities of this clusterer. Derived classifiers have to * override this method to enable capabilities. * * @return the capabilities of this object * @see Capabilities */ public Capabilities getCapabilities(); }
2,936
31.633333
78
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/Cobweb.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Cobweb.java * Copyright (C) 2001-2012 University of Waikato, Hamilton, New Zealand * */ package weka.clusterers; import java.io.Serializable; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.core.AttributeStats; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Drawable; import weka.core.FastVector; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Stats; import weka.filters.Filter; import weka.filters.unsupervised.attribute.Add; /** <!-- globalinfo-start --> * Class implementing the Cobweb and Classit clustering algorithms.<br/> * <br/> * Note: the application of node operators (merging, splitting etc.) in terms of ordering and priority differs (and is somewhat ambiguous) between the original Cobweb and Classit papers. This algorithm always compares the best host, adding a new leaf, merging the two best hosts, and splitting the best host when considering where to place a new instance.<br/> * <br/> * For more information see:<br/> * <br/> * D. 
Fisher (1987). Knowledge acquisition via incremental conceptual clustering. Machine Learning. 2(2):139-172.<br/> * <br/> * J. H. Gennari, P. Langley, D. Fisher (1990). Models of incremental concept formation. Artificial Intelligence. 40:11-61. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;article{Fisher1987, * author = {D. Fisher}, * journal = {Machine Learning}, * number = {2}, * pages = {139-172}, * title = {Knowledge acquisition via incremental conceptual clustering}, * volume = {2}, * year = {1987} * } * * &#64;article{Gennari1990, * author = {J. H. Gennari and P. Langley and D. Fisher}, * journal = {Artificial Intelligence}, * pages = {11-61}, * title = {Models of incremental concept formation}, * volume = {40}, * year = {1990} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -A &lt;acuity&gt; * Acuity. * (default=1.0)</pre> * * <pre> -C &lt;cutoff&gt; * Cutoff. * (default=0.002)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 42)</pre> * <!-- options-end --> * * @author <a href="mailto:mhall@cs.waikato.ac.nz">Mark Hall</a> * @version $Revision: 8034 $ * @see RandomizableClusterer * @see Drawable */ public class Cobweb extends RandomizableClusterer implements Drawable, TechnicalInformationHandler, UpdateableClusterer { /** for serialization */ static final long serialVersionUID = 928406656495092318L; /** * Inner class handling node operations for Cobweb. 
* * @see Serializable */ public class CNode implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = 3452097436933325631L; /** * Within cluster attribute statistics */ private AttributeStats[] m_attStats; /** * Number of attributes */ private int m_numAttributes; /** * Instances at this node */ protected Instances m_clusterInstances = null; /** * Children of this node */ private FastVector m_children = null; /** * Total instances at this node */ private double m_totalInstances = 0.0; /** * Cluster number of this node */ private int m_clusterNum = -1; /** * Creates an empty <code>CNode</code> instance. * * @param numAttributes the number of attributes in the data */ public CNode(int numAttributes) { m_numAttributes = numAttributes; } /** * Creates a new leaf <code>CNode</code> instance. * * @param numAttributes the number of attributes in the data * @param leafInstance the instance to store at this leaf */ public CNode(int numAttributes, Instance leafInstance) { this(numAttributes); if (m_clusterInstances == null) { m_clusterInstances = new Instances(leafInstance.dataset(), 1); } m_clusterInstances.add(leafInstance); updateStats(leafInstance, false); } /** * Adds an instance to this cluster. 
* * @param newInstance the instance to add * @throws Exception if an error occurs */ protected void addInstance(Instance newInstance) throws Exception { // Add the instance to this cluster if (m_clusterInstances == null) { m_clusterInstances = new Instances(newInstance.dataset(), 1); m_clusterInstances.add(newInstance); updateStats(newInstance, false); return; } else if (m_children == null) { /* we are a leaf, so make our existing instance(s) into a child and then add the new instance as a child */ m_children = new FastVector(); CNode tempSubCluster = new CNode(m_numAttributes, m_clusterInstances.instance(0)); // System.out.println("Dumping "+m_clusterInstances.numInstances()); for (int i = 1; i < m_clusterInstances.numInstances(); i++) { tempSubCluster.m_clusterInstances. add(m_clusterInstances.instance(i)); tempSubCluster.updateStats(m_clusterInstances.instance(i), false); } m_children = new FastVector(); m_children.addElement(tempSubCluster); m_children.addElement(new CNode(m_numAttributes, newInstance)); m_clusterInstances.add(newInstance); updateStats(newInstance, false); // here is where we check against cutoff (also check cutoff // in findHost) if (categoryUtility() < m_cutoff) { // System.out.println("Cutting (leaf add) "); m_children = null; } return; } // otherwise, find the best host for this instance CNode bestHost = findHost(newInstance, false); if (bestHost != null) { // now add to the best host bestHost.addInstance(newInstance); } } /** * Temporarily adds a new instance to each of this nodes children * in turn and computes the category utility. 
 *
   * @param newInstance the new instance to evaluate
   * @return an array of category utility values---the result of considering
   * each child in turn as a host for the new instance
   * @throws Exception if an error occurs
   */
  private double[] cuScoresForChildren(Instance newInstance) throws Exception {
    // look for a host in existing children
    double[] categoryUtils = new double [m_children.size()];

    // look for a home for this instance in the existing children;
    // each child's stats are updated, measured, then rolled back
    for (int i = 0; i < m_children.size(); i++) {
      CNode temp = (CNode) m_children.elementAt(i);
      // tentatively add the new instance to this child
      temp.updateStats(newInstance, false);
      categoryUtils[i] = categoryUtility();
      // remove the new instance from this child (undo the tentative add)
      temp.updateStats(newInstance, true);
    }
    return categoryUtils;
  }

  /**
   * Temporarily merges children a and b (plus the new instance) and
   * computes the resulting category utility, then restores this node's
   * children to their previous state.
   *
   * @param merged an empty node to build the merged cluster into
   * @param a the best host child
   * @param b the second best host child
   * @param newInstance the instance being placed
   * @return the category utility of this node if a and b were merged
   * @throws Exception if an error occurs
   */
  private double cuScoreForBestTwoMerged(CNode merged,
                                         CNode a, CNode b,
                                         Instance newInstance) throws Exception {
    double mergedCU = -Double.MAX_VALUE;
    // consider merging the best and second
    // best.
    merged.m_clusterInstances = new Instances(m_clusterInstances, 1);
    merged.addChildNode(a);
    merged.addChildNode(b);
    merged.updateStats(newInstance, false); // add new instance to stats
    // remove the best and second best nodes
    m_children.removeElementAt(m_children.indexOf(a));
    m_children.removeElementAt(m_children.indexOf(b));
    m_children.addElement(merged);
    mergedCU = categoryUtility();
    // restore the status quo: undo the tentative stats update and
    // reinstate a and b as separate children
    merged.updateStats(newInstance, true);
    m_children.removeElementAt(m_children.indexOf(merged));
    m_children.addElement(a);
    m_children.addElement(b);
    return mergedCU;
  }

  /**
   * Finds a host for the new instance in this nodes children. Also
   * considers merging the two best hosts and splitting the best host.
 *
   * @param newInstance the instance to find a host for
   * @param structureFrozen true if the instance is not to be added to
   * the tree and instead the best potential host is to be returned
   * @return the best host
   * @throws Exception if an error occurs
   */
  private CNode findHost(Instance newInstance,
                         boolean structureFrozen) throws Exception {

    if (!structureFrozen) {
      // tentatively absorb the instance into this node's stats
      updateStats(newInstance, false);
    }

    // look for a host in existing children and also consider as a new leaf
    double[] categoryUtils = cuScoresForChildren(newInstance);

    // make a temporary new leaf for this instance and get CU
    CNode newLeaf = new CNode(m_numAttributes, newInstance);
    m_children.addElement(newLeaf);
    double bestHostCU = categoryUtility();
    CNode finalBestHost = newLeaf;

    // remove new leaf when searching for best and second best nodes to
    // consider for merging and splitting
    m_children.removeElementAt(m_children.size()-1);

    // now determine the best host (and the second best)
    int best = 0;
    int secondBest = 0;
    for (int i = 0; i < categoryUtils.length; i++) {
      if (categoryUtils[i] > categoryUtils[secondBest]) {
        if (categoryUtils[i] > categoryUtils[best]) {
          secondBest = best;
          best = i;
        } else {
          secondBest = i;
        }
      }
    }

    CNode a = (CNode) m_children.elementAt(best);
    CNode b = (CNode) m_children.elementAt(secondBest);
    if (categoryUtils[best] > bestHostCU) {
      bestHostCU = categoryUtils[best];
      finalBestHost = a;
    }

    if (structureFrozen) {
      if (finalBestHost == newLeaf) {
        return null; // *this* node is the best host
      } else {
        return finalBestHost;
      }
    }

    // consider merging the two best hosts
    double mergedCU = -Double.MAX_VALUE;
    CNode merged = new CNode(m_numAttributes);

    if (a != b) {
      mergedCU = cuScoreForBestTwoMerged(merged, a, b, newInstance);

      if (mergedCU > bestHostCU) {
        bestHostCU = mergedCU;
        finalBestHost = merged;
      }
    }

    // Consider splitting the best
    double splitCU = -Double.MAX_VALUE;
    double splitBestChildCU = -Double.MAX_VALUE;
    double splitPlusNewLeafCU = -Double.MAX_VALUE;
    double splitPlusMergeBestTwoCU = -Double.MAX_VALUE;
    if (a.m_children != null) {
      // promote a's children into this node's child list
      FastVector tempChildren = new FastVector();

      for (int i = 0; i < m_children.size(); i++) {
        CNode existingChild = (CNode)m_children.elementAt(i);
        if (existingChild != a) {
          tempChildren.addElement(existingChild);
        }
      }
      for (int i = 0; i < a.m_children.size(); i++) {
        CNode promotedChild = (CNode)a.m_children.elementAt(i);
        tempChildren.addElement(promotedChild);
      }
      // also add the new leaf
      tempChildren.addElement(newLeaf);

      FastVector saveStatusQuo = m_children;
      m_children = tempChildren;
      splitPlusNewLeafCU = categoryUtility(); // split + new leaf
      // remove the new leaf
      tempChildren.removeElementAt(tempChildren.size()-1);
      // now look for best and second best
      categoryUtils = cuScoresForChildren(newInstance);

      // now determine the best host (and the second best)
      best = 0;
      secondBest = 0;
      for (int i = 0; i < categoryUtils.length; i++) {
        if (categoryUtils[i] > categoryUtils[secondBest]) {
          if (categoryUtils[i] > categoryUtils[best]) {
            secondBest = best;
            best = i;
          } else {
            secondBest = i;
          }
        }
      }
      CNode sa = (CNode) m_children.elementAt(best);
      CNode sb = (CNode) m_children.elementAt(secondBest);
      splitBestChildCU = categoryUtils[best];

      // now merge best and second best
      CNode mergedSplitChildren = new CNode(m_numAttributes);
      if (sa != sb) {
        splitPlusMergeBestTwoCU =
          cuScoreForBestTwoMerged(mergedSplitChildren, sa, sb, newInstance);
      }
      // splitCU is the best of: split+best child, split+new leaf,
      // split+merge of the two best
      splitCU = (splitBestChildCU > splitPlusNewLeafCU)
        ? splitBestChildCU
        : splitPlusNewLeafCU;
      splitCU = (splitCU > splitPlusMergeBestTwoCU)
        ? splitCU
        : splitPlusMergeBestTwoCU;

      if (splitCU > bestHostCU) {
        bestHostCU = splitCU;
        finalBestHost = this;
      } else {
        // restore the status quo
        m_children = saveStatusQuo;
      }
    }

    if (finalBestHost != this) {
      // can commit the instance to the set of instances at this node
      m_clusterInstances.add(newInstance);
    } else {
      m_numberSplits++;
    }

    if (finalBestHost == merged) {
      // commit the merge for real
      m_numberMerges++;
      m_children.removeElementAt(m_children.indexOf(a));
      m_children.removeElementAt(m_children.indexOf(b));
      m_children.addElement(merged);
    }

    if (finalBestHost == newLeaf) {
      // replace the temporary leaf by a fresh empty node; the caller
      // will add the instance into it via addInstance
      finalBestHost = new CNode(m_numAttributes);
      m_children.addElement(finalBestHost);
    }

    if (bestHostCU < m_cutoff) {
      if (finalBestHost == this) {
        // splitting was the best, but since we are cutting all children
        // recursion is aborted and we still need to add the instance
        // to the set of instances at this node
        m_clusterInstances.add(newInstance);
      }
      m_children = null;
      finalBestHost = null;
    }

    if (finalBestHost == this) {
      // splitting is still the best, so downdate the stats as
      // we'll be recursively calling on this node
      updateStats(newInstance, true);
    }

    return finalBestHost;
  }

  /**
   * Adds the supplied node as a child of this node. All of the child's
   * instances are added to this nodes instances
   *
   * @param child the child to add
   */
  protected void addChildNode(CNode child) {
    for (int i = 0; i < child.m_clusterInstances.numInstances(); i++) {
      Instance temp = child.m_clusterInstances.instance(i);
      m_clusterInstances.add(temp);
      updateStats(temp, false);
    }

    if (m_children == null) {
      m_children = new FastVector();
    }
    m_children.addElement(child);
  }

  /**
   * Computes the utility of all children with respect to this node
   *
   * @return the category utility of the children with respect to this node.
 * @throws Exception if there are no children
   */
  protected double categoryUtility() throws Exception {
    if (m_children == null) {
      throw new Exception("categoryUtility: No children!");
    }

    // average of the per-child utilities
    double totalCU = 0;

    for (int i = 0; i < m_children.size(); i++) {
      CNode child = (CNode) m_children.elementAt(i);
      totalCU += categoryUtilityChild(child);
    }

    totalCU /= (double)m_children.size();
    return totalCU;
  }

  /**
   * Computes the utility of a single child with respect to this node
   *
   * @param child the child for which to compute the utility
   * @return the utility of the child with respect to this node
   * @throws Exception if something goes wrong
   */
  protected double categoryUtilityChild(CNode child) throws Exception {
    double sum = 0;
    for (int i = 0; i < m_numAttributes; i++) {
      if (m_clusterInstances.attribute(i).isNominal()) {
        // nominal: sum of squared value probabilities, child minus parent
        for (int j = 0;
             j < m_clusterInstances.attribute(i).numValues(); j++) {
          double x = child.getProbability(i, j);
          double y = getProbability(i, j);
          sum += (x * x) - (y * y);
        }
      } else {
        // numeric attribute: Classit analogue using 1/sigma terms
        sum += ((m_normal / child.getStandardDev(i)) -
                (m_normal / getStandardDev(i)));
      }
    }
    // weight by the child's share of this node's instances
    return (child.m_totalInstances / m_totalInstances) * sum;
  }

  /**
   * Returns the probability of a value of a nominal attribute in this node
   *
   * @param attIndex the index of the attribute
   * @param valueIndex the index of the value of the attribute
   * @return the probability
   * @throws Exception if the requested attribute is not nominal
   */
  protected double getProbability(int attIndex, int valueIndex)
    throws Exception {
    if (!m_clusterInstances.attribute(attIndex).isNominal()) {
      throw new Exception("getProbability: attribute is not nominal");
    }

    if (m_attStats[attIndex].totalCount <= 0) {
      return 0;
    }

    return (double) m_attStats[attIndex].nominalCounts[valueIndex] /
      (double) m_attStats[attIndex].totalCount;
  }

  /**
   * Returns the standard deviation of a numeric attribute
   *
   * @param attIndex the index of the attribute
   * @return the standard deviation
   * @throws Exception if an error occurs
   */
  protected double getStandardDev(int attIndex) throws Exception {
    if (!m_clusterInstances.attribute(attIndex).isNumeric()) {
      throw new Exception("getStandardDev: attribute is not numeric");
    }

    m_attStats[attIndex].numericStats.calculateDerived();
    double stdDev = m_attStats[attIndex].numericStats.stdDev;
    // guard against degenerate clusters: fall back to the acuity floor
    if (Double.isNaN(stdDev) || Double.isInfinite(stdDev)) {
      return m_acuity;
    }
    return Math.max(m_acuity, stdDev);
  }

  /**
   * Update attribute stats using the supplied instance.
   *
   * @param updateInstance the instance for updating
   * @param delete true if the values of the supplied instance are
   * to be removed from the statistics
   */
  protected void updateStats(Instance updateInstance,
                             boolean delete) {

    if (m_attStats == null) {
      // lazily allocate the per-attribute statistics holders
      m_attStats = new AttributeStats[m_numAttributes];
      for (int i = 0; i < m_numAttributes; i++) {
        m_attStats[i] = new AttributeStats();
        if (m_clusterInstances.attribute(i).isNominal()) {
          m_attStats[i].nominalCounts =
            new int [m_clusterInstances.attribute(i).numValues()];
        } else {
          m_attStats[i].numericStats = new Stats();
        }
      }
    }
    for (int i = 0; i < m_numAttributes; i++) {
      if (!updateInstance.isMissing(i)) {
        double value = updateInstance.value(i);
        if (m_clusterInstances.attribute(i).isNominal()) {
          // delete==true downdates (used to undo tentative adds)
          m_attStats[i].nominalCounts[(int)value] += (delete)
            ? (-1.0 * updateInstance.weight())
            : updateInstance.weight();
          m_attStats[i].totalCount += (delete)
            ? (-1.0 * updateInstance.weight())
            : updateInstance.weight();
        } else {
          if (delete) {
            m_attStats[i].numericStats.subtract(value,
                                                updateInstance.weight());
          } else {
            m_attStats[i].numericStats.add(value,
                                           updateInstance.weight());
          }
        }
      }
    }
    m_totalInstances += (delete)
      ? (-1.0 * updateInstance.weight())
      : (updateInstance.weight());
  }

  /**
   * Recursively assigns numbers to the nodes in the tree.
 *
   * @param cl_num an <code>int[]</code> value
   * @throws Exception if an error occurs
   */
  private void assignClusterNums(int[] cl_num) throws Exception {
    // a node with children must have at least two of them
    if (m_children != null && m_children.size() < 2) {
      throw new Exception("assignClusterNums: tree not built correctly!");
    }

    m_clusterNum = cl_num[0];
    cl_num[0]++;

    if (m_children != null) {
      for (int i = 0; i < m_children.size(); i++) {
        CNode child = (CNode) m_children.elementAt(i);
        child.assignClusterNums(cl_num);
      }
    }
  }

  /**
   * Recursively build a string representation of the Cobweb tree
   *
   * @param depth depth of this node in the tree
   * @param text holds the string representation
   */
  protected void dumpTree(int depth, StringBuffer text) {

    if (depth == 0) determineNumberOfClusters();

    if (m_children == null) {
      text.append("\n");
      for (int j = 0; j < depth; j++) {
        text.append("| ");
      }
      text.append("leaf "+m_clusterNum+" ["
                  +m_clusterInstances.numInstances()+"]");
    } else {
      for (int i = 0; i < m_children.size(); i++) {
        text.append("\n");
        for (int j = 0; j < depth; j++) {
          text.append("| ");
        }
        text.append("node "+m_clusterNum+" ["
                    +m_clusterInstances.numInstances()
                    +"]");
        ((CNode) m_children.elementAt(i)).dumpTree(depth+1, text);
      }
    }
  }

  /**
   * Returns the instances at this node as a string. Appends the cluster
   * number of the child that each instance belongs to.
 *
   * @return a <code>String</code> value
   * @throws Exception if an error occurs
   */
  protected String dumpData() throws Exception {
    if (m_children == null) {
      return m_clusterInstances.toString();
    }

    // construct instances string with cluster numbers attached
    CNode tempNode = new CNode(m_numAttributes);
    tempNode.m_clusterInstances = new Instances(m_clusterInstances, 1);
    for (int i = 0; i < m_children.size(); i++) {
      tempNode.addChildNode((CNode)m_children.elementAt(i));
    }
    Instances tempInst = tempNode.m_clusterInstances;
    tempNode = null;

    // append a nominal "Cluster" attribute with one label per child
    Add af = new Add();
    af.setAttributeName("Cluster");
    String labels = "";
    for (int i = 0; i < m_children.size(); i++) {
      CNode temp = (CNode)m_children.elementAt(i);
      labels += ("C"+temp.m_clusterNum);
      if (i < m_children.size()-1) {
        labels+=",";
      }
    }
    af.setNominalLabels(labels);
    af.setInputFormat(tempInst);
    tempInst = Filter.useFilter(tempInst, af);
    tempInst.setRelationName("Cluster "+m_clusterNum);

    // instances were appended child-by-child above, so a running index
    // z maps each instance to its owning child i
    int z = 0;
    for (int i = 0; i < m_children.size(); i++) {
      CNode temp = (CNode)m_children.elementAt(i);
      for (int j = 0; j < temp.m_clusterInstances.numInstances(); j++) {
        tempInst.instance(z).setValue(m_numAttributes, (double)i);
        z++;
      }
    }
    return tempInst.toString();
  }

  /**
   * Recursively generate the graph string for the Cobweb tree.
   *
   * @param text holds the graph string
   * @throws Exception if generation fails
   */
  protected void graphTree(StringBuffer text) throws Exception {

    text.append("N"+m_clusterNum
                + " [label=\""+((m_children == null)
                                ? "leaf " : "node ")
                +m_clusterNum+" "
                +" ("+m_clusterInstances.numInstances()
                +")\" "
                +((m_children == null)
                  ? "shape=box style=filled " : "")
                +(m_saveInstances
                  ? "data =\n"+dumpData()
                  +"\n,\n" : "")
                + "]\n");
    if (m_children != null) {
      for (int i = 0; i < m_children.size(); i++) {
        CNode temp = (CNode)m_children.elementAt(i);
        text.append("N"+m_clusterNum
                    +"->"
                    +"N" + temp.m_clusterNum
                    + "\n");
      }

      for (int i = 0; i < m_children.size(); i++) {
        CNode temp = (CNode)m_children.elementAt(i);
        temp.graphTree(text);
      }
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}

/**
 * Normal constant.
 */
protected static final double m_normal = 1.0/(2 * Math.sqrt(Math.PI));

/**
 * Acuity (minimum standard deviation).
 */
protected double m_acuity = 1.0;

/**
 * Cutoff (minimum category utility).
 */
protected double m_cutoff = 0.01 * Cobweb.m_normal;

/**
 * Holds the root of the Cobweb tree.
 */
protected CNode m_cobwebTree = null;

/**
 * Number of clusters (nodes in the tree). Must never be queried directly,
 * only via the method numberOfClusters(). Otherwise it's not guaranteed that
 * it contains the correct value.
 *
 * @see #numberOfClusters()
 * @see #m_numberOfClustersDetermined
 */
protected int m_numberOfClusters = -1;

/** whether the number of clusters was already determined */
protected boolean m_numberOfClustersDetermined = false;

/** the number of splits that happened */
protected int m_numberSplits;

/** the number of merges that happened */
protected int m_numberMerges;

/**
 * Output instances in graph representation of Cobweb tree (Allows
 * instances at nodes in the tree to be visualized in the Explorer).
 */
protected boolean m_saveInstances = false;

/**
 * default constructor
 */
public Cobweb() {
  super();

  m_SeedDefault = 42;
  setSeed(m_SeedDefault);
}

/**
 * Returns a string describing this clusterer
 * @return a description of the evaluator suitable for
 * displaying in the explorer/experimenter gui
 */
public String globalInfo() {
  return
    "Class implementing the Cobweb and Classit clustering algorithms.\n\n"
    + "Note: the application of node operators (merging, splitting etc.) in "
    + "terms of ordering and priority differs (and is somewhat ambiguous) "
    + "between the original Cobweb and Classit papers. This algorithm always "
    + "compares the best host, adding a new leaf, merging the two best hosts, "
    + "and splitting the best host when considering where to place a new "
    + "instance.\n\n"
    + "For more information see:\n\n"
    + getTechnicalInformation().toString();
}

/**
 * Returns an instance of a TechnicalInformation object, containing
 * detailed information about the technical background of this class,
 * e.g., paper reference or book this class is based on.
 *
 * @return the technical information about this class
 */
public TechnicalInformation getTechnicalInformation() {
  TechnicalInformation result;
  TechnicalInformation additional;

  result = new TechnicalInformation(Type.ARTICLE);
  result.setValue(Field.AUTHOR, "D. Fisher");
  result.setValue(Field.YEAR, "1987");
  result.setValue(Field.TITLE, "Knowledge acquisition via incremental conceptual clustering");
  result.setValue(Field.JOURNAL, "Machine Learning");
  result.setValue(Field.VOLUME, "2");
  result.setValue(Field.NUMBER, "2");
  result.setValue(Field.PAGES, "139-172");

  additional = result.add(Type.ARTICLE);
  additional.setValue(Field.AUTHOR, "J. H. Gennari and P. Langley and D. Fisher");
  additional.setValue(Field.YEAR, "1990");
  additional.setValue(Field.TITLE, "Models of incremental concept formation");
  additional.setValue(Field.JOURNAL, "Artificial Intelligence");
  additional.setValue(Field.VOLUME, "40");
  additional.setValue(Field.PAGES, "11-61");

  return result;
}

/**
 * Returns default capabilities of the clusterer.
 *
 * @return the capabilities of this clusterer
 */
public Capabilities getCapabilities() {
  Capabilities result = super.getCapabilities();
  result.disableAll();
  result.enable(Capability.NO_CLASS);

  // attributes
  result.enable(Capability.NOMINAL_ATTRIBUTES);
  result.enable(Capability.NUMERIC_ATTRIBUTES);
  result.enable(Capability.DATE_ATTRIBUTES);
  result.enable(Capability.MISSING_VALUES);

  // other
  result.setMinimumNumberInstances(0);

  return result;
}

/**
 * Builds the clusterer.
 *
 * @param data the training instances.
 * @throws Exception if something goes wrong.
 */
public void buildClusterer(Instances data) throws Exception {
  // reset state from any previous build
  m_numberOfClusters = -1;
  m_cobwebTree = null;
  m_numberSplits = 0;
  m_numberMerges = 0;

  // can clusterer handle the data?
  getCapabilities().testWithFail(data);

  // randomize the instances (Cobweb is order sensitive);
  // a negative seed disables randomization
  data = new Instances(data);

  if (getSeed() >= 0) {
    data.randomize(new Random(getSeed()));
  }

  for (int i = 0; i < data.numInstances(); i++) {
    updateClusterer(data.instance(i));
  }

  updateFinished();
}

/**
 * Signals the end of the updating.
 */
public void updateFinished() {
  determineNumberOfClusters();
}

/**
 * Classifies a given instance.
 *
 * @param instance the instance to be assigned to a cluster
 * @return the number of the assigned cluster as an integer
 * if the class is enumerated, otherwise the predicted value
 * @throws Exception if instance could not be classified
 * successfully
 */
public int clusterInstance(Instance instance) throws Exception {
  CNode host = m_cobwebTree;
  CNode temp = null;

  determineNumberOfClusters();

  // walk down the tree, at each level asking findHost (with the
  // structure frozen) for the best child; stop at a leaf or when
  // the current node itself is the best host (temp == null)
  do {
    if (host.m_children == null) {
      temp = null;
      break;
    }

    temp = host.findHost(instance, true);

    if (temp != null) {
      host = temp;
    }
  } while (temp != null);

  return host.m_clusterNum;
}

/**
 * determines the number of clusters if necessary
 *
 * @see #m_numberOfClusters
 * @see #m_numberOfClustersDetermined
 */
protected void determineNumberOfClusters() {
  if (    !m_numberOfClustersDetermined
       && (m_cobwebTree != null) ) {
    // int[] used as a mutable counter shared across the recursion
    int[] numClusts = new int [1];
    numClusts[0] = 0;
    try {
      m_cobwebTree.assignClusterNums(numClusts);
    }
    catch (Exception e) {
      e.printStackTrace();
      numClusts[0] = 0;
    }
    m_numberOfClusters = numClusts[0];

    m_numberOfClustersDetermined = true;
  }
}

/**
 * Returns the number of clusters.
 *
 * @return the number of clusters
 */
public int numberOfClusters() {
  determineNumberOfClusters();
  return m_numberOfClusters;
}

/**
 * Get the root of the tree.
 *
 * @return the root of the tree.
 */
public CNode getTreeRoot() {
  return m_cobwebTree;
}

/**
 * Adds an instance to the clusterer.
 *
 * @param newInstance the instance to be added
 * @throws Exception if something goes wrong
 */
public void updateClusterer(Instance newInstance) throws Exception {
  // cluster numbers become stale as soon as the tree changes
  m_numberOfClustersDetermined = false;

  if (m_cobwebTree == null) {
    m_cobwebTree = new CNode(newInstance.numAttributes(), newInstance);
  } else {
    m_cobwebTree.addInstance(newInstance);
  }
}

/**
 * Adds an instance to the Cobweb tree.
 *
 * @param newInstance the instance to be added
 * @throws Exception if something goes wrong
 * @deprecated updateClusterer(Instance) should be used instead
 * @see #updateClusterer(Instance)
 */
public void addInstance(Instance newInstance) throws Exception {
  updateClusterer(newInstance);
}

/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 **/
public Enumeration listOptions() {
  Vector result = new Vector();

  result.addElement(new Option(
      "\tAcuity.\n"
      +"\t(default=1.0)",
      "A", 1,"-A <acuity>"));
  result.addElement(new Option(
      "\tCutoff.\n"
      +"\t(default=0.002)",
      "C", 1,"-C <cutoff>"));

  // include options of the superclass (e.g. the seed)
  Enumeration en = super.listOptions();
  while (en.hasMoreElements())
    result.addElement(en.nextElement());

  return result.elements();
}

/**
 * Parses a given list of options. <p/>
 *
<!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -A &lt;acuity&gt;
 *  Acuity.
 *  (default=1.0)</pre>
 *
 * <pre> -C &lt;cutoff&gt;
 *  Cutoff.
 *  (default=0.002)</pre>
 *
 * <pre> -S &lt;num&gt;
 *  Random number seed.
 *  (default 42)</pre>
 *
<!-- options-end -->
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
public void setOptions(String[] options) throws Exception {
  String optionString;

  // missing options reset the corresponding field to its default
  optionString = Utils.getOption('A', options);
  if (optionString.length() != 0) {
    Double temp = new Double(optionString);
    setAcuity(temp.doubleValue());
  }
  else {
    m_acuity = 1.0;
  }
  optionString = Utils.getOption('C', options);
  if (optionString.length() != 0) {
    Double temp = new Double(optionString);
    setCutoff(temp.doubleValue());
  }
  else {
    m_cutoff = 0.01 * Cobweb.m_normal;
  }

  super.setOptions(options);
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String acuityTipText() {
  return "set the minimum standard deviation for numeric attributes";
}

/**
 * set the acuity.
 * @param a the acuity value
 */
public void setAcuity(double a) {
  m_acuity = a;
}

/**
 * get the acuity value
 * @return the acuity
 */
public double getAcuity() {
  return m_acuity;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String cutoffTipText() {
  return "set the category utility threshold by which to prune nodes";
}

/**
 * set the cutoff
 * @param c the cutoff
 */
public void setCutoff(double c) {
  m_cutoff = c;
}

/**
 * get the cutoff
 * @return the cutoff
 */
public double getCutoff() {
  return m_cutoff;
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String saveInstanceDataTipText() {
  return "save instance information for visualization purposes";
}

/**
 * Get the value of saveInstances.
 *
 * @return Value of saveInstances.
 */
public boolean getSaveInstanceData() {
  return m_saveInstances;
}

/**
 * Set the value of saveInstances.
 *
 * @param newsaveInstances Value to assign to saveInstances.
 */
public void setSaveInstanceData(boolean newsaveInstances) {
  m_saveInstances = newsaveInstances;
}

/**
 * Gets the current settings of Cobweb.
 *
 * @return an array of strings suitable for passing to setOptions()
 */
public String[] getOptions() {
  int i;
  Vector<String> result;
  String[] options;

  result = new Vector<String>();

  result.add("-A");
  result.add("" + m_acuity);
  result.add("-C");
  result.add("" + m_cutoff);

  options = super.getOptions();
  for (i = 0; i < options.length; i++)
    result.add(options[i]);

  return result.toArray(new String[result.size()]);
}

/**
 * Returns a description of the clusterer as a string.
 *
 * @return a string describing the clusterer.
 */
public String toString() {
  StringBuffer text = new StringBuffer();
  if (m_cobwebTree == null) {
    return "Cobweb hasn't been built yet!";
  }
  else {
    m_cobwebTree.dumpTree(0, text);
    return "Number of merges: "
      + m_numberMerges+"\nNumber of splits: "
      + m_numberSplits+"\nNumber of clusters: "
      + numberOfClusters()
      +"\n"+text.toString()+"\n\n";
  }
}

/**
 * Returns the type of graphs this class
 * represents
 * @return Drawable.TREE
 */
public int graphType() {
  return Drawable.TREE;
}

/**
 * Generates the graph string of the Cobweb tree
 *
 * @return a <code>String</code> value
 * @throws Exception if an error occurs
 */
public String graph() throws Exception {
  StringBuffer text = new StringBuffer();

  text.append("digraph CobwebTree {\n");
  m_cobwebTree.graphTree(text);
  text.append("}\n");
  return text.toString();
}

/**
 * Returns the revision string.
 *
 * @return the revision
 */
public String getRevision() {
  return RevisionUtils.extract("$Revision: 8034 $");
}

/**
 * Returns the tip text for this property
 *
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String seedTipText() {
  String result = super.seedTipText() + " Use -1 for no randomization.";
  return result;
}

/**
 * Main method.
 *
 * @param argv the commandline options
 */
public static void main(String[] argv) {
  runClusterer(new Cobweb(), argv);
}
}
36,251
27.277691
360
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/DBScan.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 *    Copyright (C) 2004
 *    & Matthias Schubert (schubert@dbs.ifi.lmu.de)
 *    & Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de)
 *    & Rainer Holzmann (holzmann@cip.ifi.lmu.de)
 */

package weka.clusterers;

import weka.clusterers.forOPTICSAndDBScan.DataObjects.DataObject;
import weka.clusterers.forOPTICSAndDBScan.Databases.Database;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;

import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.text.DecimalFormat;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.List;
import java.util.Vector;

/**
<!-- globalinfo-start -->
 * Martin Ester, Hans-Peter Kriegel, Joerg Sander, Xiaowei Xu: A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise. In: Second International Conference on Knowledge Discovery and Data Mining, 226-231, 1996.
 * <p/>
<!-- globalinfo-end -->
 *
<!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;inproceedings{Ester1996,
 *    author = {Martin Ester and Hans-Peter Kriegel and Joerg Sander and Xiaowei Xu},
 *    booktitle = {Second International Conference on Knowledge Discovery and Data Mining},
 *    editor = {Evangelos Simoudis and Jiawei Han and Usama M. Fayyad},
 *    pages = {226-231},
 *    publisher = {AAAI Press},
 *    title = {A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise},
 *    year = {1996}
 * }
 * </pre>
 * <p/>
<!-- technical-bibtex-end -->
 *
<!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -E &lt;double&gt;
 *  epsilon (default = 0.9)</pre>
 *
 * <pre> -M &lt;int&gt;
 *  minPoints (default = 6)</pre>
 *
 * <pre> -I &lt;String&gt;
 *  index (database) used for DBScan (default = weka.clusterers.forOPTICSAndDBScan.Databases.SequentialDatabase)</pre>
 *
 * <pre> -D &lt;String&gt;
 *  distance-type (default = weka.clusterers.forOPTICSAndDBScan.DataObjects.EuclidianDataObject)</pre>
 *
<!-- options-end -->
 *
 * @author Matthias Schubert (schubert@dbs.ifi.lmu.de)
 * @author Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de)
 * @author Rainer Holzmann (holzmann@cip.ifi.lmu.de)
 * @version $Revision: 5538 $
 */
public class DBScan
    extends AbstractClusterer
    implements OptionHandler, TechnicalInformationHandler {

    /** for serialization */
    static final long serialVersionUID = -1666498248451219728L;

    /**
     * Specifies the radius for a range-query
     */
    private double epsilon = 0.9;

    /**
     * Specifies the density (the range-query must contain at least minPoints DataObjects)
     */
    private int minPoints = 6;

    /**
     * Replace missing values in training instances
     */
    private ReplaceMissingValues replaceMissingValues_Filter;

    /**
     * Holds the number of clusters generated
     */
    private int numberOfGeneratedClusters;

    /**
     * Holds the distance-type that is used
     * (default =
 * weka.clusterers.forOPTICSAndDBScan.DataObjects.EuclidianDataObject)
     */
    private String database_distanceType = "weka.clusterers.forOPTICSAndDBScan.DataObjects.EuclidianDataObject";

    /**
     * Holds the type of the used database
     * (default = weka.clusterers.forOPTICSAndDBScan.Databases.SequentialDatabase)
     */
    private String database_Type = "weka.clusterers.forOPTICSAndDBScan.Databases.SequentialDatabase";

    /**
     * The database that is used for DBScan
     */
    private Database database;

    /**
     * Holds the current clusterID
     */
    private int clusterID;

    /**
     * Counter for the processed instances
     */
    private int processed_InstanceID;

    /**
     * Holds the time-value (seconds) for the duration of the clustering-process
     */
    private double elapsedTime;

    /**
     * Returns default capabilities of the clusterer.
     *
     * @return      the capabilities of this clusterer
     */
    public Capabilities getCapabilities() {
      Capabilities result = super.getCapabilities();
      result.disableAll();
      result.enable(Capability.NO_CLASS);

      // attributes
      result.enable(Capability.NOMINAL_ATTRIBUTES);
      result.enable(Capability.NUMERIC_ATTRIBUTES);
      result.enable(Capability.DATE_ATTRIBUTES);
      result.enable(Capability.MISSING_VALUES);

      return result;
    }

    // *****************************************************************************************************************
    // constructors
    // *****************************************************************************************************************

    // *****************************************************************************************************************
    // methods
    // *****************************************************************************************************************

    /**
     * Generate Clustering via DBScan
     * @param instances The instances that need to be clustered
     * @throws java.lang.Exception If clustering was not successful
     */
    public void buildClusterer(Instances instances) throws Exception {
        // can clusterer handle the data?
        getCapabilities().testWithFail(instances);

        long time_1 = System.currentTimeMillis();

        processed_InstanceID = 0;
        numberOfGeneratedClusters = 0;
        clusterID = 0;

        // DBScan cannot handle missing values, so replace them up front
        replaceMissingValues_Filter = new ReplaceMissingValues();
        replaceMissingValues_Filter.setInputFormat(instances);
        Instances filteredInstances = Filter.useFilter(instances, replaceMissingValues_Filter);

        // build the (index) database of DataObjects, keyed by instance index
        database = databaseForName(getDatabase_Type(), filteredInstances);
        for (int i = 0; i < database.getInstances().numInstances(); i++) {
            DataObject dataObject = dataObjectForName(getDatabase_distanceType(),
                    database.getInstances().instance(i),
                    Integer.toString(i),
                    database);
            database.insert(dataObject);
        }
        database.setMinMaxValues();

        // main DBSCAN loop: try to grow a cluster from every still
        // unclassified object
        Iterator iterator = database.dataObjectIterator();
        while (iterator.hasNext()) {
            DataObject dataObject = (DataObject) iterator.next();
            if (dataObject.getClusterLabel() == DataObject.UNCLASSIFIED) {
                if (expandCluster(dataObject)) {
                    clusterID++;
                    numberOfGeneratedClusters++;
                }
            }
        }

        long time_2 = System.currentTimeMillis();
        elapsedTime = (double) (time_2 - time_1) / 1000.0;
    }

    /**
     * Assigns this dataObject to a cluster or remains it as NOISE
     * @param dataObject The DataObject that needs to be assigned
     * @return true, if the DataObject could be assigned, else false
     */
    private boolean expandCluster(DataObject dataObject) {
        List seedList = database.epsilonRangeQuery(getEpsilon(), dataObject);
        /** dataObject is NO coreObject */
        if (seedList.size() < getMinPoints()) {
            dataObject.setClusterLabel(DataObject.NOISE);
            return false;
        }

        /** dataObject is coreObject */
        for (int i = 0; i < seedList.size(); i++) {
            DataObject seedListDataObject = (DataObject) seedList.get(i);
            /** label this seedListDataObject with the current clusterID, because it is in epsilon-range */
            seedListDataObject.setClusterLabel(clusterID);
            if (seedListDataObject.equals(dataObject)) {
                // the start object itself is already handled; drop it
                seedList.remove(i);
                i--;
            }
        }

        /** Iterate the seedList of the startDataObject; the list grows as
            new density-reachable core objects are discovered */
        for (int j = 0; j < seedList.size(); j++) {
            DataObject seedListDataObject = (DataObject) seedList.get(j);
            List seedListDataObject_Neighbourhood = database.epsilonRangeQuery(getEpsilon(), seedListDataObject);

            /** seedListDataObject is coreObject */
            if (seedListDataObject_Neighbourhood.size() >= getMinPoints()) {
                for (int i = 0; i < seedListDataObject_Neighbourhood.size(); i++) {
                    DataObject p = (DataObject) seedListDataObject_Neighbourhood.get(i);
                    if (p.getClusterLabel() == DataObject.UNCLASSIFIED || p.getClusterLabel() == DataObject.NOISE) {
                        if (p.getClusterLabel() == DataObject.UNCLASSIFIED) {
                            seedList.add(p);
                        }
                        p.setClusterLabel(clusterID);
                    }
                }
            }
            seedList.remove(j);
            j--;
        }

        return true;
    }

    /**
     * Classifies a given instance.
     *
     * @param instance The instance to be assigned to a cluster
     * @return int The number of the assigned cluster as an integer
     * @throws java.lang.Exception If instance could not be clustered
     * successfully
     */
    public int clusterInstance(Instance instance) throws Exception {
        // NOTE(review): this ignores the supplied instance and replays the
        // labels of the training data in order -- presumably intended only
        // for evaluating on the training set; verify against callers.
        if (processed_InstanceID >= database.size()) processed_InstanceID = 0;
        int cnum = (database.getDataObject(Integer.toString(processed_InstanceID++))).getClusterLabel();
        if (cnum == DataObject.NOISE)
            // NOTE(review): exception carries no message; NOISE instances are
            // reported as unclusterable this way
            throw new Exception();
        else
            return cnum;
    }

    /**
     * Returns the number of clusters.
     *
     * @return int The number of clusters generated for a training dataset.
     * @throws java.lang.Exception if number of clusters could not be returned
     * successfully
     */
    public int numberOfClusters() throws Exception {
        return numberOfGeneratedClusters;
    }

    /**
     * Returns an enumeration of all the available options..
     *
     * @return Enumeration An enumeration of all available options.
*/ public Enumeration listOptions() { Vector vector = new Vector(); vector.addElement( new Option("\tepsilon (default = 0.9)", "E", 1, "-E <double>")); vector.addElement( new Option("\tminPoints (default = 6)", "M", 1, "-M <int>")); vector.addElement( new Option("\tindex (database) used for DBScan (default = weka.clusterers.forOPTICSAndDBScan.Databases.SequentialDatabase)", "I", 1, "-I <String>")); vector.addElement( new Option("\tdistance-type (default = weka.clusterers.forOPTICSAndDBScan.DataObjects.EuclidianDataObject)", "D", 1, "-D <String>")); return vector.elements(); } /** * Sets the OptionHandler's options using the given list. All options * will be set (or reset) during this call (i.e. incremental setting * of options is not possible). <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -E &lt;double&gt; * epsilon (default = 0.9)</pre> * * <pre> -M &lt;int&gt; * minPoints (default = 6)</pre> * * <pre> -I &lt;String&gt; * index (database) used for DBScan (default = weka.clusterers.forOPTICSAndDBScan.Databases.SequentialDatabase)</pre> * * <pre> -D &lt;String&gt; * distance-type (default = weka.clusterers.forOPTICSAndDBScan.DataObjects.EuclidianDataObject)</pre> * <!-- options-end --> * * @param options The list of options as an array of strings * @throws java.lang.Exception If an option is not supported */ public void setOptions(String[] options) throws Exception { String optionString = Utils.getOption('E', options); if (optionString.length() != 0) { setEpsilon(Double.parseDouble(optionString)); } optionString = Utils.getOption('M', options); if (optionString.length() != 0) { setMinPoints(Integer.parseInt(optionString)); } optionString = Utils.getOption('I', options); if (optionString.length() != 0) { setDatabase_Type(optionString); } optionString = Utils.getOption('D', options); if (optionString.length() != 0) { setDatabase_distanceType(optionString); } } /** * Gets the current option settings for the OptionHandler. 
* * @return String[] The list of current option settings as an array of strings */ public String[] getOptions() { String[] options = new String[8]; int current = 0; options[current++] = "-E"; options[current++] = "" + getEpsilon(); options[current++] = "-M"; options[current++] = "" + getMinPoints(); options[current++] = "-I"; options[current++] = "" + getDatabase_Type(); options[current++] = "-D"; options[current++] = "" + getDatabase_distanceType(); return options; } /** * Returns a new Class-Instance of the specified database * @param database_Type String of the specified database * @param instances Instances that were delivered from WEKA * @return Database New constructed Database */ public Database databaseForName(String database_Type, Instances instances) { Object o = null; Constructor co = null; try { co = (Class.forName(database_Type)).getConstructor(new Class[]{Instances.class}); o = co.newInstance(new Object[]{instances}); } catch (NoSuchMethodException e) { e.printStackTrace(); } catch (SecurityException e) { e.printStackTrace(); } catch (ClassNotFoundException e) { e.printStackTrace(); } catch (InstantiationException e) { e.printStackTrace(); } catch (IllegalAccessException e) { e.printStackTrace(); } catch (InvocationTargetException e) { e.printStackTrace(); } return (Database) o; } /** * Returns a new Class-Instance of the specified database * @param database_distanceType String of the specified distance-type * @param instance The original instance that needs to hold by this DataObject * @param key Key for this DataObject * @param database Link to the database * @return DataObject New constructed DataObject */ public DataObject dataObjectForName(String database_distanceType, Instance instance, String key, Database database) { Object o = null; Constructor co = null; try { co = (Class.forName(database_distanceType)). 
getConstructor(new Class[]{Instance.class, String.class, Database.class}); o = co.newInstance(new Object[]{instance, key, database}); } catch (NoSuchMethodException e) { e.printStackTrace(); } catch (SecurityException e) { e.printStackTrace(); } catch (ClassNotFoundException e) { e.printStackTrace(); } catch (InstantiationException e) { e.printStackTrace(); } catch (IllegalAccessException e) { e.printStackTrace(); } catch (InvocationTargetException e) { e.printStackTrace(); } return (DataObject) o; } /** * Sets a new value for minPoints * @param minPoints MinPoints */ public void setMinPoints(int minPoints) { this.minPoints = minPoints; } /** * Sets a new value for epsilon * @param epsilon Epsilon */ public void setEpsilon(double epsilon) { this.epsilon = epsilon; } /** * Returns the value of epsilon * @return double Epsilon */ public double getEpsilon() { return epsilon; } /** * Returns the value of minPoints * @return int MinPoints */ public int getMinPoints() { return minPoints; } /** * Returns the distance-type * @return String Distance-type */ public String getDatabase_distanceType() { return database_distanceType; } /** * Returns the type of the used index (database) * @return String Index-type */ public String getDatabase_Type() { return database_Type; } /** * Sets a new distance-type * @param database_distanceType The new distance-type */ public void setDatabase_distanceType(String database_distanceType) { this.database_distanceType = database_distanceType; } /** * Sets a new database-type * @param database_Type The new database-type */ public void setDatabase_Type(String database_Type) { this.database_Type = database_Type; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String epsilonTipText() { return "radius of the epsilon-range-queries"; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in 
the explorer/experimenter gui */ public String minPointsTipText() { return "minimun number of DataObjects required in an epsilon-range-query"; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String database_TypeTipText() { return "used database"; } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String database_distanceTypeTipText() { return "used distance-type"; } /** * Returns a string describing this DataMining-Algorithm * @return String Information for the gui-explorer */ public String globalInfo() { return getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Martin Ester and Hans-Peter Kriegel and Joerg Sander and Xiaowei Xu"); result.setValue(Field.TITLE, "A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise"); result.setValue(Field.BOOKTITLE, "Second International Conference on Knowledge Discovery and Data Mining"); result.setValue(Field.EDITOR, "Evangelos Simoudis and Jiawei Han and Usama M. 
Fayyad"); result.setValue(Field.YEAR, "1996"); result.setValue(Field.PAGES, "226-231"); result.setValue(Field.PUBLISHER, "AAAI Press"); return result; } /** * Returns a description of the clusterer * * @return a string representation of the clusterer */ public String toString() { StringBuffer stringBuffer = new StringBuffer(); stringBuffer.append("DBScan clustering results\n" + "========================================================================================\n\n"); stringBuffer.append("Clustered DataObjects: " + database.size() + "\n"); stringBuffer.append("Number of attributes: " + database.getInstances().numAttributes() + "\n"); stringBuffer.append("Epsilon: " + getEpsilon() + "; minPoints: " + getMinPoints() + "\n"); stringBuffer.append("Index: " + getDatabase_Type() + "\n"); stringBuffer.append("Distance-type: " + getDatabase_distanceType() + "\n"); stringBuffer.append("Number of generated clusters: " + numberOfGeneratedClusters + "\n"); DecimalFormat decimalFormat = new DecimalFormat(".##"); stringBuffer.append("Elapsed time: " + decimalFormat.format(elapsedTime) + "\n\n"); for (int i = 0; i < database.size(); i++) { DataObject dataObject = database.getDataObject(Integer.toString(i)); stringBuffer.append("(" + Utils.doubleToString(Double.parseDouble(dataObject.getKey()), (Integer.toString(database.size()).length()), 0) + ".) " + Utils.padRight(dataObject.toString(), 69) + " --> " + ((dataObject.getClusterLabel() == DataObject.NOISE) ? "NOISE\n" : dataObject.getClusterLabel() + "\n")); } return stringBuffer.toString() + "\n"; } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5538 $"); } /** * Main Method for testing DBScan * @param args Valid parameters are: 'E' epsilon (default = 0.9); 'M' minPoints (default = 6); * 'I' index-type (default = weka.clusterers.forOPTICSAndDBScan.Databases.SequentialDatabase); * 'D' distance-type (default = weka.clusterers.forOPTICSAndDBScan.DataObjects.EuclidianDataObject); */ public static void main(String[] args) { runClusterer(new DBScan(), args); } }
22,827
34.613105
243
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/DensityBasedClusterer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * DensityBasedClusterer.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.clusterers; import weka.core.Instance; /** * Interface for clusterers that can estimate the density for a given instance. * Implementations will typically extend AbstractDensityBasedClusterer. * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 9379 $ */ public interface DensityBasedClusterer extends Clusterer { /** * Returns the prior probability of each cluster. * * @return the prior probability for each cluster * @exception Exception if priors could not be returned successfully */ double[] clusterPriors() throws Exception; /** * Computes the log of the conditional density (per cluster) for a given * instance. * * @param instance the instance to compute the density for * @return an array containing the estimated densities * @exception Exception if the density could not be computed successfully */ double[] logDensityPerClusterForInstance(Instance instance) throws Exception; /** * Computes the density for a given instance. * * @param instance the instance to compute the density for * @return the density. 
* @exception Exception if the density could not be computed successfully */ double logDensityForInstance(Instance instance) throws Exception; /** * Returns the logs of the joint densities for a given instance. * * @param inst the instance * @return the array of values * @exception Exception if values could not be computed */ double[] logJointDensitiesForInstance(Instance inst) throws Exception; /** * Returns the cluster probability distribution for an instance. * * @param instance the instance to be clustered * @return the probability distribution * @throws Exception if computation fails */ @Override double[] distributionForInstance(Instance instance) throws Exception; }
2,700
31.939024
79
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/EM.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * EM.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.clusterers; import java.util.ArrayList; import java.util.Enumeration; import java.util.List; import java.util.Random; import java.util.Vector; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.estimators.DiscreteEstimator; import weka.estimators.Estimator; import weka.filters.unsupervised.attribute.ReplaceMissingValues; /** <!-- globalinfo-start --> * Simple EM (expectation maximisation) class.<br/> * <br/> * EM assigns a probability distribution to each instance which indicates the probability of it belonging to each of the clusters. EM can decide how many clusters to create by cross validation, or you may specify apriori how many clusters to generate.<br/> * <br/> * The cross validation performed to determine the number of clusters is done in the following steps:<br/> * 1. the number of clusters is set to 1<br/> * 2. the training set is split randomly into 10 folds.<br/> * 3. 
EM is performed 10 times using the 10 folds the usual CV way.<br/> * 4. the loglikelihood is averaged over all 10 results.<br/> * 5. if loglikelihood has increased the number of clusters is increased by 1 and the program continues at step 2. <br/> * <br/> * The number of folds is fixed to 10, as long as the number of instances in the training set is not smaller 10. If this is the case the number of folds is set equal to the number of instances. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N &lt;num&gt; * number of clusters. If omitted or -1 specified, then * cross validation is used to select the number of clusters.</pre> * * <pre> -X &lt;num&gt; * Number of folds to use when cross-validating to find the best number of clusters.</pre> * * <pre> -max &lt;num&gt; * Maximum number of clusters to consider during cross-validation. If omitted or -1 specified, then * there is no upper limit on the number of clusters.</pre> * * <pre> -ll-cv &lt;num&gt; * Minimum improvement in cross-validated log likelihood required * to consider increasing the number of clusters. * (default 1e-6)</pre> * * <pre> -I &lt;num&gt; * max iterations. * (default 100)</pre> * * <pre> -ll-iter &lt;num&gt; * Minimum improvement in log likelihood required * to perform another iteration of the E and M steps. * (default 1e-6)</pre> * * <pre> -V * verbose.</pre> * * <pre> -M &lt;num&gt; * minimum allowable standard deviation for normal density * computation * (default 1e-6)</pre> * * <pre> -O * Display model in old format (good when there are many clusters) * </pre> * * <pre> -num-slots &lt;num&gt; * Number of execution slots. * (default 1 - i.e. no parallelism)</pre> * * <pre> -S &lt;num&gt; * Random number seed. 
* (default 100)</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 9361 $ */ public class EM extends RandomizableDensityBasedClusterer implements NumberOfClustersRequestable, WeightedInstancesHandler { /** for serialization */ static final long serialVersionUID = 8348181483812829475L; /** hold the discrete estimators for each cluster */ private Estimator m_model[][]; /** hold the normal estimators for each cluster */ private double m_modelNormal[][][]; /** default minimum standard deviation */ private double m_minStdDev = 1e-6; private double[] m_minStdDevPerAtt; /** hold the weights of each instance for each cluster */ private double m_weights[][]; /** the prior probabilities for clusters */ private double m_priors[]; /** the loglikelihood of the data */ private double m_loglikely; /** full training instances */ private Instances m_theInstances = null; /** number of clusters selected by the user or cross validation */ private int m_num_clusters; /** * the initial number of clusters requested by the user--- -1 if xval is to be * used to find the number of clusters */ private int m_initialNumClusters; /** Don't consider more clusters than this under CV (-1 means no upper bound) */ private int m_upperBoundNumClustersCV = -1; /** number of attributes */ private int m_num_attribs; /** number of training instances */ private int m_num_instances; /** maximum iterations to perform */ private int m_max_iterations; /** attribute min values */ private double[] m_minValues; /** attribute max values */ private double[] m_maxValues; /** random number generator */ private Random m_rr; /** Verbose? 
*/ private boolean m_verbose; /** globally replace missing values */ private ReplaceMissingValues m_replaceMissing; /** display model output in old-style format */ private boolean m_displayModelInOldFormat; /** Number of threads to use for E and M steps */ protected int m_executionSlots = 1; /** For parallel execution mode */ protected transient ExecutorService m_executorPool; /** False once training has completed */ protected boolean m_training; /** The actual number of iterations performed */ protected int m_iterationsPerformed; /** Minimum improvement in log likelihood when iterating */ protected double m_minLogLikelihoodImprovementIterating = 1e-6; /** Minimum improvement to increase number of clusters when cross-validating */ protected double m_minLogLikelihoodImprovementCV = 1e-6; /** The number of folds to use for cross-validation */ protected int m_cvFolds = 10; /** * Returns a string describing this clusterer * * @return a description of the evaluator suitable for displaying in the * explorer/experimenter gui */ public String globalInfo() { return "Simple EM (expectation maximisation) class.\n\n" + "EM assigns a probability distribution to each instance which " + "indicates the probability of it belonging to each of the clusters. " + "EM can decide how many clusters to create by cross validation, or you " + "may specify apriori how many clusters to generate.\n\n" + "The cross validation performed to determine the number of clusters " + "is done in the following steps:\n" + "1. the number of clusters is set to 1\n" + "2. the training set is split randomly into 10 folds.\n" + "3. EM is performed 10 times using the 10 folds the usual CV way.\n" + "4. the loglikelihood is averaged over all 10 results.\n" + "5. if loglikelihood has increased the number of clusters is increased " + "by 1 and the program continues at step 2. \n\n" + "The number of folds is fixed to 10, as long as the number of " + "instances in the training set is not smaller 10. 
If this is the case " + "the number of folds is set equal to the number of instances."; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tnumber of clusters. If omitted or -1 specified, then \n" + "\tcross validation is used to select the number of clusters.", "N", 1, "-N <num>")); result .addElement(new Option( "\tNumber of folds to use when cross-validating to find the best number of clusters.", "X", 1, "-X <num>")); result .addElement(new Option( "\tMaximum number of clusters to consider during cross-validation. If omitted or -1 specified, then \n" + "\tthere is no upper limit on the number of clusters.", "max", 1, "-max <num>")); result.addElement(new Option( "\tMinimum improvement in cross-validated log likelihood required" + "\n\tto consider increasing the number of clusters." + "\n\t(default 1e-6)", "ll-cv", 1, "-ll-cv <num>")); result.addElement(new Option("\tmax iterations." + "\n\t(default 100)", "I", 1, "-I <num>")); result.addElement(new Option( "\tMinimum improvement in log likelihood required" + "\n\tto perform another iteration of the E and M steps." + "\n\t(default 1e-6)", "ll-iter", 1, "-ll-iter <num>")); result.addElement(new Option("\tverbose.", "V", 0, "-V")); result.addElement(new Option( "\tminimum allowable standard deviation for normal density\n" + "\tcomputation\n" + "\t(default 1e-6)", "M", 1, "-M <num>")); result.addElement(new Option( "\tDisplay model in old format (good when there are " + "many clusters)\n", "O", 0, "-O")); result.addElement(new Option("\tNumber of execution slots.\n" + "\t(default 1 - i.e. no parallelism)", "num-slots", 1, "-num-slots <num>")); Enumeration en = super.listOptions(); while (en.hasMoreElements()) result.addElement(en.nextElement()); return result.elements(); } /** * Parses a given list of options. 
* <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N &lt;num&gt; * number of clusters. If omitted or -1 specified, then * cross validation is used to select the number of clusters.</pre> * * <pre> -X &lt;num&gt; * Number of folds to use when cross-validating to find the best number of clusters.</pre> * * <pre> -max &lt;num&gt; * Maximum number of clusters to consider during cross-validation. If omitted or -1 specified, then * there is no upper limit on the number of clusters.</pre> * * <pre> -ll-cv &lt;num&gt; * Minimum improvement in cross-validated log likelihood required * to consider increasing the number of clusters. * (default 1e-6)</pre> * * <pre> -I &lt;num&gt; * max iterations. * (default 100)</pre> * * <pre> -ll-iter &lt;num&gt; * Minimum improvement in log likelihood required * to perform another iteration of the E and M steps. * (default 1e-6)</pre> * * <pre> -V * verbose.</pre> * * <pre> -M &lt;num&gt; * minimum allowable standard deviation for normal density * computation * (default 1e-6)</pre> * * <pre> -O * Display model in old format (good when there are many clusters) * </pre> * * <pre> -num-slots &lt;num&gt; * Number of execution slots. * (default 1 - i.e. no parallelism)</pre> * * <pre> -S &lt;num&gt; * Random number seed. 
* (default 100)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { resetOptions(); setDebug(Utils.getFlag('V', options)); String optionString = Utils.getOption('I', options); if (optionString.length() != 0) { setMaxIterations(Integer.parseInt(optionString)); } optionString = Utils.getOption('X', options); if (optionString.length() > 0) { setNumFolds(Integer.parseInt(optionString)); } optionString = Utils.getOption("ll-iter", options); if (optionString.length() > 0) { setMinLogLikelihoodImprovementIterating(Double.parseDouble(optionString)); } optionString = Utils.getOption("ll-cv", options); if (optionString.length() > 0) { setMinLogLikelihoodImprovementCV(Double.parseDouble(optionString)); } optionString = Utils.getOption('N', options); if (optionString.length() != 0) { setNumClusters(Integer.parseInt(optionString)); } optionString = Utils.getOption("max", options); if (optionString.length() > 0) { setMaximumNumberOfClusters(Integer.parseInt(optionString)); } optionString = Utils.getOption('M', options); if (optionString.length() != 0) { setMinStdDev((new Double(optionString)).doubleValue()); } setDisplayModelInOldFormat(Utils.getFlag('O', options)); String slotsS = Utils.getOption("num-slots", options); if (slotsS.length() > 0) { setNumExecutionSlots(Integer.parseInt(slotsS)); } super.setOptions(options); } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String numFoldsTipText() { return "The number of folds to use when cross-validating to find the " + "best number of clusters (default = 10)"; } /** * Set the number of folds to use when cross-validating to find the best * number of clusters. 
* * @param folds the number of folds to use */ public void setNumFolds(int folds) { m_cvFolds = folds; } /** * Get the number of folds to use when cross-validating to find the best * number of clusters. * * @return the number of folds to use */ public int getNumFolds() { return m_cvFolds; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String minLogLikelihoodImprovementCVTipText() { return "The minimum improvement in cross-validated log likelihood required " + "in order to consider increasing the number of clusters " + "when cross-validiting to find the best number of clusters"; } /** * Set the minimum improvement in cross-validated log likelihood required to * consider increasing the number of clusters when cross-validating to find * the best number of clusters * * @param min the minimum improvement in log likelihood */ public void setMinLogLikelihoodImprovementCV(double min) { m_minLogLikelihoodImprovementCV = min; } /** * Get the minimum improvement in cross-validated log likelihood required to * consider increasing the number of clusters when cross-validating to find * the best number of clusters * * @return the minimum improvement in log likelihood */ public double getMinLogLikelihoodImprovementCV() { return m_minLogLikelihoodImprovementCV; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String minLogLikelihoodImprovementIteratingTipText() { return "The minimum improvement in log likelihood required to " + "perform another iteration of the E and M steps"; } /** * Set the minimum improvement in log likelihood necessary to perform another * iteration of the E and M steps. 
* * @param min the minimum improvement in log likelihood */ public void setMinLogLikelihoodImprovementIterating(double min) { m_minLogLikelihoodImprovementIterating = min; } /** * Get the minimum improvement in log likelihood necessary to perform another * iteration of the E and M steps. * * @return the minimum improvement in log likelihood */ public double getMinLogLikelihoodImprovementIterating() { return m_minLogLikelihoodImprovementIterating; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String numExecutionSlotsTipText() { return "The number of execution slots (threads) to use. " + "Set equal to the number of available cpu/cores"; } /** * Set the degree of parallelism to use. * * @param slots the number of tasks to run in parallel when computing the * nearest neighbors and evaluating different values of k between the * lower and upper bounds */ public void setNumExecutionSlots(int slots) { m_executionSlots = slots; } /** * Get the degree of parallelism to use. * * @return the number of tasks to run in parallel when computing the nearest * neighbors and evaluating different values of k between the lower * and upper bounds */ public int getNumExecutionSlots() { return m_executionSlots; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String displayModelInOldFormatTipText() { return "Use old format for model output. The old format is " + "better when there are many clusters. The new format " + "is better when there are fewer clusters and many attributes."; } /** * Set whether to display model output in the old, original format. * * @param d true if model ouput is to be shown in the old format */ public void setDisplayModelInOldFormat(boolean d) { m_displayModelInOldFormat = d; } /** * Get whether to display model output in the old, original format. 
* * @return true if model ouput is to be shown in the old format */ public boolean getDisplayModelInOldFormat() { return m_displayModelInOldFormat; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String minStdDevTipText() { return "set minimum allowable standard deviation"; } /** * Set the minimum value for standard deviation when calculating normal * density. Reducing this value can help prevent arithmetic overflow resulting * from multiplying large densities (arising from small standard deviations) * when there are many singleton or near singleton values. * * @param m minimum value for standard deviation */ public void setMinStdDev(double m) { m_minStdDev = m; } public void setMinStdDevPerAtt(double[] m) { m_minStdDevPerAtt = m; } /** * Get the minimum allowable standard deviation. * * @return the minumum allowable standard deviation */ public double getMinStdDev() { return m_minStdDev; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String numClustersTipText() { return "set number of clusters. -1 to select number of clusters " + "automatically by cross validation."; } /** * Set the number of clusters (-1 to select by CV). * * @param n the number of clusters * @throws Exception if n is 0 */ @Override public void setNumClusters(int n) throws Exception { if (n == 0) { throw new Exception("Number of clusters must be > 0. (or -1 to " + "select by cross validation)."); } if (n < 0) { m_num_clusters = -1; m_initialNumClusters = -1; } else { m_num_clusters = n; m_initialNumClusters = n; } } /** * Get the number of clusters * * @return the number of clusters. 
*/ public int getNumClusters() { return m_initialNumClusters; } /** * Set the maximum number of clusters to consider when cross-validating * * @param n the maximum number of clusters to consider */ public void setMaximumNumberOfClusters(int n) { m_upperBoundNumClustersCV = n; } /** * Get the maximum number of clusters to consider when cross-validating * * @return the maximum number of clusters to consider */ public int getMaximumNumberOfClusters() { return m_upperBoundNumClustersCV; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String maximumNumberOfClustersTipText() { return "The maximum number of clusters to consider during cross-validation " + "to select the best number of clusters"; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String maxIterationsTipText() { return "maximum number of iterations"; } /** * Set the maximum number of iterations to perform * * @param i the number of iterations * @throws Exception if i is less than 1 */ public void setMaxIterations(int i) throws Exception { if (i < 1) { throw new Exception("Maximum number of iterations must be > 0!"); } m_max_iterations = i; } /** * Get the maximum number of iterations * * @return the number of iterations */ public int getMaxIterations() { return m_max_iterations; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String debugTipText() { return "If set to true, clusterer may output additional info to " + "the console."; } /** * Set debug mode - verbose output * * @param v true for verbose output */ public void setDebug(boolean v) { m_verbose = v; } /** * Get debug mode * * @return true if debug mode is set */ public boolean getDebug() { return m_verbose; } /** * Gets the current 
settings of EM. * * @return an array of strings suitable for passing to setOptions() */ @Override public String[] getOptions() { int i; Vector result; String[] options; result = new Vector(); result.add("-I"); result.add("" + m_max_iterations); result.add("-N"); result.add("" + getNumClusters()); result.add("-X"); result.add("" + getNumFolds()); result.add("-max"); result.add("" + getMaximumNumberOfClusters()); result.add("-ll-cv"); result.add("" + getMinLogLikelihoodImprovementCV()); result.add("-ll-iter"); result.add("" + getMinLogLikelihoodImprovementIterating()); result.add("-M"); result.add("" + getMinStdDev()); if (m_displayModelInOldFormat) { result.add("-O"); } result.add("-num-slots"); result.add("" + getNumExecutionSlots()); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); return (String[]) result.toArray(new String[result.size()]); } /** * Initialize the global aggregated estimators and storage. * * @param inst the instances * @throws Exception if initialization fails **/ private void EM_Init(Instances inst) throws Exception { int i, j, k; // run k means 10 times and choose best solution SimpleKMeans bestK = null; double bestSqE = Double.MAX_VALUE; for (i = 0; i < 10; i++) { SimpleKMeans sk = new SimpleKMeans(); sk.setSeed(m_rr.nextInt()); sk.setNumClusters(m_num_clusters); sk.setNumExecutionSlots(m_executionSlots); sk.setDisplayStdDevs(true); sk.buildClusterer(inst); if (sk.getSquaredError() < bestSqE) { bestSqE = sk.getSquaredError(); bestK = sk; } } // initialize with best k-means solution m_num_clusters = bestK.numberOfClusters(); m_weights = new double[inst.numInstances()][m_num_clusters]; m_model = new DiscreteEstimator[m_num_clusters][m_num_attribs]; m_modelNormal = new double[m_num_clusters][m_num_attribs][3]; m_priors = new double[m_num_clusters]; Instances centers = bestK.getClusterCentroids(); Instances stdD = bestK.getClusterStandardDevs(); int[][][] nominalCounts = bestK.getClusterNominalCounts(); 
int[] clusterSizes = bestK.getClusterSizes(); for (i = 0; i < m_num_clusters; i++) { Instance center = centers.instance(i); for (j = 0; j < m_num_attribs; j++) { if (inst.attribute(j).isNominal()) { m_model[i][j] = new DiscreteEstimator(m_theInstances.attribute(j) .numValues(), true); for (k = 0; k < inst.attribute(j).numValues(); k++) { m_model[i][j].addValue(k, nominalCounts[i][j][k]); } } else { double minStdD = (m_minStdDevPerAtt != null) ? m_minStdDevPerAtt[j] : m_minStdDev; double mean = (center.isMissing(j)) ? inst.meanOrMode(j) : center .value(j); m_modelNormal[i][j][0] = mean; double stdv = (stdD.instance(i).isMissing(j)) ? ((m_maxValues[j] - m_minValues[j]) / (2 * m_num_clusters)) : stdD.instance(i).value(j); if (stdv < minStdD) { stdv = inst.attributeStats(j).numericStats.stdDev; if (Double.isInfinite(stdv)) { stdv = minStdD; } if (stdv < minStdD) { stdv = minStdD; } } if (stdv <= 0) { stdv = m_minStdDev; } m_modelNormal[i][j][1] = stdv; m_modelNormal[i][j][2] = 1.0; } } } for (j = 0; j < m_num_clusters; j++) { // m_priors[j] += 1.0; m_priors[j] = clusterSizes[j]; } Utils.normalize(m_priors); } /** * calculate prior probabilites for the clusters * * @param inst the instances * @throws Exception if priors can't be calculated **/ private void estimate_priors(Instances inst) throws Exception { for (int i = 0; i < m_num_clusters; i++) { m_priors[i] = 0.0; } for (int i = 0; i < inst.numInstances(); i++) { for (int j = 0; j < m_num_clusters; j++) { m_priors[j] += inst.instance(i).weight() * m_weights[i][j]; } } Utils.normalize(m_priors); } /** Constant for normal distribution. */ private static double m_normConst = Math.log(Math.sqrt(2 * Math.PI)); /** * Density function of normal distribution. 
* * @param x input value * @param mean mean of distribution * @param stdDev standard deviation of distribution * @return the density */ private double logNormalDens(double x, double mean, double stdDev) { double diff = x - mean; // System.err.println("x: "+x+" mean: "+mean+" diff: "+diff+" stdv: "+stdDev); // System.err.println("diff*diff/(2*stdv*stdv): "+ (diff * diff / (2 * // stdDev * stdDev))); return -(diff * diff / (2 * stdDev * stdDev)) - m_normConst - Math.log(stdDev); } /** * New probability estimators for an iteration */ private void new_estimators() { for (int i = 0; i < m_num_clusters; i++) { for (int j = 0; j < m_num_attribs; j++) { if (m_theInstances.attribute(j).isNominal()) { m_model[i][j] = new DiscreteEstimator(m_theInstances.attribute(j) .numValues(), true); } else { m_modelNormal[i][j][0] = m_modelNormal[i][j][1] = m_modelNormal[i][j][2] = 0.0; } } } } /** * Start the pool of execution threads */ protected void startExecutorPool() { if (m_executorPool != null) { m_executorPool.shutdownNow(); } m_executorPool = Executors.newFixedThreadPool(m_executionSlots); } private class ETask implements Callable<double[]> { protected int m_lowNum; protected int m_highNum; protected boolean m_changeWeights; protected Instances m_eData; public ETask(Instances data, int lowInstNum, int highInstNum, boolean changeWeights) { m_eData = data; m_lowNum = lowInstNum; m_highNum = highInstNum; m_changeWeights = changeWeights; } @Override public double[] call() { double[] llk = new double[2]; double loglk = 0.0, sOW = 0.0; try { for (int i = m_lowNum; i < m_highNum; i++) { Instance in = m_eData.instance(i); loglk += in.weight() * EM.this.logDensityForInstance(in); sOW += in.weight(); if (m_changeWeights) { m_weights[i] = distributionForInstance(in); } } // completedETask(loglk, sOW, true); } catch (Exception ex) { // completedETask(0, 0, false); } llk[0] = loglk; llk[1] = sOW; return llk; } } private class MTask implements Callable<MTask> { // protected Instances 
m_dataChunk; protected int m_start; protected int m_end; protected Instances m_inst; protected DiscreteEstimator[][] m_taskModel; double[][][] m_taskModelNormal; public MTask(Instances inst, int start, int end, DiscreteEstimator[][] discEst, double[][][] numericEst) { // m_dataChunk = chunk; m_start = start; m_end = end; m_inst = inst; m_taskModel = discEst; m_taskModelNormal = numericEst; } @Override public MTask call() { for (int i = 0; i < m_num_clusters; i++) { for (int j = 0; j < m_num_attribs; j++) { for (int l = m_start; l < m_end; l++) { Instance in = m_inst.instance(l); if (!in.isMissing(j)) { if (m_inst.attribute(j).isNominal()) { m_taskModel[i][j].addValue(in.value(j), in.weight() * m_weights[l][i]); } else { m_taskModelNormal[i][j][0] += (in.value(j) * in.weight() * m_weights[l][i]); m_taskModelNormal[i][j][2] += in.weight() * m_weights[l][i]; m_taskModelNormal[i][j][1] += (in.value(j) * in.value(j) * in.weight() * m_weights[l][i]); } } } } } // completedMTask(this, true); return this; } } private void M_reEstimate(Instances inst) { // calcualte mean and std deviation for numeric attributes for (int j = 0; j < m_num_attribs; j++) { if (!inst.attribute(j).isNominal()) { for (int i = 0; i < m_num_clusters; i++) { if (m_modelNormal[i][j][2] <= 0) { m_modelNormal[i][j][1] = Double.MAX_VALUE; // m_modelNormal[i][j][0] = 0; m_modelNormal[i][j][0] = m_minStdDev; } else { // variance m_modelNormal[i][j][1] = (m_modelNormal[i][j][1] - (m_modelNormal[i][j][0] * m_modelNormal[i][j][0] / m_modelNormal[i][j][2])) / (m_modelNormal[i][j][2]); if (m_modelNormal[i][j][1] < 0) { m_modelNormal[i][j][1] = 0; } // std dev double minStdD = (m_minStdDevPerAtt != null) ? 
m_minStdDevPerAtt[j] : m_minStdDev; m_modelNormal[i][j][1] = Math.sqrt(m_modelNormal[i][j][1]); if ((m_modelNormal[i][j][1] <= minStdD)) { m_modelNormal[i][j][1] = inst.attributeStats(j).numericStats.stdDev; if ((m_modelNormal[i][j][1] <= minStdD)) { m_modelNormal[i][j][1] = minStdD; } } if ((m_modelNormal[i][j][1] <= 0)) { m_modelNormal[i][j][1] = m_minStdDev; } if (Double.isInfinite(m_modelNormal[i][j][1])) { m_modelNormal[i][j][1] = m_minStdDev; } // mean m_modelNormal[i][j][0] /= m_modelNormal[i][j][2]; } } } } } /** * The M step of the EM algorithm. * * @param inst the training instances * @throws Exception if something goes wrong */ private void M(Instances inst) throws Exception { int i, j, l; new_estimators(); estimate_priors(inst); // sum for (i = 0; i < m_num_clusters; i++) { for (j = 0; j < m_num_attribs; j++) { for (l = 0; l < inst.numInstances(); l++) { Instance in = inst.instance(l); if (!in.isMissing(j)) { if (inst.attribute(j).isNominal()) { m_model[i][j] .addValue(in.value(j), in.weight() * m_weights[l][i]); } else { m_modelNormal[i][j][0] += (in.value(j) * in.weight() * m_weights[l][i]); m_modelNormal[i][j][2] += in.weight() * m_weights[l][i]; m_modelNormal[i][j][1] += (in.value(j) * in.value(j) * in.weight() * m_weights[l][i]); } } } } } // re-estimate Gaussian parameters M_reEstimate(inst); } /** * The E step of the EM algorithm. Estimate cluster membership probabilities. 
* * @param inst the training instances * @param change_weights whether to change the weights * @return the average log likelihood * @throws Exception if computation fails */ private double E(Instances inst, boolean change_weights) throws Exception { double loglk = 0.0, sOW = 0.0; for (int l = 0; l < inst.numInstances(); l++) { Instance in = inst.instance(l); loglk += in.weight() * logDensityForInstance(in); sOW += in.weight(); if (change_weights) { m_weights[l] = distributionForInstance(in); } } // reestimate priors /* * if (change_weights) { estimate_priors(inst); } */ return loglk / sOW; } /** * Constructor. * **/ public EM() { super(); m_SeedDefault = 100; resetOptions(); } /** * Reset to default options */ protected void resetOptions() { m_minStdDev = 1e-6; m_max_iterations = 100; m_Seed = m_SeedDefault; m_num_clusters = -1; m_initialNumClusters = -1; m_verbose = false; m_minLogLikelihoodImprovementIterating = 1e-6; m_minLogLikelihoodImprovementCV = 1e-6; m_executionSlots = 1; m_cvFolds = 10; } /** * Return the normal distributions for the cluster models * * @return a <code>double[][][]</code> value */ public double[][][] getClusterModelsNumericAtts() { return m_modelNormal; } /** * Return the priors for the clusters * * @return a <code>double[]</code> value */ public double[] getClusterPriors() { return m_priors; } /** * Outputs the generated clusters into a string. 
* * @return the clusterer in string representation */ @Override public String toString() { if (m_displayModelInOldFormat) { return toStringOriginal(); } if (m_priors == null) { return "No clusterer built yet!"; } StringBuffer temp = new StringBuffer(); temp.append("\nEM\n==\n"); if (m_initialNumClusters == -1) { temp.append("\nNumber of clusters selected by cross validation: " + m_num_clusters + "\n"); } else { temp.append("\nNumber of clusters: " + m_num_clusters + "\n"); } temp.append("Number of iterations performed: " + m_iterationsPerformed + "\n"); int maxWidth = 0; int maxAttWidth = 0; boolean containsKernel = false; // set up max widths // attributes for (int i = 0; i < m_num_attribs; i++) { Attribute a = m_theInstances.attribute(i); if (a.name().length() > maxAttWidth) { maxAttWidth = m_theInstances.attribute(i).name().length(); } if (a.isNominal()) { // check values for (int j = 0; j < a.numValues(); j++) { String val = a.value(j) + " "; if (val.length() > maxAttWidth) { maxAttWidth = val.length(); } } } } for (int i = 0; i < m_num_clusters; i++) { for (int j = 0; j < m_num_attribs; j++) { if (m_theInstances.attribute(j).isNumeric()) { // check mean and std. dev. against maxWidth double mean = Math.log(Math.abs(m_modelNormal[i][j][0])) / Math.log(10.0); double stdD = Math.log(Math.abs(m_modelNormal[i][j][1])) / Math.log(10.0); double width = (mean > stdD) ? 
mean : stdD; if (width < 0) { width = 1; } // decimal + # decimal places + 1 width += 6.0; if ((int) width > maxWidth) { maxWidth = (int) width; } } else { // nominal distributions DiscreteEstimator d = (DiscreteEstimator) m_model[i][j]; for (int k = 0; k < d.getNumSymbols(); k++) { String size = Utils.doubleToString(d.getCount(k), maxWidth, 4) .trim(); if (size.length() > maxWidth) { maxWidth = size.length(); } } int sum = Utils.doubleToString(d.getSumOfCounts(), maxWidth, 4) .trim().length(); if (sum > maxWidth) { maxWidth = sum; } } } } if (maxAttWidth < "Attribute".length()) { maxAttWidth = "Attribute".length(); } maxAttWidth += 2; temp.append("\n\n"); temp.append(pad("Cluster", " ", (maxAttWidth + maxWidth + 1) - "Cluster".length(), true)); temp.append("\n"); temp.append(pad("Attribute", " ", maxAttWidth - "Attribute".length(), false)); // cluster #'s for (int i = 0; i < m_num_clusters; i++) { String classL = "" + i; temp.append(pad(classL, " ", maxWidth + 1 - classL.length(), true)); } temp.append("\n"); // cluster priors temp.append(pad("", " ", maxAttWidth, true)); for (int i = 0; i < m_num_clusters; i++) { String priorP = Utils.doubleToString(m_priors[i], maxWidth, 2).trim(); priorP = "(" + priorP + ")"; temp.append(pad(priorP, " ", maxWidth + 1 - priorP.length(), true)); } temp.append("\n"); temp.append(pad("", "=", maxAttWidth + (maxWidth * m_num_clusters) + m_num_clusters + 1, true)); temp.append("\n"); for (int i = 0; i < m_num_attribs; i++) { String attName = m_theInstances.attribute(i).name(); temp.append(attName + "\n"); if (m_theInstances.attribute(i).isNumeric()) { String meanL = " mean"; temp.append(pad(meanL, " ", maxAttWidth + 1 - meanL.length(), false)); for (int j = 0; j < m_num_clusters; j++) { // means String mean = Utils.doubleToString(m_modelNormal[j][i][0], maxWidth, 4).trim(); temp.append(pad(mean, " ", maxWidth + 1 - mean.length(), true)); } temp.append("\n"); // now do std deviations String stdDevL = " std. 
dev."; temp.append(pad(stdDevL, " ", maxAttWidth + 1 - stdDevL.length(), false)); for (int j = 0; j < m_num_clusters; j++) { String stdDev = Utils.doubleToString(m_modelNormal[j][i][1], maxWidth, 4).trim(); temp.append(pad(stdDev, " ", maxWidth + 1 - stdDev.length(), true)); } temp.append("\n\n"); } else { Attribute a = m_theInstances.attribute(i); for (int j = 0; j < a.numValues(); j++) { String val = " " + a.value(j); temp.append(pad(val, " ", maxAttWidth + 1 - val.length(), false)); for (int k = 0; k < m_num_clusters; k++) { DiscreteEstimator d = (DiscreteEstimator) m_model[k][i]; String count = Utils.doubleToString(d.getCount(j), maxWidth, 4) .trim(); temp.append(pad(count, " ", maxWidth + 1 - count.length(), true)); } temp.append("\n"); } // do the totals String total = " [total]"; temp.append(pad(total, " ", maxAttWidth + 1 - total.length(), false)); for (int k = 0; k < m_num_clusters; k++) { DiscreteEstimator d = (DiscreteEstimator) m_model[k][i]; String count = Utils.doubleToString(d.getSumOfCounts(), maxWidth, 4) .trim(); temp.append(pad(count, " ", maxWidth + 1 - count.length(), true)); } temp.append("\n"); } } return temp.toString(); } private String pad(String source, String padChar, int length, boolean leftPad) { StringBuffer temp = new StringBuffer(); if (leftPad) { for (int i = 0; i < length; i++) { temp.append(padChar); } temp.append(source); } else { temp.append(source); for (int i = 0; i < length; i++) { temp.append(padChar); } } return temp.toString(); } /** * Outputs the generated clusters into a string. 
* * @return the clusterer in string representation */ protected String toStringOriginal() { if (m_priors == null) { return "No clusterer built yet!"; } StringBuffer temp = new StringBuffer(); temp.append("\nEM\n==\n"); if (m_initialNumClusters == -1) { temp.append("\nNumber of clusters selected by cross validation: " + m_num_clusters + "\n"); } else { temp.append("\nNumber of clusters: " + m_num_clusters + "\n"); } for (int j = 0; j < m_num_clusters; j++) { temp.append("\nCluster: " + j + " Prior probability: " + Utils.doubleToString(m_priors[j], 4) + "\n\n"); for (int i = 0; i < m_num_attribs; i++) { temp.append("Attribute: " + m_theInstances.attribute(i).name() + "\n"); if (m_theInstances.attribute(i).isNominal()) { if (m_model[j][i] != null) { temp.append(m_model[j][i].toString()); } } else { temp.append("Normal Distribution. Mean = " + Utils.doubleToString(m_modelNormal[j][i][0], 4) + " StdDev = " + Utils.doubleToString(m_modelNormal[j][i][1], 4) + "\n"); } } } return temp.toString(); } /** * verbose output for debugging * * @param inst the training instances */ private void EM_Report(Instances inst) { int i, j, l, m; System.out.println("======================================"); for (j = 0; j < m_num_clusters; j++) { for (i = 0; i < m_num_attribs; i++) { System.out.println("Clust: " + j + " att: " + i + "\n"); if (m_theInstances.attribute(i).isNominal()) { if (m_model[j][i] != null) { System.out.println(m_model[j][i].toString()); } } else { System.out.println("Normal Distribution. 
Mean = " + Utils.doubleToString(m_modelNormal[j][i][0], 8, 4) + " StandardDev = " + Utils.doubleToString(m_modelNormal[j][i][1], 8, 4) + " WeightSum = " + Utils.doubleToString(m_modelNormal[j][i][2], 8, 4)); } } } for (l = 0; l < inst.numInstances(); l++) { m = Utils.maxIndex(m_weights[l]); System.out.print("Inst " + Utils.doubleToString(l, 5, 0) + " Class " + m + "\t"); for (j = 0; j < m_num_clusters; j++) { System.out.print(Utils.doubleToString(m_weights[l][j], 7, 5) + " "); } System.out.println(); } } /** * estimate the number of clusters by cross validation on the training data. * * @throws Exception if something goes wrong */ private void CVClusters() throws Exception { double CVLogLikely = -Double.MAX_VALUE; double templl, tll; boolean CVincreased = true; m_num_clusters = 1; int upperBoundMaxClusters = (m_upperBoundNumClustersCV > 0) ? m_upperBoundNumClustersCV : Integer.MAX_VALUE; int num_clusters = m_num_clusters; int i; Random cvr; Instances trainCopy; int numFolds = (m_theInstances.numInstances() < m_cvFolds) ? m_theInstances .numInstances() : m_cvFolds; boolean ok = true; int seed = getSeed(); int restartCount = 0; CLUSTER_SEARCH: while (CVincreased) { if (num_clusters > upperBoundMaxClusters) { break CLUSTER_SEARCH; } // theInstances.stratify(10); CVincreased = false; cvr = new Random(getSeed()); trainCopy = new Instances(m_theInstances); trainCopy.randomize(cvr); templl = 0.0; for (i = 0; i < numFolds; i++) { Instances cvTrain = trainCopy.trainCV(numFolds, i, cvr); if (num_clusters > cvTrain.numInstances()) { break CLUSTER_SEARCH; } Instances cvTest = trainCopy.testCV(numFolds, i); m_rr = new Random(seed); for (int z = 0; z < 10; z++) m_rr.nextDouble(); m_num_clusters = num_clusters; EM_Init(cvTrain); try { iterate(cvTrain, false); } catch (Exception ex) { // catch any problems - i.e. 
empty clusters occurring ex.printStackTrace(); // System.err.println("Restarting after CV training failure ("+num_clusters+" clusters"); seed++; restartCount++; ok = false; if (restartCount > 5) { break CLUSTER_SEARCH; } break; } try { tll = E(cvTest, false); } catch (Exception ex) { // catch any problems - i.e. empty clusters occurring // ex.printStackTrace(); ex.printStackTrace(); // System.err.println("Restarting after CV testing failure ("+num_clusters+" clusters"); // throw new Exception(ex); seed++; restartCount++; ok = false; if (restartCount > 5) { break CLUSTER_SEARCH; } break; } if (m_verbose) { System.out.println("# clust: " + num_clusters + " Fold: " + i + " Loglikely: " + tll); } templl += tll; } if (ok) { restartCount = 0; seed = getSeed(); templl /= numFolds; if (m_verbose) { System.out.println("===================================" + "==============\n# clust: " + num_clusters + " Mean Loglikely: " + templl + "\n================================" + "================="); } // if (templl > CVLogLikely) { if (templl - CVLogLikely > m_minLogLikelihoodImprovementCV) { CVLogLikely = templl; CVincreased = true; num_clusters++; } } } if (m_verbose) { System.out.println("Number of clusters: " + (num_clusters - 1)); } m_num_clusters = num_clusters - 1; } /** * Returns the number of clusters. * * @return the number of clusters generated for a training dataset. * @throws Exception if number of clusters could not be returned successfully */ @Override public int numberOfClusters() throws Exception { if (m_num_clusters == -1) { throw new Exception("Haven't generated any clusters!"); } return m_num_clusters; } /** * Updates the minimum and maximum values for all the attributes based on a * new instance. 
* * @param instance the new instance */ private void updateMinMax(Instance instance) { for (int j = 0; j < m_theInstances.numAttributes(); j++) { if (!instance.isMissing(j)) { if (Double.isNaN(m_minValues[j])) { m_minValues[j] = instance.value(j); m_maxValues[j] = instance.value(j); } else { if (instance.value(j) < m_minValues[j]) { m_minValues[j] = instance.value(j); } else { if (instance.value(j) > m_maxValues[j]) { m_maxValues[j] = instance.value(j); } } } } } } /** * Returns default capabilities of the clusterer (i.e., the ones of * SimpleKMeans). * * @return the capabilities of this clusterer */ @Override public Capabilities getCapabilities() { Capabilities result = new SimpleKMeans().getCapabilities(); result.setOwner(this); return result; } /** * Generates a clusterer. Has to initialize all fields of the clusterer that * are not being set via options. * * @param data set of instances serving as training data * @throws Exception if the clusterer has not been generated successfully */ @Override public void buildClusterer(Instances data) throws Exception { m_training = true; // can clusterer handle the data? getCapabilities().testWithFail(data); m_replaceMissing = new ReplaceMissingValues(); Instances instances = new Instances(data); instances.setClassIndex(-1); m_replaceMissing.setInputFormat(instances); data = weka.filters.Filter.useFilter(instances, m_replaceMissing); instances = null; m_theInstances = data; // calculate min and max values for attributes m_minValues = new double[m_theInstances.numAttributes()]; m_maxValues = new double[m_theInstances.numAttributes()]; for (int i = 0; i < m_theInstances.numAttributes(); i++) { m_minValues[i] = m_maxValues[i] = Double.NaN; } for (int i = 0; i < m_theInstances.numInstances(); i++) { updateMinMax(m_theInstances.instance(i)); } doEM(); // save memory m_theInstances = new Instances(m_theInstances, 0); m_training = false; } /** * Returns the cluster priors. 
* * @return the cluster priors */ @Override public double[] clusterPriors() { double[] n = new double[m_priors.length]; System.arraycopy(m_priors, 0, n, 0, n.length); return n; } /** * Computes the log of the conditional density (per cluster) for a given * instance. * * @param inst the instance to compute the density for * @return an array containing the estimated densities * @throws Exception if the density could not be computed successfully */ @Override public double[] logDensityPerClusterForInstance(Instance inst) throws Exception { int i, j; double logprob; double[] wghts = new double[m_num_clusters]; if (!m_training) { m_replaceMissing.input(inst); inst = m_replaceMissing.output(); } for (i = 0; i < m_num_clusters; i++) { // System.err.println("Cluster : "+i); logprob = 0.0; for (j = 0; j < m_num_attribs; j++) { if (!inst.isMissing(j)) { if (inst.attribute(j).isNominal()) { logprob += Math.log(m_model[i][j].getProbability(inst.value(j))); } else { // numeric attribute logprob += logNormalDens(inst.value(j), m_modelNormal[i][j][0], m_modelNormal[i][j][1]); /* * System.err.println(logNormalDens(inst.value(j), * m_modelNormal[i][j][0], m_modelNormal[i][j][1]) + " "); */ } } } // System.err.println(""); wghts[i] = logprob; } return wghts; } /** * Perform the EM algorithm * * @throws Exception if something goes wrong */ private void doEM() throws Exception { if (m_verbose) { System.out.println("Seed: " + getSeed()); } m_rr = new Random(getSeed()); // throw away numbers to avoid problem of similar initial numbers // from a similar seed for (int i = 0; i < 10; i++) m_rr.nextDouble(); m_num_instances = m_theInstances.numInstances(); m_num_attribs = m_theInstances.numAttributes(); if (m_verbose) { System.out.println("Number of instances: " + m_num_instances + "\nNumber of atts: " + m_num_attribs + "\n"); } startExecutorPool(); // setDefaultStdDevs(theInstances); // cross validate to determine number of clusters? 
if (m_initialNumClusters == -1) { if (m_theInstances.numInstances() > 9) { CVClusters(); m_rr = new Random(getSeed()); for (int i = 0; i < 10; i++) m_rr.nextDouble(); } else { m_num_clusters = 1; } } // fit full training set EM_Init(m_theInstances); m_loglikely = iterate(m_theInstances, m_verbose); m_executorPool.shutdown(); } /** * Launch E step tasks * * @param inst the instances to be clustered * @return the log likelihood from this E step * @throws Exception if a problem occurs */ protected double launchESteps(Instances inst) throws Exception { int numPerTask = inst.numInstances() / m_executionSlots; double eStepLogL = 0; double eStepSow = 0; if (m_executionSlots <= 1 || inst.numInstances() < 2 * m_executionSlots) { return E(inst, true); } List<Future<double[]>> results = new ArrayList<Future<double[]>>(); for (int i = 0; i < m_executionSlots; i++) { int start = i * numPerTask; int end = start + numPerTask; if (i == m_executionSlots - 1) { end = inst.numInstances(); } ETask newTask = new ETask(inst, start, end, true); Future<double[]> futureE = m_executorPool.submit(newTask); results.add(futureE); // m_executorPool.execute(newTask); // et[i] = newTask; // newTask.run(); } for (int i = 0; i < results.size(); i++) { double[] r = results.get(i).get(); eStepLogL += r[0]; eStepSow += r[1]; } eStepLogL /= eStepSow; return eStepLogL; } /** * Launch the M step tasks * * @param inst the instances to be clustered * @throws Exception if a problem occurs */ protected void launchMSteps(Instances inst) throws Exception { if (m_executionSlots <= 1 || inst.numInstances() < 2 * m_executionSlots) { M(inst); return; } // aggregated estimators new_estimators(); estimate_priors(inst); int numPerTask = inst.numInstances() / m_executionSlots; List<Future<MTask>> results = new ArrayList<Future<MTask>>(); for (int i = 0; i < m_executionSlots; i++) { int start = i * numPerTask; int end = start + numPerTask; if (i == m_executionSlots - 1) { end = inst.numInstances(); } 
DiscreteEstimator[][] model = new DiscreteEstimator[m_num_clusters][m_num_attribs]; double[][][] normal = new double[m_num_clusters][m_num_attribs][3]; for (int ii = 0; ii < m_num_clusters; ii++) { for (int j = 0; j < m_num_attribs; j++) { if (m_theInstances.attribute(j).isNominal()) { model[ii][j] = new DiscreteEstimator(m_theInstances.attribute(j) .numValues(), false); } else { normal[ii][j][0] = normal[ii][j][1] = normal[ii][j][2] = 0.0; } } } MTask newTask = new MTask(inst, start, end, model, normal); Future futureM = m_executorPool.submit(newTask); results.add(futureM); // newTask.run(); } for (Future<MTask> t : results) { MTask m = t.get(); // aggregate for (int i = 0; i < m_num_clusters; i++) { for (int j = 0; j < m_num_attribs; j++) { if (m_theInstances.attribute(j).isNominal()) { for (int k = 0; k < m_theInstances.attribute(j).numValues(); k++) { m_model[i][j].addValue(k, m.m_taskModel[i][j].getCount(k)); } } else { m_modelNormal[i][j][0] += m.m_taskModelNormal[i][j][0]; m_modelNormal[i][j][2] += m.m_taskModelNormal[i][j][2]; m_modelNormal[i][j][1] += m.m_taskModelNormal[i][j][1]; } } } } // re-estimate Gaussian parameters M_reEstimate(inst); } /** * iterates the E and M steps until the log likelihood of the data converges. * * @param inst the training instances. * @param report be verbose. 
 * @return the log likelihood of the data
 * @throws Exception if something goes wrong
 */
private double iterate(Instances inst, boolean report) throws Exception {

  int i;
  double llkold = 0.0;
  double llk = 0.0;

  if (report) {
    EM_Report(inst);
  }

  boolean ok = false;
  int seed = getSeed();
  int restartCount = 0;
  // -1 marks "ran to the iteration cap without converging"
  m_iterationsPerformed = -1;

  // Keep restarting EM from a fresh initialization until one complete
  // E/M loop finishes without throwing (failures typically come from
  // empty clusters during re-estimation).
  while (!ok) {
    try {
      for (i = 0; i < m_max_iterations; i++) {
        llkold = llk;
        llk = launchESteps(inst);
        if (report) {
          System.out.println("Loglikely: " + llk);
        }

        // convergence test: stop once the log-likelihood gain between
        // successive iterations falls below the configured threshold
        // (skipped on the first iteration, where llkold is not meaningful)
        if (i > 0) {
          if ((llk - llkold) < m_minLogLikelihoodImprovementIterating) {
            m_iterationsPerformed = i;
            break;
          }
        }
        launchMSteps(inst);
      }
      ok = true;
    } catch (Exception ex) {
      // System.err.println("Restarting after training failure");
      ex.printStackTrace();
      // reseed the RNG differently for the next attempt
      seed++;
      restartCount++;
      m_rr = new Random(seed);
      // burn a few values so similar seeds diverge quickly
      for (int z = 0; z < 10; z++) {
        m_rr.nextDouble();
        m_rr.nextInt();
      }
      if (restartCount > 5) {
        // System.err.println("Reducing the number of clusters");
        // repeated failures: assume too many clusters and drop one
        m_num_clusters--;
        restartCount = 0;
      }
      EM_Init(m_theInstances);
      startExecutorPool();
    }
  }

  if (m_iterationsPerformed == -1) {
    m_iterationsPerformed = m_max_iterations;
  }

  if (m_verbose) {
    System.out.println("# iterations performed: " + m_iterationsPerformed);
  }

  if (report) {
    EM_Report(inst);
  }
  return llk;
}

/**
 * Returns the revision string.
 *
 * @return the revision
 */
@Override
public String getRevision() {
  return RevisionUtils.extract("$Revision: 9361 $");
}

// ============
// Test method.
// ============

/**
 * Main method for testing this class.
 *
 * @param argv should contain the following arguments:
 *          <p>
 *          -t training file [-T test file] [-N number of clusters] [-S random
 *          seed]
 */
public static void main(String[] argv) {
  runClusterer(new EM(), argv);
}
}
58,415
28.84977
256
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/FarthestFirst.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    FarthestFirst.java
 *    Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
 *
 */
package weka.clusterers;

import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;

/**
 <!-- globalinfo-start -->
 * Cluster data using the FarthestFirst algorithm.<br/>
 * <br/>
 * For more information see:<br/>
 * <br/>
 * Hochbaum, Shmoys (1985). A best possible heuristic for the k-center problem. Mathematics of Operations Research. 10(2):180-184.<br/>
 * <br/>
 * Sanjoy Dasgupta: Performance Guarantees for Hierarchical Clustering. In: 15th Annual Conference on Computational Learning Theory, 351-363, 2002.<br/>
 * <br/>
 * Notes:<br/>
 * - works as a fast simple approximate clusterer<br/>
 * - modelled after SimpleKMeans, might be a useful initializer for it
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;article{Hochbaum1985,
 *    author = {Hochbaum and Shmoys},
 *    journal = {Mathematics of Operations Research},
 *    number = {2},
 *    pages = {180-184},
 *    title = {A best possible heuristic for the k-center problem},
 *    volume = {10},
 *    year = {1985}
 * }
 *
 * &#64;inproceedings{Dasgupta2002,
 *    author = {Sanjoy Dasgupta},
 *    booktitle = {15th Annual Conference on Computational Learning Theory},
 *    pages = {351-363},
 *    publisher = {Springer},
 *    title = {Performance Guarantees for Hierarchical Clustering},
 *    year = {2002}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -N &lt;num&gt;
 *  number of clusters. (default = 2).</pre>
 *
 * <pre> -S &lt;num&gt;
 *  Random number seed.
 *  (default 1)</pre>
 *
 <!-- options-end -->
 *
 * @author Bernhard Pfahringer (bernhard@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 * @see RandomizableClusterer
 */
public class FarthestFirst
  extends RandomizableClusterer
  implements TechnicalInformationHandler {

  //Todo: rewrite to be fully incremental
  //      cleanup, like deleting m_instances

  /** for serialization */
  static final long serialVersionUID = 7499838100631329509L;

  /**
   * training instances, not necessary to keep,
   * could be replaced by m_ClusterCentroids where needed for header info
   */
  protected Instances m_instances;

  /**
   * replace missing values in training instances
   */
  protected ReplaceMissingValues m_ReplaceMissingFilter;

  /**
   * number of clusters to generate
   */
  protected int m_NumClusters = 2;

  /**
   * holds the cluster centroids
   */
  protected Instances m_ClusterCentroids;

  /**
   * attribute min values
   */
  private double [] m_Min;

  /**
   * attribute max values
   */
  private double [] m_Max;

  /**
   * Returns a string describing this clusterer
   * @return a description of the evaluator suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "Cluster data using the FarthestFirst algorithm.\n\n"
      + "For more information see:\n\n"
      + getTechnicalInformation().toString() + "\n\n"
      + "Notes:\n"
      + "- works as a fast simple approximate clusterer\n"
      + "- modelled after SimpleKMeans, might be a useful initializer for it";
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation 	result;
    TechnicalInformation 	additional;

    result = new TechnicalInformation(Type.ARTICLE);
    result.setValue(Field.AUTHOR, "Hochbaum and Shmoys");
    result.setValue(Field.YEAR, "1985");
    result.setValue(Field.TITLE, "A best possible heuristic for the k-center problem");
    result.setValue(Field.JOURNAL, "Mathematics of Operations Research");
    result.setValue(Field.VOLUME, "10");
    result.setValue(Field.NUMBER, "2");
    result.setValue(Field.PAGES, "180-184");

    additional = result.add(Type.INPROCEEDINGS);
    additional.setValue(Field.AUTHOR, "Sanjoy Dasgupta");
    additional.setValue(Field.TITLE, "Performance Guarantees for Hierarchical Clustering");
    additional.setValue(Field.BOOKTITLE, "15th Annual Conference on Computational Learning Theory");
    additional.setValue(Field.YEAR, "2002");
    additional.setValue(Field.PAGES, "351-363");
    additional.setValue(Field.PUBLISHER, "Springer");

    return result;
  }

  /**
   * Returns default capabilities of the clusterer.
   *
   * @return the capabilities of this clusterer
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();
    result.enable(Capability.NO_CLASS);

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    return result;
  }

  /**
   * Generates a clusterer. Has to initialize all fields of the clusterer
   * that are not being set via options.
   *
   * Implements the farthest-first traversal: pick a random first centroid,
   * then repeatedly add the instance farthest from all centroids chosen so
   * far.
   *
   * @param data set of instances serving as training data
   * @throws Exception if the clusterer has not been
   * generated successfully
   */
  public void buildClusterer(Instances data) throws Exception {
    // can clusterer handle the data?
    getCapabilities().testWithFail(data);

    //long start = System.currentTimeMillis();

    m_ReplaceMissingFilter = new ReplaceMissingValues();
    m_ReplaceMissingFilter.setInputFormat(data);
    m_instances = Filter.useFilter(data, m_ReplaceMissingFilter);

    initMinMax(m_instances);

    m_ClusterCentroids = new Instances(m_instances, m_NumClusters);

    int n = m_instances.numInstances();
    Random r = new Random(getSeed());
    boolean[] selected = new boolean[n];
    // minDistance[i] tracks instance i's distance to its nearest centroid
    double[] minDistance = new double[n];

    for (int i = 0; i < n; i++) minDistance[i] = Double.MAX_VALUE;

    // random first centroid
    int firstI = r.nextInt(n);
    m_ClusterCentroids.add(m_instances.instance(firstI));
    selected[firstI] = true;

    updateMinDistance(minDistance, selected, m_instances, m_instances.instance(firstI));

    // cannot have more clusters than instances
    if (m_NumClusters > n) m_NumClusters = n;

    // each remaining centroid is the instance farthest from all chosen ones
    for (int i = 1; i < m_NumClusters; i++) {
      int nextI = farthestAway(minDistance, selected);
      m_ClusterCentroids.add(m_instances.instance(nextI));
      selected[nextI] = true;
      updateMinDistance(minDistance, selected, m_instances, m_instances.instance(nextI));
    }

    // keep only the header of the training data
    m_instances = new Instances(m_instances, 0);

    //long end = System.currentTimeMillis();
    //System.out.println("Clustering Time = " + (end-start));
  }

  /**
   * Lowers each not-yet-selected instance's nearest-centroid distance if the
   * newly chosen center is closer than any previous one.
   *
   * @param minDistance per-instance distance to the nearest centroid, updated in place
   * @param selected flags instances already chosen as centroids
   * @param data the training data
   * @param center the centroid that was just added
   */
  protected void updateMinDistance(double[] minDistance, boolean[] selected,
				   Instances data, Instance center) {
    for (int i = 0; i < selected.length; i++)
      if (!selected[i]) {
	double d = distance(center, data.instance(i));
	if (d < minDistance[i])
	  minDistance[i] = d;
      }
  }

  /**
   * Finds the not-yet-selected instance with the largest distance to its
   * nearest centroid.
   *
   * @param minDistance per-instance distance to the nearest centroid
   * @param selected flags instances already chosen as centroids
   * @return index of the farthest unselected instance, or -1 if none remain
   */
  protected int farthestAway(double[] minDistance, boolean[] selected) {
    double maxDistance = -1.0;
    int maxI = -1;
    for (int i = 0; i < selected.length; i++)
      if (!selected[i])
	if (maxDistance < minDistance[i]) {
	  maxDistance = minDistance[i];
	  maxI = i;
	}
    return maxI;
  }

  /**
   * Computes the per-attribute min/max over the data, used for
   * normalization in {@link #norm(double, int)}.
   *
   * @param data the training data
   */
  protected void initMinMax(Instances data) {
    m_Min = new double [data.numAttributes()];
    m_Max = new double [data.numAttributes()];
    for (int i = 0; i < data.numAttributes(); i++) {
      // NaN marks "not seen yet"
      m_Min[i] = m_Max[i] = Double.NaN;
    }

    for (int i = 0; i < data.numInstances(); i++) {
      updateMinMax(data.instance(i));
    }
  }

  /**
   * Updates the minimum and maximum values for all the attributes
   * based on a new instance.
   *
   * @param instance the new instance
   */
  private void updateMinMax(Instance instance) {

    for (int j = 0; j < instance.numAttributes(); j++) {
      if (Double.isNaN(m_Min[j])) {
	// first value seen for this attribute
	m_Min[j] = instance.value(j);
	m_Max[j] = instance.value(j);
      } else {
	if (instance.value(j) < m_Min[j]) {
	  m_Min[j] = instance.value(j);
	} else {
	  if (instance.value(j) > m_Max[j]) {
	    m_Max[j] = instance.value(j);
	  }
	}
      }
    }
  }

  /**
   * clusters an instance that has been through the filters
   *
   * @param instance the instance to assign a cluster to
   * @return a cluster number
   */
  protected int clusterProcessedInstance(Instance instance) {
    double minDist = Double.MAX_VALUE;
    int bestCluster = 0;
    for (int i = 0; i < m_NumClusters; i++) {
      double dist = distance(instance, m_ClusterCentroids.instance(i));
      if (dist < minDist) {
	minDist = dist;
	bestCluster = i;
      }
    }
    return bestCluster;
  }

  /**
   * Classifies a given instance.
   *
   * @param instance the instance to be assigned to a cluster
   * @return the number of the assigned cluster as an integer
   * if the class is enumerated, otherwise the predicted value
   * @throws Exception if instance could not be classified
   * successfully
   */
  public int clusterInstance(Instance instance) throws Exception {
    // run the instance through the same missing-value filter as the training data
    m_ReplaceMissingFilter.input(instance);
    m_ReplaceMissingFilter.batchFinished();
    Instance inst = m_ReplaceMissingFilter.output();

    return clusterProcessedInstance(inst);
  }

  /**
   * Calculates the distance between two instances
   *
   * @param first the first instance
   * @param second the second instance
   * @return the distance between the two given instances, between 0 and 1
   */
  protected double distance(Instance first, Instance second) {

    double distance = 0;
    int firstI, secondI;

    // walk both (possibly sparse) instances in parallel by attribute index
    for (int p1 = 0, p2 = 0;
	 p1 < first.numValues() || p2 < second.numValues();) {
      if (p1 >= first.numValues()) {
	firstI = m_instances.numAttributes();
      } else {
	firstI = first.index(p1);
      }
      if (p2 >= second.numValues()) {
	secondI = m_instances.numAttributes();
      } else {
	secondI = second.index(p2);
      }
      // the class attribute does not contribute to the distance
      if (firstI == m_instances.classIndex()) {
	p1++; continue;
      }
      if (secondI == m_instances.classIndex()) {
	p2++; continue;
      }
      double diff;
      if (firstI == secondI) {
	diff = difference(firstI,
			  first.valueSparse(p1),
			  second.valueSparse(p2));
	p1++; p2++;
      } else if (firstI > secondI) {
	// attribute missing from first instance: treat its value as 0
	diff = difference(secondI,
			  0, second.valueSparse(p2));
	p2++;
      } else {
	// attribute missing from second instance: treat its value as 0
	diff = difference(firstI,
			  first.valueSparse(p1), 0);
	p1++;
      }
      distance += diff * diff;
    }

    return Math.sqrt(distance / m_instances.numAttributes());
  }

  /**
   * Computes the difference between two given attribute
   * values.
   *
   * @param index the attribute's index
   * @param val1 the first value
   * @param val2 the second value
   * @return the (normalized) difference; missing values contribute the
   * maximally pessimistic difference
   */
  protected double difference(int index, double val1, double val2) {

    switch (m_instances.attribute(index).type()) {
    case Attribute.NOMINAL:

      // If attribute is nominal
      if (Utils.isMissingValue(val1) ||
	  Utils.isMissingValue(val2) ||
	  ((int)val1 != (int)val2)) {
	return 1;
      } else {
	return 0;
      }
    case Attribute.NUMERIC:

      // If attribute is numeric
      if (Utils.isMissingValue(val1) ||
	  Utils.isMissingValue(val2)) {
	if (Utils.isMissingValue(val1) &&
	    Utils.isMissingValue(val2)) {
	  return 1;
	} else {
	  double diff;
	  if (Utils.isMissingValue(val2)) {
	    diff = norm(val1, index);
	  } else {
	    diff = norm(val2, index);
	  }
	  // assume the worst: the missing value lies on the far side
	  if (diff < 0.5) {
	    diff = 1.0 - diff;
	  }
	  return diff;
	}
      } else {
	return norm(val1, index) - norm(val2, index);
      }
    default:
      return 0;
    }
  }

  /**
   * Normalizes a given value of a numeric attribute.
   *
   * @param x the value to be normalized
   * @param i the attribute's index
   * @return the normalized value
   */
  protected double norm(double x, int i) {

    if (Double.isNaN(m_Min[i]) || Utils.eq(m_Max[i],m_Min[i])) {
      return 0;
    } else {
      return (x - m_Min[i]) / (m_Max[i] - m_Min[i]);
    }
  }

  /**
   * Returns the number of clusters.
   *
   * @return the number of clusters generated for a training dataset.
   * @throws Exception if number of clusters could not be returned
   * successfully
   */
  public int numberOfClusters() throws Exception {
    return m_NumClusters;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions () {
    Vector result = new Vector();

    result.addElement(new Option(
	"\tnumber of clusters. (default = 2).",
	"N", 1, "-N <num>"));

    Enumeration en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement(en.nextElement());

    return  result.elements();
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String numClustersTipText() {
    return "set number of clusters";
  }

  /**
   * set the number of clusters to generate
   *
   * @param n the number of clusters to generate
   * @throws Exception if number of clusters is negative
   */
  public void setNumClusters(int n) throws Exception {
    if (n < 0) {
      // message fixed to match the actual guard (n < 0): zero is accepted
      throw new Exception("Number of clusters must be >= 0");
    }
    m_NumClusters = n;
  }

  /**
   * gets the number of clusters to generate
   *
   * @return the number of clusters to generate
   */
  public int getNumClusters() {
    return m_NumClusters;
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -N &lt;num&gt;
   *  number of clusters. (default = 2).</pre>
   *
   * <pre> -S &lt;num&gt;
   *  Random number seed.
   *  (default 1)</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions (String[] options)
    throws Exception {

    String optionString = Utils.getOption('N', options);

    if (optionString.length() != 0) {
      setNumClusters(Integer.parseInt(optionString));
    }

    super.setOptions(options);
  }

  /**
   * Gets the current settings of FarthestFirst
   *
   * @return an array of strings suitable for passing to setOptions()
   */
  public String[] getOptions () {
    int       	i;
    Vector    	result;
    String[]  	options;

    result = new Vector();

    result.add("-N");
    result.add("" + getNumClusters());

    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * return a string describing this clusterer
   *
   * @return a description of the clusterer as a string
   */
  public String toString() {
    StringBuffer temp = new StringBuffer();

    temp.append("\n FarthestFirst\n==============\n");

    temp.append("\nCluster centroids:\n");
    for (int i = 0; i < m_NumClusters; i++) {
      temp.append("\nCluster "+i+"\n\t");
      for (int j = 0; j < m_ClusterCentroids.numAttributes(); j++) {
	if (m_ClusterCentroids.attribute(j).isNominal()) {
	  temp.append(" "+m_ClusterCentroids.attribute(j).
		      value((int)m_ClusterCentroids.instance(i).value(j)));
	} else {
	  temp.append(" "+m_ClusterCentroids.instance(i).value(j));
	}
      }
    }
    temp.append("\n\n");
    return temp.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param argv should contain the following arguments: <p>
   * -t training file [-N number of clusters]
   */
  public static void main (String[] argv) {
    runClusterer(new FarthestFirst(), argv);
  }
}
17,093
26.52657
152
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/FilteredClusterer.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    FilteredClusterer.java
 *    Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
 *
 */
package weka.clusterers;

import java.util.Enumeration;
import java.util.Vector;

import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;
import weka.filters.Filter;
import weka.filters.SupervisedFilter;

/**
 <!-- globalinfo-start -->
 * Class for running an arbitrary clusterer on data that has been passed through an arbitrary filter. Like the clusterer, the structure of the filter is based exclusively on the training data and test instances will be processed by the filter without changing their structure.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -F &lt;filter specification&gt;
 *  Full class name of filter to use, followed
 *  by filter options.
 *  eg: "weka.filters.unsupervised.attribute.Remove -V -R 1,2"
 * (default: weka.filters.AllFilter)</pre>
 *
 * <pre> -W
 *  Full name of base clusterer.
 *  (default: weka.clusterers.SimpleKMeans)</pre>
 *
 * <pre>
 * Options specific to clusterer weka.clusterers.SimpleKMeans:
 * </pre>
 *
 * <pre> -N &lt;num&gt;
 *  number of clusters.
 *  (default 2).</pre>
 *
 * <pre> -V
 *  Display std. deviations for centroids.
 * </pre>
 *
 * <pre> -M
 *  Replace missing values with mean/mode.
 * </pre>
 *
 * <pre> -S &lt;num&gt;
 *  Random number seed.
 *  (default 10)</pre>
 *
 <!-- options-end -->
 *
 * Based on code from the FilteredClassifier by Len Trigg.
 *
 * @author Len Trigg (trigg@cs.waikato.ac.nz)
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 * @see weka.classifiers.meta.FilteredClassifier
 */
public class FilteredClusterer
  extends SingleClustererEnhancer {

  /** for serialization. */
  private static final long serialVersionUID = 1420005943163412943L;

  /** The filter. */
  protected Filter m_Filter;

  /** The instance structure of the filtered instances. */
  protected Instances m_FilteredInstances;

  /**
   * Default constructor.
   * Uses SimpleKMeans as the base clusterer and a pass-through filter.
   */
  public FilteredClusterer() {
    m_Clusterer = new SimpleKMeans();
    m_Filter    = new weka.filters.AllFilter();
  }

  /**
   * Returns a string describing this clusterer.
   *
   * @return a description of the clusterer suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "Class for running an arbitrary clusterer on data that has been passed "
      + "through an arbitrary filter. Like the clusterer, the structure of the filter "
      + "is based exclusively on the training data and test instances will be processed "
      + "by the filter without changing their structure.";
  }

  /**
   * String describing default filter.
   *
   * @return the default filter classname
   */
  protected String defaultFilterString() {
    return weka.filters.AllFilter.class.getName();
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector result = new Vector();

    result.addElement(new Option(
	  "\tFull class name of filter to use, followed\n"
	+ "\tby filter options.\n"
	+ "\teg: \"weka.filters.unsupervised.attribute.Remove -V -R 1,2\"\n"
	+ "(default: " + defaultFilterString() + ")",
	"F", 1, "-F <filter specification>"));

    Enumeration enm = super.listOptions();
    while (enm.hasMoreElements())
      result.addElement(enm.nextElement());

    return result.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -F &lt;filter specification&gt;
   *  Full class name of filter to use, followed
   *  by filter options.
   *  eg: "weka.filters.unsupervised.attribute.Remove -V -R 1,2"
   * (default: weka.filters.AllFilter)</pre>
   *
   * <pre> -W
   *  Full name of base clusterer.
   *  (default: weka.clusterers.SimpleKMeans)</pre>
   *
   * <pre>
   * Options specific to clusterer weka.clusterers.SimpleKMeans:
   * </pre>
   *
   * <pre> -N &lt;num&gt;
   *  number of clusters.
   *  (default 2).</pre>
   *
   * <pre> -V
   *  Display std. deviations for centroids.
   * </pre>
   *
   * <pre> -M
   *  Replace missing values with mean/mode.
   * </pre>
   *
   * <pre> -S &lt;num&gt;
   *  Random number seed.
   *  (default 10)</pre>
   *
   <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String	tmpStr;
    String[]	tmpOptions;

    tmpStr = Utils.getOption('F', options);
    if (tmpStr.length() > 0) {
      // first token is the filter class name, the remainder are its options
      tmpOptions = Utils.splitOptions(tmpStr);
      if (tmpOptions.length == 0)
	throw new IllegalArgumentException("Invalid filter specification string");
      tmpStr        = tmpOptions[0];
      tmpOptions[0] = "";
      setFilter((Filter) Utils.forName(Filter.class, tmpStr, tmpOptions));
    }
    else {
      setFilter(new weka.filters.AllFilter());
    }

    super.setOptions(options);
  }

  /**
   * Gets the current settings of the clusterer.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector	result;
    String[]	options;
    int		i;

    result = new Vector();

    result.add("-F");
    result.add(getFilterSpec());

    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Returns the tip text for this property.
   *
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String filterTipText() {
    return "The filter to be used.";
  }

  /**
   * Sets the filter.
   *
   * @param filter the filter with all options set.
   */
  public void setFilter(Filter filter) {
    m_Filter = filter;

    // a supervised filter uses class information during training, which
    // defeats the purpose of unsupervised clustering
    if (m_Filter instanceof SupervisedFilter)
      System.out.println(
	  "WARNING: you are using a supervised filter, which will leak "
	+ "information about the class attribute!");
  }

  /**
   * Gets the filter used.
   *
   * @return the filter
   */
  public Filter getFilter() {
    return m_Filter;
  }

  /**
   * Gets the filter specification string, which contains the class name of
   * the filter and any options to the filter.
   *
   * @return the filter string.
   */
  protected String getFilterSpec() {
    String	result;
    Filter	filter;

    filter = getFilter();
    result = filter.getClass().getName();

    if (filter instanceof OptionHandler)
      result += " " + Utils.joinOptions(((OptionHandler) filter).getOptions());

    return result;
  }

  /**
   * Returns default capabilities of the clusterer.
   *
   * @return the capabilities of this clusterer
   */
  public Capabilities getCapabilities() {
    Capabilities	result;

    if (getFilter() == null) {
      result = super.getCapabilities();
      result.disableAll();
      result.enable(Capability.NO_CLASS);
    } else {
      // the filter determines what input this meta-clusterer can accept
      result = getFilter().getCapabilities();
    }

    // set dependencies
    for (Capability cap: Capability.values())
      result.enableDependency(cap);

    return result;
  }

  /**
   * Build the clusterer on the filtered data.
   *
   * @param data the training data
   * @throws Exception if the clusterer could not be built successfully
   */
  public void buildClusterer(Instances data) throws Exception {
    if (m_Clusterer == null)
      throw new Exception("No base clusterer has been set!");

    // remove instances with missing class
    if (data.classIndex() > -1) {
      data = new Instances(data);
      data.deleteWithMissingClass();
    }

    m_Filter.setInputFormat(data);  // filter capabilities are checked here
    data = Filter.useFilter(data, m_Filter);

    // can clusterer handle the data?
    getClusterer().getCapabilities().testWithFail(data);

    // keep only the header of the filtered data, for toString() output
    m_FilteredInstances = data.stringFreeStructure();
    m_Clusterer.buildClusterer(data);
  }


  /**
   * Classifies a given instance after filtering.
   *
   * @param instance the instance to be classified
   * @return the class distribution for the given instance
   * @throws Exception if instance could not be classified
   * successfully
   */
  public double[] distributionForInstance(Instance instance) throws Exception {
    // the test instance must pass through the same trained filter
    if (m_Filter.numPendingOutput() > 0)
      throw new Exception("Filter output queue not empty!");

    if (!m_Filter.input(instance))
      throw new Exception(
	  "Filter didn't make the test instance immediately available!");

    m_Filter.batchFinished();
    Instance newInstance = m_Filter.output();

    return m_Clusterer.distributionForInstance(newInstance);
  }

  /**
   * Output a representation of this clusterer.
   *
   * @return a representation of this clusterer
   */
  public String toString() {
    String	result;

    if (m_FilteredInstances == null)
      result = "FilteredClusterer: No model built yet.";
    else
      result = "FilteredClusterer using "
	+ getClustererSpec()
	+ " on data filtered through "
	+ getFilterSpec()
	+ "\n\nFiltered Header\n"
	+ m_FilteredInstances.toString()
	+ "\n\nClusterer Model\n"
	+ m_Clusterer.toString();

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return		the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param args the commandline options, use "-h" for help
   */
  public static void main(String [] args) {
    runClusterer(new FilteredClusterer(), args);
  }
}
10,703
25.693267
276
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/HierarchicalClusterer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * HierarchicalClusterer.java * Copyright (C) 2009-2012 University of Waikato, Hamilton, New Zealand */ package weka.clusterers; import java.io.Serializable; import java.text.DecimalFormat; import java.util.Comparator; import java.util.Enumeration; import java.util.PriorityQueue; import java.util.Vector; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.CapabilitiesHandler; import weka.core.DistanceFunction; import weka.core.Drawable; import weka.core.EuclideanDistance; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.SelectedTag; import weka.core.Tag; import weka.core.Utils; /** <!-- globalinfo-start --> * Hierarchical clustering class. * Implements a number of classic hierarchical clustering methods. <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N * number of clusters * </pre> * * * <pre> -L * Link type (Single, Complete, Average, Mean, Centroid, Ward, Adjusted complete, Neighbor Joining) * [SINGLE|COMPLETE|AVERAGE|MEAN|CENTROID|WARD|ADJCOMLPETE|NEIGHBOR_JOINING] * </pre> * * <pre> -A * Distance function to use. 
(default: weka.core.EuclideanDistance) * </pre> * * <pre> -P * Print hierarchy in Newick format, which can be used for display in other programs. * </pre> * * <pre> -D * If set, classifier is run in debug mode and may output additional info to the console. * </pre> * * <pre> -B * \If set, distance is interpreted as branch length, otherwise it is node height. * </pre> * *<!-- options-end --> * * * @author Remco Bouckaert (rrb@xm.co.nz, remco@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public class HierarchicalClusterer extends AbstractClusterer implements OptionHandler, CapabilitiesHandler, Drawable { private static final long serialVersionUID = 1L; /** Whether the classifier is run in debug mode. */ protected boolean m_bDebug = false; /** Whether the distance represent node height (if false) or branch length (if true). */ protected boolean m_bDistanceIsBranchLength = false; /** training data **/ Instances m_instances; /** number of clusters desired in clustering **/ int m_nNumClusters = 2; public void setNumClusters(int nClusters) {m_nNumClusters = Math.max(1,nClusters);} public int getNumClusters() {return m_nNumClusters;} /** distance function used for comparing members of a cluster **/ protected DistanceFunction m_DistanceFunction = new EuclideanDistance(); public DistanceFunction getDistanceFunction() {return m_DistanceFunction;} public void setDistanceFunction(DistanceFunction distanceFunction) {m_DistanceFunction = distanceFunction;} /** used for priority queue for efficient retrieval of pair of clusters to merge**/ class Tuple { public Tuple(double d, int i, int j, int nSize1, int nSize2) { m_fDist = d; m_iCluster1 = i; m_iCluster2 = j; m_nClusterSize1 = nSize1; m_nClusterSize2 = nSize2; } double m_fDist; int m_iCluster1; int m_iCluster2; int m_nClusterSize1; int m_nClusterSize2; } /** comparator used by priority queue**/ class TupleComparator implements Comparator<Tuple> { public int compare(Tuple o1, Tuple 
o2) { if (o1.m_fDist < o2.m_fDist) { return -1; } else if (o1.m_fDist == o2.m_fDist) { return 0; } return 1; } } /** the various link types */ final static int SINGLE = 0; final static int COMPLETE = 1; final static int AVERAGE = 2; final static int MEAN = 3; final static int CENTROID = 4; final static int WARD = 5; final static int ADJCOMLPETE = 6; final static int NEIGHBOR_JOINING = 7; public static final Tag[] TAGS_LINK_TYPE = { new Tag(SINGLE, "SINGLE"), new Tag(COMPLETE, "COMPLETE"), new Tag(AVERAGE, "AVERAGE"), new Tag(MEAN, "MEAN"), new Tag(CENTROID, "CENTROID"), new Tag(WARD, "WARD"), new Tag(ADJCOMLPETE,"ADJCOMLPETE"), new Tag(NEIGHBOR_JOINING,"NEIGHBOR_JOINING") }; /** * Holds the Link type used calculate distance between clusters */ int m_nLinkType = SINGLE; boolean m_bPrintNewick = true;; public boolean getPrintNewick() {return m_bPrintNewick;} public void setPrintNewick(boolean bPrintNewick) {m_bPrintNewick = bPrintNewick;} public void setLinkType(SelectedTag newLinkType) { if (newLinkType.getTags() == TAGS_LINK_TYPE) { m_nLinkType = newLinkType.getSelectedTag().getID(); } } public SelectedTag getLinkType() { return new SelectedTag(m_nLinkType, TAGS_LINK_TYPE); } /** class representing node in cluster hierarchy **/ class Node implements Serializable { Node m_left; Node m_right; Node m_parent; int m_iLeftInstance; int m_iRightInstance; double m_fLeftLength = 0; double m_fRightLength = 0; double m_fHeight = 0; public String toString(int attIndex) { DecimalFormat myFormatter = new DecimalFormat("#.#####"); if (m_left == null) { if (m_right == null) { return "(" + m_instances.instance(m_iLeftInstance).stringValue(attIndex) + ":" + myFormatter.format(m_fLeftLength) + "," + m_instances.instance(m_iRightInstance).stringValue(attIndex) +":" + myFormatter.format(m_fRightLength) + ")"; } else { return "(" + m_instances.instance(m_iLeftInstance).stringValue(attIndex) + ":" + myFormatter.format(m_fLeftLength) + "," + m_right.toString(attIndex) + ":" + 
myFormatter.format(m_fRightLength) + ")"; } } else { if (m_right == null) { return "(" + m_left.toString(attIndex) + ":" + myFormatter.format(m_fLeftLength) + "," + m_instances.instance(m_iRightInstance).stringValue(attIndex) + ":" + myFormatter.format(m_fRightLength) + ")"; } else { return "(" + m_left.toString(attIndex) + ":" + myFormatter.format(m_fLeftLength) + "," +m_right.toString(attIndex) + ":" + myFormatter.format(m_fRightLength) + ")"; } } } public String toString2(int attIndex) { DecimalFormat myFormatter = new DecimalFormat("#.#####"); if (m_left == null) { if (m_right == null) { return "(" + m_instances.instance(m_iLeftInstance).value(attIndex) + ":" + myFormatter.format(m_fLeftLength) + "," + m_instances.instance(m_iRightInstance).value(attIndex) +":" + myFormatter.format(m_fRightLength) + ")"; } else { return "(" + m_instances.instance(m_iLeftInstance).value(attIndex) + ":" + myFormatter.format(m_fLeftLength) + "," + m_right.toString2(attIndex) + ":" + myFormatter.format(m_fRightLength) + ")"; } } else { if (m_right == null) { return "(" + m_left.toString2(attIndex) + ":" + myFormatter.format(m_fLeftLength) + "," + m_instances.instance(m_iRightInstance).value(attIndex) + ":" + myFormatter.format(m_fRightLength) + ")"; } else { return "(" + m_left.toString2(attIndex) + ":" + myFormatter.format(m_fLeftLength) + "," +m_right.toString2(attIndex) + ":" + myFormatter.format(m_fRightLength) + ")"; } } } void setHeight(double fHeight1, double fHeight2) { m_fHeight = fHeight1; if (m_left == null) { m_fLeftLength = fHeight1; } else { m_fLeftLength = fHeight1 - m_left.m_fHeight; } if (m_right == null) { m_fRightLength = fHeight2; } else { m_fRightLength = fHeight2 - m_right.m_fHeight; } } void setLength(double fLength1, double fLength2) { m_fLeftLength = fLength1; m_fRightLength = fLength2; m_fHeight = fLength1; if (m_left != null) { m_fHeight += m_left.m_fHeight; } } } protected Node [] m_clusters; int [] m_nClusterNr; @Override public void 
buildClusterer(Instances data) throws Exception { // /System.err.println("Method " + m_nLinkType); m_instances = data; int nInstances = m_instances.numInstances(); if (nInstances == 0) { return; } m_DistanceFunction.setInstances(m_instances); // use array of integer vectors to store cluster indices, // starting with one cluster per instance Vector<Integer> [] nClusterID = new Vector[data.numInstances()]; for (int i = 0; i < data.numInstances(); i++) { nClusterID[i] = new Vector<Integer>(); nClusterID[i].add(i); } // calculate distance matrix int nClusters = data.numInstances(); // used for keeping track of hierarchy Node [] clusterNodes = new Node[nInstances]; if (m_nLinkType == NEIGHBOR_JOINING) { neighborJoining(nClusters, nClusterID, clusterNodes); } else { doLinkClustering(nClusters, nClusterID, clusterNodes); } // move all clusters in m_nClusterID array // & collect hierarchy int iCurrent = 0; m_clusters = new Node[m_nNumClusters]; m_nClusterNr = new int[nInstances]; for (int i = 0; i < nInstances; i++) { if (nClusterID[i].size() > 0) { for (int j = 0; j < nClusterID[i].size(); j++) { m_nClusterNr[nClusterID[i].elementAt(j)] = iCurrent; } m_clusters[iCurrent] = clusterNodes[i]; iCurrent++; } } } // buildClusterer /** use neighbor joining algorithm for clustering * This is roughly based on the RapidNJ simple implementation and runs at O(n^3) * More efficient implementations exist, see RapidNJ (or my GPU implementation :-)) * @param nClusters * @param nClusterID * @param clusterNodes */ void neighborJoining(int nClusters, Vector<Integer>[] nClusterID, Node [] clusterNodes) { int n = m_instances.numInstances(); double [][] fDist = new double[nClusters][nClusters]; for (int i = 0; i < nClusters; i++) { fDist[i][i] = 0; for (int j = i+1; j < nClusters; j++) { fDist[i][j] = getDistance0(nClusterID[i], nClusterID[j]); fDist[j][i] = fDist[i][j]; } } double [] fSeparationSums = new double [n]; double [] fSeparations = new double [n]; int [] nNextActive = new int[n]; 
//calculate initial separation rows for(int i = 0; i < n; i++){ double fSum = 0; for(int j = 0; j < n; j++){ fSum += fDist[i][j]; } fSeparationSums[i] = fSum; fSeparations[i] = fSum / (nClusters - 2); nNextActive[i] = i +1; } while (nClusters > 2) { // find minimum int iMin1 = -1; int iMin2 = -1; double fMin = Double.MAX_VALUE; if (m_bDebug) { for (int i = 0; i < n; i++) { if(nClusterID[i].size() > 0){ double [] fRow = fDist[i]; double fSep1 = fSeparations[i]; for(int j = 0; j < n; j++){ if(nClusterID[j].size() > 0 && i != j){ double fSep2 = fSeparations[j]; double fVal = fRow[j] - fSep1 - fSep2; if(fVal < fMin){ // new minimum iMin1 = i; iMin2 = j; fMin = fVal; } } } } } } else { int i = 0; while (i < n) { double fSep1 = fSeparations[i]; double [] fRow = fDist[i]; int j = nNextActive[i]; while (j < n) { double fSep2 = fSeparations[j]; double fVal = fRow[j] - fSep1 - fSep2; if(fVal < fMin){ // new minimum iMin1 = i; iMin2 = j; fMin = fVal; } j = nNextActive[j]; } i = nNextActive[i]; } } // record distance double fMinDistance = fDist[iMin1][iMin2]; nClusters--; double fSep1 = fSeparations[iMin1]; double fSep2 = fSeparations[iMin2]; double fDist1 = (0.5 * fMinDistance) + (0.5 * (fSep1 - fSep2)); double fDist2 = (0.5 * fMinDistance) + (0.5 * (fSep2 - fSep1)); if (nClusters > 2) { // update separations & distance double fNewSeparationSum = 0; double fMutualDistance = fDist[iMin1][iMin2]; double[] fRow1 = fDist[iMin1]; double[] fRow2 = fDist[iMin2]; for(int i = 0; i < n; i++) { if(i == iMin1 || i == iMin2 || nClusterID[i].size() == 0) { fRow1[i] = 0; } else { double fVal1 = fRow1[i]; double fVal2 = fRow2[i]; double fDistance = (fVal1 + fVal2 - fMutualDistance) / 2.0; fNewSeparationSum += fDistance; // update the separationsum of cluster i. 
          fSeparationSums[i] += (fDistance - fVal1 - fVal2);
          fSeparations[i] = fSeparationSums[i] / (nClusters - 2);
          fRow1[i] = fDistance;
          fDist[i][iMin1] = fDistance;
        }
      }
      fSeparationSums[iMin1] = fNewSeparationSum;
      fSeparations[iMin1] = fNewSeparationSum / (nClusters - 2);
      fSeparationSums[iMin2] = 0;
      merge(iMin1, iMin2, fDist1, fDist2, nClusterID, clusterNodes);
      // unlink the emptied row iMin2 from the active-row skip list
      int iPrev = iMin2;
      // since iMin1 < iMin2 we have nNextActive[0] >= 0, so the next loop should be safe
      while (nClusterID[iPrev].size() == 0) {
        iPrev--;
      }
      nNextActive[iPrev] = nNextActive[iMin2];
    } else {
      merge(iMin1, iMin2, fDist1, fDist2, nClusterID, clusterNodes);
      break;
    }
  }

  // join the final two remaining clusters; a singleton gets branch length 0
  for (int i = 0; i < n; i++) {
    if (nClusterID[i].size() > 0) {
      for (int j = i+1; j < n; j++) {
        if (nClusterID[j].size() > 0) {
          double fDist1 = fDist[i][j];
          if (nClusterID[i].size() == 1) {
            merge(i, j, fDist1, 0, nClusterID, clusterNodes);
          } else if (nClusterID[j].size() == 1) {
            merge(i, j, 0, fDist1, nClusterID, clusterNodes);
          } else {
            merge(i, j, fDist1/2.0, fDist1/2.0, nClusterID, clusterNodes);
          }
          break;
        }
      }
    }
  }
} // neighborJoining

/** Perform clustering using a link method
 * This implementation uses a priority queue resulting in a O(n^2 log(n)) algorithm
 * @param nClusters number of clusters
 * @param nClusterID per-slot member lists; emptied slots mark merged-away clusters
 * @param clusterNodes hierarchy roots, updated as clusters merge
 */
void doLinkClustering(int nClusters, Vector<Integer>[] nClusterID, Node [] clusterNodes) {
  int nInstances = m_instances.numInstances();
  // candidate merges ordered by distance; stale entries (whose recorded
  // cluster sizes no longer match) are filtered out when polled
  PriorityQueue<Tuple> queue = new PriorityQueue<Tuple>(nClusters*nClusters/2, new TupleComparator());
  // fDistance0 keeps the immutable instance-level distances;
  // fClusterDistance (debug only) tracks current cluster-level distances
  double [][] fDistance0 = new double[nClusters][nClusters];
  double [][] fClusterDistance = null;
  if (m_bDebug) {
    fClusterDistance = new double[nClusters][nClusters];
  }
  for (int i = 0; i < nClusters; i++) {
    fDistance0[i][i] = 0;
    for (int j = i+1; j < nClusters; j++) {
      fDistance0[i][j] = getDistance0(nClusterID[i], nClusterID[j]);
      fDistance0[j][i] = fDistance0[i][j];
      queue.add(new Tuple(fDistance0[i][j], i, j, 1, 1));
      if (m_bDebug) {
        fClusterDistance[i][j] = fDistance0[i][j];
        fClusterDistance[j][i] = fDistance0[i][j];
      }
    }
  }

  // iteratively merge the two closest clusters until m_nNumClusters remain
  while (nClusters > m_nNumClusters) {
    int iMin1 = -1;
    int iMin2 = -1;
    // find closest two clusters
    if (m_bDebug) {
      /* simple but inefficient implementation */
      double fMinDistance = Double.MAX_VALUE;
      for (int i = 0; i < nInstances; i++) {
        if (nClusterID[i].size() > 0) {
          for (int j = i+1; j < nInstances; j++) {
            if (nClusterID[j].size() > 0) {
              double fDist = fClusterDistance[i][j];
              if (fDist < fMinDistance) {
                fMinDistance = fDist;
                iMin1 = i;
                iMin2 = j;
              }
            }
          }
        }
      }
      merge(iMin1, iMin2, fMinDistance, fMinDistance, nClusterID, clusterNodes);
    } else {
      // use priority queue to find next best pair to cluster;
      // entries whose recorded cluster sizes are out of date are stale and skipped
      Tuple t;
      do {
        t = queue.poll();
      } while (t != null && (nClusterID[t.m_iCluster1].size() != t.m_nClusterSize1 || nClusterID[t.m_iCluster2].size() != t.m_nClusterSize2));
      // NOTE(review): if the queue drains completely t is null here and the
      // next line throws a NullPointerException -- confirm this cannot happen
      // while nClusters > m_nNumClusters
      iMin1 = t.m_iCluster1;
      iMin2 = t.m_iCluster2;
      merge(iMin1, iMin2, t.m_fDist, t.m_fDist, nClusterID, clusterNodes);
    }
    // merge clusters
    // update distances & queue: recompute distance from the merged cluster
    // (now living in slot iMin1) to every other active cluster
    for (int i = 0; i < nInstances; i++) {
      if (i != iMin1 && nClusterID[i].size() != 0) {
        int i1 = Math.min(iMin1, i);
        int i2 = Math.max(iMin1, i);
        double fDistance = getDistance(fDistance0, nClusterID[i1], nClusterID[i2]);
        if (m_bDebug) {
          fClusterDistance[i1][i2] = fDistance;
          fClusterDistance[i2][i1] = fDistance;
        }
        queue.add(new Tuple(fDistance, i1, i2, nClusterID[i1].size(), nClusterID[i2].size()));
      }
    }
    nClusters--;
  }
} // doLinkClustering

// Merges cluster iMin2 into iMin1 (the lower index always survives) and
// records the merge in the hierarchy by linking both subtrees under a new Node.
void merge(int iMin1, int iMin2, double fDist1, double fDist2, Vector<Integer>[] nClusterID, Node [] clusterNodes) {
  if (m_bDebug) {
    System.err.println("Merging " + iMin1 + " " + iMin2 + " " + fDist1 + " " + fDist2);
  }
  if (iMin1 > iMin2) {
    // ensure iMin1 < iMin2; swap the distances along with the indices
    int h = iMin1;
    iMin1 = iMin2;
    iMin2 = h;
    double f = fDist1;
    fDist1 = fDist2;
    fDist2 = f;
  }
  nClusterID[iMin1].addAll(nClusterID[iMin2]);
  nClusterID[iMin2].removeAllElements();

  // track hierarchy
  Node node = new Node();
  if (clusterNodes[iMin1] == null) {
    // left side is still a single instance -> record it as a leaf
    node.m_iLeftInstance = iMin1;
  } else {
    node.m_left = clusterNodes[iMin1];
    clusterNodes[iMin1].m_parent = node;
  }
  if (clusterNodes[iMin2] == null) {
    // right side is still a single instance -> record it as a leaf
    node.m_iRightInstance = iMin2;
  } else {
    node.m_right = clusterNodes[iMin2];
    clusterNodes[iMin2].m_parent = node;
  }
  // interpret the merge distances either as branch lengths or as node heights
  if (m_bDistanceIsBranchLength) {
    node.setLength(fDist1, fDist2);
  } else {
    node.setHeight(fDist1, fDist2);
  }
  clusterNodes[iMin1] = node;
} // merge

/** calculate distance the first time when setting up the distance matrix **/
double getDistance0(Vector<Integer> cluster1, Vector<Integer> cluster2) {
  double fBestDist = Double.MAX_VALUE;
  switch (m_nLinkType) {
  case SINGLE:
  case NEIGHBOR_JOINING:
  case CENTROID:
  case COMPLETE:
  case ADJCOMLPETE:
  case AVERAGE:
  case MEAN:
    // only the first member of each cluster is used here -- at matrix set-up
    // time every cluster is a singleton, so this covers all non-Ward links
    // set up two instances for distance function
    Instance instance1 = (Instance) m_instances.instance(cluster1.elementAt(0)).copy();
    Instance instance2 = (Instance) m_instances.instance(cluster2.elementAt(0)).copy();
    fBestDist = m_DistanceFunction.distance(instance1, instance2);
    break;
  case WARD:
    {
      // finds the distance of the change in caused by merging the cluster.
      // The information of a cluster is calculated as the error sum of squares of the
      // centroids of the cluster and its members.
      double ESS1 = calcESS(cluster1);
      double ESS2 = calcESS(cluster2);
      Vector<Integer> merged = new Vector<Integer>();
      merged.addAll(cluster1);
      merged.addAll(cluster2);
      double ESS = calcESS(merged);
      // size-weighted increase in error sum of squares caused by the merge
      fBestDist = ESS * merged.size() - ESS1 * cluster1.size() - ESS2 * cluster2.size();
    }
    break;
  }
  return fBestDist;
} // getDistance0

/** calculate the distance between two clusters
 * @param fDistance precomputed instance-level distance matrix
 * @param cluster1 list of indices of instances in the first cluster
 * @param cluster2 dito for second cluster
 * @return distance between clusters based on link type
 */
double getDistance(double [][] fDistance, Vector<Integer> cluster1, Vector<Integer> cluster2) {
  double fBestDist = Double.MAX_VALUE;
  switch (m_nLinkType) {
  case SINGLE:
    // find single link distance aka minimum link, which is the closest distance between
    // any item in cluster1 and any item in cluster2
    fBestDist = Double.MAX_VALUE;
    for (int i = 0; i < cluster1.size(); i++) {
      int i1 = cluster1.elementAt(i);
      for (int j = 0; j < cluster2.size(); j++) {
        int i2 = cluster2.elementAt(j);
        double fDist = fDistance[i1][i2];
        if (fBestDist > fDist) {
          fBestDist = fDist;
        }
      }
    }
    break;
  case COMPLETE:
  case ADJCOMLPETE:
    // find complete link distance aka maximum link, which is the largest distance between
    // any item in cluster1 and any item in cluster2
    fBestDist = 0;
    for (int i = 0; i < cluster1.size(); i++) {
      int i1 = cluster1.elementAt(i);
      for (int j = 0; j < cluster2.size(); j++) {
        int i2 = cluster2.elementAt(j);
        double fDist = fDistance[i1][i2];
        if (fBestDist < fDist) {
          fBestDist = fDist;
        }
      }
    }
    if (m_nLinkType == COMPLETE) {
      break;
    }
    // ADJCOMLPETE only: calculate adjustment, which is the largest within cluster distance
    double fMaxDist = 0;
    for (int i = 0; i < cluster1.size(); i++) {
      int i1 = cluster1.elementAt(i);
      for (int j = i+1; j < cluster1.size(); j++) {
        int i2 = cluster1.elementAt(j);
        double fDist = fDistance[i1][i2];
        if (fMaxDist < fDist) {
          fMaxDist = fDist;
        }
      }
    }
    for (int i = 0; i < cluster2.size(); i++) {
      int i1 = cluster2.elementAt(i);
      for (int j = i+1; j
          < cluster2.size(); j++) {
        int i2 = cluster2.elementAt(j);
        double fDist = fDistance[i1][i2];
        if (fMaxDist < fDist) {
          fMaxDist = fDist;
        }
      }
    }
    fBestDist -= fMaxDist;
    break;
  case AVERAGE:
    // finds average distance between the elements of the two clusters
    fBestDist = 0;
    for (int i = 0; i < cluster1.size(); i++) {
      int i1 = cluster1.elementAt(i);
      for (int j = 0; j < cluster2.size(); j++) {
        int i2 = cluster2.elementAt(j);
        fBestDist += fDistance[i1][i2];
      }
    }
    fBestDist /= (cluster1.size() * cluster2.size());
    break;
  case MEAN:
    {
      // calculates the mean distance of a merged cluster (aka group-average agglomerative clustering)
      Vector<Integer> merged = new Vector<Integer>();
      merged.addAll(cluster1);
      merged.addAll(cluster2);
      fBestDist = 0;
      for (int i = 0; i < merged.size(); i++) {
        int i1 = merged.elementAt(i);
        for (int j = i+1; j < merged.size(); j++) {
          int i2 = merged.elementAt(j);
          fBestDist += fDistance[i1][i2];
        }
      }
      int n = merged.size();
      // average over all n*(n-1)/2 pairs within the merged cluster
      fBestDist /= (n*(n-1.0)/2.0);
    }
    break;
  case CENTROID:
    // finds the distance of the centroids of the clusters
    // attribute-wise sums, normalised to means below
    double [] fValues1 = new double[m_instances.numAttributes()];
    for (int i = 0; i < cluster1.size(); i++) {
      Instance instance = m_instances.instance(cluster1.elementAt(i));
      for (int j = 0; j < m_instances.numAttributes(); j++) {
        fValues1[j] += instance.value(j);
      }
    }
    double [] fValues2 = new double[m_instances.numAttributes()];
    for (int i = 0; i < cluster2.size(); i++) {
      Instance instance = m_instances.instance(cluster2.elementAt(i));
      for (int j = 0; j < m_instances.numAttributes(); j++) {
        fValues2[j] += instance.value(j);
      }
    }
    for (int j = 0; j < m_instances.numAttributes(); j++) {
      fValues1[j] /= cluster1.size();
      fValues2[j] /= cluster2.size();
    }
    // set up two instances for distance function
    Instance instance1 = (Instance) m_instances.instance(0).copy();
    Instance instance2 = (Instance) m_instances.instance(0).copy();
    for (int j = 0; j < m_instances.numAttributes(); j++) {
      instance1.setValue(j, fValues1[j]);
      instance2.setValue(j,
          fValues2[j]);
    }
    fBestDist = m_DistanceFunction.distance(instance1, instance2);
    break;
  case WARD:
    {
      // finds the distance of the change in caused by merging the cluster.
      // The information of a cluster is calculated as the error sum of squares of the
      // centroids of the cluster and its members.
      double ESS1 = calcESS(cluster1);
      double ESS2 = calcESS(cluster2);
      Vector<Integer> merged = new Vector<Integer>();
      merged.addAll(cluster1);
      merged.addAll(cluster2);
      double ESS = calcESS(merged);
      fBestDist = ESS * merged.size() - ESS1 * cluster1.size() - ESS2 * cluster2.size();
    }
    break;
  }
  return fBestDist;
} // getDistance

/** calculated error sum-of-squares for instances wrt centroid **/
double calcESS(Vector<Integer> cluster) {
  // attribute-wise mean (centroid) of the cluster members
  double [] fValues1 = new double[m_instances.numAttributes()];
  for (int i = 0; i < cluster.size(); i++) {
    Instance instance = m_instances.instance(cluster.elementAt(i));
    for (int j = 0; j < m_instances.numAttributes(); j++) {
      fValues1[j] += instance.value(j);
    }
  }
  for (int j = 0; j < m_instances.numAttributes(); j++) {
    fValues1[j] /= cluster.size();
  }
  // set up two instances for distance function
  Instance centroid = (Instance) m_instances.instance(cluster.elementAt(0)).copy();
  for (int j = 0; j < m_instances.numAttributes(); j++) {
    centroid.setValue(j, fValues1[j]);
  }
  // average member-to-centroid distance; the norm used is whatever
  // m_DistanceFunction computes (not necessarily squared)
  double fESS = 0;
  for (int i = 0; i < cluster.size(); i++) {
    Instance instance = m_instances.instance(cluster.elementAt(i));
    fESS += m_DistanceFunction.distance(centroid, instance);
  }
  return fESS / cluster.size();
} // calcESS

@Override
/** instances are assigned a cluster by finding the instance in the training data
 * with the closest distance to the instance to be clustered. The cluster index of
 * the training data point is taken as the cluster index.
 */
public int clusterInstance(Instance instance) throws Exception {
  if (m_instances.numInstances() == 0) {
    return 0;
  }
  // nearest-neighbour assignment over the training instances
  double fBestDist = Double.MAX_VALUE;
  int iBestInstance = -1;
  for (int i = 0; i < m_instances.numInstances(); i++) {
    double fDist = m_DistanceFunction.distance(instance, m_instances.instance(i));
    if (fDist < fBestDist) {
      fBestDist = fDist;
      iBestInstance = i;
    }
  }
  return m_nClusterNr[iBestInstance];
}

@Override
/** create distribution with all clusters having zero probability, except the
 * cluster the instance is assigned to.
 */
public double[] distributionForInstance(Instance instance) throws Exception {
  if (numberOfClusters() == 0) {
    // degenerate case: a single pseudo-cluster with probability 1
    double [] p = new double[1];
    p[0] = 1;
    return p;
  }
  // one-hot distribution over the clusters
  double [] p = new double[numberOfClusters()];
  p[clusterInstance(instance)] = 1.0;
  return p;
}

@Override
public Capabilities getCapabilities() {
  Capabilities result = new Capabilities(this);
  result.disableAll();
  result.enable(Capability.NO_CLASS);

  // attributes
  result.enable(Capability.NOMINAL_ATTRIBUTES);
  result.enable(Capability.NUMERIC_ATTRIBUTES);
  result.enable(Capability.DATE_ATTRIBUTES);
  result.enable(Capability.MISSING_VALUES);
  result.enable(Capability.STRING_ATTRIBUTES);

  // other
  result.setMinimumNumberInstances(0);
  return result;
}

@Override
public int numberOfClusters() throws Exception {
  // never report more clusters than there are training instances
  return Math.min(m_nNumClusters, m_instances.numInstances());
}

/**
 * Returns an enumeration describing the available options.
 *
 * @return an enumeration of all the available options.
 */
public Enumeration listOptions() {
  Vector newVector = new Vector(8);

  newVector.addElement(new Option(
      "\tIf set, classifier is run in debug mode and\n"
      + "\tmay output additional info to the console",
      "D", 0, "-D"));
  newVector.addElement(new Option(
      "\tIf set, distance is interpreted as branch length\n"
      + "\totherwise it is node height.",
      "B", 0, "-B"));
  newVector.addElement(new Option(
      "\tnumber of clusters",
      "N", 1, "-N <Nr Of Clusters>"));
  newVector.addElement(new Option(
      "\tFlag to indicate the cluster should be printed in Newick format.",
      "P", 0, "-P"));
  newVector.addElement(new Option(
      "Link type (Single, Complete, Average, Mean, Centroid, Ward, Adjusted complete, Neighbor joining)",
      "L", 1,
      "-L [SINGLE|COMPLETE|AVERAGE|MEAN|CENTROID|WARD|ADJCOMLPETE|NEIGHBOR_JOINING]"));
  newVector.add(new Option(
      "\tDistance function to use.\n"
      + "\t(default: weka.core.EuclideanDistance)",
      "A", 1, "-A <classname and options>"));

  return newVector.elements();
}

/**
 * Parses a given list of options.
 <p/>
 * <!-- options-start -->
 * Valid options are: <p/>
 * <!-- options-end -->
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
public void setOptions(String[] options) throws Exception {
  m_bPrintNewick = Utils.getFlag('P', options);

  String optionString = Utils.getOption('N', options);
  if (optionString.length() != 0) {
    Integer temp = new Integer(optionString);
    setNumClusters(temp);
  } else {
    // default number of clusters
    setNumClusters(2);
  }
  setDebug(Utils.getFlag('D', options));
  setDistanceIsBranchLength(Utils.getFlag('B', options));

  // link type; an unrecognised value silently leaves the current setting unchanged
  String sLinkType = Utils.getOption('L', options);
  if (sLinkType.compareTo("SINGLE") == 0) {setLinkType(new SelectedTag(SINGLE, TAGS_LINK_TYPE));}
  if (sLinkType.compareTo("COMPLETE") == 0) {setLinkType(new SelectedTag(COMPLETE, TAGS_LINK_TYPE));}
  if (sLinkType.compareTo("AVERAGE") == 0) {setLinkType(new SelectedTag(AVERAGE, TAGS_LINK_TYPE));}
  if (sLinkType.compareTo("MEAN") == 0) {setLinkType(new SelectedTag(MEAN, TAGS_LINK_TYPE));}
  if (sLinkType.compareTo("CENTROID") == 0) {setLinkType(new SelectedTag(CENTROID, TAGS_LINK_TYPE));}
  if (sLinkType.compareTo("WARD") == 0) {setLinkType(new SelectedTag(WARD, TAGS_LINK_TYPE));}
  if (sLinkType.compareTo("ADJCOMLPETE") == 0) {setLinkType(new SelectedTag(ADJCOMLPETE, TAGS_LINK_TYPE));}
  if (sLinkType.compareTo("NEIGHBOR_JOINING") == 0) {setLinkType(new SelectedTag(NEIGHBOR_JOINING, TAGS_LINK_TYPE));}

  String nnSearchClass = Utils.getOption('A', options);
  if (nnSearchClass.length() != 0) {
    String nnSearchClassSpec[] = Utils.splitOptions(nnSearchClass);
    if (nnSearchClassSpec.length == 0) {
      throw new Exception("Invalid DistanceFunction specification string.");
    }
    String className = nnSearchClassSpec[0];
    // remaining tokens are handed to the distance function as its options
    nnSearchClassSpec[0] = "";

    setDistanceFunction(
        (DistanceFunction) Utils.forName(DistanceFunction.class,
                                         className, nnSearchClassSpec));
  } else {
    setDistanceFunction(new EuclideanDistance());
  }

  Utils.checkForRemainingOptions(options);
}

/**
 * Gets the current settings
 of the clusterer.
 *
 * @return an array of strings suitable for passing to setOptions()
 */
public String [] getOptions() {
  String [] options = new String [14];
  int current = 0;

  options[current++] = "-N";
  options[current++] = "" + getNumClusters();

  options[current++] = "-L";
  switch (m_nLinkType) {
  case (SINGLE) :options[current++] = "SINGLE";break;
  case (COMPLETE) :options[current++] = "COMPLETE";break;
  case (AVERAGE) :options[current++] = "AVERAGE";break;
  case (MEAN) :options[current++] = "MEAN";break;
  case (CENTROID) :options[current++] = "CENTROID";break;
  case (WARD) :options[current++] = "WARD";break;
  case (ADJCOMLPETE) :options[current++] = "ADJCOMLPETE";break;
  case (NEIGHBOR_JOINING) :options[current++] = "NEIGHBOR_JOINING";break;
  }
  if (m_bPrintNewick) {
    options[current++] = "-P";
  }
  if (getDebug()) {
    options[current++] = "-D";
  }
  if (getDistanceIsBranchLength()) {
    options[current++] = "-B";
  }
  options[current++] = "-A";
  options[current++] = (m_DistanceFunction.getClass().getName() + " "
      + Utils.joinOptions(m_DistanceFunction.getOptions())).trim();

  // pad the remainder of the fixed-size array with empty strings
  while (current < options.length) {
    options[current++] = "";
  }

  return options;
}

public String toString() {
  StringBuffer buf = new StringBuffer();
  // choose the attribute used to label leaves: the class attribute if set,
  // otherwise the first string attribute (or the last attribute as fallback)
  int attIndex = m_instances.classIndex();
  if (attIndex < 0) {
    // try find a string, or last attribute otherwise
    attIndex = 0;
    while (attIndex < m_instances.numAttributes()-1) {
      if (m_instances.attribute(attIndex).isString()) {
        break;
      }
      attIndex++;
    }
  }
  try {
    if (m_bPrintNewick && (numberOfClusters() > 0)) {
      for (int i = 0; i < m_clusters.length; i++) {
        if (m_clusters[i] != null) {
          buf.append("Cluster " + i + "\n");
          if (m_instances.attribute(attIndex).isString()) {
            buf.append(m_clusters[i].toString(attIndex));
          } else {
            buf.append(m_clusters[i].toString2(attIndex));
          }
          buf.append("\n\n");
        }
      }
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
  return buf.toString();
}

/**
 * Set debugging mode.
 *
 * @param debug true if debug output should be printed
 */
public void setDebug(boolean debug) {
  m_bDebug = debug;
}

/**
 * Get whether debugging is turned on.
 *
 * @return true if debugging output is on
 */
public boolean getDebug() {
  return m_bDebug;
}

/** @return whether merge distances are treated as branch lengths rather than node heights */
public boolean getDistanceIsBranchLength() {return m_bDistanceIsBranchLength;}

public void setDistanceIsBranchLength(boolean bDistanceIsHeight) {m_bDistanceIsBranchLength = bDistanceIsHeight;}

public String distanceIsBranchLengthTipText() {
  return "If set to false, the distance between clusters is interpreted "
    + "as the height of the node linking the clusters. This is appropriate for "
    + "example for single link clustering. However, for neighbor joining, the "
    + "distance is better interpreted as branch length. Set this flag to "
    + "get the latter interpretation.";
}

/**
 * Returns the tip text for this property
 * @return tip text for this property suitable for
 * displaying in the explorer/experimenter gui
 */
public String debugTipText() {
  return "If set to true, classifier may output additional info to "
    + "the console.";
}

/**
 * @return a string to describe the NumClusters
 */
public String numClustersTipText() {
  return "Sets the number of clusters. "
    + "If a single hierarchy is desired, set this to 1.";
}

/**
 * @return a string to describe the print Newick flag
 */
public String printNewickTipText() {
  return "Flag to indicate whether the cluster should be print in Newick format."
    + " This can be useful for display in other programs. However, for large datasets"
    + " a lot of text may be produced, which may not be a nuisance when the Newick format"
    + " is not required";
}

/**
 * @return a string to describe the distance function
 */
public String distanceFunctionTipText() {
  return "Sets the distance function, which measures the distance between two individual. "
    + "instances (or possibly the distance between an instance and the centroid of a cluster"
    + "depending on the Link type).";
}

/**
 * @return a string to describe the Link type
 */
public String linkTypeTipText() {
  return "Sets the method used to measure the distance between two clusters.\n"
    + "SINGLE:\n"
    + " find single link distance aka minimum link, which is the closest distance between"
    + " any item in cluster1 and any item in cluster2\n"
    + "COMPLETE:\n"
    + " find complete link distance aka maximum link, which is the largest distance between"
    + " any item in cluster1 and any item in cluster2\n"
    + "ADJCOMLPETE:\n"
    + " as COMPLETE, but with adjustment, which is the largest within cluster distance\n"
    + "AVERAGE:\n"
    + " finds average distance between the elements of the two clusters\n"
    + "MEAN: \n"
    + " calculates the mean distance of a merged cluster (akak Group-average agglomerative clustering)\n"
    + "CENTROID:\n"
    + " finds the distance of the centroids of the clusters\n"
    + "WARD:\n"
    + " finds the distance of the change in caused by merging the cluster."
    + " The information of a cluster is calculated as the error sum of squares of the"
    + " centroids of the cluster and its members.\n"
    + "NEIGHBOR_JOINING\n"
    + " use neighbor joining algorithm.";
}

/**
 * This will return a string describing the clusterer.
 * @return The string.
 */
public String globalInfo() {
  return "Hierarchical clustering class.\n"
    + "Implements a number of classic agglomorative (i.e. bottom up) hierarchical clustering methods"
    + "based on .";
}

public static void main(String [] argv) {
  runClusterer(new HierarchicalClusterer(), argv);
}

@Override
public String graph() throws Exception {
  if (numberOfClusters() == 0) {
    return "Newick:(no,clusters)";
  }
  // choose the attribute used to label leaves, same policy as toString()
  int attIndex = m_instances.classIndex();
  if (attIndex < 0) {
    // try find a string, or last attribute otherwise
    attIndex = 0;
    while (attIndex < m_instances.numAttributes()-1) {
      if (m_instances.attribute(attIndex).isString()) {
        break;
      }
      attIndex++;
    }
  }
  String sNewick = null;
  if (m_instances.attribute(attIndex).isString()) {
    sNewick = m_clusters[0].toString(attIndex);
  } else {
    sNewick = m_clusters[0].toString2(attIndex);
  }
  return "Newick:" + sNewick;
}

@Override
public int graphType() {
  return Drawable.Newick;
}

/**
 * Returns the revision string.
 *
 * @return the revision
 */
public String getRevision() {
  return RevisionUtils.extract("$Revision: 8034 $");
}
} // class HierarchicalClusterer
38,769
33.523598
176
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/MakeDensityBasedClusterer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * MakeDensityBasedClusterer.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.clusterers; import java.util.Enumeration; import java.util.Vector; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.OptionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.estimators.DiscreteEstimator; import weka.filters.unsupervised.attribute.ReplaceMissingValues; /** <!-- globalinfo-start --> * Class for wrapping a Clusterer to make it return a distribution and density. Fits normal distributions and discrete distributions within each cluster produced by the wrapped clusterer. Supports the NumberOfClustersRequestable interface only if the wrapped Clusterer does. * <p/> <!-- globalinfo-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -M &lt;num&gt; * minimum allowable standard deviation for normal density computation * (default 1e-6)</pre> * * <pre> -W &lt;clusterer name&gt; * Clusterer to wrap. * (default weka.clusterers.SimpleKMeans)</pre> * * <pre> * Options specific to clusterer weka.clusterers.SimpleKMeans: * </pre> * * <pre> -N &lt;num&gt; * number of clusters. 
 * (default 2).</pre>
 *
 * <pre> -V
 *  Display std. deviations for centroids.
 * </pre>
 *
 * <pre> -M
 *  Replace missing values with mean/mode.
 * </pre>
 *
 * <pre> -S &lt;num&gt;
 *  Random number seed.
 *  (default 10)</pre>
 *
 <!-- options-end -->
 *
 * Options after "--" are passed on to the base clusterer.
 *
 * @author Richard Kirkby (rkirkby@cs.waikato.ac.nz)
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public class MakeDensityBasedClusterer
  extends AbstractDensityBasedClusterer
  implements NumberOfClustersRequestable, OptionHandler, WeightedInstancesHandler {

  /** for serialization */
  static final long serialVersionUID = -5643302427972186631L;

  /** holds training instances header information */
  private Instances m_theInstances;

  /** prior probabilities for the fitted clusters */
  private double [] m_priors;

  /** normal distributions fitted to each numeric attribute in each cluster */
  private double [][][] m_modelNormal;

  /** discrete distributions fitted to each discrete attribute in each cluster */
  private DiscreteEstimator [][] m_model;

  /** default minimum standard deviation */
  private double m_minStdDev = 1e-6;

  /** The clusterer being wrapped */
  private Clusterer m_wrappedClusterer = new weka.clusterers.SimpleKMeans();

  /** globally replace missing values */
  private ReplaceMissingValues m_replaceMissing;

  /**
   * Default constructor.
   *
   */
  public MakeDensityBasedClusterer() {
    super();
  }

  /**
   * Contructs a MakeDensityBasedClusterer wrapping a given Clusterer.
   *
   * @param toWrap the clusterer to wrap around
   */
  public MakeDensityBasedClusterer(Clusterer toWrap) {
    setClusterer(toWrap);
  }

  /**
   * Returns a string describing classifier
   * @return a description suitable for
   * displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return "Class for wrapping a Clusterer to make it return a distribution "
      + "and density. Fits normal distributions and discrete distributions "
      + "within each cluster produced by the wrapped clusterer. Supports the "
      + "NumberOfClustersRequestable interface only if the wrapped Clusterer "
      + "does.";
  }

  /**
   * String describing default clusterer.
   *
   * @return the default clusterer classname
   */
  protected String defaultClustererString() {
    return SimpleKMeans.class.getName();
  }

  /**
   * Set the number of clusters to generate.
   *
   * @param n the number of clusters to generate
   * @throws Exception if the wrapped clusterer has not been set, or if
   * the wrapped clusterer does not implement this facility.
   */
  public void setNumClusters(int n) throws Exception {
    if (m_wrappedClusterer == null) {
      throw new Exception("Can't set the number of clusters to generate - "
                          + "no clusterer has been set yet.");
    }
    if (!(m_wrappedClusterer instanceof NumberOfClustersRequestable)) {
      throw new Exception("Can't set the number of clusters to generate - "
                          + "wrapped clusterer does not support this facility.");
    }
    ((NumberOfClustersRequestable)m_wrappedClusterer).setNumClusters(n);
  }

  /**
   * Returns default capabilities of the clusterer (i.e., of the wrapper
   * clusterer).
   *
   * @return the capabilities of this clusterer
   */
  public Capabilities getCapabilities() {
    if (m_wrappedClusterer != null) {
      // delegate to the wrapped clusterer when one is set
      return m_wrappedClusterer.getCapabilities();
    }
    Capabilities result = super.getCapabilities();
    result.disableAll();
    result.enable(Capability.NO_CLASS);

    return result;
  }

  /**
   * Builds a clusterer for a set of instances.
   *
   * @param data the instances to train the clusterer with
   * @throws Exception if the clusterer hasn't been set or something goes wrong
   */
  public void buildClusterer(Instances data) throws Exception {
    // can clusterer handle the data?
    getCapabilities().testWithFail(data);

    // replace missing values globally before fitting the per-cluster models
    m_replaceMissing = new ReplaceMissingValues();
    m_replaceMissing.setInputFormat(data);
    data = weka.filters.Filter.useFilter(data, m_replaceMissing);

    m_theInstances = new Instances(data, 0);
    if (m_wrappedClusterer == null) {
      throw new Exception("No clusterer has been set");
    }
    m_wrappedClusterer.buildClusterer(data);

    // allocate per-cluster, per-attribute estimators:
    // m_model for nominal attributes, m_modelNormal ([0]=mean, [1]=std dev) for numeric
    m_model = new DiscreteEstimator[m_wrappedClusterer.numberOfClusters()][data.numAttributes()];
    m_modelNormal = new double[m_wrappedClusterer.numberOfClusters()][data.numAttributes()][2];
    double[][] weights = new double[m_wrappedClusterer.numberOfClusters()][data.numAttributes()];
    m_priors = new double[m_wrappedClusterer.numberOfClusters()];
    for (int i = 0; i < m_wrappedClusterer.numberOfClusters(); i++) {
      m_priors[i] = 1.0; // laplace correction
      for (int j = 0; j < data.numAttributes(); j++) {
        if (data.attribute(j).isNominal()) {
          m_model[i][j] = new DiscreteEstimator(data.attribute(j).numValues(), true);
        }
      }
    }

    Instance inst = null;

    // Compute mean, etc.
    int[] clusterIndex = new int[data.numInstances()];
    for (int i = 0; i < data.numInstances(); i++) {
      inst = data.instance(i);
      int cluster = m_wrappedClusterer.clusterInstance(inst);
      m_priors[cluster] += inst.weight();
      for (int j = 0; j < data.numAttributes(); j++) {
        if (!inst.isMissing(j)) {
          if (data.attribute(j).isNominal()) {
            m_model[cluster][j].addValue(inst.value(j), inst.weight());
          } else {
            // accumulate weighted sum for the mean; normalised by weight below
            m_modelNormal[cluster][j][0] += inst.weight() * inst.value(j);
            weights[cluster][j] += inst.weight();
          }
        }
      }
      clusterIndex[i] = cluster;
    }

    // turn the weighted sums into per-cluster means
    for (int j = 0; j < data.numAttributes(); j++) {
      if (data.attribute(j).isNumeric()) {
        for (int i = 0; i < m_wrappedClusterer.numberOfClusters(); i++) {
          if (weights[i][j] > 0) {
            m_modelNormal[i][j][0] /= weights[i][j];
          }
        }
      }
    }

    // Compute standard deviations
    for (int i = 0; i < data.numInstances(); i++) {
      inst = data.instance(i);
      for (int j = 0; j < data.numAttributes(); j++) {
        if (!inst.isMissing(j)) {
          if (data.attribute(j).isNumeric()) {
            double diff = m_modelNormal[clusterIndex[i]][j][0] - inst.value(j);
            m_modelNormal[clusterIndex[i]][j][1] += inst.weight() * diff * diff;
          }
        }
      }
    }

    for (int j = 0; j < data.numAttributes(); j++) {
      if (data.attribute(j).isNumeric()) {
        for (int i = 0; i < m_wrappedClusterer.numberOfClusters(); i++) {
          if (weights[i][j] > 0) {
            m_modelNormal[i][j][1] = Math.sqrt(m_modelNormal[i][j][1] / weights[i][j]);
          } else if (weights[i][j] <= 0) {
            m_modelNormal[i][j][1] = Double.MAX_VALUE;
          }
          // enforce the minimum standard deviation, falling back to the
          // attribute's global std dev when the cluster value degenerates
          if (m_modelNormal[i][j][1] <= m_minStdDev) {
            m_modelNormal[i][j][1] = data.attributeStats(j).numericStats.stdDev;
            if (m_modelNormal[i][j][1] <= m_minStdDev) {
              m_modelNormal[i][j][1] = m_minStdDev;
            }
          }
        }
      }
    }

    Utils.normalize(m_priors);
  }

  /**
   * Returns the cluster priors.
   *
   * @return the cluster priors
   */
  public double[] clusterPriors() {
    // defensive copy so callers cannot mutate the internal array
    double[] n = new double[m_priors.length];
    System.arraycopy(m_priors, 0, n, 0, n.length);
    return n;
  }

  /**
   * Computes the log of the conditional density (per cluster) for a given instance.
   *
   * @param inst the instance to compute the per-cluster log-densities for
   * @return an array with one entry per cluster: the log of the estimated
   *         joint density of the instance under that cluster's model
   * @throws Exception if the density could not be computed
   * successfully
   */
  public double[] logDensityPerClusterForInstance(Instance inst) throws Exception {
    int i, j;
    double logprob;
    double[] wghts = new double[m_wrappedClusterer.numberOfClusters()];

    // Missing values were imputed when the model was built, so impute them
    // here as well to keep training and prediction consistent.
    m_replaceMissing.input(inst);
    inst = m_replaceMissing.output();

    for (i = 0; i < m_wrappedClusterer.numberOfClusters(); i++) {
      // Attributes are treated as independent within a cluster, so the joint
      // log-density is the sum of per-attribute log-densities.
      logprob = 0;
      for (j = 0; j < inst.numAttributes(); j++) {
        if (!inst.isMissing(j)) {
          if (inst.attribute(j).isNominal()) {
            // discrete estimator fitted in buildClusterer
            logprob += Math.log(m_model[i][j].getProbability(inst.value(j)));
          } else {
            // numeric attribute: normal density with the cluster's
            // fitted mean ([0]) and std. deviation ([1])
            logprob += logNormalDens(inst.value(j),
                                     m_modelNormal[i][j][0],
                                     m_modelNormal[i][j][1]);
          }
        }
      }
      wghts[i] = logprob;
    }
    return wghts;
  }

  /** Constant for normal distribution: log(sqrt(2*pi)), precomputed once. */
  private static double m_normConst = 0.5 * Math.log(2 * Math.PI);

  /**
   * Log of the density function of a normal distribution.
   *
   * @param x input value
   * @param mean mean of distribution
   * @param stdDev standard deviation of distribution
   * @return the natural log of the density value at x
   */
  private double logNormalDens (double x, double mean, double stdDev) {

    double diff = x - mean;

    return - (diff * diff / (2 * stdDev * stdDev)) - m_normConst - Math.log(stdDev);
  }

  /**
   * Returns the number of clusters.
   *
   * @return the number of clusters generated for a training dataset.
   * @throws Exception if number of clusters could not be returned successfully
   */
  public int numberOfClusters() throws Exception {
    return m_wrappedClusterer.numberOfClusters();
  }

  /**
   * Returns a description of the clusterer.
   *
   * @return a string containing a description of the clusterer
   */
  public String toString() {
    // m_priors is only assigned by buildClusterer(), so it doubles as a
    // "model built yet?" flag.
    if (m_priors == null) {
      return "No clusterer built yet!";
    }
    StringBuffer text = new StringBuffer();
    text.append("MakeDensityBasedClusterer: \n\nWrapped clusterer: "
                + m_wrappedClusterer.toString());
    text.append("\nFitted estimators (with ML estimates of variance):\n");

    for (int j = 0; j < m_priors.length; j++) {
      text.append("\nCluster: " + j + " Prior probability: "
                  + Utils.doubleToString(m_priors[j], 4) + "\n\n");

      for (int i = 0; i < m_model[0].length; i++) {
        text.append("Attribute: " + m_theInstances.attribute(i).name() + "\n");

        if (m_theInstances.attribute(i).isNominal()) {
          if (m_model[j][i] != null) {
            text.append(m_model[j][i].toString());
          }
        } else {
          // numeric attribute: summarised by the cluster's normal estimator
          text.append("Normal Distribution. Mean = "
                      + Utils.doubleToString(m_modelNormal[j][i][0], 4)
                      + " StdDev = "
                      + Utils.doubleToString(m_modelNormal[j][i][1], 4) + "\n");
        }
      }
    }
    return text.toString();
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String clustererTipText() {
    return "the clusterer to wrap";
  }

  /**
   * Sets the clusterer to wrap.
   *
   * @param toWrap the clusterer
   */
  public void setClusterer(Clusterer toWrap) {
    m_wrappedClusterer = toWrap;
  }

  /**
   * Gets the clusterer being wrapped.
   *
   * @return the clusterer
   */
  public Clusterer getClusterer() {
    return m_wrappedClusterer;
  }

  /**
   * Returns the tip text for this property
   * @return tip text for this property suitable for
   * displaying in the explorer/experimenter gui
   */
  public String minStdDevTipText() {
    return "set minimum allowable standard deviation";
  }

  /**
   * Set the minimum value for standard deviation when calculating
   * normal density. Reducing this value can help prevent arithmetic
   * overflow resulting from multiplying large densities (arising from small
   * standard deviations) when there are many singleton or near singleton
   * values.
   * @param m minimum value for standard deviation
   */
  public void setMinStdDev(double m) {
    m_minStdDev = m;
  }

  /**
   * Get the minimum allowable standard deviation.
   *
   * @return the minimum allowable standard deviation
   */
  public double getMinStdDev() {
    return m_minStdDev;
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector result = new Vector();

    // options owned by this wrapper
    result.addElement(new Option(
        "\tminimum allowable standard deviation for normal density computation "
        + "\n\t(default 1e-6)", "M", 1, "-M <num>"));

    result.addElement(new Option(
        "\tClusterer to wrap.\n"
        + "\t(default " + defaultClustererString() + ")",
        "W", 1, "-W <clusterer name>"));

    // append the wrapped clusterer's own options, if it exposes any
    if ((m_wrappedClusterer != null)
        && (m_wrappedClusterer instanceof OptionHandler)) {
      result.addElement(new Option(
          "", "", 0, "\nOptions specific to clusterer "
          + m_wrappedClusterer.getClass().getName() + ":"));
      Enumeration enu = ((OptionHandler)m_wrappedClusterer).listOptions();
      while (enu.hasMoreElements()) {
        result.addElement(enu.nextElement());
      }
    }
    return result.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -M &lt;num&gt;
   *  minimum allowable standard deviation for normal density computation
   *  (default 1e-6)</pre>
   *
   * <pre> -W &lt;clusterer name&gt;
   *  Clusterer to wrap.
   *  (default weka.clusterers.SimpleKMeans)</pre>
   *
   * <pre>
   * Options specific to clusterer weka.clusterers.SimpleKMeans:
   * </pre>
   *
   * <pre> -N &lt;num&gt;
   *  number of clusters.
   *  (default 2).</pre>
   *
   * <pre> -V
   *  Display std. deviations for centroids.
   * </pre>
   *
   * <pre> -M
   *  Replace missing values with mean/mode.
   * </pre>
   *
   * <pre> -S &lt;num&gt;
   *  Random number seed.
* (default 10)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String optionString = Utils.getOption('M', options); if (optionString.length() != 0) setMinStdDev((new Double(optionString)).doubleValue()); else setMinStdDev(1e-6); String wString = Utils.getOption('W', options); if (wString.length() == 0) wString = defaultClustererString(); setClusterer(AbstractClusterer.forName(wString, Utils.partitionOptions(options))); } /** * Gets the current settings of the clusterer. * * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions() { String [] clustererOptions = new String [0]; if ((m_wrappedClusterer != null) && (m_wrappedClusterer instanceof OptionHandler)) { clustererOptions = ((OptionHandler)m_wrappedClusterer).getOptions(); } String [] options = new String [clustererOptions.length + 5]; int current = 0; options[current++] = "-M"; options[current++] = ""+getMinStdDev(); if (getClusterer() != null) { options[current++] = "-W"; options[current++] = getClusterer().getClass().getName(); } options[current++] = "--"; System.arraycopy(clustererOptions, 0, options, current, clustererOptions.length); current += clustererOptions.length; while (current < options.length) { options[current++] = ""; } return options; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } /** * Main method for testing this class. * * @param argv the options */ public static void main(String [] argv) { runClusterer(new MakeDensityBasedClusterer(), argv); } }
17,676
28.412646
274
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/NumberOfClustersRequestable.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * NumberOfClustersRequestable.java
 * Copyright (C) 2004-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.clusterers;

/**
 * Implemented by clusterers that can be told, up front, how many
 * clusters they should produce.
 *
 * @author Mark Hall
 * @version $Revision: 8034 $
 */
public interface NumberOfClustersRequestable {

  /**
   * Set the number of clusters to generate.
   *
   * @param numClusters the number of clusters to generate
   * @exception Exception if the requested number of
   * clusters is inappropriate
   */
  void setNumClusters(int numClusters) throws Exception;
}
1,282
28.159091
74
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/RandomizableClusterer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RandomizableClusterer.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.clusterers; import java.util.Enumeration; import java.util.Vector; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Randomizable; import weka.core.Utils; /** * Abstract utility class for handling settings common to randomizable * clusterers. * * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision: 8034 $ */ public abstract class RandomizableClusterer extends AbstractClusterer implements OptionHandler, Randomizable { /** for serialization */ private static final long serialVersionUID = -4819590778152242745L; /** the default seed value */ protected int m_SeedDefault = 1; /** The random number seed. */ protected int m_Seed = m_SeedDefault; /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tRandom number seed.\n" + "\t(default " + m_SeedDefault + ")", "S", 1, "-S <num>")); return result.elements(); } /** * Parses a given list of options. 
Valid options are:<p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('S', options); if (tmpStr.length() != 0) setSeed(Integer.parseInt(tmpStr)); else setSeed(m_SeedDefault); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector result; result = new Vector(); result.add("-S"); result.add("" + getSeed()); return (String[]) result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String seedTipText() { return "The random number seed to be used."; } /** * Set the seed for random number generation. * * @param value the seed to use */ public void setSeed(int value) { m_Seed = value; } /** * Gets the seed for the random number generations * * @return the seed for the random number generation */ public int getSeed() { return m_Seed; } }
3,274
24.585938
74
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/RandomizableDensityBasedClusterer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RandomizableDensityBasedClusterer.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.clusterers; import java.util.Enumeration; import java.util.Vector; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Randomizable; import weka.core.Utils; /** * Abstract utility class for handling settings common to randomizable * clusterers. * * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision: 8034 $ */ public abstract class RandomizableDensityBasedClusterer extends AbstractDensityBasedClusterer implements OptionHandler, Randomizable { /** for serialization */ private static final long serialVersionUID = -5325270357918932849L; /** the default seed value */ protected int m_SeedDefault = 1; /** The random number seed. */ protected int m_Seed = m_SeedDefault; /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tRandom number seed.\n" + "\t(default " + m_SeedDefault + ")", "S", 1, "-S <num>")); return result.elements(); } /** * Parses a given list of options. 
Valid options are:<p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('S', options); if (tmpStr.length() != 0) setSeed(Integer.parseInt(tmpStr)); else setSeed(m_SeedDefault); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector result; result = new Vector(); result.add("-S"); result.add("" + getSeed()); return (String[]) result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String seedTipText() { return "The random number seed to be used."; } /** * Set the seed for random number generation. * * @param value the seed to use */ public void setSeed(int value) { m_Seed = value; } /** * Gets the seed for the random number generations * * @return the seed for the random number generation */ public int getSeed() { return m_Seed; } }
3,310
24.867188
74
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/RandomizableSingleClustererEnhancer.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * RandomizableSingleClustererEnhancer.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.clusterers; import java.util.Enumeration; import java.util.Vector; import weka.core.Option; import weka.core.OptionHandler; import weka.core.Randomizable; import weka.core.Utils; /** * Abstract utility class for handling settings common to randomizable * clusterers. * * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision: 8034 $ */ public abstract class RandomizableSingleClustererEnhancer extends AbstractClusterer implements OptionHandler, Randomizable { /** for serialization */ private static final long serialVersionUID = -644847037106316249L; /** the default seed value */ protected int m_SeedDefault = 1; /** The random number seed. */ protected int m_Seed = m_SeedDefault; /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tRandom number seed.\n" + "\t(default " + m_SeedDefault + ")", "S", 1, "-S <num>")); return result.elements(); } /** * Parses a given list of options. 
Valid options are:<p> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; tmpStr = Utils.getOption('S', options); if (tmpStr.length() != 0) setSeed(Integer.parseInt(tmpStr)); else setSeed(m_SeedDefault); } /** * Gets the current settings of the classifier. * * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector result; result = new Vector(); result.add("-S"); result.add("" + getSeed()); return (String[]) result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String seedTipText() { return "The random number seed to be used."; } /** * Set the seed for random number generation. * * @param value the seed to use */ public void setSeed(int value) { m_Seed = value; } /** * Gets the seed for the random number generations * * @return the seed for the random number generation */ public int getSeed() { return m_Seed; } }
3,301
24.796875
74
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/SimpleKMeans.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * SimpleKMeans.java * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand * */ package weka.clusterers; import java.util.ArrayList; import java.util.Enumeration; import java.util.HashMap; import java.util.List; import java.util.Random; import java.util.Vector; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import weka.classifiers.rules.DecisionTableHashKey; import weka.core.Attribute; import weka.core.Capabilities; import weka.core.Capabilities.Capability; import weka.core.DenseInstance; import weka.core.DistanceFunction; import weka.core.EuclideanDistance; import weka.core.Instance; import weka.core.Instances; import weka.core.ManhattanDistance; import weka.core.Option; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.WeightedInstancesHandler; import weka.filters.Filter; import weka.filters.unsupervised.attribute.ReplaceMissingValues; /** <!-- globalinfo-start --> * Cluster data using the k means algorithm. Can use either the Euclidean distance (default) or the Manhattan distance. 
If the Manhattan distance is used, then centroids are computed as the component-wise median rather than mean. For more information see:<br/> * <br/> * D. Arthur, S. Vassilvitskii: k-means++: the advantages of carefull seeding. In: Proceedings of the eighteenth annual ACM-SIAM symposium on Discrete algorithms, 1027-1035, 2007. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Arthur2007, * author = {D. Arthur and S. Vassilvitskii}, * booktitle = {Proceedings of the eighteenth annual ACM-SIAM symposium on Discrete algorithms}, * pages = {1027-1035}, * title = {k-means++: the advantages of carefull seeding}, * year = {2007} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N &lt;num&gt; * number of clusters. * (default 2).</pre> * * <pre> -P * Initialize using the k-means++ method. * </pre> * * <pre> -V * Display std. deviations for centroids. * </pre> * * <pre> -M * Replace missing values with mean/mode. * </pre> * * <pre> -A &lt;classname and options&gt; * Distance function to use. * (default: weka.core.EuclideanDistance)</pre> * * <pre> -I &lt;num&gt; * Maximum number of iterations. * </pre> * * <pre> -O * Preserve order of instances. * </pre> * * <pre> -fast * Enables faster distance calculations, using cut-off values. * Disables the calculation/output of squared errors/distances. * </pre> * * <pre> -num-slots &lt;num&gt; * Number of execution slots. * (default 1 - i.e. no parallelism)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 10)</pre> * <!-- options-end --> * * @author Mark Hall (mhall@cs.waikato.ac.nz) * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 9756 $ * @see RandomizableClusterer */ public class SimpleKMeans extends RandomizableClusterer implements NumberOfClustersRequestable, WeightedInstancesHandler, TechnicalInformationHandler { /** for serialization. 
*/ static final long serialVersionUID = -3235809600124455376L; /** * replace missing values in training instances. */ private ReplaceMissingValues m_ReplaceMissingFilter; /** * number of clusters to generate. */ private int m_NumClusters = 2; /** * holds the cluster centroids. */ private Instances m_ClusterCentroids; /** * Holds the standard deviations of the numeric attributes in each cluster. */ private Instances m_ClusterStdDevs; /** * For each cluster, holds the frequency counts for the values of each nominal * attribute. */ private int[][][] m_ClusterNominalCounts; private int[][] m_ClusterMissingCounts; /** * Stats on the full data set for comparison purposes. In case the attribute * is numeric the value is the mean if is being used the Euclidian distance or * the median if Manhattan distance and if the attribute is nominal then it's * mode is saved. */ private double[] m_FullMeansOrMediansOrModes; private double[] m_FullStdDevs; private int[][] m_FullNominalCounts; private int[] m_FullMissingCounts; /** * Display standard deviations for numeric atts. */ private boolean m_displayStdDevs; /** * Replace missing values globally? */ private boolean m_dontReplaceMissing = false; /** * The number of instances in each cluster. */ private int[] m_ClusterSizes; /** * Maximum number of iterations to be executed. */ private int m_MaxIterations = 500; /** * Keep track of the number of iterations completed before convergence. */ private int m_Iterations = 0; /** * Holds the squared errors for all clusters. */ private double[] m_squaredErrors; /** the distance function used. */ protected DistanceFunction m_DistanceFunction = new EuclideanDistance(); /** * Preserve order of instances. */ private boolean m_PreserveOrder = false; /** * Assignments obtained. */ protected int[] m_Assignments = null; /** whether to use fast calculation of distances (using a cut-off). 
   */
  protected boolean m_FastDistanceCalc = false;

  /** Whether to initialize cluster centers using the k-means++ method */
  protected boolean m_initializeWithKMeansPlusPlus = false;

  // Number of worker threads created by startExecutorPool().
  protected int m_executionSlots = 1;

  /** For parallel execution mode */
  protected transient ExecutorService m_executorPool;

  /**
   * the default constructor.
   */
  public SimpleKMeans() {
    super();

    // SimpleKMeans uses seed 10 by default rather than the
    // RandomizableClusterer default of 1.
    m_SeedDefault = 10;
    setSeed(m_SeedDefault);
  }

  /**
   * Start the pool of execution threads
   */
  protected void startExecutorPool() {
    // discard any pool left over from a previous run before creating a new one
    if (m_executorPool != null) {
      m_executorPool.shutdownNow();
    }

    m_executorPool = Executors.newFixedThreadPool(m_executionSlots);
  }

  // NOTE(review): declared but not referenced in this part of the class --
  // presumably counters for parallel tasks; verify against the rest of the file.
  protected int m_completed;
  protected int m_failed;

  /**
   * Returns the reference for the k-means++ seeding method.
   *
   * @return the technical information about this class
   */
  @Override
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "D. Arthur and S. Vassilvitskii");
    result.setValue(Field.TITLE,
        "k-means++: the advantages of carefull seeding");
    result.setValue(Field.BOOKTITLE, "Proceedings of the eighteenth annual "
        + "ACM-SIAM symposium on Discrete algorithms");
    result.setValue(Field.YEAR, "2007");
    result.setValue(Field.PAGES, "1027-1035");

    return result;
  }

  /**
   * Returns a string describing this clusterer.
   *
   * @return a description of the evaluator suitable for displaying in the
   *         explorer/experimenter gui
   */
  public String globalInfo() {
    return "Cluster data using the k means algorithm. Can use either "
      + "the Euclidean distance (default) or the Manhattan distance."
      + " If the Manhattan distance is used, then centroids are computed "
      + "as the component-wise median rather than mean."
      + " For more information see:\n\n" + getTechnicalInformation().toString();
  }

  /**
   * Returns default capabilities of the clusterer.
   *
   * @return the capabilities of this clusterer
   */
  @Override
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();
    result.enable(Capability.NO_CLASS);

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    return result;
  }

  // Task that recomputes one centroid from the instances assigned to it
  // (executed on the pool in parallel mode).
  private class KMeansComputeCentroidTask implements Callable<double[]> {

    protected Instances m_cluster;
    protected int m_centroidIndex;

    public KMeansComputeCentroidTask(int centroidIndex, Instances cluster) {
      m_cluster = cluster;
      m_centroidIndex = centroidIndex;
    }

    @Override
    public double[] call() {
      return moveCentroid(m_centroidIndex, m_cluster, true, false);
    }
  }

  /**
   * Launch the move centroids tasks
   *
   * @param clusters the cluster centroids
   * @return the number of empty clusters
   */
  protected int launchMoveCentroids(Instances[] clusters) {
    int emptyClusterCount = 0;
    List<Future<double[]>> results = new ArrayList<Future<double[]>>();

    for (int i = 0; i < m_NumClusters; i++) {
      if (clusters[i].numInstances() == 0) {
        // no instances assigned: centroid cannot be recomputed
        emptyClusterCount++;
      } else {
        Future<double[]> futureCentroid = m_executorPool
          .submit(new KMeansComputeCentroidTask(i, clusters[i]));
        results.add(futureCentroid);
      }
    }

    try {
      for (Future<double[]> d : results) {
        m_ClusterCentroids.add(new DenseInstance(1.0, d.get()));
      }
    } catch (Exception ex) {
      ex.printStackTrace();
    }

    return emptyClusterCount;
  }

  // Task that assigns the instances in [m_start, m_end) to their nearest
  // centroid; returns whether the assignments were already stable.
  private class KMeansClusterTask implements Callable<Boolean> {

    protected int m_start;
    protected int m_end;
    protected Instances m_inst;
    protected int[] m_clusterAssignments;

    public KMeansClusterTask(Instances inst, int start, int end,
        int[] clusterAssignments) {
      m_start = start;
      m_end = end;
      m_inst = inst;
      m_clusterAssignments = clusterAssignments;
    }

    @Override
    public Boolean call() {
      boolean converged = true;
      for (int i = m_start; i < m_end; i++) {
        Instance toCluster = m_inst.instance(i);
        int newC = clusterInstance(toCluster);
        if (newC !=
m_clusterAssignments[i]) { converged = false; } m_clusterAssignments[i] = newC; } return converged; } protected int clusterInstance(Instance inst) { double minDist = Integer.MAX_VALUE; int bestCluster = 0; for (int i = 0; i < m_NumClusters; i++) { double dist; dist = m_DistanceFunction.distance(inst, m_ClusterCentroids.instance(i), minDist); if (dist < minDist) { minDist = dist; bestCluster = i; } } return bestCluster; } } /** * Launch the tasks that assign instances to clusters * * @param insts the instances to be clustered * @param clusterAssignments the array of cluster assignments * @return true if k means has converged * @throws Exception if a problem occurs */ protected boolean launchAssignToClusters(Instances insts, int[] clusterAssignments) throws Exception { int numPerTask = insts.numInstances() / m_executionSlots; List<Future<Boolean>> results = new ArrayList<Future<Boolean>>(); for (int i = 0; i < m_executionSlots; i++) { int start = i * numPerTask; int end = start + numPerTask; if (i == m_executionSlots - 1) { end = insts.numInstances(); } Future<Boolean> futureKM = m_executorPool.submit(new KMeansClusterTask( insts, start, end, clusterAssignments)); results.add(futureKM); } boolean converged = true; for (Future<Boolean> f : results) { if (!f.get()) { converged = false; } } return converged; } /** * Generates a clusterer. Has to initialize all fields of the clusterer that * are not being set via options. * * @param data set of instances serving as training data * @throws Exception if the clusterer has not been generated successfully */ @Override public void buildClusterer(Instances data) throws Exception { // can clusterer handle the data? 
getCapabilities().testWithFail(data); m_Iterations = 0; m_ReplaceMissingFilter = new ReplaceMissingValues(); Instances instances = new Instances(data); instances.setClassIndex(-1); if (!m_dontReplaceMissing) { m_ReplaceMissingFilter.setInputFormat(instances); instances = Filter.useFilter(instances, m_ReplaceMissingFilter); } m_FullMissingCounts = new int[instances.numAttributes()]; if (m_displayStdDevs) { m_FullStdDevs = new double[instances.numAttributes()]; } m_FullNominalCounts = new int[instances.numAttributes()][0]; m_FullMeansOrMediansOrModes = moveCentroid(0, instances, false, false); for (int i = 0; i < instances.numAttributes(); i++) { m_FullMissingCounts[i] = instances.attributeStats(i).missingCount; if (instances.attribute(i).isNumeric()) { if (m_displayStdDevs) { m_FullStdDevs[i] = Math.sqrt(instances.variance(i)); } if (m_FullMissingCounts[i] == instances.numInstances()) { m_FullMeansOrMediansOrModes[i] = Double.NaN; // mark missing as mean } } else { m_FullNominalCounts[i] = instances.attributeStats(i).nominalCounts; if (m_FullMissingCounts[i] > m_FullNominalCounts[i][Utils .maxIndex(m_FullNominalCounts[i])]) { m_FullMeansOrMediansOrModes[i] = -1; // mark missing as most common // value } } } m_ClusterCentroids = new Instances(instances, m_NumClusters); int[] clusterAssignments = new int[instances.numInstances()]; if (m_PreserveOrder) m_Assignments = clusterAssignments; m_DistanceFunction.setInstances(instances); Random RandomO = new Random(getSeed()); int instIndex; HashMap initC = new HashMap(); DecisionTableHashKey hk = null; Instances initInstances = null; if (m_PreserveOrder) initInstances = new Instances(instances); else initInstances = instances; if (m_initializeWithKMeansPlusPlus) { kMeansPlusPlusInit(initInstances); } else { for (int j = initInstances.numInstances() - 1; j >= 0; j--) { instIndex = RandomO.nextInt(j + 1); hk = new DecisionTableHashKey(initInstances.instance(instIndex), initInstances.numAttributes(), true); if 
(!initC.containsKey(hk)) { m_ClusterCentroids.add(initInstances.instance(instIndex)); initC.put(hk, null); } initInstances.swap(j, instIndex); if (m_ClusterCentroids.numInstances() == m_NumClusters) { break; } } } m_NumClusters = m_ClusterCentroids.numInstances(); // removing reference initInstances = null; int i; boolean converged = false; int emptyClusterCount; Instances[] tempI = new Instances[m_NumClusters]; m_squaredErrors = new double[m_NumClusters]; m_ClusterNominalCounts = new int[m_NumClusters][instances.numAttributes()][0]; m_ClusterMissingCounts = new int[m_NumClusters][instances.numAttributes()]; startExecutorPool(); while (!converged) { emptyClusterCount = 0; m_Iterations++; converged = true; if (m_executionSlots <= 1 || instances.numInstances() < 2 * m_executionSlots) { for (i = 0; i < instances.numInstances(); i++) { Instance toCluster = instances.instance(i); int newC = clusterProcessedInstance(toCluster, false, true); if (newC != clusterAssignments[i]) { converged = false; } clusterAssignments[i] = newC; } } else { converged = launchAssignToClusters(instances, clusterAssignments); } // update centroids m_ClusterCentroids = new Instances(instances, m_NumClusters); for (i = 0; i < m_NumClusters; i++) { tempI[i] = new Instances(instances, 0); } for (i = 0; i < instances.numInstances(); i++) { tempI[clusterAssignments[i]].add(instances.instance(i)); } if (m_executionSlots <= 1 || instances.numInstances() < 2 * m_executionSlots) { for (i = 0; i < m_NumClusters; i++) { if (tempI[i].numInstances() == 0) { // empty cluster emptyClusterCount++; } else { moveCentroid(i, tempI[i], true, true); } } } else { emptyClusterCount = launchMoveCentroids(tempI); } if (m_Iterations == m_MaxIterations) converged = true; if (emptyClusterCount > 0) { m_NumClusters -= emptyClusterCount; if (converged) { Instances[] t = new Instances[m_NumClusters]; int index = 0; for (int k = 0; k < tempI.length; k++) { if (tempI[k].numInstances() > 0) { t[index++] = tempI[k]; } } tempI = 
t; } else { tempI = new Instances[m_NumClusters]; } } if (!converged) { m_ClusterNominalCounts = new int[m_NumClusters][instances .numAttributes()][0]; } } // calculate errors if (!m_FastDistanceCalc) { for (i = 0; i < instances.numInstances(); i++) { clusterProcessedInstance(instances.instance(i), true, false); } } if (m_displayStdDevs) { m_ClusterStdDevs = new Instances(instances, m_NumClusters); } m_ClusterSizes = new int[m_NumClusters]; for (i = 0; i < m_NumClusters; i++) { if (m_displayStdDevs) { double[] vals2 = new double[instances.numAttributes()]; for (int j = 0; j < instances.numAttributes(); j++) { if (instances.attribute(j).isNumeric()) { vals2[j] = Math.sqrt(tempI[i].variance(j)); } else { vals2[j] = Utils.missingValue(); } } m_ClusterStdDevs.add(new DenseInstance(1.0, vals2)); } m_ClusterSizes[i] = tempI[i].numInstances(); } m_executorPool.shutdown(); } protected void kMeansPlusPlusInit(Instances data) throws Exception { Random randomO = new Random(getSeed()); HashMap<DecisionTableHashKey, String> initC = new HashMap<DecisionTableHashKey, String>(); // choose initial center uniformly at random int index = randomO.nextInt(data.numInstances()); m_ClusterCentroids.add(data.instance(index)); DecisionTableHashKey hk = new DecisionTableHashKey(data.instance(index), data.numAttributes(), true); initC.put(hk, null); int iteration = 0; int remainingInstances = data.numInstances() - 1; if (m_NumClusters > 1) { // proceed with selecting the rest // distances to the initial randomly chose center double[] distances = new double[data.numInstances()]; double[] cumProbs = new double[data.numInstances()]; for (int i = 0; i < data.numInstances(); i++) { distances[i] = m_DistanceFunction.distance(data.instance(i), m_ClusterCentroids.instance(iteration)); } // now choose the remaining cluster centers for (int i = 1; i < m_NumClusters; i++) { // distances converted to probabilities double[] weights = new double[data.numInstances()]; System.arraycopy(distances, 0, weights, 
0, distances.length); Utils.normalize(weights); double sumOfProbs = 0; for (int k = 0; k < data.numInstances(); k++) { sumOfProbs += weights[k]; cumProbs[k] = sumOfProbs; } cumProbs[data.numInstances() - 1] = 1.0; // make sure there are no // rounding issues // choose a random instance double prob = randomO.nextDouble(); for (int k = 0; k < cumProbs.length; k++) { if (prob < cumProbs[k]) { Instance candidateCenter = data.instance(k); hk = new DecisionTableHashKey(candidateCenter, data.numAttributes(), true); if (!initC.containsKey(hk)) { initC.put(hk, null); m_ClusterCentroids.add(candidateCenter); } else { // we shouldn't get here because any instance that is a duplicate // of // an already chosen cluster center should have zero distance (and // hence // zero probability of getting chosen) to that center. System.err.println("We shouldn't get here...."); } remainingInstances--; break; } } iteration++; if (remainingInstances == 0) { break; } // prepare to choose the next cluster center. // check distances against the new cluster center to see if it is closer for (int k = 0; k < data.numInstances(); k++) { if (distances[k] > 0) { double newDist = m_DistanceFunction.distance(data.instance(k), m_ClusterCentroids.instance(iteration)); if (newDist < distances[k]) { distances[k] = newDist; } } } } } } /** * Move the centroid to it's new coordinates. Generate the centroid * coordinates based on it's members (objects assigned to the cluster of the * centroid) and the distance function being used. 
* * @param centroidIndex index of the centroid which the coordinates will be * computed * @param members the objects that are assigned to the cluster of this * centroid * @param updateClusterInfo if the method is supposed to update the m_Cluster * arrays * @param addToCentroidInstances true if the method is to add the computed * coordinates to the Instances holding the centroids * @return the centroid coordinates */ protected double[] moveCentroid(int centroidIndex, Instances members, boolean updateClusterInfo, boolean addToCentroidInstances) { double[] vals = new double[members.numAttributes()]; // used only for Manhattan Distance Instances sortedMembers = null; int middle = 0; boolean dataIsEven = false; if (m_DistanceFunction instanceof ManhattanDistance) { middle = (members.numInstances() - 1) / 2; dataIsEven = ((members.numInstances() % 2) == 0); if (m_PreserveOrder) { sortedMembers = members; } else { sortedMembers = new Instances(members); } } for (int j = 0; j < members.numAttributes(); j++) { // in case of Euclidian distance the centroid is the mean point // in case of Manhattan distance the centroid is the median point // in both cases, if the attribute is nominal, the centroid is the mode if (m_DistanceFunction instanceof EuclideanDistance || members.attribute(j).isNominal()) { vals[j] = members.meanOrMode(j); } else if (m_DistanceFunction instanceof ManhattanDistance) { // singleton special case if (members.numInstances() == 1) { vals[j] = members.instance(0).value(j); } else { vals[j] = sortedMembers.kthSmallestValue(j, middle + 1); if (dataIsEven) { vals[j] = (vals[j] + sortedMembers.kthSmallestValue(j, middle + 2)) / 2; } } } if (updateClusterInfo) { m_ClusterMissingCounts[centroidIndex][j] = members.attributeStats(j).missingCount; m_ClusterNominalCounts[centroidIndex][j] = members.attributeStats(j).nominalCounts; if (members.attribute(j).isNominal()) { if (m_ClusterMissingCounts[centroidIndex][j] > m_ClusterNominalCounts[centroidIndex][j][Utils 
.maxIndex(m_ClusterNominalCounts[centroidIndex][j])]) { vals[j] = Utils.missingValue(); // mark mode as missing } } else { if (m_ClusterMissingCounts[centroidIndex][j] == members .numInstances()) { vals[j] = Utils.missingValue(); // mark mean as missing } } } } if (addToCentroidInstances) { m_ClusterCentroids.add(new DenseInstance(1.0, vals)); } return vals; } /** * clusters an instance that has been through the filters. * * @param instance the instance to assign a cluster to * @param updateErrors if true, update the within clusters sum of errors * @param useFastDistCalc whether to use the fast distance calculation or not * @return a cluster number */ private int clusterProcessedInstance(Instance instance, boolean updateErrors, boolean useFastDistCalc) { double minDist = Integer.MAX_VALUE; int bestCluster = 0; for (int i = 0; i < m_NumClusters; i++) { double dist; if (useFastDistCalc) dist = m_DistanceFunction.distance(instance, m_ClusterCentroids.instance(i), minDist); else dist = m_DistanceFunction.distance(instance, m_ClusterCentroids.instance(i)); if (dist < minDist) { minDist = dist; bestCluster = i; } } if (updateErrors) { if (m_DistanceFunction instanceof EuclideanDistance) { // Euclidean distance to Squared Euclidean distance minDist *= minDist; } m_squaredErrors[bestCluster] += minDist; } return bestCluster; } /** * Classifies a given instance. * * @param instance the instance to be assigned to a cluster * @return the number of the assigned cluster as an interger if the class is * enumerated, otherwise the predicted value * @throws Exception if instance could not be classified successfully */ @Override public int clusterInstance(Instance instance) throws Exception { Instance inst = null; if (!m_dontReplaceMissing) { m_ReplaceMissingFilter.input(instance); m_ReplaceMissingFilter.batchFinished(); inst = m_ReplaceMissingFilter.output(); } else { inst = instance; } return clusterProcessedInstance(inst, false, true); } /** * Returns the number of clusters. 
* * @return the number of clusters generated for a training dataset. * @throws Exception if number of clusters could not be returned successfully */ @Override public int numberOfClusters() throws Exception { return m_NumClusters; } /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. */ @Override public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option("\tnumber of clusters.\n" + "\t(default 2).", "N", 1, "-N <num>")); result.addElement(new Option("\tInitialize using the k-means++ method.\n", "P", 0, "-P")); result.addElement(new Option("\tDisplay std. deviations for centroids.\n", "V", 0, "-V")); result.addElement(new Option("\tDon't replace missing values with mean/mode.\n", "M", 0, "-M")); result.add(new Option("\tDistance function to use.\n" + "\t(default: weka.core.EuclideanDistance)", "A", 1, "-A <classname and options>")); result.add(new Option("\tMaximum number of iterations.\n", "I", 1, "-I <num>")); result.addElement(new Option("\tPreserve order of instances.\n", "O", 0, "-O")); result .addElement(new Option( "\tEnables faster distance calculations, using cut-off values.\n" + "\tDisables the calculation/output of squared errors/distances.\n", "fast", 0, "-fast")); result.addElement(new Option("\tNumber of execution slots.\n" + "\t(default 1 - i.e. no parallelism)", "num-slots", 1, "-num-slots <num>")); Enumeration en = super.listOptions(); while (en.hasMoreElements()) result.addElement(en.nextElement()); return result.elements(); } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String numClustersTipText() { return "set number of clusters"; } /** * set the number of clusters to generate. 
* * @param n the number of clusters to generate * @throws Exception if number of clusters is negative */ @Override public void setNumClusters(int n) throws Exception { if (n <= 0) { throw new Exception("Number of clusters must be > 0"); } m_NumClusters = n; } /** * gets the number of clusters to generate. * * @return the number of clusters to generate */ public int getNumClusters() { return m_NumClusters; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String initializeUsingKMeansPlusPlusMethodTipText() { return "Initialize cluster centers using the probabilistic " + " farthest first method of the k-means++ algorithm"; } /** * Set whether to initialize using the probabilistic farthest first like * method of the k-means++ algorithm (rather than the standard random * selection of initial cluster centers). * * @param k true if the k-means++ method is to be used to select initial * cluster centers. */ public void setInitializeUsingKMeansPlusPlusMethod(boolean k) { m_initializeWithKMeansPlusPlus = k; } /** * Get whether to initialize using the probabilistic farthest first like * method of the k-means++ algorithm (rather than the standard random * selection of initial cluster centers). * * @return true if the k-means++ method is to be used to select initial * cluster centers. */ public boolean getInitializeUsingKMeansPlusPlusMethod() { return m_initializeWithKMeansPlusPlus; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String maxIterationsTipText() { return "set maximum number of iterations"; } /** * set the maximum number of iterations to be executed. 
* * @param n the maximum number of iterations * @throws Exception if maximum number of iteration is smaller than 1 */ public void setMaxIterations(int n) throws Exception { if (n <= 0) { throw new Exception("Maximum number of iterations must be > 0"); } m_MaxIterations = n; } /** * gets the number of maximum iterations to be executed. * * @return the number of clusters to generate */ public int getMaxIterations() { return m_MaxIterations; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String displayStdDevsTipText() { return "Display std deviations of numeric attributes " + "and counts of nominal attributes."; } /** * Sets whether standard deviations and nominal count. Should be displayed in * the clustering output. * * @param stdD true if std. devs and counts should be displayed */ public void setDisplayStdDevs(boolean stdD) { m_displayStdDevs = stdD; } /** * Gets whether standard deviations and nominal count. Should be displayed in * the clustering output. * * @return true if std. devs and counts should be displayed */ public boolean getDisplayStdDevs() { return m_displayStdDevs; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String dontReplaceMissingValuesTipText() { return "Replace missing values globally with mean/mode."; } /** * Sets whether missing values are to be replaced. * * @param r true if missing values are to be replaced */ public void setDontReplaceMissingValues(boolean r) { m_dontReplaceMissing = r; } /** * Gets whether missing values are to be replaced. * * @return true if missing values are to be replaced */ public boolean getDontReplaceMissingValues() { return m_dontReplaceMissing; } /** * Returns the tip text for this property. 
* * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String distanceFunctionTipText() { return "The distance function to use for instances comparison " + "(default: weka.core.EuclideanDistance). "; } /** * returns the distance function currently in use. * * @return the distance function */ public DistanceFunction getDistanceFunction() { return m_DistanceFunction; } /** * sets the distance function to use for instance comparison. * * @param df the new distance function to use * @throws Exception if instances cannot be processed */ public void setDistanceFunction(DistanceFunction df) throws Exception { if (!(df instanceof EuclideanDistance) && !(df instanceof ManhattanDistance)) { throw new Exception( "SimpleKMeans currently only supports the Euclidean and Manhattan distances."); } m_DistanceFunction = df; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String preserveInstancesOrderTipText() { return "Preserve order of instances."; } /** * Sets whether order of instances must be preserved. * * @param r true if missing values are to be replaced */ public void setPreserveInstancesOrder(boolean r) { m_PreserveOrder = r; } /** * Gets whether order of instances must be preserved. * * @return true if missing values are to be replaced */ public boolean getPreserveInstancesOrder() { return m_PreserveOrder; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String fastDistanceCalcTipText() { return "Uses cut-off values for speeding up distance calculation, but " + "suppresses also the calculation and output of the within cluster sum " + "of squared errors/sum of distances."; } /** * Sets whether to use faster distance calculation. 
* * @param value true if faster calculation to be used */ public void setFastDistanceCalc(boolean value) { m_FastDistanceCalc = value; } /** * Gets whether to use faster distance calculation. * * @return true if faster calculation is used */ public boolean getFastDistanceCalc() { return m_FastDistanceCalc; } /** * Returns the tip text for this property * * @return tip text for this property suitable for displaying in the * explorer/experimenter gui */ public String numExecutionSlotsTipText() { return "The number of execution slots (threads) to use. " + "Set equal to the number of available cpu/cores"; } /** * Set the degree of parallelism to use. * * @param slots the number of tasks to run in parallel when computing the * nearest neighbors and evaluating different values of k between the * lower and upper bounds */ public void setNumExecutionSlots(int slots) { m_executionSlots = slots; } /** * Get the degree of parallelism to use. * * @return the number of tasks to run in parallel when computing the nearest * neighbors and evaluating different values of k between the lower * and upper bounds */ public int getNumExecutionSlots() { return m_executionSlots; } /** * Parses a given list of options. * <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -N &lt;num&gt; * number of clusters. * (default 2).</pre> * * <pre> -P * Initialize using the k-means++ method. * </pre> * * <pre> -V * Display std. deviations for centroids. * </pre> * * <pre> -M * Replace missing values with mean/mode. * </pre> * * <pre> -A &lt;classname and options&gt; * Distance function to use. * (default: weka.core.EuclideanDistance)</pre> * * <pre> -I &lt;num&gt; * Maximum number of iterations. * </pre> * * <pre> -O * Preserve order of instances. * </pre> * * <pre> -fast * Enables faster distance calculations, using cut-off values. * Disables the calculation/output of squared errors/distances. * </pre> * * <pre> -num-slots &lt;num&gt; * Number of execution slots. * (default 1 - i.e. 
no parallelism)</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 10)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ @Override public void setOptions(String[] options) throws Exception { m_displayStdDevs = Utils.getFlag("V", options); m_dontReplaceMissing = Utils.getFlag("M", options); m_initializeWithKMeansPlusPlus = Utils.getFlag('P', options); String optionString = Utils.getOption('N', options); if (optionString.length() != 0) { setNumClusters(Integer.parseInt(optionString)); } optionString = Utils.getOption("I", options); if (optionString.length() != 0) { setMaxIterations(Integer.parseInt(optionString)); } String distFunctionClass = Utils.getOption('A', options); if (distFunctionClass.length() != 0) { String distFunctionClassSpec[] = Utils.splitOptions(distFunctionClass); if (distFunctionClassSpec.length == 0) { throw new Exception("Invalid DistanceFunction specification string."); } String className = distFunctionClassSpec[0]; distFunctionClassSpec[0] = ""; setDistanceFunction((DistanceFunction) Utils.forName( DistanceFunction.class, className, distFunctionClassSpec)); } else { setDistanceFunction(new EuclideanDistance()); } m_PreserveOrder = Utils.getFlag("O", options); m_FastDistanceCalc = Utils.getFlag("fast", options); String slotsS = Utils.getOption("num-slots", options); if (slotsS.length() > 0) { setNumExecutionSlots(Integer.parseInt(slotsS)); } super.setOptions(options); } /** * Gets the current settings of SimpleKMeans. 
* * @return an array of strings suitable for passing to setOptions() */ @Override public String[] getOptions() { int i; Vector result; String[] options; result = new Vector(); if (m_initializeWithKMeansPlusPlus) { result.add("-P"); } if (m_displayStdDevs) { result.add("-V"); } if (m_dontReplaceMissing) { result.add("-M"); } result.add("-N"); result.add("" + getNumClusters()); result.add("-A"); result.add((m_DistanceFunction.getClass().getName() + " " + Utils .joinOptions(m_DistanceFunction.getOptions())).trim()); result.add("-I"); result.add("" + getMaxIterations()); if (m_PreserveOrder) { result.add("-O"); } if (m_FastDistanceCalc) { result.add("-fast"); } result.add("-num-slots"); result.add("" + getNumExecutionSlots()); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); return (String[]) result.toArray(new String[result.size()]); } /** * return a string describing this clusterer. * * @return a description of the clusterer as a string */ @Override public String toString() { if (m_ClusterCentroids == null) { return "No clusterer built yet!"; } int maxWidth = 0; int maxAttWidth = 0; boolean containsNumeric = false; for (int i = 0; i < m_NumClusters; i++) { for (int j = 0; j < m_ClusterCentroids.numAttributes(); j++) { if (m_ClusterCentroids.attribute(j).name().length() > maxAttWidth) { maxAttWidth = m_ClusterCentroids.attribute(j).name().length(); } if (m_ClusterCentroids.attribute(j).isNumeric()) { containsNumeric = true; double width = Math.log(Math.abs(m_ClusterCentroids.instance(i) .value(j))) / Math.log(10.0); // System.err.println(m_ClusterCentroids.instance(i).value(j)+" "+width); if (width < 0) { width = 1; } // decimal + # decimal places + 1 width += 6.0; if ((int) width > maxWidth) { maxWidth = (int) width; } } } } for (int i = 0; i < m_ClusterCentroids.numAttributes(); i++) { if (m_ClusterCentroids.attribute(i).isNominal()) { Attribute a = m_ClusterCentroids.attribute(i); for (int j = 0; j < 
m_ClusterCentroids.numInstances(); j++) { String val = a.value((int) m_ClusterCentroids.instance(j).value(i)); if (val.length() > maxWidth) { maxWidth = val.length(); } } for (int j = 0; j < a.numValues(); j++) { String val = a.value(j) + " "; if (val.length() > maxAttWidth) { maxAttWidth = val.length(); } } } } if (m_displayStdDevs) { // check for maximum width of maximum frequency count for (int i = 0; i < m_ClusterCentroids.numAttributes(); i++) { if (m_ClusterCentroids.attribute(i).isNominal()) { int maxV = Utils.maxIndex(m_FullNominalCounts[i]); /* * int percent = (int)((double)m_FullNominalCounts[i][maxV] / * Utils.sum(m_ClusterSizes) * 100.0); */ int percent = 6; // max percent width (100%) String nomV = "" + m_FullNominalCounts[i][maxV]; // + " (" + percent + "%)"; if (nomV.length() + percent > maxWidth) { maxWidth = nomV.length() + 1; } } } } // check for size of cluster sizes for (int i = 0; i < m_ClusterSizes.length; i++) { String size = "(" + m_ClusterSizes[i] + ")"; if (size.length() > maxWidth) { maxWidth = size.length(); } } if (m_displayStdDevs && maxAttWidth < "missing".length()) { maxAttWidth = "missing".length(); } String plusMinus = "+/-"; maxAttWidth += 2; if (m_displayStdDevs && containsNumeric) { maxWidth += plusMinus.length(); } if (maxAttWidth < "Attribute".length() + 2) { maxAttWidth = "Attribute".length() + 2; } if (maxWidth < "Full Data".length()) { maxWidth = "Full Data".length() + 1; } if (maxWidth < "missing".length()) { maxWidth = "missing".length() + 1; } StringBuffer temp = new StringBuffer(); temp.append("\nkMeans\n======\n"); temp.append("\nNumber of iterations: " + m_Iterations); if (!m_FastDistanceCalc) { temp.append("\n"); if (m_DistanceFunction instanceof EuclideanDistance) { temp.append("Within cluster sum of squared errors: " + Utils.sum(m_squaredErrors)); } else { temp.append("Sum of within cluster distances: " + Utils.sum(m_squaredErrors)); } } if (!m_dontReplaceMissing) { temp.append("\nMissing values globally replaced 
with mean/mode"); } temp.append("\n\nCluster centroids:\n"); temp.append(pad("Cluster#", " ", (maxAttWidth + (maxWidth * 2 + 2)) - "Cluster#".length(), true)); temp.append("\n"); temp.append(pad("Attribute", " ", maxAttWidth - "Attribute".length(), false)); temp.append(pad("Full Data", " ", maxWidth + 1 - "Full Data".length(), true)); // cluster numbers for (int i = 0; i < m_NumClusters; i++) { String clustNum = "" + i; temp.append(pad(clustNum, " ", maxWidth + 1 - clustNum.length(), true)); } temp.append("\n"); // cluster sizes String cSize = "(" + Utils.sum(m_ClusterSizes) + ")"; temp.append(pad(cSize, " ", maxAttWidth + maxWidth + 1 - cSize.length(), true)); for (int i = 0; i < m_NumClusters; i++) { cSize = "(" + m_ClusterSizes[i] + ")"; temp.append(pad(cSize, " ", maxWidth + 1 - cSize.length(), true)); } temp.append("\n"); temp.append(pad("", "=", maxAttWidth + (maxWidth * (m_ClusterCentroids.numInstances() + 1) + m_ClusterCentroids.numInstances() + 1), true)); temp.append("\n"); for (int i = 0; i < m_ClusterCentroids.numAttributes(); i++) { String attName = m_ClusterCentroids.attribute(i).name(); temp.append(attName); for (int j = 0; j < maxAttWidth - attName.length(); j++) { temp.append(" "); } String strVal; String valMeanMode; // full data if (m_ClusterCentroids.attribute(i).isNominal()) { if (m_FullMeansOrMediansOrModes[i] == -1) { // missing valMeanMode = pad("missing", " ", maxWidth + 1 - "missing".length(), true); } else { valMeanMode = pad( (strVal = m_ClusterCentroids.attribute(i).value( (int) m_FullMeansOrMediansOrModes[i])), " ", maxWidth + 1 - strVal.length(), true); } } else { if (Double.isNaN(m_FullMeansOrMediansOrModes[i])) { valMeanMode = pad("missing", " ", maxWidth + 1 - "missing".length(), true); } else { valMeanMode = pad( (strVal = Utils.doubleToString(m_FullMeansOrMediansOrModes[i], maxWidth, 4).trim()), " ", maxWidth + 1 - strVal.length(), true); } } temp.append(valMeanMode); for (int j = 0; j < m_NumClusters; j++) { if 
(m_ClusterCentroids.attribute(i).isNominal()) { if (m_ClusterCentroids.instance(j).isMissing(i)) { valMeanMode = pad("missing", " ", maxWidth + 1 - "missing".length(), true); } else { valMeanMode = pad( (strVal = m_ClusterCentroids.attribute(i).value( (int) m_ClusterCentroids.instance(j).value(i))), " ", maxWidth + 1 - strVal.length(), true); } } else { if (m_ClusterCentroids.instance(j).isMissing(i)) { valMeanMode = pad("missing", " ", maxWidth + 1 - "missing".length(), true); } else { valMeanMode = pad( (strVal = Utils.doubleToString( m_ClusterCentroids.instance(j).value(i), maxWidth, 4) .trim()), " ", maxWidth + 1 - strVal.length(), true); } } temp.append(valMeanMode); } temp.append("\n"); if (m_displayStdDevs) { // Std devs/max nominal String stdDevVal = ""; if (m_ClusterCentroids.attribute(i).isNominal()) { // Do the values of the nominal attribute Attribute a = m_ClusterCentroids.attribute(i); for (int j = 0; j < a.numValues(); j++) { // full data String val = " " + a.value(j); temp.append(pad(val, " ", maxAttWidth + 1 - val.length(), false)); int count = m_FullNominalCounts[i][j]; int percent = (int) ((double) m_FullNominalCounts[i][j] / Utils.sum(m_ClusterSizes) * 100.0); String percentS = "" + percent + "%)"; percentS = pad(percentS, " ", 5 - percentS.length(), true); stdDevVal = "" + count + " (" + percentS; stdDevVal = pad(stdDevVal, " ", maxWidth + 1 - stdDevVal.length(), true); temp.append(stdDevVal); // Clusters for (int k = 0; k < m_NumClusters; k++) { count = m_ClusterNominalCounts[k][i][j]; percent = (int) ((double) m_ClusterNominalCounts[k][i][j] / m_ClusterSizes[k] * 100.0); percentS = "" + percent + "%)"; percentS = pad(percentS, " ", 5 - percentS.length(), true); stdDevVal = "" + count + " (" + percentS; stdDevVal = pad(stdDevVal, " ", maxWidth + 1 - stdDevVal.length(), true); temp.append(stdDevVal); } temp.append("\n"); } // missing (if any) if (m_FullMissingCounts[i] > 0) { // Full data temp.append(pad(" missing", " ", maxAttWidth + 1 - " 
missing".length(), false)); int count = m_FullMissingCounts[i]; int percent = (int) ((double) m_FullMissingCounts[i] / Utils.sum(m_ClusterSizes) * 100.0); String percentS = "" + percent + "%)"; percentS = pad(percentS, " ", 5 - percentS.length(), true); stdDevVal = "" + count + " (" + percentS; stdDevVal = pad(stdDevVal, " ", maxWidth + 1 - stdDevVal.length(), true); temp.append(stdDevVal); // Clusters for (int k = 0; k < m_NumClusters; k++) { count = m_ClusterMissingCounts[k][i]; percent = (int) ((double) m_ClusterMissingCounts[k][i] / m_ClusterSizes[k] * 100.0); percentS = "" + percent + "%)"; percentS = pad(percentS, " ", 5 - percentS.length(), true); stdDevVal = "" + count + " (" + percentS; stdDevVal = pad(stdDevVal, " ", maxWidth + 1 - stdDevVal.length(), true); temp.append(stdDevVal); } temp.append("\n"); } temp.append("\n"); } else { // Full data if (Double.isNaN(m_FullMeansOrMediansOrModes[i])) { stdDevVal = pad("--", " ", maxAttWidth + maxWidth + 1 - 2, true); } else { stdDevVal = pad( (strVal = plusMinus + Utils.doubleToString(m_FullStdDevs[i], maxWidth, 4) .trim()), " ", maxWidth + maxAttWidth + 1 - strVal.length(), true); } temp.append(stdDevVal); // Clusters for (int j = 0; j < m_NumClusters; j++) { if (m_ClusterCentroids.instance(j).isMissing(i)) { stdDevVal = pad("--", " ", maxWidth + 1 - 2, true); } else { stdDevVal = pad( (strVal = plusMinus + Utils.doubleToString( m_ClusterStdDevs.instance(j).value(i), maxWidth, 4) .trim()), " ", maxWidth + 1 - strVal.length(), true); } temp.append(stdDevVal); } temp.append("\n\n"); } } } temp.append("\n\n"); return temp.toString(); } private String pad(String source, String padChar, int length, boolean leftPad) { StringBuffer temp = new StringBuffer(); if (leftPad) { for (int i = 0; i < length; i++) { temp.append(padChar); } temp.append(source); } else { temp.append(source); for (int i = 0; i < length; i++) { temp.append(padChar); } } return temp.toString(); } /** * Gets the the cluster centroids. 
* * @return the cluster centroids */ public Instances getClusterCentroids() { return m_ClusterCentroids; } /** * Gets the standard deviations of the numeric attributes in each cluster. * * @return the standard deviations of the numeric attributes in each cluster */ public Instances getClusterStandardDevs() { return m_ClusterStdDevs; } /** * Returns for each cluster the frequency counts for the values of each * nominal attribute. * * @return the counts */ public int[][][] getClusterNominalCounts() { return m_ClusterNominalCounts; } /** * Gets the squared error for all clusters. * * @return the squared error, NaN if fast distance calculation is used * @see #m_FastDistanceCalc */ public double getSquaredError() { if (m_FastDistanceCalc) return Double.NaN; else return Utils.sum(m_squaredErrors); } /** * Gets the number of instances in each cluster. * * @return The number of instances in each cluster */ public int[] getClusterSizes() { return m_ClusterSizes; } /** * Gets the assignments for each instance. * * @return Array of indexes of the centroid assigned to each instance * @throws Exception if order of instances wasn't preserved or no assignments * were made */ public int[] getAssignments() throws Exception { if (!m_PreserveOrder) { throw new Exception( "The assignments are only available when order of instances is preserved (-O)"); } if (m_Assignments == null) { throw new Exception("No assignments made."); } return m_Assignments; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9756 $"); } /** * Main method for executing this class. * * @param args use -h to list all parameters */ public static void main(String[] args) { runClusterer(new SimpleKMeans(), args); } }
54,054
29.765509
260
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/SingleClustererEnhancer.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * SingleClustererEnhancer.java
 * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.clusterers;

import java.util.Enumeration;
import java.util.Vector;

import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Utils;

/**
 * Meta-clusterer for enhancing a base clusterer.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 */
public abstract class SingleClustererEnhancer
  extends AbstractClusterer
  implements OptionHandler {

  /** for serialization */
  private static final long serialVersionUID = 4893928362926428671L;

  /** the base clusterer being wrapped */
  protected Clusterer m_Clusterer = new SimpleKMeans();

  /**
   * Returns the class name of the default base clusterer.
   *
   * @return the default clusterer classname
   */
  protected String defaultClustererString() {
    return SimpleKMeans.class.getName();
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector options = new Vector();

    options.addElement(new Option(
        "\tFull name of base clusterer.\n"
        + "\t(default: " + defaultClustererString() +")",
        "W", 1, "-W"));

    if (m_Clusterer instanceof OptionHandler) {
      // append the options of the wrapped clusterer
      options.addElement(new Option(
          "", "", 0,
          "\nOptions specific to clusterer "
          + m_Clusterer.getClass().getName() + ":"));

      Enumeration base = ((OptionHandler) m_Clusterer).listOptions();
      while (base.hasMoreElements())
        options.addElement(base.nextElement());
    }

    return options.elements();
  }

  /**
   * Parses a given list of options.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String clustererName = Utils.getOption('W', options);

    if (clustererName.length() > 0) {
      // first install the clusterer without options, so a valid clusterer
      // is in place even if parsing the remaining options fails
      setClusterer(AbstractClusterer.forName(clustererName, null));
      setClusterer(AbstractClusterer.forName(clustererName,
          Utils.partitionOptions(options)));
    } else {
      // same two-step installation for the default clusterer
      setClusterer(AbstractClusterer.forName(defaultClustererString(), null));
      setClusterer(AbstractClusterer.forName(defaultClustererString(),
          Utils.partitionOptions(options)));
    }
  }

  /**
   * Gets the current settings of the clusterer.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector result = new Vector();

    result.add("-W");
    result.add(getClusterer().getClass().getName());

    if (getClusterer() instanceof OptionHandler) {
      result.add("--");
      String[] base = ((OptionHandler) getClusterer()).getOptions();
      for (int i = 0; i < base.length; i++)
        result.add(base[i]);
    }

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Returns the tip text for this property
   *
   * @return tip text for this property suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String clustererTipText() {
    return "The base clusterer to be used.";
  }

  /**
   * Set the base clusterer.
   *
   * @param value the classifier to use.
   */
  public void setClusterer(Clusterer value) {
    m_Clusterer = value;
  }

  /**
   * Get the clusterer used as the base clusterer.
   *
   * @return the base clusterer
   */
  public Clusterer getClusterer() {
    return m_Clusterer;
  }

  /**
   * Gets the clusterer specification string, which contains the class name of
   * the clusterer and any options to the clusterer
   *
   * @return the clusterer string
   */
  protected String getClustererSpec() {
    Clusterer base = getClusterer();
    String spec = base.getClass().getName();

    if (base instanceof OptionHandler)
      spec += " " + Utils.joinOptions(((OptionHandler) base).getOptions());

    return spec;
  }

  /**
   * Returns default capabilities of the clusterer.
   *
   * @return the capabilities of this clusterer
   */
  public Capabilities getCapabilities() {
    Capabilities result = (getClusterer() == null)
      ? super.getCapabilities()
      : getClusterer().getCapabilities();

    // set dependencies
    for (Capability cap: Capability.values())
      result.enableDependency(cap);

    return result;
  }

  /**
   * Returns the number of clusters.
   *
   * @return the number of clusters generated for a training dataset.
   * @throws Exception if number of clusters could not be returned
   *           successfully
   */
  public int numberOfClusters() throws Exception {
    return m_Clusterer.numberOfClusters();
  }
}
5,758
26.165094
105
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/UpdateableClusterer.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * UpdateableClusterer.java
 * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.clusterers;

import weka.core.Instance;

/**
 * Interface to incremental cluster models that can learn using one instance
 * at a time.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 */
public interface UpdateableClusterer {

  /**
   * Adds an instance to the clusterer, updating the model incrementally.
   *
   * @param newInstance the instance to be added
   * @throws Exception if something goes wrong
   */
  public void updateClusterer(Instance newInstance) throws Exception;

  /**
   * Signals the end of the updating.
   */
  public void updateFinished();
}
1,376
28.297872
77
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/XMeans.java
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * XMeans.java
 * Copyright (C) 2000 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.clusterers;

import weka.core.AlgVector;
import weka.core.Capabilities;
import weka.core.DistanceFunction;
import weka.core.EuclideanDistance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.RevisionUtils;
import weka.core.neighboursearch.KDTree;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.PrintWriter;
import java.io.Reader;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;

/**
 <!-- globalinfo-start -->
 * Cluster data using the X-means algorithm.<br/>
 * <br/>
 * X-Means is K-Means extended by an Improve-Structure part. In this part of the algorithm the centers are attempted to be split in its region. The decision between the children of each center and itself is done comparing the BIC-values of the two structures.<br/>
 * <br/>
 * For more information see:<br/>
 * <br/>
 * Dan Pelleg, Andrew W. Moore: X-means: Extending K-means with Efficient Estimation of the Number of Clusters. In: Seventeenth International Conference on Machine Learning, 727-734, 2000.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;inproceedings{Pelleg2000,
 *    author = {Dan Pelleg and Andrew W. Moore},
 *    booktitle = {Seventeenth International Conference on Machine Learning},
 *    pages = {727-734},
 *    publisher = {Morgan Kaufmann},
 *    title = {X-means: Extending K-means with Efficient Estimation of the Number of Clusters},
 *    year = {2000}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -I &lt;num&gt;
 *  maximum number of overall iterations
 *  (default 1).</pre>
 *
 * <pre> -M &lt;num&gt;
 *  maximum number of iterations in the kMeans loop in
 *  the Improve-Parameter part
 *  (default 1000).</pre>
 *
 * <pre> -J &lt;num&gt;
 *  maximum number of iterations in the kMeans loop
 *  for the splitted centroids in the Improve-Structure part
 *  (default 1000).</pre>
 *
 * <pre> -L &lt;num&gt;
 *  minimum number of clusters
 *  (default 2).</pre>
 *
 * <pre> -H &lt;num&gt;
 *  maximum number of clusters
 *  (default 4).</pre>
 *
 * <pre> -B &lt;value&gt;
 *  distance value for binary attributes
 *  (default 1.0).</pre>
 *
 * <pre> -use-kdtree
 *  Uses the KDTree internally
 *  (default no).</pre>
 *
 * <pre> -K &lt;KDTree class specification&gt;
 *  Full class name of KDTree class to use, followed
 *  by scheme options.
 *  eg: "weka.core.neighboursearch.kdtrees.KDTree -P"
 *  (default no KDTree class used).</pre>
 *
 * <pre> -C &lt;value&gt;
 *  cutoff factor, takes the given percentage of the splitted
 *  centroids if none of the children win
 *  (default 0.0).</pre>
 *
 * <pre> -D &lt;distance function class specification&gt;
 *  Full class name of Distance function class to use, followed
 *  by scheme options.
 *  (default weka.core.EuclideanDistance).</pre>
 *
 * <pre> -N &lt;file name&gt;
 *  file to read starting centers from (ARFF format).</pre>
 *
 * <pre> -O &lt;file name&gt;
 *  file to write centers to (ARFF format).</pre>
 *
 * <pre> -U &lt;int&gt;
 *  The debug level.
 *  (default 0)</pre>
 *
 * <pre> -Y &lt;file name&gt;
 *  The debug vectors file.</pre>
 *
 * <pre> -S &lt;num&gt;
 *  Random number seed.
 *  (default 10)</pre>
 *
 <!-- options-end -->
 *
 * @author Gabi Schmidberger (gabi@cs.waikato.ac.nz)
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @author Malcolm Ware (mfw4@cs.waikato.ac.nz)
 * @version $Revision: 5538 $
 * @see RandomizableClusterer
 */
public class XMeans
  extends RandomizableClusterer
  implements TechnicalInformationHandler {

  /*
   * major TODOS:
   *
   * make BIC-Score replaceable by other scores
   */

  /** for serialization. */
  private static final long serialVersionUID = -7941793078404132616L;

  /** training instances. */
  protected Instances m_Instances = null;

  /** model information, should increase readability. */
  protected Instances m_Model = null;

  /** replace missing values in training instances. */
  protected ReplaceMissingValues m_ReplaceMissingFilter;

  /**
   * Distance value between true and false of binary attributes and
   * "same" and "different" of nominal attributes (default = 1.0).
   */
  protected double m_BinValue = 1.0;

  /** BIC-Score of the current model. */
  protected double m_Bic = Double.MIN_VALUE;

  /** Distortion (maximum-likelihood estimate per cluster). */
  protected double[] m_Mle = null;

  /** maximum overall iterations.
   */
  protected int m_MaxIterations = 1;

  /**
   * maximum iterations to perform in the Kmeans part;
   * if negative, iterations are not checked.
   */
  protected int m_MaxKMeans = 1000;

  /** see above, but for kMeans of splitted clusters. */
  protected int m_MaxKMeansForChildren = 1000;

  /** The actual number of clusters. */
  protected int m_NumClusters = 2;

  /** min number of clusters to generate. */
  protected int m_MinNumClusters = 2;

  /** max number of clusters to generate. */
  protected int m_MaxNumClusters = 4;

  /** the distance function used. */
  protected DistanceFunction m_DistanceF = new EuclideanDistance();

  /** cluster centers. */
  protected Instances m_ClusterCenters;

  /** file name of the input file for the cluster centers.
   *  (The original comment said "output file", but this field is read via
   *  m_CenterInput.) */
  protected File m_InputCenterFile = new File(System.getProperty("user.dir"));

  /* --> DebugVectors - USED FOR DEBUGGING */

  /** input file for the random vectors --> USED FOR DEBUGGING. */
  protected Reader m_DebugVectorsInput = null;

  /** the index for the current debug vector. */
  protected int m_DebugVectorsIndex = 0;

  /** all the debug vectors. */
  protected Instances m_DebugVectors = null;

  /** file name of the input file for the random vectors. */
  protected File m_DebugVectorsFile = new File(System.getProperty("user.dir"));

  /** input file for the cluster centers. */
  protected Reader m_CenterInput = null;

  /** file name of the output file for the cluster centers. */
  protected File m_OutputCenterFile = new File(System.getProperty("user.dir"));

  /** output file for the cluster centers. */
  protected PrintWriter m_CenterOutput = null;

  /**
   * temporary variable holding cluster assignments while iterating.
   */
  protected int[] m_ClusterAssignments;

  /** cutoff factor - percentage of splits done in the Improve-Structure part;
      only relevant if all children lost.
      NOTE(review): the class javadoc documents the -C default as 0.0 but the
      field default here is 0.5 — confirm which is intended. */
  protected double m_CutOffFactor = 0.5;

  /** Index in ranges for LOW. */
  public static int R_LOW = 0;

  /** Index in ranges for HIGH. */
  public static int R_HIGH = 1;

  /** Index in ranges for WIDTH.
   */
  public static int R_WIDTH = 2;

  /**
   * KDTrees class if KDTrees are used.
   */
  protected KDTree m_KDTree = new KDTree();

  /** whether to use the KDTree (the KDTree is only initialized to be
   * configurable from the GUI). */
  protected boolean m_UseKDTree = false;

  /** counts iterations done in main loop. */
  protected int m_IterationCount = 0;

  /** counter to say how often kMeans was stopped by loop counter. */
  protected int m_KMeansStopped = 0;

  /** Number of splits prepared. */
  protected int m_NumSplits = 0;

  /** Number of splits accepted (including cutoff factor decisions). */
  protected int m_NumSplitsDone = 0;

  /** Number of splits accepted just because of cutoff factor. */
  protected int m_NumSplitsStillDone = 0;

  /**
   * level of debug output, 0 is no output.
   */
  protected int m_DebugLevel = 0;

  /** print the centers. */
  public static int D_PRINTCENTERS = 1;

  /** follows the splitting of the centers. */
  public static int D_FOLLOWSPLIT = 2;

  /** have a closer look at converge children. */
  public static int D_CONVCHCLOSER = 3;

  /** check on random vectors. */
  public static int D_RANDOMVECTOR = 4;

  /** check on kdtree. */
  public static int D_KDTREE = 5;

  /** follow iterations. */
  public static int D_ITERCOUNT = 6;

  /** functions were maybe misused. */
  public static int D_METH_MISUSE = 80;

  /** for current debug. */
  public static int D_CURR = 88;

  /** general debugging. */
  public static int D_GENERAL = 99;

  /** Flag: I'm debugging. */
  public boolean m_CurrDebugFlag = true;

  /**
   * the default constructor.
   */
  public XMeans() {
    super();

    m_SeedDefault = 10;
    setSeed(m_SeedDefault);
  }

  /**
   * Returns a string describing this clusterer.
   *
   * @return a description of the evaluator suitable for
   *         displaying in the explorer/experimenter gui
   */
  public String globalInfo() {
    return
        "Cluster data using the X-means algorithm.\n\n"
      + "X-Means is K-Means extended by an Improve-Structure part In this "
      + "part of the algorithm the centers are attempted to be split in "
      + "its region. The decision between the children of each center and "
      + "itself is done comparing the BIC-values of the two structures.\n\n"
      + "For more information see:\n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.INPROCEEDINGS);
    result.setValue(Field.AUTHOR, "Dan Pelleg and Andrew W. Moore");
    result.setValue(Field.TITLE, "X-means: Extending K-means with Efficient Estimation of the Number of Clusters");
    result.setValue(Field.BOOKTITLE, "Seventeenth International Conference on Machine Learning");
    result.setValue(Field.YEAR, "2000");
    result.setValue(Field.PAGES, "727-734");
    result.setValue(Field.PUBLISHER, "Morgan Kaufmann");

    return result;
  }

  /**
   * Returns default capabilities of the clusterer.
   *
   * @return the capabilities of this clusterer
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();
    result.enable(Capability.NO_CLASS);

    // attributes
    result.enable(Capability.NUMERIC_ATTRIBUTES);
    result.enable(Capability.DATE_ATTRIBUTES);
    result.enable(Capability.MISSING_VALUES);

    return result;
  }

  /**
   * Generates the X-Means clusterer: alternates a conventional K-means pass
   * (Improve-Params) with a center-splitting pass (Improve-Structure) until
   * the maximum number of clusters is reached or no center is split anymore.
   *
   * @param data set of instances serving as training data
   * @throws Exception if the clusterer has not been
   *           generated successfully
   */
  public void buildClusterer(Instances data) throws Exception {
    // can clusterer handle the data?
    getCapabilities().testWithFail(data);

    if (m_MinNumClusters > m_MaxNumClusters) {
      throw new Exception("XMeans: min number of clusters "
                          + "can't be greater than max number of clusters!");
    }

    m_NumSplits = 0;
    m_NumSplitsDone = 0;
    m_NumSplitsStillDone = 0;

    // replace missing values
    m_ReplaceMissingFilter = new ReplaceMissingValues();
    m_ReplaceMissingFilter.setInputFormat(data);
    m_Instances = Filter.useFilter(data, m_ReplaceMissingFilter);

    // initialize random function
    Random random0 = new Random(m_Seed);

    // num of clusters to start with
    m_NumClusters = m_MinNumClusters;

    // set distance function to default
    if (m_DistanceF == null) {
      m_DistanceF = new EuclideanDistance();
    }

    m_DistanceF.setInstances(m_Instances);
    checkInstances();

    if (m_DebugVectorsFile.exists() && m_DebugVectorsFile.isFile())
      initDebugVectorsInput();

    // make list of indexes for m_Instances
    int[] allInstList = new int[m_Instances.numInstances()];
    for (int i = 0; i < m_Instances.numInstances(); i++) {
      allInstList[i] = i;
    }

    // set model used (just for convenience)
    m_Model = new Instances(m_Instances, 0);

    // produce the starting centers
    if (m_CenterInput != null) {
      // read centers from file
      m_ClusterCenters = new Instances(m_CenterInput);
      m_NumClusters = m_ClusterCenters.numInstances();
    }
    else
      // makes the first centers randomly
      m_ClusterCenters = makeCentersRandomly(random0,
                                             m_Instances, m_NumClusters);
    PFD(D_FOLLOWSPLIT, "\n*** Starting centers ");
    for (int k = 0; k < m_ClusterCenters.numInstances(); k++) {
      PFD(D_FOLLOWSPLIT, "Center " + k + ": " + m_ClusterCenters.instance(k));
    }

    PrCentersFD(D_PRINTCENTERS);

    boolean finished = false;
    Instances children;

    // builds up a KDTree
    if (m_UseKDTree)
      m_KDTree.setInstances(m_Instances);

    // loop counter of main loop
    m_IterationCount = 0;

    /*
     * "finished" does get true as soon as:
     * 1. number of clusters gets >= m_MaxClusters,
     * 2. in the last round, none of the centers have been split
     *
     * if number of clusters is already >= m_MaxClusters
     * part 1 (= Improve-Params) is done at least once.
     */
    while (!finished
           && !stopIteration(m_IterationCount, m_MaxIterations)) {

      /* ====================================================================
       * 1. Improve-Params
       *    conventional K-means
       */
      PFD(D_FOLLOWSPLIT, "\nBeginning of main loop - centers:");
      PrCentersFD(D_FOLLOWSPLIT);

      PFD(D_ITERCOUNT, "\n*** 1. Improve-Params " + m_IterationCount
          + ". time");
      m_IterationCount++;

      // prepare to converge
      boolean converged = false;

      // initialize assignments to -1
      m_ClusterAssignments = initAssignments(m_Instances.numInstances());

      // stores a list of indexes of instances belonging to each center
      int[][] instOfCent = new int[m_ClusterCenters.numInstances()][];

      // KMeans loop counter
      int kMeansIteration = 0;

      // converge in conventional K-means ----------------------------------
      PFD(D_FOLLOWSPLIT, "\nConverge in K-Means:");
      while (!converged
             && !stopKMeansIteration(kMeansIteration, m_MaxKMeans)) {

        kMeansIteration++;
        converged = true;

        // assign instances to centers -------------------------------------
        // (the "converged = true" above is immediately overwritten here)
        converged = assignToCenters(m_UseKDTree ? m_KDTree : null,
                                    m_ClusterCenters,
                                    instOfCent,
                                    allInstList,
                                    m_ClusterAssignments,
                                    kMeansIteration);

        PFD(D_FOLLOWSPLIT, "\nMain loop - Assign - centers:");
        PrCentersFD(D_FOLLOWSPLIT);

        // compute new centers = centers of mass of points
        converged = recomputeCenters(m_ClusterCenters,  // clusters
                                     instOfCent,        // their instances
                                     m_Model);          // model information
        PFD(D_FOLLOWSPLIT, "\nMain loop - Recompute - centers:");
        PrCentersFD(D_FOLLOWSPLIT);
      }
      PFD(D_FOLLOWSPLIT, "");
      PFD(D_FOLLOWSPLIT,
          "End of Part: 1. Improve-Params - conventional K-means");

      /* =====================================================================
       * 2. Improve-Structure
       */

      // BIC before split distortioning the centres
      m_Mle = distortion(instOfCent, m_ClusterCenters);
      m_Bic = calculateBIC(instOfCent, m_ClusterCenters, m_Mle);
      PFD(D_FOLLOWSPLIT, "m_Bic " + m_Bic);

      int currNumCent = m_ClusterCenters.numInstances();
      Instances splitCenters = new Instances(m_ClusterCenters,
                                             currNumCent * 2);

      // store BIC values of parent and children
      double[] pbic = new double [currNumCent];
      double[] cbic = new double [currNumCent];

      // split each center
      // (a possible optimization: stop once
      //  currNumCent + numSplits > m_MaxNumClusters)
      for (int i = 0; i < currNumCent; i++) {

        PFD(D_FOLLOWSPLIT, "\nsplit center " + i + " "
            + m_ClusterCenters.instance(i));
        Instance currCenter = m_ClusterCenters.instance(i);
        int[] currInstList = instOfCent[i];
        int currNumInst = instOfCent[i].length;

        // not enough instances; than continue with next
        if (currNumInst <= 2) {
          pbic[i] = Double.MAX_VALUE;
          cbic[i] = 0.0;
          // add center itself as dummy
          splitCenters.add(currCenter);
          splitCenters.add(currCenter);
          continue;
        }

        // split centers ----------------------------------------------
        double variance = m_Mle[i] / (double)currNumInst;
        children = splitCenter(random0, currCenter, variance, m_Model);

        // initialize assignments to -1
        int[] oneCentAssignments = initAssignments(currNumInst);
        int[][] instOfChCent = new int [2][]; // todo maybe split didn't work

        // converge the children --------------------------------------
        converged = false;
        int kMeansForChildrenIteration = 0;
        PFD(D_FOLLOWSPLIT, "\nConverge, K-Means for children: " + i);
        while (!converged
               && !stopKMeansIteration(kMeansForChildrenIteration,
                                       m_MaxKMeansForChildren)) {
          kMeansForChildrenIteration++;

          converged =
            assignToCenters(children, instOfChCent,
                            currInstList, oneCentAssignments);

          if (!converged) {
            recomputeCentersFast(children, instOfChCent, m_Model);
          }
        }

        // store new centers for later decision if they are taken
        splitCenters.add(children.instance(0));
        splitCenters.add(children.instance(1));

        PFD(D_FOLLOWSPLIT, "\nconverged cildren ");
        PFD(D_FOLLOWSPLIT, " " + children.instance(0));
        PFD(D_FOLLOWSPLIT, " " + children.instance(1));

        // compare parent and children model by their BIC-value
        pbic[i] = calculateBIC(currInstList, currCenter, m_Mle[i], m_Model);
        double[] chMLE = distortion(instOfChCent, children);
        cbic[i] = calculateBIC(instOfChCent, children, chMLE);

      } // end of loop over clusters

      // decide which one to split and make new list of cluster centers
      Instances newClusterCenters = null;
      newClusterCenters = newCentersAfterSplit(pbic, cbic,
                                               m_CutOffFactor, splitCenters);
      /*
       * Compare with before Improve-Structure
       */
      int newNumClusters = newClusterCenters.numInstances();
      if (newNumClusters != m_NumClusters) {

        PFD(D_FOLLOWSPLIT, "Compare with non-split");

        // initialize assignments to -1
        int[] newClusterAssignments =
          initAssignments(m_Instances.numInstances());

        // stores a list of indexes of instances belonging to each center
        int[][] newInstOfCent = new int[newClusterCenters.numInstances()][];

        // assign instances to centers -------------------------------------
        converged = assignToCenters(m_UseKDTree ? m_KDTree : null,
                                    newClusterCenters,
                                    newInstOfCent,
                                    allInstList,
                                    newClusterAssignments,
                                    m_IterationCount);

        double[] newMle = distortion(newInstOfCent, newClusterCenters);
        double newBic = calculateBIC(newInstOfCent, newClusterCenters, newMle);
        PFD(D_FOLLOWSPLIT, "newBic " + newBic);
        if (newBic > m_Bic) {
          PFD(D_FOLLOWSPLIT, "*** decide for new clusters");
          m_Bic = newBic;
          m_ClusterCenters = newClusterCenters;
          m_ClusterAssignments = newClusterAssignments;
        } else {
          PFD(D_FOLLOWSPLIT, "*** keep old clusters");
        }
      }

      newNumClusters = m_ClusterCenters.numInstances();
      // decide if finished: max num cluster reached
      // or last centers where not split at all
      if ((newNumClusters >= m_MaxNumClusters)
          || (newNumClusters == m_NumClusters)) {
        finished = true;
      }
      m_NumClusters = newNumClusters;
    }
  }

  /**
   * Checks for nominal attributes in the dataset.
   * Class attribute is ignored.
* @param data the data to check * @return false if no nominal attributes are present */ public boolean checkForNominalAttributes(Instances data) { int i = 0; while (i < data.numAttributes()) { if ((i != data.classIndex()) && data.attribute(i++).isNominal()) { return true; } } return false; } /** * Set array of int, used to store assignments, to -1. * @param ass integer array used for storing assignments * @return integer array used for storing assignments */ protected int[] initAssignments(int[] ass) { for (int i = 0; i < ass.length; i++) ass[i] = -1; return ass; } /** * Creates and initializes integer array, used to store assignments. * @param numInstances length of array used for assignments * @return integer array used for storing assignments */ protected int[] initAssignments(int numInstances) { int[] ass = new int[numInstances]; for (int i = 0; i < numInstances; i++) ass[i] = -1; return ass; } /** * Creates and initializes boolean array. * @param len length of new array * @return the new array */ boolean[] initBoolArray(int len) { boolean[] boolArray = new boolean [len]; for (int i = 0; i < len; i++) { boolArray[i] = false; } return boolArray; } /** * Returns new center list. * * The following steps 1. and 2. both take care that the number of centers * does not exceed maxCenters. * * 1. Compare BIC values of parent and children and takes the one as * new centers which do win (= BIC-value is smaller). * * 2. If in 1. none of the children are chosen * && and cutoff factor is > 0 * cutoff factor is taken as the percentage of "best" centers that are * still taken. 
   * @param pbic array of parents BIC-values
   * @param cbic array of childrens BIC-values
   * @param cutoffFactor cutoff factor
   * @param splitCenters all children
   * @return the new centers
   */
  protected Instances newCentersAfterSplit(double[] pbic,
                                           double[] cbic,
                                           double cutoffFactor,
                                           Instances splitCenters) {

    // store if split won
    boolean splitPerCutoff = false;
    boolean takeSomeAway = false;
    boolean[] splitWon = initBoolArray(m_ClusterCenters.numInstances());
    int numToSplit = 0;
    Instances newCenters = null;

    // how many would be split, because the children have a better bic value
    for (int i = 0; i < cbic.length; i++) {
      if (cbic[i] > pbic[i]) {
        // decide for splitting ----------------------------------------
        splitWon[i] = true; numToSplit++;
        PFD(D_FOLLOWSPLIT, "Center " + i + " decide for children");
      }
      else {
        // decide for parents and finished stays true -----------------
        PFD(D_FOLLOWSPLIT, "Center " + i + " decide for parent");
      }
    }

    // no splits yet so split per cutoff factor
    if ((numToSplit == 0) && (cutoffFactor > 0)) {
      splitPerCutoff = true;

      // how many to split per cutoff factor
      numToSplit = (int)
        ((double) m_ClusterCenters.numInstances() * m_CutOffFactor);
    }

    // prepare indexes of values in ascending order
    // (diff = pbic - cbic, so centers where the children won the most
    //  come first in sortOrder)
    double[] diff = new double [m_NumClusters];
    for (int j = 0; j < diff.length; j++) {
      diff[j] = pbic[j] - cbic[j];
    }
    int[] sortOrder = Utils.sort(diff);

    // check if maxNumClusters would be exceeded
    int possibleToSplit = m_MaxNumClusters - m_NumClusters;

    if (possibleToSplit > numToSplit) {
      // still enough possible, do the whole amount
      possibleToSplit = numToSplit;
    }
    else
      takeSomeAway = true;

    // prepare for splitting the one that are supposed to be split
    if (splitPerCutoff) {
      for (int j = 0; (j < possibleToSplit) && (cbic[sortOrder[j]] > 0.0);
           j++) {
        splitWon[sortOrder[j]] = true;
      }
      m_NumSplitsStillDone += possibleToSplit;
    }
    else {
      // take some splits away if max number of clusters would be exceeded
      if (takeSomeAway) {
        // keep the first possibleToSplit winners (in sortOrder), then
        // clear the split flag on all remaining centers
        int count = 0;
        int j = 0;
        for (;j < splitWon.length && count < possibleToSplit; j++){
          if (splitWon[sortOrder[j]] == true) count++;
        }

        while (j < splitWon.length) {
          splitWon[sortOrder[j]] = false;
          j++;
        }
      }
    }

    // finally split
    if (possibleToSplit > 0)
      newCenters = newCentersAfterSplit(splitWon, splitCenters);
    else
      newCenters = m_ClusterCenters;
    return newCenters;
  }

  /**
   * Returns new centers. Depending on splitWon: if true takes children, if
   * false takes parent = current center.
   *
   * @param splitWon
   *          array of boolean to indicate to take split or not
   * @param splitCenters
   *          list of splitted centers
   * @return the new centers
   */
  protected Instances newCentersAfterSplit(boolean[] splitWon,
                                           Instances splitCenters) {
    Instances newCenters = new Instances(splitCenters, 0);
    int sIndex = 0;
    for (int i = 0; i < splitWon.length; i++) {
      if (splitWon[i]) {
        m_NumSplitsDone++;
        newCenters.add(splitCenters.instance(sIndex++));
        newCenters.add(splitCenters.instance(sIndex++));
      } else {
        // skip the two unused children and keep the parent center
        sIndex++;
        sIndex++;
        newCenters.add(m_ClusterCenters.instance(i));
      }
    }
    return newCenters;
  }

  /**
   * Controls that counter does not exceed max iteration value. Special
   * function for kmeans iterations.
   *
   * @param iterationCount
   *          current value of counter
   * @param max
   *          maximum value for counter
   * @return true if iteration should be stopped
   */
  protected boolean stopKMeansIteration(int iterationCount, int max) {
    boolean stopIterate = false;
    if (max >= 0)
      stopIterate = (iterationCount >= max);
    if (stopIterate)
      m_KMeansStopped++;
    return stopIterate;
  }

  /**
   * Checks if iterationCount has to be checked and if yes
   * (this means max is > 0) compares it with max.
   *
   * @param iterationCount the current iteration count
   * @param max the maximum number of iterations
   * @return true if maximum has been reached
   */
  protected boolean stopIteration(int iterationCount, int max) {
    boolean stopIterate = false;
    if (max >= 0)
      stopIterate = (iterationCount >= max);
    return stopIterate;
  }

  /**
   * Recompute the new centers. New cluster center is center of mass of its
   * instances.
   * Returns true if the centers stayed the same (i.e. the iteration has
   * converged).
   *
   * @param centers the input and output centers
   * @param instOfCent the instances to the centers
   * @param model data model information
   * @return true if converged.
   */
  protected boolean recomputeCenters(Instances centers,
                                     int[][] instOfCent,
                                     Instances model) {
    boolean converged = true;

    for (int i = 0; i < centers.numInstances(); i++) {
      double val;
      for (int j = 0; j < model.numAttributes(); j++) {
        val = meanOrMode(m_Instances, instOfCent[i], j);

        // NOTE(review): this loop repeats the identical comparison
        // instOfCent[i].length times; one comparison would suffice.
        for (int k = 0; k < instOfCent[i].length; k++)
          if (converged && m_ClusterCenters.instance(i).value(j) != val)
            converged = false;

        // NOTE(review): reads and writes m_ClusterCenters rather than the
        // "centers" parameter; callers in this file pass m_ClusterCenters,
        // so behavior matches, but the parameter is effectively ignored
        // except for its size.
        if (!converged)
          m_ClusterCenters.instance(i).setValue(j, val);
      }
    }
    return converged;
  }

  /**
   * Recompute the new centers - 2nd version.
   * Same as recomputeCenters, but does not check if center stays the same.
   *
   * @param centers the input center and output centers
   * @param instOfCentIndexes the indexes of the instances to the centers
   * @param model data model information
   */
  protected void recomputeCentersFast(Instances centers,
                                      int[][] instOfCentIndexes,
                                      Instances model) {
    for (int i = 0; i < centers.numInstances(); i++) {
      double val;
      for (int j = 0; j < model.numAttributes(); j++) {
        val = meanOrMode(m_Instances, instOfCentIndexes[i], j);
        centers.instance(i).setValue(j, val);
      }
    }
  }

  /**
   * Computes Mean Or Mode of one attribute on a subset of m_Instances.
   * The subset is defined by an index list.
   * @param instances all instances
   * @param instList the indexes of the instances the mean is computed from
   * @param attIndex the index of the attribute
   * @return mean value (numeric attribute), index of the most frequent
   *         value (nominal attribute), or 0 otherwise
   */
  protected double meanOrMode(Instances instances, int[] instList,
                              int attIndex) {
    double result, found;
    int[] counts;
    int numInst = instList.length;

    if (instances.attribute(attIndex).isNumeric()) {
      // weighted mean over the non-missing values
      result = found = 0;
      for (int j = 0; j < numInst; j++) {
        Instance currInst = instances.instance(instList[j]);
        if (!currInst.isMissing(attIndex)) {
          found += currInst.weight();
          result += currInst.weight() * currInst.value(attIndex);
        }
      }
      if (Utils.eq(found, 0)) {
        return 0;
      } else {
        return result / found;
      }
    } else if (instances.attribute(attIndex).isNominal()) {
      // mode = most frequent (weighted) value
      counts = new int[instances.attribute(attIndex).numValues()];
      for (int j = 0; j < numInst; j++) {
        Instance currInst = instances.instance(instList[j]);
        if (!currInst.isMissing(attIndex)) {
          // NOTE(review): counts is int[], so the compound assignment
          // narrows the weight; fractional instance weights would be
          // truncated here — confirm weights are integral in practice.
          counts[(int) currInst.value(attIndex)] += currInst.weight();
        }
      }
      return (double)Utils.maxIndex(counts);
    } else {
      return 0;
    }
  }

  /**
   * Assigns instances to centers, dispatching to the KDTree-based or the
   * plain implementation.
   *
   * @param tree KDTree on all instances
   * @param centers all the input centers
   * @param instOfCent the instances to each center
   * @param allInstList list of all instances
   * @param assignments assignments of instances to centers
   * @param iterationCount the number of iteration
   * @return true if converged
   * @throws Exception is something goes wrong
   */
  protected boolean assignToCenters(KDTree tree,
                                    Instances centers,
                                    int[][] instOfCent,
                                    int[] allInstList,
                                    int[] assignments,
                                    int iterationCount) throws Exception {

    boolean converged = true;
    if (tree != null) {
      // using KDTree structure for assigning
      converged = assignToCenters(tree, centers, instOfCent,
                                  assignments, iterationCount);
    } else {
      converged = assignToCenters(centers, instOfCent, allInstList,
                                  assignments);
    }
    return converged;
  }

  /**
   * Assign instances to centers using KDtree.
* First part of conventionell K-Means, returns true if new assignment * is the same as the last one. * * @param kdtree KDTree on all instances * @param centers all the input centers * @param instOfCent the instances to each center * @param assignments assignments of instances to centers * @param iterationCount the number of iteration * @return true if converged * @throws Exception in case instances are not assigned to cluster */ protected boolean assignToCenters(KDTree kdtree, Instances centers, int[][] instOfCent, int[] assignments, int iterationCount) throws Exception { int numCent = centers.numInstances(); int numInst = m_Instances.numInstances(); int[] oldAssignments = new int[numInst]; // WARNING: assignments is "input/output-parameter" // should not be null if (assignments == null) { assignments = new int[numInst]; for (int i = 0; i < numInst; i++) { assignments[0] = -1; } } // WARNING: instOfCent is "input/output-parameter" // should not be null if (instOfCent == null) { instOfCent = new int [numCent][]; } // save old assignments for (int i = 0; i < assignments.length; i++) { oldAssignments[i] = assignments[i]; } // use tree to get new assignments kdtree.centerInstances(centers, assignments, Math.pow(.8, iterationCount)); boolean converged = true; // compare with previous assignment for (int i = 0; converged && (i < assignments.length); i++) { converged = (oldAssignments[i] == assignments[i]); if (assignments[i] == -1) throw new Exception("Instance " + i + " has not been assigned to cluster."); } if (!converged) { int[] numInstOfCent = new int[numCent]; for (int i = 0; i < numCent; i++) numInstOfCent[i] = 0; // count num of assignments per center for (int i = 0; i < numInst; i++) numInstOfCent[assignments[i]]++; // prepare instancelists per center for (int i = 0; i < numCent; i++){ instOfCent[i] = new int[numInstOfCent[i]]; } // write instance lists per center for (int i = 0; i < numCent; i++) { int index = -1; for (int j = 0; j < numInstOfCent[i]; j++) { 
index = nextAssignedOne(i, index, assignments); instOfCent[i][j] = index; } } } return converged; } /** * Assign instances to centers. * Part of conventionell K-Means, returns true if new assignment * is the same as the last one. * * @param centers all the input centers * @param instOfCent the instances to each center * @param allInstList list of all indexes * @param assignments assignments of instances to centers * @return true if converged * @throws Exception if something goes wrong */ protected boolean assignToCenters(Instances centers, int[][] instOfCent, int[] allInstList, int[] assignments) throws Exception { // todo: undecided situations boolean converged = true; // true if new assignment is the same // as the old one int numInst = allInstList.length; int numCent = centers.numInstances(); int[] numInstOfCent = new int [numCent]; for (int i = 0; i < numCent; i++) numInstOfCent[i] = 0; // WARNING: assignments is "input/output-parameter" // should not be null if (assignments == null) { assignments = new int[numInst]; for (int i = 0; i < numInst; i++) { assignments[i] = -1; } } // WARNING: instOfCent is "input/output-parameter" // should not be null if (instOfCent == null) { instOfCent = new int [numCent][]; } // set assignments for (int i = 0; i < numInst; i++) { Instance inst = m_Instances.instance(allInstList[i]); int newC = clusterProcessedInstance(inst, centers); if (converged && newC != assignments[i]) { converged = false; } numInstOfCent[newC]++; if (!converged) assignments[i] = newC; } // the following is only done // if assignments are not the same, because too much effort if (!converged) { PFD(D_FOLLOWSPLIT, "assignToCenters -> it has NOT converged"); for (int i = 0; i < numCent; i++) { instOfCent[i] = new int [numInstOfCent[i]]; } for (int i = 0; i < numCent; i++) { int index = -1; for (int j = 0; j < numInstOfCent[i]; j++) { index = nextAssignedOne(i, index, assignments); instOfCent[i][j] = allInstList[index]; } } } else PFD(D_FOLLOWSPLIT, 
"assignToCenters -> it has converged"); return converged; } /** * Searches along the assignment array for the next entry of the center * in question. * @param cent index of the center * @param lastIndex index to start searching * @param assignments assignments * @return index of the instance the center cent is assigned to */ protected int nextAssignedOne(int cent, int lastIndex, int[] assignments) { int len = assignments.length; int index = lastIndex + 1; while (index < len) { if (assignments[index] == cent) { return (index); } index++; } return (-1); } /** * Split centers in their region. Generates random vector of * length = variance and * adds and substractsx to cluster vector to get two new clusters. * * @param random random function * @param center the center that is split here * @param variance variance of the cluster * @param model data model valid * @return a pair of new centers * @throws Exception something in AlgVector goes wrong */ protected Instances splitCenter(Random random, Instance center, double variance, Instances model) throws Exception { m_NumSplits++; AlgVector r = null; Instances children = new Instances(model, 2); if (m_DebugVectorsFile.exists() && m_DebugVectorsFile.isFile()) { Instance nextVector = getNextDebugVectorsInstance(model); PFD(D_RANDOMVECTOR, "Random Vector from File " + nextVector); r = new AlgVector(nextVector); } else { // random vector of length = variance r = new AlgVector(model, random); } r.changeLength(Math.pow(variance, 0.5)); PFD(D_RANDOMVECTOR, "random vector *variance "+ r); // add random vector to center AlgVector c = new AlgVector(center); AlgVector c2 = (AlgVector) c.clone(); c = c.add(r); Instance newCenter = c.getAsInstance(model, random); children.add(newCenter); PFD(D_FOLLOWSPLIT, "first child "+ newCenter); // substract random vector to center c2 = c2.substract(r); newCenter = c2.getAsInstance(model, random); children.add(newCenter); PFD(D_FOLLOWSPLIT, "second child "+ newCenter); return children; } /** * 
   Split centers in their region.
   * (*Alternative version of splitCenter()*)
   *
   * Picks two (preferably distinct) existing instances of the region as
   * the new pair of centers.
   *
   * @param random the random number generator
   * @param instances of the region
   * @param model the model for the centers
   * (should be the same as that of instances)
   * @return a pair of new centers
   */
  protected Instances splitCenters(Random random,
                                   Instances instances,
                                   Instances model) {
    Instances children = new Instances(model, 2);
    // NOTE(review): Math.abs(Integer.MIN_VALUE) is still negative, so this
    // can (astronomically rarely) produce a negative index;
    // random.nextInt(bound) would be safer, but it consumes the random
    // stream differently, so behavior is deliberately kept unchanged.
    int instIndex = Math.abs(random.nextInt()) %
      instances.numInstances();
    children.add(instances.instance(instIndex));
    int instIndex2 = instIndex;
    int count = 0;
    // retry up to 10 times to draw a second, different instance
    while ((instIndex2 == instIndex) && count < 10) {
      count++;
      instIndex2 = Math.abs(random.nextInt()) %
        instances.numInstances();
    }
    children.add(instances.instance(instIndex2));

    return children;
  }

  /**
   * Generates new centers randomly. Used for starting centers.
   *
   * Side effect: sets m_NumClusters to numClusters.
   *
   * @param random0 random number generator
   * @param model data model of the instances
   * @param numClusters number of clusters
   * @return new centers
   */
  protected Instances makeCentersRandomly(Random random0,
                                          Instances model,
                                          int numClusters) {
    Instances clusterCenters = new Instances(model, numClusters);
    m_NumClusters = numClusters;
    // makes the new centers randomly (sampling with replacement from
    // the training instances; same Math.abs caveat as splitCenters)
    for (int i = 0; i < numClusters; i++) {
      int instIndex = Math.abs(random0.nextInt()) %
        m_Instances.numInstances();
      clusterCenters.add(m_Instances.instance(instIndex));
    }
    return clusterCenters;
  }

  /**
   * Returns the BIC-value for the given center and instances.
   * @param instList The indices of the instances that belong to the center
   * @param center the center.
   * @param mle maximum likelihood
   * @param model the data model
   * @return the BIC value
   */
  protected double calculateBIC(int[] instList, Instance center,
                                double mle, Instances model) {
    // Wrap the single center / instance list into the array form expected
    // by the general calculateBIC overload below.
    int[][] w1 = new int[1][instList.length];
    for (int i = 0; i < instList.length; i++) {
      w1[0][i] = instList[i];
    }
    double[] m = {mle};
    Instances w2 = new Instances(model, 1);
    w2.add(center);
    return calculateBIC(w1, w2, m);
  }

  /**
   * Calculates the BIC for the given set of centers and instances.
   * @param instOfCent The instances that belong to their respective centers
   * @param centers the centers
   * @param mle maximum likelihood
   * @return The BIC for the input.
   */
  protected double calculateBIC(int[][] instOfCent, Instances centers,
                                double[] mle) {
    double loglike = 0.0;
    int numInstTotal = 0;
    int numCenters = centers.numInstances();
    int numDimensions = centers.numAttributes();
    // free parameters of the model: cluster priors + means + variances
    int numParameters = (numCenters - 1) + //probabilities
      numCenters * numDimensions + //means
      numCenters; // variance params
    // sum the per-cluster log-likelihoods
    for (int i = 0; i < centers.numInstances(); i++) {
      loglike += logLikelihoodEstimate(instOfCent[i].length,
                                       centers.instance(i),
                                       mle[i],
                                       centers.numInstances() * 2);
      numInstTotal += instOfCent[i].length;
    }
    /* diff thats how we did it
       loglike -= ((centers.numAttributes() + 1.0)
                   * centers.numInstances() * 1) * Math.log(count);
    */
    loglike -= numInstTotal * Math.log(numInstTotal);
    //System.out.println ("numInstTotal " + numInstTotal +
    //                    "calculateBIC res " + loglike);
    // BIC penalty term: (p/2) * log(n)
    loglike -= (numParameters / 2.0) * Math.log(numInstTotal);
    //System.out.println ("numParam " +
    //                    + numParameters +
    //                    " calculateBIC res " + loglike);
    return loglike;
  }

  /**
   * Calculates the log-likelihood of the data for the given model, taken
   * at the maximum likelihood point.
   *
   * @param numInst number of instances that belong to the center
   * @param center the center
   * @param distortion distortion
   * @param numCent number of centers
   * @return the likelihood estimate
   */
  protected double logLikelihoodEstimate(int numInst,
                                         Instance center,
                                         double distortion,
                                         int numCent) {
    // R(n) num of instances of the center -> numInst
    // K num of centers -> not used
    //
    //todo take the diff comments away
    double loglike = 0;
    /* if is new */
    if (numInst > 1) {
      /* diff variance is new */
      //
      // distortion = Sum over instances x of the center(x-center)
      // different to paper; sum should be squared
      //
      // (Sum of distances to center) / R(n) - 1.0
      // different to paper; should be R(n)-K
      double variance = distortion / (numInst - 1.0);

      //
      // -R(n)/2 * log(pi*2)
      //
      double p1 = - (numInst / 2.0) * Math.log(Math.PI * 2.0);

      /* diff thats how we had it
         double p2 = -((ni * center.numAttributes()) / 2) * distortion; */
      //
      // -(R(n)*M)/2 * log(variance)
      //
      // NOTE(review): (numInst * center.numAttributes()) / 2 is integer
      // division, so the half is truncated when the product is odd —
      // matches the original x-means code, but confirm it is intended.
      double p2 = - (numInst * center.numAttributes()) / 2 *
        Math.log(variance);

      /* diff thats how we had it, the difference is a bug in x-means
         double p3 = - (numInst - numCent) / 2; */
      //
      // -(R(n)-1)/2
      //
      double p3 = - (numInst - 1.0) / 2.0;

      //
      // R(n)*log(R(n))
      //
      double p4 = numInst * Math.log(numInst);

      /* diff x-means doesn't have this part
         double p5 = - numInst * Math.log(numInstTotal); */

      /*
         loglike = -(ni / 2) * Math.log(Math.PI * 2)
         - (ni * center.numAttributes()) / 2.0) * logdistortion
         - (ni - k) / 2.0
         + ni * Math.log(ni)
         - ni * Math.log(r);
      */
      loglike = p1 + p2 + p3 + p4;
      // diff + p5;
      //the log(r) is something that can be reused.
      //as is the log(2 PI), these could provide extra speed up later on.
      //since distortion is so expensive to compute, I only do that once.
    }
    return loglike;
  }

  /**
   * Calculates the maximum likelihood estimate for the variance.
   * @param instOfCent indices of instances to each center
   * @param centers the centers
   * @return the list of distortions distortion.
*/ protected double[] distortion(int[][] instOfCent, Instances centers) { double[] distortion = new double[centers.numInstances()]; for (int i = 0; i < centers.numInstances(); i++) { distortion[i] = 0.0; for (int j = 0; j < instOfCent[i].length; j++) { distortion[i] += m_DistanceF.distance(m_Instances .instance(instOfCent[i][j]), centers.instance(i)); } } /* * diff not done in x-means res *= 1.0 / (count - centers.numInstances()); */ return distortion; } /** * Clusters an instance. * * @param instance * the instance to assign a cluster to. * @param centers * the centers to cluster the instance to. * @return a cluster index. */ protected int clusterProcessedInstance(Instance instance, Instances centers) { double minDist = Integer.MAX_VALUE; int bestCluster = 0; for (int i = 0; i < centers.numInstances(); i++) { double dist = m_DistanceF.distance(instance, centers.instance(i)); if (dist < minDist) { minDist = dist; bestCluster = i; } } ; return bestCluster; } /** * Clusters an instance that has been through the filters. * * @param instance * the instance to assign a cluster to * @return a cluster number */ protected int clusterProcessedInstance(Instance instance) { double minDist = Integer.MAX_VALUE; int bestCluster = 0; for (int i = 0; i < m_NumClusters; i++) { double dist = m_DistanceF .distance(instance, m_ClusterCenters.instance(i)); if (dist < minDist) { minDist = dist; bestCluster = i; } } return bestCluster; } /** * Classifies a given instance. * * @param instance the instance to be assigned to a cluster * @return the number of the assigned cluster as an integer * if the class is enumerated, otherwise the predicted value * @throws Exception if instance could not be classified * successfully */ public int clusterInstance(Instance instance) throws Exception { m_ReplaceMissingFilter.input(instance); Instance inst = m_ReplaceMissingFilter.output(); return clusterProcessedInstance(inst); } /** * Returns the number of clusters. 
* * @return the number of clusters generated for a training dataset. */ public int numberOfClusters() { return m_NumClusters; } /** * Returns an enumeration describing the available options. * @return an enumeration of all the available options **/ public Enumeration listOptions() { Vector result = new Vector(); result.addElement(new Option( "\tmaximum number of overall iterations\n" + "\t(default 1).", "I", 1, "-I <num>")); result.addElement(new Option( "\tmaximum number of iterations in the kMeans loop in\n" + "\tthe Improve-Parameter part \n" + "\t(default 1000).", "M", 1, "-M <num>")); result.addElement(new Option( "\tmaximum number of iterations in the kMeans loop\n" + "\tfor the splitted centroids in the Improve-Structure part \n" + "\t(default 1000).", "J", 1, "-J <num>")); result.addElement(new Option( "\tminimum number of clusters\n" + "\t(default 2).", "L", 1, "-L <num>")); result.addElement(new Option( "\tmaximum number of clusters\n" + "\t(default 4).", "H", 1, "-H <num>")); result.addElement(new Option( "\tdistance value for binary attributes\n" + "\t(default 1.0).", "B", 1, "-B <value>")); result.addElement(new Option( "\tUses the KDTree internally\n" + "\t(default no).", "use-kdtree", 0, "-use-kdtree")); result.addElement(new Option( "\tFull class name of KDTree class to use, followed\n" + "\tby scheme options.\n" + "\teg: \"weka.core.neighboursearch.kdtrees.KDTree -P\"\n" + "\t(default no KDTree class used).", "K", 1, "-K <KDTree class specification>")); result.addElement(new Option( "\tcutoff factor, takes the given percentage of the splitted \n" + "\tcentroids if none of the children win\n" + "\t(default 0.0).", "C", 1, "-C <value>")); result.addElement(new Option( "\tFull class name of Distance function class to use, followed\n" + "\tby scheme options.\n" + "\t(default weka.core.EuclideanDistance).", "D", 1, "-D <distance function class specification>")); result.addElement(new Option( "\tfile to read starting centers from (ARFF format).", "N", 1, 
"-N <file name>")); result.addElement(new Option( "\tfile to write centers to (ARFF format).", "O", 1, "-O <file name>")); result.addElement(new Option( "\tThe debug level.\n" + "\t(default 0)", "U", 1, "-U <int>")); result.addElement(new Option( "\tThe debug vectors file.", "Y", 1, "-Y <file name>")); Enumeration en = super.listOptions(); while (en.hasMoreElements()) result.addElement(en.nextElement()); return result.elements(); } /** * Returns the tip text for this property. * @return tip text for this property */ public String minNumClustersTipText() { return "set minimum number of clusters"; } /** * Sets the minimum number of clusters to generate. * * @param n the minimum number of clusters to generate */ public void setMinNumClusters(int n) { m_MinNumClusters = n; } /** * Gets the minimum number of clusters to generate. * @return the minimum number of clusters to generate */ public int getMinNumClusters() { return m_MinNumClusters; } /** * Returns the tip text for this property. * @return tip text for this property */ public String maxNumClustersTipText() { return "set maximum number of clusters"; } /** * Sets the maximum number of clusters to generate. * @param n the maximum number of clusters to generate */ public void setMaxNumClusters(int n) { if (n >= m_MinNumClusters) { m_MaxNumClusters = n; } } /** * Gets the maximum number of clusters to generate. * @return the maximum number of clusters to generate */ public int getMaxNumClusters() { return m_MaxNumClusters; } /** * Returns the tip text for this property. * @return tip text for this property */ public String maxIterationsTipText() { return "the maximum number of iterations to perform"; } /** * Sets the maximum number of iterations to perform. 
* @param i the number of iterations * @throws Exception if i is less than 1 */ public void setMaxIterations(int i) throws Exception { if (i < 0) throw new Exception("Only positive values for iteration number" + " allowed (Option I)."); m_MaxIterations = i; } /** * Gets the maximum number of iterations. * @return the number of iterations */ public int getMaxIterations() { return m_MaxIterations; } /** * Returns the tip text for this property. * @return tip text for this property */ public String maxKMeansTipText() { return "the maximum number of iterations to perform in KMeans"; } /** * Set the maximum number of iterations to perform in KMeans. * @param i the number of iterations */ public void setMaxKMeans(int i) { m_MaxKMeans = i; m_MaxKMeansForChildren = i; } /** * Gets the maximum number of iterations in KMeans. * @return the number of iterations */ public int getMaxKMeans() { return m_MaxKMeans; } /** * Returns the tip text for this property. * @return tip text for this property */ public String maxKMeansForChildrenTipText() { return "the maximum number of iterations KMeans that is performed on the child centers"; } /** * Sets the maximum number of iterations KMeans that is performed * on the child centers. * @param i the number of iterations */ public void setMaxKMeansForChildren(int i) { m_MaxKMeansForChildren = i; } /** * Gets the maximum number of iterations in KMeans. * @return the number of iterations */ public int getMaxKMeansForChildren() { return m_MaxKMeansForChildren; } /** * Returns the tip text for this property. * @return tip text for this property */ public String cutOffFactorTipText() { return "the cut-off factor to use"; } /** * Sets a new cutoff factor. * @param i the new cutoff factor */ public void setCutOffFactor(double i) { m_CutOffFactor = i; } /** * Gets the cutoff factor. * @return the cutoff factor */ public double getCutOffFactor() { return m_CutOffFactor; } /** * Returns the tip text for this property. 
* @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String binValueTipText() { return "Set the value that represents true in the new attributes."; } /** * Gets value that represents true in a new numeric attribute. * (False is always represented by 0.0.) * @return the value that represents true in a new numeric attribute */ public double getBinValue() { return m_BinValue; } /** * Sets the distance value between true and false of binary attributes. * and "same" and "different" of nominal attributes * @param value the distance */ public void setBinValue(double value) { m_BinValue = value; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String distanceFTipText() { return "The distance function to use."; } /** * gets the "binary" distance value. * @param distanceF the distance function with all options set */ public void setDistanceF(DistanceFunction distanceF) { m_DistanceF = distanceF; } /** * Gets the distance function. * @return the distance function */ public DistanceFunction getDistanceF() { return m_DistanceF; } /** * Gets the distance function specification string, which contains the * class name of the distance function class and any options to it. * * @return the distance function specification string */ protected String getDistanceFSpec() { DistanceFunction d = getDistanceF(); if (d instanceof OptionHandler) { return d.getClass().getName() + " " + Utils.joinOptions(((OptionHandler) d).getOptions()); } return d.getClass().getName(); } /** * Returns the tip text for this property. * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String debugVectorsFileTipText() { return "The file containing the debug vectors (only for debugging!)."; } /** * Sets the file that has the random vectors stored. * Only used for debugging reasons. 
* @param value the file to read the random vectors from */ public void setDebugVectorsFile(File value) { m_DebugVectorsFile = value; } /** * Gets the file name for a file that has the random vectors stored. * Only used for debugging purposes. * @return the file to read the vectors from */ public File getDebugVectorsFile() { return m_DebugVectorsFile; } /** * Initialises the debug vector input. * @throws Exception if there is error * opening the debug input file. */ public void initDebugVectorsInput() throws Exception { m_DebugVectorsInput = new BufferedReader(new FileReader(m_DebugVectorsFile)); m_DebugVectors = new Instances(m_DebugVectorsInput); m_DebugVectorsIndex = 0; } /** * Read an instance from debug vectors file. * @param model the data model for the instance. * @throws Exception if there are no debug vector * in m_DebugVectors. * @return the next debug vector. */ public Instance getNextDebugVectorsInstance(Instances model) throws Exception { if (m_DebugVectorsIndex >= m_DebugVectors.numInstances()) throw new Exception("no more prefabricated Vectors"); Instance nex = m_DebugVectors.instance(m_DebugVectorsIndex); nex.setDataset(model); m_DebugVectorsIndex++; return nex; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String inputCenterFileTipText() { return "The file to read the list of centers from."; } /** * Sets the file to read the list of centers from. * * @param value the file to read centers from */ public void setInputCenterFile(File value) { m_InputCenterFile = value; } /** * Gets the file to read the list of centers from. * * @return the file to read the centers from */ public File getInputCenterFile() { return m_InputCenterFile; } /** * Returns the tip text for this property. 
* * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String outputCenterFileTipText() { return "The file to write the list of centers to."; } /** * Sets file to write the list of centers to. * * @param value file to write centers to */ public void setOutputCenterFile(File value) { m_OutputCenterFile = value; } /** * Gets the file to write the list of centers to. * * @return filename of the file to write centers to */ public File getOutputCenterFile() { return m_OutputCenterFile; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String KDTreeTipText() { return "The KDTree to use."; } /** * Sets the KDTree class. * @param k a KDTree object with all options set */ public void setKDTree(KDTree k) { m_KDTree = k; } /** * Gets the KDTree class. * * @return the configured KDTree */ public KDTree getKDTree() { return m_KDTree; } /** * Returns the tip text for this property. * * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String useKDTreeTipText() { return "Whether to use the KDTree."; } /** * Sets whether to use the KDTree or not. * * @param value if true the KDTree is used */ public void setUseKDTree(boolean value) { m_UseKDTree = value; } /** * Gets whether the KDTree is used or not. * * @return true if KDTrees are used */ public boolean getUseKDTree() { return m_UseKDTree; } /** * Gets the KDTree specification string, which contains the class name of * the KDTree class and any options to the KDTree. * * @return the KDTree string. */ protected String getKDTreeSpec() { KDTree c = getKDTree(); if (c instanceof OptionHandler) { return c.getClass().getName() + " " + Utils.joinOptions(((OptionHandler)c).getOptions()); } return c.getClass().getName(); } /** * Returns the tip text for this property. 
* * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String debugLevelTipText() { return "The debug level to use."; } /** * Sets the debug level. * debug level = 0, means no output * @param d debuglevel */ public void setDebugLevel(int d) { m_DebugLevel = d; } /** * Gets the debug level. * @return debug level */ public int getDebugLevel() { return m_DebugLevel; } /** * Checks the instances. * No checks in this KDTree but it calls the check of the distance function. */ protected void checkInstances () { // m_DistanceF.checkInstances(); } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -I &lt;num&gt; * maximum number of overall iterations * (default 1).</pre> * * <pre> -M &lt;num&gt; * maximum number of iterations in the kMeans loop in * the Improve-Parameter part * (default 1000).</pre> * * <pre> -J &lt;num&gt; * maximum number of iterations in the kMeans loop * for the splitted centroids in the Improve-Structure part * (default 1000).</pre> * * <pre> -L &lt;num&gt; * minimum number of clusters * (default 2).</pre> * * <pre> -H &lt;num&gt; * maximum number of clusters * (default 4).</pre> * * <pre> -B &lt;value&gt; * distance value for binary attributes * (default 1.0).</pre> * * <pre> -use-kdtree * Uses the KDTree internally * (default no).</pre> * * <pre> -K &lt;KDTree class specification&gt; * Full class name of KDTree class to use, followed * by scheme options. * eg: "weka.core.neighboursearch.kdtrees.KDTree -P" * (default no KDTree class used).</pre> * * <pre> -C &lt;value&gt; * cutoff factor, takes the given percentage of the splitted * centroids if none of the children win * (default 0.0).</pre> * * <pre> -D &lt;distance function class specification&gt; * Full class name of Distance function class to use, followed * by scheme options. 
* (default weka.core.EuclideanDistance).</pre> * * <pre> -N &lt;file name&gt; * file to read starting centers from (ARFF format).</pre> * * <pre> -O &lt;file name&gt; * file to write centers to (ARFF format).</pre> * * <pre> -U &lt;int&gt; * The debug level. * (default 0)</pre> * * <pre> -Y &lt;file name&gt; * The debug vectors file.</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 10)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String optionString; String funcString; optionString = Utils.getOption('I', options); if (optionString.length() != 0) setMaxIterations(Integer.parseInt(optionString)); else setMaxIterations(1); optionString = Utils.getOption('M', options); if (optionString.length() != 0) setMaxKMeans(Integer.parseInt(optionString)); else setMaxKMeans(1000); optionString = Utils.getOption('J', options); if (optionString.length() != 0) setMaxKMeansForChildren(Integer.parseInt(optionString)); else setMaxKMeansForChildren(1000); optionString = Utils.getOption('L', options); if (optionString.length() != 0) setMinNumClusters(Integer.parseInt(optionString)); else setMinNumClusters(2); optionString = Utils.getOption('H', options); if (optionString.length() != 0) setMaxNumClusters(Integer.parseInt(optionString)); else setMaxNumClusters(4); optionString = Utils.getOption('B', options); if (optionString.length() != 0) setBinValue(Double.parseDouble(optionString)); else setBinValue(1.0); setUseKDTree(Utils.getFlag("use-kdtree", options)); if (getUseKDTree()) { funcString = Utils.getOption('K', options); if (funcString.length() != 0) { String[] funcSpec = Utils.splitOptions(funcString); if (funcSpec.length == 0) { throw new Exception("Invalid function specification string"); } String funcName = funcSpec[0]; funcSpec[0] = ""; setKDTree((KDTree) Utils.forName(KDTree.class, funcName, funcSpec)); } 
else { setKDTree(new KDTree()); } } else { setKDTree(new KDTree()); } optionString = Utils.getOption('C', options); if (optionString.length() != 0) setCutOffFactor(Double.parseDouble(optionString)); else setCutOffFactor(0.0); funcString = Utils.getOption('D', options); if (funcString.length() != 0) { String[] funcSpec = Utils.splitOptions(funcString); if (funcSpec.length == 0) { throw new Exception("Invalid function specification string"); } String funcName = funcSpec[0]; funcSpec[0] = ""; setDistanceF((DistanceFunction) Utils.forName(DistanceFunction.class, funcName, funcSpec)); } else { setDistanceF(new EuclideanDistance()); } optionString = Utils.getOption('N', options); if (optionString.length() != 0) { setInputCenterFile(new File(optionString)); m_CenterInput = new BufferedReader(new FileReader(optionString)); } else { setInputCenterFile(new File(System.getProperty("user.dir"))); m_CenterInput = null; } optionString = Utils.getOption('O', options); if (optionString.length() != 0) { setOutputCenterFile(new File(optionString)); m_CenterOutput = new PrintWriter(new FileOutputStream(optionString)); } else { setOutputCenterFile(new File(System.getProperty("user.dir"))); m_CenterOutput = null; } optionString = Utils.getOption('U', options); int debugLevel = 0; if (optionString.length() != 0) { try { debugLevel = Integer.parseInt(optionString); } catch (NumberFormatException e) { throw new Exception(optionString + "is an illegal value for option -U"); } } setDebugLevel(debugLevel); optionString = Utils.getOption('Y', options); if (optionString.length() != 0) { setDebugVectorsFile(new File(optionString)); } else { setDebugVectorsFile(new File(System.getProperty("user.dir"))); m_DebugVectorsInput = null; m_DebugVectors = null; } super.setOptions(options); } /** * Gets the current settings of SimpleKMeans. 
* @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { int i; Vector result; String[] options; result = new Vector(); result.add("-I"); result.add("" + getMaxIterations()); result.add("-M"); result.add("" + getMaxKMeans()); result.add("-J"); result.add("" + getMaxKMeansForChildren()); result.add("-L"); result.add("" + getMinNumClusters()); result.add("-H"); result.add("" + getMaxNumClusters()); result.add("-B"); result.add("" + getBinValue()); if (getUseKDTree()) { result.add("-use-kdtree"); result.add("-K"); result.add("" + getKDTreeSpec()); } result.add("-C"); result.add("" + getCutOffFactor()); if (getDistanceF() != null) { result.add("-D"); result.add("" + getDistanceFSpec()); } if (getInputCenterFile().exists() && getInputCenterFile().isFile()) { result.add("-N"); result.add("" + getInputCenterFile()); } if (getOutputCenterFile().exists() && getOutputCenterFile().isFile()) { result.add("-O"); result.add("" + getOutputCenterFile()); } int dL = getDebugLevel(); if (dL > 0) { result.add("-U"); result.add("" + getDebugLevel()); } if (getDebugVectorsFile().exists() && getDebugVectorsFile().isFile()) { result.add("-Y"); result.add("" + getDebugVectorsFile()); } options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); return (String[]) result.toArray(new String[result.size()]); } /** * Return a string describing this clusterer. 
* @return a description of the clusterer as a string */ public String toString() { StringBuffer temp = new StringBuffer(); temp.append("\nXMeans\n======\n"); temp.append("Requested iterations : " + m_MaxIterations + "\n"); temp.append("Iterations performed : " + m_IterationCount+ "\n"); if (m_KMeansStopped > 0) { temp.append("kMeans did not converge\n"); temp.append(" but was stopped by max-loops " + m_KMeansStopped + " times (max kMeans-iter)\n"); } temp.append("Splits prepared : " + m_NumSplits + "\n"); temp.append("Splits performed : " + m_NumSplitsDone + "\n"); temp.append("Cutoff factor : " + m_CutOffFactor + "\n"); double perc; if (m_NumSplitsDone > 0) perc = (((double)m_NumSplitsStillDone)/((double) m_NumSplitsDone)) * 100.0; else perc = 0.0; temp.append("Percentage of splits accepted \n" + "by cutoff factor : " + Utils.doubleToString(perc,2) + " %\n"); temp.append("------\n"); temp.append("Cutoff factor : " + m_CutOffFactor + "\n"); temp.append("------\n"); temp.append("\nCluster centers : " + m_NumClusters + " centers\n"); for (int i = 0; i < m_NumClusters; i++) { temp.append("\nCluster "+i+"\n "); for (int j = 0; j < m_ClusterCenters.numAttributes(); j++) { if (m_ClusterCenters.attribute(j).isNominal()) { temp.append(" "+m_ClusterCenters.attribute(j). value((int)m_ClusterCenters.instance(i).value(j))); } else { temp.append(" "+m_ClusterCenters.instance(i).value(j)); } } } if (m_Mle != null) temp.append("\n\nDistortion: " + Utils.doubleToString(Utils.sum(m_Mle),6) + "\n"); temp.append("BIC-Value : " + Utils.doubleToString(m_Bic,6) + "\n"); return temp.toString(); } /** * Print centers for debug. * @param debugLevel level that gives according messages */ protected void PrCentersFD(int debugLevel) { if (debugLevel == m_DebugLevel) { for (int i = 0; i < m_ClusterCenters.numInstances(); i++) { System.out.println(m_ClusterCenters.instance(i)); } } } /** * Tests on debug status. 
* @param debugLevel level that gives according messages * @return true if debug level is set */ protected boolean TFD(int debugLevel) { return (debugLevel == m_DebugLevel); } /** * Does debug printouts. * @param debugLevel level that gives according messages * @param output string that is printed */ protected void PFD(int debugLevel, String output) { if (debugLevel == m_DebugLevel) System.out.println(output); } /** * Does debug printouts. * @param output string that is printed */ protected void PFD_CURR(String output) { if (m_CurrDebugFlag) System.out.println(output); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5538 $"); } /** * Main method for testing this class. * @param argv should contain options */ public static void main(String[] argv) { runClusterer(new XMeans(), argv); } }
71,140
28.506844
263
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/sIB.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * sIB.java * Copyright (C) 2008 University of Waikato, Hamilton, New Zealand * */ package weka.clusterers; import weka.core.Capabilities; import weka.core.Instance; import weka.core.Instances; import weka.core.Option; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.TechnicalInformation; import weka.core.TechnicalInformationHandler; import weka.core.Utils; import weka.core.Capabilities.Capability; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import weka.core.matrix.Matrix; import weka.filters.unsupervised.attribute.ReplaceMissingValues; import java.io.Serializable; import java.util.ArrayList; import java.util.Enumeration; import java.util.Random; import java.util.Vector; import weka.core.DenseInstance; /** <!-- globalinfo-start --> * Cluster data using the sequential information bottleneck algorithm.<br/> * <br/> * Note: only hard clustering scheme is supported. sIB assign for each instance the cluster that have the minimum cost/distance to the instance. The trade-off beta is set to infinite so 1/beta is zero.<br/> * <br/> * For more information, see:<br/> * <br/> * Noam Slonim, Nir Friedman, Naftali Tishby: Unsupervised document classification using sequential information maximization. 
In: Proceedings of the 25th International ACM SIGIR Conference on Research and Development in Information Retrieval, 129-136, 2002. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;inproceedings{Slonim2002, * author = {Noam Slonim and Nir Friedman and Naftali Tishby}, * booktitle = {Proceedings of the 25th International ACM SIGIR Conference on Research and Development in Information Retrieval}, * pages = {129-136}, * title = {Unsupervised document classification using sequential information maximization}, * year = {2002} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -I &lt;num&gt; * maximum number of iterations * (default 100).</pre> * * <pre> -M &lt;num&gt; * minimum number of changes in a single iteration * (default 0).</pre> * * <pre> -N &lt;num&gt; * number of clusters. * (default 2).</pre> * * <pre> -R &lt;num&gt; * number of restarts. * (default 5).</pre> * * <pre> -U * set not to normalize the data * (default true).</pre> * * <pre> -V * set to output debug info * (default false).</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * <!-- options-end --> * * @author Noam Slonim * @author <a href="mailto:lh92@cs.waikato.ac.nz">Anna Huang</a> * @version $Revision: 5538 $ */ public class sIB extends RandomizableClusterer implements TechnicalInformationHandler { /** for serialization. 
*/ private static final long serialVersionUID = -8652125897352654213L; /** * Inner class handling status of the input data * * @see Serializable */ private class Input implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = -2464453171263384037L; /** Prior probability of each instance */ private double[] Px; /** Prior probability of each attribute */ private double[] Py; /** Joint distribution of attribute and instance */ private Matrix Pyx; /** P[y|x] */ private Matrix Py_x; /** Mutual information between the instances and the attributes */ private double Ixy; /** Entropy of the attributes */ private double Hy; /** Entropy of the instances */ private double Hx; /** Sum values of the dataset */ private double sumVals; /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5538 $"); } } /** * Internal class handling the whole partition * * @see Serializable */ private class Partition implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = 4957194978951259946L; /** Cluster assignment for each instance */ private int[] Pt_x; /** Prior probability of each cluster */ private double[] Pt; /** sIB equation score, to evaluate the quality of the partition */ private double L; /** Number of changes during the generation of this partition */ private int counter; /** Attribute probablities for each cluster */ private Matrix Py_t; /** * Create a new empty <code>Partition</code> instance. 
*/ public Partition() { Pt_x = new int[m_numInstances]; for (int i = 0; i < m_numInstances; i++) { Pt_x[i] = -1; } Pt = new double[m_numCluster]; Py_t = new Matrix(m_numAttributes, m_numCluster); counter = 0; } /** * Find all the instances that have been assigned to cluster i * @param i index of the cluster * @return an arraylist of the instance ids that have been assigned to cluster i */ private ArrayList<Integer> find(int i) { ArrayList<Integer> indices = new ArrayList<Integer>(); for (int x = 0; x < Pt_x.length; x++) { if (Pt_x[x] == i) { indices.add(x); } } return indices; } /** * Find the size of the cluster i * @param i index of the cluster * @return the size of cluster i */ private int size(int i) { int count = 0; for (int x = 0; x < Pt_x.length; x++) { if (Pt_x[x] == i) { count++; } } return count; } /** * Copy the current partition into T * @param T the target partition object */ private void copy(Partition T) { if (T == null) { T = new Partition(); } System.arraycopy(Pt_x, 0, T.Pt_x, 0, Pt_x.length); System.arraycopy(Pt, 0, T.Pt, 0, Pt.length); T.L = L; T.counter = counter; double[][] mArray = Py_t.getArray(); double[][] tgtArray = T.Py_t.getArray(); for (int i = 0; i < mArray.length; i++) { System.arraycopy(mArray[i], 0, tgtArray[i], 0, mArray[0].length); } } /** * Output the current partition * @param insts * @return a string that describes the partition */ public String toString() { StringBuffer text = new StringBuffer(); text.append("score (L) : " + Utils.doubleToString(L, 4) + "\n"); text.append("number of changes : " + counter +"\n"); for (int i = 0; i < m_numCluster; i++) { text.append("\nCluster "+i+"\n"); text.append("size : "+size(i)+"\n"); text.append("prior prob : "+Utils.doubleToString(Pt[i], 4)+"\n"); } return text.toString(); } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5538 $"); } } /** Training data */ private Instances m_data; /** Number of clusters */ private int m_numCluster = 2; /** Number of restarts */ private int m_numRestarts = 5; /** Verbose? */ private boolean m_verbose = false; /** Uniform prior probability of the documents */ private boolean m_uniformPrior = true; /** Max number of iterations during each restart */ private int m_maxLoop = 100; /** Minimum number of changes */ private int m_minChange = 0; /** Globally replace missing values */ private ReplaceMissingValues m_replaceMissing; /** Number of instances */ private int m_numInstances; /** Number of attributes */ private int m_numAttributes; /** Randomly generate initial partition */ private Random random; /** Holds the best partition built */ private Partition bestT; /** Holds the statistics about the input dataset */ private Input input; /** * Generates a clusterer. * * @param data the training instances * @throws Exception if something goes wrong */ public void buildClusterer(Instances data) throws Exception { // can clusterer handle the data ? 
getCapabilities().testWithFail(data); m_replaceMissing = new ReplaceMissingValues(); Instances instances = new Instances(data); instances.setClassIndex(-1); m_replaceMissing.setInputFormat(instances); data = weka.filters.Filter.useFilter(instances, m_replaceMissing); instances = null; // initialize all fields that are not being set via options m_data = data; m_numInstances = m_data.numInstances(); m_numAttributes = m_data.numAttributes(); random = new Random(getSeed()); // initialize the statistics of the input training data input = sIB_ProcessInput(); // object to hold the best partition bestT = new Partition(); // the real clustering double bestL = Double.NEGATIVE_INFINITY; for (int k = 0; k < m_numRestarts; k++) { if(m_verbose) { System.out.format("restart number %s...\n", k); } // initialize the partition and optimize it Partition tmpT = sIB_InitT(input); tmpT = sIB_OptimizeT(tmpT, input); // if a better partition is found, save it if (tmpT.L > bestL) { tmpT.copy(bestT); bestL = bestT.L; } if(m_verbose) { System.out.println("\nPartition status : "); System.out.println("------------------"); System.out.println(tmpT.toString()+"\n"); } } if(m_verbose){ System.out.println("\nBest Partition"); System.out.println("==============="); System.out.println(bestT.toString()); } // save memory m_data = new Instances(m_data, 0); } /** * Cluster a given instance, this is the method defined in Clusterer * interface do nothing but just return the cluster assigned to it */ public int clusterInstance(Instance instance) throws Exception { double prior = (double) 1 / input.sumVals; double[] distances = new double[m_numCluster]; for(int i = 0; i < m_numCluster; i++){ double Pnew = bestT.Pt[i] + prior; double pi1 = prior / Pnew; double pi2 = bestT.Pt[i] / Pnew; distances[i] = Pnew * JS(instance, i, pi1, pi2); } return Utils.minIndex(distances); } /** * Process the input and compute the statistics of the training data * @return an Input object which holds the statistics about the 
training data */ private Input sIB_ProcessInput() { double valSum = 0.0; for (int i = 0; i < m_numInstances; i++) { valSum = 0.0; for (int v = 0; v < m_data.instance(i).numValues(); v++) { valSum += m_data.instance(i).valueSparse(v); } if (valSum <= 0) { if(m_verbose){ System.out.format("Instance %s sum of value = %s <= 0, removed.\n", i, valSum); } m_data.delete(i); m_numInstances--; } } // get the term-document matrix Input input = new Input(); input.Py_x = getTransposedNormedMatrix(m_data); if (m_uniformPrior) { input.Pyx = input.Py_x.copy(); normalizePrior(m_data); } else { input.Pyx = getTransposedMatrix(m_data); } input.sumVals = getTotalSum(m_data); input.Pyx.timesEquals((double) 1 / input.sumVals); // prior probability of documents, ie. sum the columns from the Pyx matrix input.Px = new double[m_numInstances]; for (int i = 0; i < m_numInstances; i++) { for (int j = 0; j < m_numAttributes; j++) { input.Px[i] += input.Pyx.get(j, i); } } // prior probability of terms, ie. sum the rows from the Pyx matrix input.Py = new double[m_numAttributes]; for (int i = 0; i < input.Pyx.getRowDimension(); i++) { for (int j = 0; j < input.Pyx.getColumnDimension(); j++) { input.Py[i] += input.Pyx.get(i, j); } } MI(input.Pyx, input); return input; } /** * Initialize the partition * @param input object holding the statistics of the training data * @return the initialized partition */ private Partition sIB_InitT(Input input) { Partition T = new Partition(); int avgSize = (int) Math.ceil((double) m_numInstances / m_numCluster); ArrayList<Integer> permInstsIdx = new ArrayList<Integer>(); ArrayList<Integer> unassigned = new ArrayList<Integer>(); for (int i = 0; i < m_numInstances; i++) { unassigned.add(i); } while (unassigned.size() != 0) { int t = random.nextInt(unassigned.size()); permInstsIdx.add(unassigned.get(t)); unassigned.remove(t); } for (int i = 0; i < m_numCluster; i++) { int r2 = avgSize > permInstsIdx.size() ? 
permInstsIdx.size() : avgSize; for (int j = 0; j < r2; j++) { T.Pt_x[permInstsIdx.get(j)] = i; } for (int j = 0; j < r2; j++) { permInstsIdx.remove(0); } } // initialize the prior prob of each cluster, and the probability // for each attribute within the cluster for (int i = 0; i < m_numCluster; i++) { ArrayList<Integer> indices = T.find(i); for (int j = 0; j < indices.size(); j++) { T.Pt[i] += input.Px[indices.get(j)]; } double[][] mArray = input.Pyx.getArray(); for (int j = 0; j < m_numAttributes; j++) { double sum = 0.0; for (int k = 0; k < indices.size(); k++) { sum += mArray[j][indices.get(k)]; } sum /= T.Pt[i]; T.Py_t.set(j, i, sum); } } if(m_verbose) { System.out.println("Initializing..."); } return T; } /** * Optimize the partition * @param tmpT partition to be optimized * @param input object describing the statistics of the training dataset * @return the optimized partition */ private Partition sIB_OptimizeT(Partition tmpT, Input input) { boolean done = false; int change = 0, loopCounter = 0; if(m_verbose) { System.out.println("Optimizing..."); System.out.println("-------------"); } while (!done) { change = 0; for (int i = 0; i < m_numInstances; i++) { int old_t = tmpT.Pt_x[i]; // If the current cluster only has one instance left, leave it. 
if (tmpT.size(old_t) == 1) { if(m_verbose){ System.out.format("cluster %s has only 1 doc remain\n", old_t); } continue; } // draw the instance out from its previous cluster reduce_x(i, old_t, tmpT, input); // re-cluster the instance int new_t = clusterInstance(i, input, tmpT); if (new_t != old_t) { change++; updateAssignment(i, new_t, tmpT, input.Px[i], input.Py_x); } } tmpT.counter += change; if(m_verbose){ System.out.format("iteration %s , changes : %s\n", loopCounter, change); } done = checkConvergence(change, loopCounter); loopCounter++; } // compute the sIB score tmpT.L = sIB_local_MI(tmpT.Py_t, tmpT.Pt); if(m_verbose){ System.out.format("score (L) : %s \n", Utils.doubleToString(tmpT.L, 4)); } return tmpT; } /** * Draw a instance out from a cluster. * @param instIdx index of the instance to be drawn out * @param t index of the cluster which the instance previously belong to * @param T the current working partition * @param input the input statistics */ private void reduce_x(int instIdx, int t, Partition T, Input input) { // Update the prior probability of the cluster ArrayList<Integer> indices = T.find(t); double sum = 0.0; for (int i = 0; i < indices.size(); i++) { if (indices.get(i) == instIdx) continue; sum += input.Px[indices.get(i)]; } T.Pt[t] = sum; if (T.Pt[t] < 0) { System.out.format("Warning: probability < 0 (%s)\n", T.Pt[t]); T.Pt[t] = 0; } // Update prob of each attribute in the cluster double[][] mArray = input.Pyx.getArray(); for (int i = 0; i < m_numAttributes; i++) { sum = 0.0; for (int j = 0; j < indices.size(); j++) { if (indices.get(j) == instIdx) continue; sum += mArray[i][indices.get(j)]; } T.Py_t.set(i, t, sum / T.Pt[t]); } } /** * Put an instance into a new cluster and update. 
* @param instIdx instance to be updated * @param newt index of the new cluster this instance has been assigned to * @param T the current working partition * @param Px an array of prior probabilities of the instances */ private void updateAssignment(int instIdx, int newt, Partition T, double Px, Matrix Py_x) { T.Pt_x[instIdx] = newt; // update probability of attributes in the cluster double mass = Px + T.Pt[newt]; double pi1 = Px / mass; double pi2 = T.Pt[newt] / mass; for (int i = 0; i < m_numAttributes; i++) { T.Py_t.set(i, newt, pi1 * Py_x.get(i, instIdx) + pi2 * T.Py_t.get(i, newt)); } T.Pt[newt] = mass; } /** * Check whether the current iteration is converged * @param change number of changes in current iteration * @param loops number of iterations done * @return true if the iteration is converged, false otherwise */ private boolean checkConvergence(int change, int loops) { if (change <= m_minChange || loops >= m_maxLoop) { if(m_verbose){ System.out.format("\nsIB converged after %s iterations with %s changes\n", loops, change); } return true; } return false; } /** * Cluster an instance into the nearest cluster. 
* @param instIdx Index of the instance to be clustered * @param input Object which describe the statistics of the training dataset * @param T Partition * @return index of the cluster that has the minimum distance to the instance */ private int clusterInstance(int instIdx, Input input, Partition T) { double[] distances = new double[m_numCluster]; for (int i = 0; i < m_numCluster; i++) { double Pnew = input.Px[instIdx] + T.Pt[i]; double pi1 = input.Px[instIdx] / Pnew; double pi2 = T.Pt[i] / Pnew; distances[i] = Pnew * JS(instIdx, input, T, i, pi1, pi2); } return Utils.minIndex(distances); } /** * Compute the JS divergence between an instance and a cluster, used for training data * @param instIdx index of the instance * @param input statistics of the input data * @param T the whole partition * @param t index of the cluster * @param pi1 * @param pi2 * @return the JS divergence */ private double JS(int instIdx, Input input, Partition T, int t, double pi1, double pi2) { if (Math.min(pi1, pi2) <= 0) { System.out.format("Warning: zero or negative weights in JS calculation! (pi1 %s, pi2 %s)\n", pi1, pi2); return 0; } Instance inst = m_data.instance(instIdx); double kl1 = 0.0, kl2 = 0.0, tmp = 0.0; for (int i = 0; i < inst.numValues(); i++) { tmp = input.Py_x.get(inst.index(i), instIdx); if(tmp != 0) { kl1 += tmp * Math.log(tmp / (tmp * pi1 + pi2 * T.Py_t.get(inst.index(i), t))); } } for (int i = 0; i < m_numAttributes; i++) { if ((tmp = T.Py_t.get(i, t)) != 0) { kl2 += tmp * Math.log(tmp / (input.Py_x.get(i, instIdx) * pi1 + pi2 * tmp)); } } return pi1 * kl1 + pi2 * kl2; } /** * Compute the JS divergence between an instance and a cluster, used for test data * @param inst instance to be clustered * @param t index of the cluster * @param pi1 * @param pi2 * @return the JS divergence */ private double JS(Instance inst, int t, double pi1, double pi2) { if (Math.min(pi1, pi2) <= 0) { System.out.format("Warning: zero or negative weights in JS calculation! 
(pi1 %s, pi2 %s)\n", pi1, pi2); return 0; } double sum = Utils.sum(inst.toDoubleArray()); double kl1 = 0.0, kl2 = 0.0, tmp = 0.0; for (int i = 0; i < inst.numValues(); i++) { tmp = inst.valueSparse(i) / sum; if(tmp != 0) { kl1 += tmp * Math.log(tmp / (tmp * pi1 + pi2 * bestT.Py_t.get(inst.index(i), t))); } } for (int i = 0; i < m_numAttributes; i++) { if ((tmp = bestT.Py_t.get(i, t)) != 0) { kl2 += tmp * Math.log(tmp / (inst.value(i) * pi1 / sum + pi2 * tmp)); } } return pi1 * kl1 + pi2 * kl2; } /** * Compute the sIB score * @param m a term-cluster matrix, with m[i, j] is the probability of term i given cluster j * @param Pt an array of cluster prior probabilities * @return the sIB score which indicates the quality of the partition */ private double sIB_local_MI(Matrix m, double[] Pt) { double Hy = 0.0, Ht = 0.0; for (int i = 0; i < Pt.length; i++) { Ht += Pt[i] * Math.log(Pt[i]); } Ht = -Ht; for (int i = 0; i < m_numAttributes; i++) { double Py = 0.0; for (int j = 0; j < m_numCluster; j++) { Py += m.get(i, j) * Pt[j]; } if(Py == 0) continue; Hy += Py * Math.log(Py); } Hy = -Hy; double Hyt = 0.0, tmp = 0.0; for (int i = 0; i < m.getRowDimension(); i++) { for (int j = 0; j < m.getColumnDimension(); j++) { if ((tmp = m.get(i, j)) == 0 || Pt[j] == 0) { continue; } tmp *= Pt[j]; Hyt += tmp * Math.log(tmp); } } return Hy + Ht + Hyt; } /** * Get the sum of value of the dataset * @param data set of instances to handle * @return sum of all the attribute values for all the instances in the dataset */ private double getTotalSum(Instances data) { double sum = 0.0; for (int i = 0; i < data.numInstances(); i++) { for (int v = 0; v < data.instance(i).numValues(); v++) { sum += data.instance(i).valueSparse(v); } } return sum; } /** * Transpose the document-term matrix to term-document matrix * @param data instances with document-term info * @return a term-document matrix transposed from the input dataset */ private Matrix getTransposedMatrix(Instances data) { double[][] temp = 
new double[data.numAttributes()][data.numInstances()]; for (int i = 0; i < data.numInstances(); i++) { Instance inst = data.instance(i); for (int v = 0; v < inst.numValues(); v++) { temp[inst.index(v)][i] = inst.valueSparse(v); } } Matrix My_x = new Matrix(temp); return My_x; } /** * Normalize the document vectors * @param data instances to be normalized */ private void normalizePrior(Instances data) { for (int i = 0; i < data.numInstances(); i++) { normalizeInstance(data.instance(i)); } } /** * Normalize the instance * @param inst instance to be normalized * @return a new Instance with normalized values */ private Instance normalizeInstance(Instance inst) { double[] vals = inst.toDoubleArray(); double sum = Utils.sum(vals); for(int i = 0; i < vals.length; i++) { vals[i] /= sum; } return new DenseInstance(inst.weight(), vals); } private Matrix getTransposedNormedMatrix(Instances data) { Matrix matrix = new Matrix(data.numAttributes(), data.numInstances()); for(int i = 0; i < data.numInstances(); i++){ double[] vals = data.instance(i).toDoubleArray(); double sum = Utils.sum(vals); for (int v = 0; v < vals.length; v++) { vals[v] /= sum; matrix.set(v, i, vals[v]); } } return matrix; } /** * Compute the MI between instances and attributes * @param m the term-document matrix * @param input object that describes the statistics about the training data */ private void MI(Matrix m, Input input){ int minDimSize = m.getColumnDimension() < m.getRowDimension() ? 
m.getColumnDimension() : m.getRowDimension(); if(minDimSize < 2){ System.err.println("Warning : This is not a JOINT distribution"); input.Hx = Entropy (m); input.Hy = 0; input.Ixy = 0; return; } input.Hx = Entropy(input.Px); input.Hy = Entropy(input.Py); double entropy = input.Hx + input.Hy; for (int i=0; i < m_numInstances; i++) { Instance inst = m_data.instance(i); for (int v = 0; v < inst.numValues(); v++) { double tmp = m.get(inst.index(v), i); if(tmp <= 0) continue; entropy += tmp * Math.log(tmp); } } input.Ixy = entropy; if(m_verbose) { System.out.println("Ixy = " + input.Ixy); } } /** * Compute the entropy score based on an array of probabilities * @param probs array of non-negative and normalized probabilities * @return the entropy value */ private double Entropy(double[] probs){ for (int i = 0; i < probs.length; i++){ if (probs[i] <= 0) { if(m_verbose) { System.out.println("Warning: Negative probability."); } return Double.NaN; } } // could be unormalized, when normalization is not specified if(Math.abs(Utils.sum(probs)-1) >= 1e-6) { if(m_verbose) { System.out.println("Warning: Not normalized."); } return Double.NaN; } double mi = 0.0; for (int i = 0; i < probs.length; i++) { mi += probs[i] * Math.log(probs[i]); } mi = -mi; return mi; } /** * Compute the entropy score based on a matrix * @param p a matrix with non-negative and normalized probabilities * @return the entropy value */ private double Entropy(Matrix p) { double mi = 0; for (int i = 0; i < p.getRowDimension(); i++) { for (int j = 0; j < p.getColumnDimension(); j++) { if(p.get(i, j) == 0){ continue; } mi += p.get(i, j) + Math.log(p.get(i, j)); } } mi = -mi; return mi; } /** * Parses a given list of options. <p/> * <!-- options-start --> * Valid options are: <p/> * * <pre> -I &lt;num&gt; * maximum number of iterations * (default 100).</pre> * * <pre> -M &lt;num&gt; * minimum number of changes in a single iteration * (default 0).</pre> * * <pre> -N &lt;num&gt; * number of clusters. 
* (default 2).</pre> * * <pre> -R &lt;num&gt; * number of restarts. * (default 5).</pre> * * <pre> -U * set not to normalize the data * (default true).</pre> * * <pre> -V * set to output debug info * (default false).</pre> * * <pre> -S &lt;num&gt; * Random number seed. * (default 1)</pre> * <!-- options-end --> * * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String optionString = Utils.getOption('I', options); if (optionString.length() != 0) { setMaxIterations(Integer.parseInt(optionString)); } optionString = Utils.getOption('M', options); if (optionString.length() != 0) { setMinChange((new Integer(optionString)).intValue()); } optionString = Utils.getOption('N', options); if (optionString.length() != 0) { setNumClusters(Integer.parseInt(optionString)); } optionString = Utils.getOption('R', options); if (optionString.length() != 0) { setNumRestarts((new Integer(optionString)).intValue()); } setNotUnifyNorm(Utils.getFlag('U', options)); setDebug(Utils.getFlag('V', options)); super.setOptions(options); } /** * Returns an enumeration describing the available options. * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector<Option> result = new Vector<Option>(); result.addElement(new Option("\tmaximum number of iterations\n" + "\t(default 100).", "I", 1, "-I <num>")); result.addElement(new Option( "\tminimum number of changes in a single iteration\n" + "\t(default 0).", "M", 1, "-M <num>")); result.addElement(new Option("\tnumber of clusters.\n" + "\t(default 2).", "N", 1, "-N <num>")); result.addElement(new Option("\tnumber of restarts.\n" + "\t(default 5).", "R", 1, "-R <num>")); result.addElement(new Option("\tset not to normalize the data\n" + "\t(default true).", "U", 0, "-U")); result.addElement(new Option("\tset to output debug info\n" + "\t(default false).", "V", 0, "-V")); Enumeration en = super.listOptions(); while (en.hasMoreElements()) result.addElement((Option) en.nextElement()); return result.elements(); } /** * Gets the current settings. * @return an array of strings suitable for passing to setOptions() */ public String[] getOptions() { Vector<String> result; result = new Vector<String>(); result.add("-I"); result.add("" + getMaxIterations()); result.add("-M"); result.add("" + getMinChange()); result.add("-N"); result.add("" + getNumClusters()); result.add("-R"); result.add("" + getNumRestarts()); if(getNotUnifyNorm()) { result.add("-U"); } if(getDebug()) { result.add("-V"); } String[] options = super.getOptions(); for (int i = 0; i < options.length; i++){ result.add(options[i]); } return result.toArray(new String[result.size()]); } /** * Returns the tip text for this property * @return tip text for this property suitable for * displaying in the explorer/experimenter gui */ public String debugTipText() { return "If set to true, clusterer may output additional info to " + "the console."; } /** * Set debug mode - verbose output * @param v true for verbose output */ public void setDebug (boolean v) { m_verbose = v; } /** * Get debug mode * @return true if debug mode is set */ public boolean getDebug () { return m_verbose; } /** 
* Returns the tip text for this property. * @return tip text for this property */ public String maxIterationsTipText() { return "set maximum number of iterations (default 100)"; } /** * Set the max number of iterations * @param i max number of iterations */ public void setMaxIterations(int i) { m_maxLoop = i; } /** * Get the max number of iterations * @return max number of iterations */ public int getMaxIterations() { return m_maxLoop; } /** * Returns the tip text for this property. * @return tip text for this property */ public String minChangeTipText() { return "set minimum number of changes (default 0)"; } /** * set the minimum number of changes * @param m the minimum number of changes */ public void setMinChange(int m) { m_minChange = m; } /** * get the minimum number of changes * @return the minimum number of changes */ public int getMinChange() { return m_minChange; } /** * Returns the tip text for this property. * @return tip text for this property */ public String numClustersTipText() { return "set number of clusters (default 2)"; } /** * Set the number of clusters * @param n number of clusters */ public void setNumClusters(int n) { m_numCluster = n; } /** * Get the number of clusters * @return the number of clusters */ public int getNumClusters() { return m_numCluster; } /** * Get the number of clusters * @return the number of clusters */ public int numberOfClusters() { return m_numCluster; } /** * Returns the tip text for this property. * @return tip text for this property */ public String numRestartsTipText() { return "set number of restarts (default 5)"; } /** * Set the number of restarts * @param i number of restarts */ public void setNumRestarts(int i) { m_numRestarts = i; } /** * Get the number of restarts * @return number of restarts */ public int getNumRestarts(){ return m_numRestarts; } /** * Returns the tip text for this property. 
* @return tip text for this property */ public String notUnifyNormTipText() { return "set whether to normalize each instance to a unify prior probability (eg. 1)."; } /** * Set whether to normalize instances to unify prior probability * before building the clusterer * @param b true to normalize, otherwise false */ public void setNotUnifyNorm(boolean b){ m_uniformPrior = !b; } /** * Get whether to normalize instances to unify prior probability * before building the clusterer * @return true if set to normalize, false otherwise */ public boolean getNotUnifyNorm() { return !m_uniformPrior; } /** * Returns a string describing this clusterer * @return a description of the clusterer suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Cluster data using the sequential information bottleneck algorithm.\n\n" + "Note: only hard clustering scheme is supported. sIB assign for each " + "instance the cluster that have the minimum cost/distance to the instance. " + "The trade-off beta is set to infinite so 1/beta is zero.\n\n" + "For more information, see:\n\n" +getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. 
* @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.INPROCEEDINGS); result.setValue(Field.AUTHOR, "Noam Slonim and Nir Friedman and Naftali Tishby"); result.setValue(Field.YEAR, "2002"); result.setValue(Field.TITLE, "Unsupervised document classification using sequential information maximization"); result.setValue(Field.BOOKTITLE, "Proceedings of the 25th International ACM SIGIR Conference on Research and Development in Information Retrieval"); result.setValue(Field.PAGES, "129-136"); return result; } /** * Returns default capabilities of the clusterer. * @return the capabilities of this clusterer */ public Capabilities getCapabilities() { Capabilities result = super.getCapabilities(); result.disableAll(); result.enable(Capability.NO_CLASS); // attributes result.enable(Capability.NUMERIC_ATTRIBUTES); return result; } public String toString(){ StringBuffer text = new StringBuffer(); text.append("\nsIB\n===\n"); text.append("\nNumber of clusters: " + m_numCluster + "\n"); for (int j = 0; j < m_numCluster; j++) { text.append("\nCluster: " + j + " Size : " + bestT.size(j) + " Prior probability: " + Utils.doubleToString(bestT.Pt[j], 4) + "\n\n"); for (int i = 0; i < m_numAttributes; i++) { text.append("Attribute: " + m_data.attribute(i).name() + "\n"); text.append("Probability given the cluster = " + Utils.doubleToString(bestT.Py_t.get(i, j), 4) + "\n"); } } return text.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 5538 $"); } public static void main(String[] argv) { runClusterer(new sIB(), argv); } }
36,081
27.981526
257
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/forOPTICSAndDBScan/DataObjects/DataObject.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Copyright (C) 2004 * & Matthias Schubert (schubert@dbs.ifi.lmu.de) * & Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * & Rainer Holzmann (holzmann@cip.ifi.lmu.de) */ package weka.clusterers.forOPTICSAndDBScan.DataObjects; import weka.core.Instance; /** * <p> * DataObject.java <br/> * Authors: Rainer Holzmann, Zhanna Melnikova-Albrecht, Matthias Schubert <br/> * Date: Aug 19, 2004 <br/> * Time: 5:48:59 PM <br/> * $ Revision 1.4 $ <br/> * </p> * * @author Matthias Schubert (schubert@dbs.ifi.lmu.de) * @author Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * @author Rainer Holzmann (holzmann@cip.ifi.lmu.de) * @version $Revision: 1.2 $ */ public interface DataObject { static final int UNCLASSIFIED = -1; static final int NOISE = Integer.MIN_VALUE; static final double UNDEFINED = Integer.MAX_VALUE; // ***************************************************************************************************************** // methods // ***************************************************************************************************************** /** * Compares two DataObjects in respect to their attribute-values * @param dataObject The DataObject, that is compared with this.dataObject * @return Returns true, if the DataObjects correspond in each value, else returns false */ boolean 
equals(DataObject dataObject); /** * Calculates the distance between dataObject and this.dataObject * @param dataObject The DataObject, that is used for distance-calculation with this.dataObject * @return double-value The distance between dataObject and this.dataObject */ double distance(DataObject dataObject); /** * Returns the original instance * @return originalInstance */ Instance getInstance(); /** * Returns the key for this DataObject * @return key */ String getKey(); /** * Sets the key for this DataObject * @param key The key is represented as string */ void setKey(String key); /** * Sets the clusterID (cluster), to which this DataObject belongs to * @param clusterID Number of the Cluster */ void setClusterLabel(int clusterID); /** * Returns the clusterID, to which this DataObject belongs to * @return clusterID */ int getClusterLabel(); /** * Marks this dataObject as processed * @param processed True, if the DataObject has been already processed, false else */ void setProcessed(boolean processed); /** * Gives information about the status of a dataObject * @return True, if this dataObject has been processed, else false */ boolean isProcessed(); /** * Sets a new coreDistance for this dataObject * @param c_dist coreDistance */ void setCoreDistance(double c_dist); /** * Returns the coreDistance for this dataObject * @return coreDistance */ double getCoreDistance(); /** * Sets a new reachability-distance for this dataObject */ void setReachabilityDistance(double r_dist); /** * Returns the reachabilityDistance for this dataObject */ double getReachabilityDistance(); }
4,010
29.853846
120
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/forOPTICSAndDBScan/DataObjects/EuclidianDataObject.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Copyright (C) 2004 * & Matthias Schubert (schubert@dbs.ifi.lmu.de) * & Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * & Rainer Holzmann (holzmann@cip.ifi.lmu.de) */ package weka.clusterers.forOPTICSAndDBScan.DataObjects; import weka.clusterers.forOPTICSAndDBScan.Databases.Database; import weka.core.Attribute; import weka.core.Instance; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import java.io.Serializable; /** * <p> * EuclidianDataObject.java <br/> * Authors: Rainer Holzmann, Zhanna Melnikova-Albrecht, Matthias Schubert <br/> * Date: Aug 19, 2004 <br/> * Time: 5:50:22 PM <br/> * $ Revision 1.4 $ <br/> * </p> * * @author Matthias Schubert (schubert@dbs.ifi.lmu.de) * @author Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * @author Rainer Holzmann (holzmann@cip.ifi.lmu.de) * @version $Revision: 1.5 $ */ public class EuclidianDataObject implements DataObject, Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -4408119914898291075L; /** * Holds the original instance */ private Instance instance; /** * Holds the (unique) key that is associated with this DataObject */ private String key; /** * Holds the ID of the cluster, to which this DataObject is assigned */ private int clusterID; 
/** * Holds the status for this DataObject (true, if it has been processed, else false) */ private boolean processed; /** * Holds the coreDistance for this DataObject */ private double c_dist; /** * Holds the reachabilityDistance for this DataObject */ private double r_dist; /** * Holds the database, that is the keeper of this DataObject */ private Database database; // ***************************************************************************************************************** // constructors // ***************************************************************************************************************** /** * Constructs a new DataObject. The original instance is kept as instance-variable * @param originalInstance the original instance */ public EuclidianDataObject(Instance originalInstance, String key, Database database) { this.database = database; this.key = key; instance = originalInstance; clusterID = DataObject.UNCLASSIFIED; processed = false; c_dist = DataObject.UNDEFINED; r_dist = DataObject.UNDEFINED; } // ***************************************************************************************************************** // methods // ***************************************************************************************************************** /** * Compares two DataObjects in respect to their attribute-values * @param dataObject The DataObject, that is compared with this.dataObject * @return Returns true, if the DataObjects correspond in each value, else returns false */ public boolean equals(DataObject dataObject) { if (this == dataObject) return true; if (!(dataObject instanceof EuclidianDataObject)) return false; final EuclidianDataObject euclidianDataObject = (EuclidianDataObject) dataObject; if (getInstance().equalHeaders(euclidianDataObject.getInstance())) { for (int i = 0; i < getInstance().numValues(); i++) { double i_value_Instance_1 = getInstance().valueSparse(i); double i_value_Instance_2 = 
euclidianDataObject.getInstance().valueSparse(i); if (i_value_Instance_1 != i_value_Instance_2) return false; } return true; } return false; } /** * Calculates the euclidian-distance between dataObject and this.dataObject * @param dataObject The DataObject, that is used for distance-calculation with this.dataObject * @return double-value The euclidian-distance between dataObject and this.dataObject * NaN, if the computation could not be performed */ public double distance(DataObject dataObject) { double dist = 0.0; if (!(dataObject instanceof EuclidianDataObject)) return Double.NaN; if (getInstance().equalHeaders(dataObject.getInstance())) { for (int i = 0; i < getInstance().numValues(); i++) { double cDistance = computeDistance(getInstance().index(i), getInstance().valueSparse(i), dataObject.getInstance().valueSparse(i)); dist += Math.pow(cDistance, 2.0); } return Math.sqrt(dist); } return Double.NaN; } /** * Performs euclidian-distance-calculation between two given values * @param index of the attribute within the DataObject's instance * @param v value_1 * @param v1 value_2 * @return double norm-distance between value_1 and value_2 */ private double computeDistance(int index, double v, double v1) { switch (getInstance().attribute(index).type()) { case Attribute.NOMINAL: return (Utils.isMissingValue(v) || Utils.isMissingValue(v1) || ((int) v != (int) v1)) ? 1 : 0; case Attribute.NUMERIC: if (Utils.isMissingValue(v) || Utils.isMissingValue(v1)) { if (Utils.isMissingValue(v) && Utils.isMissingValue(v1)) return 1; else { return (Utils.isMissingValue(v)) ? norm(v1, index) : norm(v, index); } } else return norm(v, index) - norm(v1, index); default: return 0; } } /** * Normalizes a given value of a numeric attribute. 
* * @param x the value to be normalized * @param i the attribute's index */ private double norm(double x, int i) { if (Double.isNaN(database.getAttributeMinValues()[i]) || Utils.eq(database.getAttributeMaxValues()[i], database.getAttributeMinValues()[i])) { return 0; } else { return (x - database.getAttributeMinValues()[i]) / (database.getAttributeMaxValues()[i] - database.getAttributeMinValues()[i]); } } /** * Returns the original instance * @return originalInstance */ public Instance getInstance() { return instance; } /** * Returns the key for this DataObject * @return key */ public String getKey() { return key; } /** * Sets the key for this DataObject * @param key The key is represented as string */ public void setKey(String key) { this.key = key; } /** * Sets the clusterID (cluster), to which this DataObject belongs to * @param clusterID Number of the Cluster */ public void setClusterLabel(int clusterID) { this.clusterID = clusterID; } /** * Returns the clusterID, to which this DataObject belongs to * @return clusterID */ public int getClusterLabel() { return clusterID; } /** * Marks this dataObject as processed * @param processed True, if the DataObject has been already processed, false else */ public void setProcessed(boolean processed) { this.processed = processed; } /** * Gives information about the status of a dataObject * @return True, if this dataObject has been processed, else false */ public boolean isProcessed() { return processed; } /** * Sets a new coreDistance for this dataObject * @param c_dist coreDistance */ public void setCoreDistance(double c_dist) { this.c_dist = c_dist; } /** * Returns the coreDistance for this dataObject * @return coreDistance */ public double getCoreDistance() { return c_dist; } /** * Sets a new reachability-distance for this dataObject */ public void setReachabilityDistance(double r_dist) { this.r_dist = r_dist; } /** * Returns the reachabilityDistance for this dataObject */ public double getReachabilityDistance() { 
return r_dist; } public String toString() { return instance.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.5 $"); } }
9,547
30.72093
120
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/forOPTICSAndDBScan/DataObjects/ManhattanDataObject.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Copyright (C) 2004 * & Matthias Schubert (schubert@dbs.ifi.lmu.de) * & Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * & Rainer Holzmann (holzmann@cip.ifi.lmu.de) */ package weka.clusterers.forOPTICSAndDBScan.DataObjects; import weka.clusterers.forOPTICSAndDBScan.Databases.Database; import weka.core.Attribute; import weka.core.Instance; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import weka.core.Utils; import java.io.Serializable; /** * <p> * ManhattanDataObject.java <br/> * Authors: Rainer Holzmann, Zhanna Melnikova-Albrecht, Matthias Schubert <br/> * Date: Aug 19, 2004 <br/> * Time: 5:50:22 PM <br/> * $ Revision 1.4 $ <br/> * </p> * * @author Matthias Schubert (schubert@dbs.ifi.lmu.de) * @author Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * @author Rainer Holzmann (holzmann@cip.ifi.lmu.de) * @version $Revision: 1.5 $ */ public class ManhattanDataObject implements DataObject, Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = -3417720553766544582L; /** * Holds the original instance */ private Instance instance; /** * Holds the (unique) key that is associated with this DataObject */ private String key; /** * Holds the ID of the cluster, to which this DataObject is assigned */ private int clusterID; 
/** * Holds the status for this DataObject (true, if it has been processed, else false) */ private boolean processed; /** * Holds the coreDistance for this DataObject */ private double c_dist; /** * Holds the reachabilityDistance for this DataObject */ private double r_dist; /** * Holds the database, that is the keeper of this DataObject */ private Database database; // ***************************************************************************************************************** // constructors // ***************************************************************************************************************** /** * Constructs a new DataObject. The original instance is kept as instance-variable * @param originalInstance the original instance */ public ManhattanDataObject(Instance originalInstance, String key, Database database) { this.database = database; this.key = key; instance = originalInstance; clusterID = DataObject.UNCLASSIFIED; processed = false; c_dist = DataObject.UNDEFINED; r_dist = DataObject.UNDEFINED; } // ***************************************************************************************************************** // methods // ***************************************************************************************************************** /** * Compares two DataObjects in respect to their attribute-values * @param dataObject The DataObject, that is compared with this.dataObject * @return Returns true, if the DataObjects correspond in each value, else returns false */ public boolean equals(DataObject dataObject) { if (this == dataObject) return true; if (!(dataObject instanceof ManhattanDataObject)) return false; final ManhattanDataObject manhattanDataObject = (ManhattanDataObject) dataObject; if (getInstance().equalHeaders(manhattanDataObject.getInstance())) { for (int i = 0; i < getInstance().numValues(); i++) { double i_value_Instance_1 = getInstance().valueSparse(i); double i_value_Instance_2 = 
manhattanDataObject.getInstance().valueSparse(i); if (i_value_Instance_1 != i_value_Instance_2) return false; } return true; } return false; } /** * Calculates the manhattan-distance between dataObject and this.dataObject * @param dataObject The DataObject, that is used for distance-calculation with this.dataObject * @return double-value The manhattan-distance between dataObject and this.dataObject * NaN, if the computation could not be performed */ public double distance(DataObject dataObject) { double dist = 0.0; if (!(dataObject instanceof ManhattanDataObject)) return Double.NaN; if (getInstance().equalHeaders(dataObject.getInstance())) { for (int i = 0; i < getInstance().numValues(); i++) { double cDistance = computeDistance(getInstance().index(i), getInstance().valueSparse(i), dataObject.getInstance().valueSparse(i)); dist += Math.abs(cDistance); } return dist; } return Double.NaN; } /** * Performs manhattan-distance-calculation between two given values * @param index of the attribute within the DataObject's instance * @param v value_1 * @param v1 value_2 * @return double norm-distance between value_1 and value_2 */ private double computeDistance(int index, double v, double v1) { switch (getInstance().attribute(index).type()) { case Attribute.NOMINAL: return (Utils.isMissingValue(v) || Utils.isMissingValue(v1) || ((int) v != (int) v1)) ? 1 : 0; case Attribute.NUMERIC: if (Utils.isMissingValue(v) || Utils.isMissingValue(v1)) { if (Utils.isMissingValue(v) && Utils.isMissingValue(v1)) return 1; else { return (Utils.isMissingValue(v)) ? norm(v1, index) : norm(v, index); } } else return norm(v, index) - norm(v1, index); default: return 0; } } /** * Normalizes a given value of a numeric attribute. 
* * @param x the value to be normalized * @param i the attribute's index */ private double norm(double x, int i) { if (Double.isNaN(database.getAttributeMinValues()[i]) || Utils.eq(database.getAttributeMaxValues()[i], database.getAttributeMinValues()[i])) { return 0; } else { return (x - database.getAttributeMinValues()[i]) / (database.getAttributeMaxValues()[i] - database.getAttributeMinValues()[i]); } } /** * Returns the original instance * @return originalInstance */ public Instance getInstance() { return instance; } /** * Returns the key for this DataObject * @return key */ public String getKey() { return key; } /** * Sets the key for this DataObject * @param key The key is represented as string */ public void setKey(String key) { this.key = key; } /** * Sets the clusterID (cluster), to which this DataObject belongs to * @param clusterID Number of the Cluster */ public void setClusterLabel(int clusterID) { this.clusterID = clusterID; } /** * Returns the clusterID, to which this DataObject belongs to * @return clusterID */ public int getClusterLabel() { return clusterID; } /** * Marks this dataObject as processed * @param processed True, if the DataObject has been already processed, false else */ public void setProcessed(boolean processed) { this.processed = processed; } /** * Gives information about the status of a dataObject * @return True, if this dataObject has been processed, else false */ public boolean isProcessed() { return processed; } /** * Sets a new coreDistance for this dataObject * @param c_dist coreDistance */ public void setCoreDistance(double c_dist) { this.c_dist = c_dist; } /** * Returns the coreDistance for this dataObject * @return coreDistance */ public double getCoreDistance() { return c_dist; } /** * Sets a new reachability-distance for this dataObject */ public void setReachabilityDistance(double r_dist) { this.r_dist = r_dist; } /** * Returns the reachabilityDistance for this dataObject */ public double getReachabilityDistance() { 
return r_dist; } public String toString() { return instance.toString(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.5 $"); } }
9,533
30.674419
120
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/forOPTICSAndDBScan/Databases/Database.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Copyright (C) 2004 * & Matthias Schubert (schubert@dbs.ifi.lmu.de) * & Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * & Rainer Holzmann (holzmann@cip.ifi.lmu.de) */ package weka.clusterers.forOPTICSAndDBScan.Databases; import weka.clusterers.forOPTICSAndDBScan.DataObjects.DataObject; import weka.core.Instances; import java.util.Iterator; import java.util.List; /** * <p> * Database.java <br/> * Authors: Rainer Holzmann, Zhanna Melnikova-Albrecht, Matthias Schubert <br/> * Date: Aug 20, 2004 <br/> * Time: 1:03:43 PM <br/> * $ Revision 1.4 $ <br/> * </p> * * @author Matthias Schubert (schubert@dbs.ifi.lmu.de) * @author Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * @author Rainer Holzmann (holzmann@cip.ifi.lmu.de) * @version $Revision: 1.2 $ */ public interface Database { // ***************************************************************************************************************** // methods // ***************************************************************************************************************** /** * Select a dataObject from the database * @param key The key that is associated with the dataObject * @return dataObject */ DataObject getDataObject(String key); /** * Returns the size of the database (the number of dataObjects in the database) * @return size */ 
int size(); /** * Returns an iterator over all the keys * @return iterator */ Iterator keyIterator(); /** * Returns an iterator over all the dataObjects in the database * @return iterator */ Iterator dataObjectIterator(); /** * Tests if the database contains the dataObject_Query * @param dataObject_Query The query-object * @return true if the database contains dataObject_Query, else false */ boolean contains(DataObject dataObject_Query); /** * Inserts a new dataObject into the database * @param dataObject */ void insert(DataObject dataObject); /** * Returns the original instances delivered from WEKA * @return instances */ Instances getInstances(); /** * Sets the minimum and maximum values for each attribute in different arrays * by walking through every DataObject of the database */ void setMinMaxValues(); /** * Returns the array of minimum-values for each attribute * @return attributeMinValues */ double[] getAttributeMinValues(); /** * Returns the array of maximum-values for each attribute * @return attributeMaxValues */ double[] getAttributeMaxValues(); /** * Performs an epsilon range query for this dataObject * @param epsilon Specifies the range for the query * @param queryDataObject The dataObject that is used as query-object for epsilon range query * @return List with all the DataObjects that are within the specified range */ List epsilonRangeQuery(double epsilon, DataObject queryDataObject); /** * Emits the k next-neighbours and performs an epsilon-range-query at the parallel. 
* The returned list contains two elements: * At index=0 --> list with all k next-neighbours; * At index=1 --> list with all dataObjects within epsilon; * @param k number of next neighbours * @param epsilon Specifies the range for the query * @param dataObject the start object * @return list with the k-next neighbours (PriorityQueueElements) and a list * with candidates from the epsilon-range-query (EpsilonRange_ListElements) */ List k_nextNeighbourQuery(int k, double epsilon, DataObject dataObject); /** * Calculates the coreDistance for the specified DataObject. * The returned list contains three elements: * At index=0 --> list with all k next-neighbours; * At index=1 --> list with all dataObjects within epsilon; * At index=2 --> coreDistance as Double-value * @param minPoints minPoints-many neighbours within epsilon must be found to have a non-undefined coreDistance * @param epsilon Specifies the range for the query * @param dataObject Calculate coreDistance for this dataObject * @return list with the k-next neighbours (PriorityQueueElements) and a list * with candidates from the epsilon-range-query (EpsilonRange_ListElements) and * the double-value for the calculated coreDistance */ List coreDistance(int minPoints, double epsilon, DataObject dataObject); }
5,320
34.238411
120
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/forOPTICSAndDBScan/Databases/SequentialDatabase.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Copyright (C) 2004 * & Matthias Schubert (schubert@dbs.ifi.lmu.de) * & Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * & Rainer Holzmann (holzmann@cip.ifi.lmu.de) */ package weka.clusterers.forOPTICSAndDBScan.Databases; import weka.clusterers.forOPTICSAndDBScan.DataObjects.DataObject; import weka.clusterers.forOPTICSAndDBScan.Utils.EpsilonRange_ListElement; import weka.clusterers.forOPTICSAndDBScan.Utils.PriorityQueue; import weka.clusterers.forOPTICSAndDBScan.Utils.PriorityQueueElement; import weka.core.Instances; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import java.io.Serializable; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.TreeMap; /** * <p> * SequentialDatabase.java <br/> * Authors: Rainer Holzmann, Zhanna Melnikova-Albrecht, Matthias Schubert <br/> * Date: Aug 20, 2004 <br/> * Time: 1:23:38 PM <br/> * $ Revision 1.4 $ <br/> * </p> * * @author Matthias Schubert (schubert@dbs.ifi.lmu.de) * @author Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * @author Rainer Holzmann (holzmann@cip.ifi.lmu.de) * @version $Revision: 1.4 $ */ public class SequentialDatabase implements Database, Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = 
787245523118665778L; /** * Internal, sorted Treemap for storing all the DataObjects */ private TreeMap treeMap; /** * Holds the original instances delivered from WEKA */ private Instances instances; /** * Holds the minimum value for each attribute */ private double[] attributeMinValues; /** * Holds the maximum value for each attribute */ private double[] attributeMaxValues; // ***************************************************************************************************************** // constructors // ***************************************************************************************************************** /** * Constructs a new sequential database and holds the original instances * @param instances */ public SequentialDatabase(Instances instances) { this.instances = instances; treeMap = new TreeMap(); } // ***************************************************************************************************************** // methods // ***************************************************************************************************************** /** * Select a dataObject from the database * @param key The key that is associated with the dataObject * @return dataObject */ public DataObject getDataObject(String key) { return (DataObject) treeMap.get(key); } /** * Sets the minimum and maximum values for each attribute in different arrays * by walking through every DataObject of the database */ public void setMinMaxValues() { attributeMinValues = new double[getInstances().numAttributes()]; attributeMaxValues = new double[getInstances().numAttributes()]; //Init for (int i = 0; i < getInstances().numAttributes(); i++) { attributeMinValues[i] = attributeMaxValues[i] = Double.NaN; } Iterator iterator = dataObjectIterator(); while (iterator.hasNext()) { DataObject dataObject = (DataObject) iterator.next(); for (int j = 0; j < getInstances().numAttributes(); j++) { if (Double.isNaN(attributeMinValues[j])) { attributeMinValues[j] = 
dataObject.getInstance().value(j); attributeMaxValues[j] = dataObject.getInstance().value(j); } else { if (dataObject.getInstance().value(j) < attributeMinValues[j]) attributeMinValues[j] = dataObject.getInstance().value(j); if (dataObject.getInstance().value(j) > attributeMaxValues[j]) attributeMaxValues[j] = dataObject.getInstance().value(j); } } } } /** * Returns the array of minimum-values for each attribute * @return attributeMinValues */ public double[] getAttributeMinValues() { return attributeMinValues; } /** * Returns the array of maximum-values for each attribute * @return attributeMaxValues */ public double[] getAttributeMaxValues() { return attributeMaxValues; } /** * Performs an epsilon range query for this dataObject * @param epsilon Specifies the range for the query * @param queryDataObject The dataObject that is used as query-object for epsilon range query * @return List with all the DataObjects that are within the specified range */ public List epsilonRangeQuery(double epsilon, DataObject queryDataObject) { ArrayList epsilonRange_List = new ArrayList(); Iterator iterator = dataObjectIterator(); while (iterator.hasNext()) { DataObject dataObject = (DataObject) iterator.next(); double distance = queryDataObject.distance(dataObject); if (distance < epsilon) { epsilonRange_List.add(dataObject); } } return epsilonRange_List; } /** * Emits the k next-neighbours and performs an epsilon-range-query at the parallel. 
* The returned list contains two elements: * At index=0 --> list with all k next-neighbours; * At index=1 --> list with all dataObjects within epsilon; * @param k number of next neighbours * @param epsilon Specifies the range for the query * @param dataObject the start object * @return list with the k-next neighbours (PriorityQueueElements) and a list * with candidates from the epsilon-range-query (EpsilonRange_ListElements) */ public List k_nextNeighbourQuery(int k, double epsilon, DataObject dataObject) { Iterator iterator = dataObjectIterator(); List return_List = new ArrayList(); List nextNeighbours_List = new ArrayList(); List epsilonRange_List = new ArrayList(); PriorityQueue priorityQueue = new PriorityQueue(); while (iterator.hasNext()) { DataObject next_dataObject = (DataObject) iterator.next(); double dist = dataObject.distance(next_dataObject); if (dist <= epsilon) epsilonRange_List.add(new EpsilonRange_ListElement(dist, next_dataObject)); if (priorityQueue.size() < k) { priorityQueue.add(dist, next_dataObject); } else { if (dist < priorityQueue.getPriority(0)) { priorityQueue.next(); //removes the highest distance priorityQueue.add(dist, next_dataObject); } } } while (priorityQueue.hasNext()) { nextNeighbours_List.add(0, priorityQueue.next()); } return_List.add(nextNeighbours_List); return_List.add(epsilonRange_List); return return_List; } /** * Calculates the coreDistance for the specified DataObject. 
 * The returned list contains three elements:
 * At index=0 --> list with all k next-neighbours;
 * At index=1 --> list with all dataObjects within epsilon;
 * At index=2 --> coreDistance as Double-value
 * @param minPoints minPoints-many neighbours within epsilon must be found to have a non-undefined coreDistance
 * @param epsilon Specifies the range for the query
 * @param dataObject Calculate coreDistance for this dataObject
 * @return list with the k-next neighbours (PriorityQueueElements) and a list
 * with candidates from the epsilon-range-query (EpsilonRange_ListElements) and
 * the double-value for the calculated coreDistance
 */
public List coreDistance(int minPoints, double epsilon, DataObject dataObject) {
    List list = k_nextNeighbourQuery(minPoints, epsilon, dataObject);

    // Fewer than minPoints objects inside the epsilon-range: the
    // core-distance is undefined by definition (OPTICS/DBSCAN).
    if (((List) list.get(1)).size() < minPoints) {
        list.add(new Double(DataObject.UNDEFINED));
        return list;
    } else {
        List nextNeighbours_List = (List) list.get(0);
        // The neighbour list is sorted ascending, so the last element holds
        // the distance to the minPoints-th nearest neighbour.
        PriorityQueueElement priorityQueueElement =
                (PriorityQueueElement) nextNeighbours_List.get(nextNeighbours_List.size() - 1);
        if (priorityQueueElement.getPriority() <= epsilon) {
            list.add(new Double(priorityQueueElement.getPriority()));
            return list;
        } else {
            list.add(new Double(DataObject.UNDEFINED));
            return list;
        }
    }
}

/**
 * Returns the size of the database (the number of dataObjects in the database)
 * @return size
 */
public int size() {
    return treeMap.size();
}

/**
 * Returns an iterator over all the keys
 * @return iterator
 */
public Iterator keyIterator() {
    return treeMap.keySet().iterator();
}

/**
 * Returns an iterator over all the dataObjects in the database
 * (iteration order follows the backing TreeMap's key order)
 * @return iterator
 */
public Iterator dataObjectIterator() {
    return treeMap.values().iterator();
}

/**
 * Tests if the database contains the dataObject_Query
 * @param dataObject_Query The query-object
 * @return true if the database contains dataObject_Query, else false
 */
public boolean contains(DataObject dataObject_Query) {
    // Linear scan over all stored objects; equality is delegated to
    // DataObject.equals(), not to key identity.
    Iterator iterator = dataObjectIterator();
    while (iterator.hasNext()) {
        DataObject dataObject = (DataObject) iterator.next();
        if (dataObject.equals(dataObject_Query)) return true;
    }
    return false;
}

/**
 * Inserts a new dataObject into the database
 * (an object with an already-present key silently replaces the old entry)
 * @param dataObject the object to store
 */
public void insert(DataObject dataObject) {
    treeMap.put(dataObject.getKey(), dataObject);
}

/**
 * Returns the original instances delivered from WEKA
 * @return instances
 */
public Instances getInstances() {
    return instances;
}

/**
 * Returns the revision string.
 *
 * @return the revision
 */
public String getRevision() {
    return RevisionUtils.extract("$Revision: 1.4 $");
}
}
11,180
34.722045
120
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/forOPTICSAndDBScan/OPTICS_GUI/GraphPanel.java
/*
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 *    Copyright (C) 2004
 *    & Matthias Schubert (schubert@dbs.ifi.lmu.de)
 *    & Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de)
 *    & Rainer Holzmann (holzmann@cip.ifi.lmu.de)
 */

package weka.clusterers.forOPTICSAndDBScan.OPTICS_GUI;

import weka.clusterers.forOPTICSAndDBScan.DataObjects.DataObject;
import weka.core.FastVector;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

import java.awt.Color;
import java.awt.Dimension;
import java.awt.Graphics;
import java.awt.event.MouseEvent;
import java.awt.event.MouseMotionAdapter;

import javax.swing.JComponent;

/**
 * <p>
 * GraphPanel.java <br/>
 * Authors: Rainer Holzmann, Zhanna Melnikova-Albrecht <br/>
 * Date: Sep 16, 2004 <br/>
 * Time: 10:28:19 AM <br/>
 * $ Revision 1.4 $ <br/>
 * </p>
 *
 * Panel that renders the OPTICS plot: one vertical bar per DataObject for
 * its core-distance and/or reachability-distance, in cluster order.
 *
 * @author Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de)
 * @author Rainer Holzmann (holzmann@cip.ifi.lmu.de)
 * @version $Revision: 1.4 $
 */
public class GraphPanel extends JComponent implements RevisionHandler {

    /** for serialization */
    private static final long serialVersionUID = 7917937528738361470L;

    /**
     * Holds the clustering results
     */
    private FastVector resultVector;

    /**
     * Holds the value that is multiplied with the original values of core- and reachability
     * distances in order to get better graphical views
     * (a vertical scale factor, in pixels per distance unit)
     */
    private int verticalAdjustment;

    /**
     * Specifies the color for displaying core-distances
     */
    private Color coreDistanceColor;

    /**
     * Specifies the color for displaying reachability-distances
     */
    private Color reachabilityDistanceColor;

    /**
     * Specifies the width for displaying the distances
     * (bar width in pixels)
     */
    private int widthSlider;

    /**
     * Holds the flag for showCoreDistances
     */
    private boolean showCoreDistances;

    /**
     * Holds the flag for showReachabilityDistances
     */
    private boolean showReachabilityDistances;

    /**
     * Holds the index of the last toolTip
     * (avoids rebuilding the tooltip while the mouse stays over the same bar)
     */
    private int recentIndex = -1;

    // *****************************************************************************************************************
    // constructors
    // *****************************************************************************************************************

    /**
     * Creates the plot panel.
     *
     * @param resultVector the cluster-ordered OPTICS results to draw
     * @param verticalAdjustment vertical scale factor for the distance bars
     * @param showCoreDistances whether to draw core-distance bars
     * @param showReachbilityDistances whether to draw reachability-distance bars
     */
    public GraphPanel(FastVector resultVector, int verticalAdjustment,
                      boolean showCoreDistances, boolean showReachbilityDistances) {
        this.resultVector = resultVector;
        this.verticalAdjustment = verticalAdjustment;
        coreDistanceColor = new Color(100, 100, 100);
        reachabilityDistanceColor = Color.orange;
        widthSlider = 5;
        this.showCoreDistances = showCoreDistances;
        this.showReachabilityDistances = showReachbilityDistances;
        addMouseMotionListener(new MouseHandler());
    }

    // *****************************************************************************************************************
    // methods
    // *****************************************************************************************************************

    /**
     * Draws the OPTICS Plot
     * @param g the graphics context to paint on
     */
    protected void paintComponent(Graphics g) {
        if (isOpaque()) {
            // Clear the whole panel with the background colour before drawing.
            Dimension size = getSize();
            g.setColor(getBackground());
            g.fillRect(0, 0, size.width, size.height);
        }

        // stepSize accumulates the horizontal offset introduced by the bars
        // already drawn; cDist/rDist are bar heights in pixels.
        int stepSize = 0;
        int cDist = 0;
        int rDist = 0;

        for (int vectorIndex = 0; vectorIndex < resultVector.size(); vectorIndex++) {
            double coreDistance = ((DataObject) resultVector.elementAt(vectorIndex)).getCoreDistance();
            double reachDistance = ((DataObject) resultVector.elementAt(vectorIndex)).getReachabilityDistance();

            // UNDEFINED distances are drawn as full-height bars.
            if (coreDistance == DataObject.UNDEFINED)
                cDist = getHeight();
            else
                cDist = (int) (coreDistance * verticalAdjustment);

            if (reachDistance == DataObject.UNDEFINED)
                rDist = getHeight();
            else
                rDist = (int) (reachDistance * verticalAdjustment);

            int x = vectorIndex + stepSize;

            if (isShowCoreDistances()) {
                // Draw coreDistance bar
                g.setColor(coreDistanceColor);
                g.fillRect(x, getHeight() - cDist, widthSlider, cDist);
            }

            if (isShowReachabilityDistances()) {
                // When core bars are also shown, shift the reachability bar
                // one bar-width to the right so the two sit side by side.
                int sizer = widthSlider;
                if (!isShowCoreDistances()) sizer = 0;

                // Draw reachabilityDistance bar
                g.setColor(reachabilityDistanceColor);
                g.fillRect(x + sizer, getHeight() - rDist, widthSlider, rDist);
            }

            if (isShowCoreDistances() && isShowReachabilityDistances()) {
                stepSize += (widthSlider * 2);
            } else
                stepSize += widthSlider;
        }
    }

    /**
     * Sets a new resultVector
     * @param resultVector the new clustering results to display
     */
    public void setResultVector(FastVector resultVector) {
        this.resultVector = resultVector;
    }

    /**
     * Displays a toolTip for the selected DataObject
     * @param toolTip the (HTML) tooltip text
     */
    public void setNewToolTip(String toolTip) {
        setToolTipText(toolTip);
    }

    /**
     * Adjusts the size of this panel in respect of the shown content
     * @param serObject SERObject that contains the OPTICS clustering results
     */
    public void adjustSize(SERObject serObject) {
        // i = horizontal pixels consumed per object (two bars -> 10, one -> 5).
        int i = 0;
        if (isShowCoreDistances() && isShowReachabilityDistances())
            i = 10;
        else if ((isShowCoreDistances() && !isShowReachabilityDistances()) ||
                !isShowCoreDistances() && isShowReachabilityDistances())
            i = 5;
        setSize(new Dimension((i * serObject.getDatabaseSize()) + serObject.getDatabaseSize(), getHeight()));
        setPreferredSize(new Dimension((i * serObject.getDatabaseSize()) + serObject.getDatabaseSize(), getHeight()));
    }

    /**
     * Returns the flag for showCoreDistances
     * @return True or false
     */
    public boolean isShowCoreDistances() {
        return showCoreDistances;
    }

    /**
     * Sets the flag for showCoreDistances
     * @param showCoreDistances whether core-distance bars are drawn
     */
    public void setShowCoreDistances(boolean showCoreDistances) {
        this.showCoreDistances = showCoreDistances;
    }

    /**
     * Returns the flag for showReachabilityDistances
     * @return True or false
     */
    public boolean isShowReachabilityDistances() {
        return showReachabilityDistances;
    }

    /**
     * Sets the flag for showReachabilityDistances
     * @param showReachabilityDistances whether reachability-distance bars are drawn
     */
    public void setShowReachabilityDistances(boolean showReachabilityDistances) {
        this.showReachabilityDistances = showReachabilityDistances;
    }

    /**
     * Sets a new value for the vertical verticalAdjustment
     * @param verticalAdjustment the new vertical scale factor
     */
    public void setVerticalAdjustment(int verticalAdjustment) {
        this.verticalAdjustment = verticalAdjustment;
    }

    /**
     * Sets a new color for the coreDistance
     * @param coreDistanceColor the new bar colour
     */
    public void setCoreDistanceColor(Color coreDistanceColor) {
        this.coreDistanceColor = coreDistanceColor;
        repaint();
    }

    /**
     * Sets a new color for the reachabilityDistance
     * @param reachabilityDistanceColor the new bar colour
     */
    public void setReachabilityDistanceColor(Color reachabilityDistanceColor) {
        this.reachabilityDistanceColor = reachabilityDistanceColor;
        repaint();
    }

    // *****************************************************************************************************************
    // inner classes
    // *****************************************************************************************************************

    /**
     * Tracks mouse movement over the plot and shows a tooltip describing the
     * DataObject under the cursor.
     */
    private class MouseHandler extends MouseMotionAdapter implements RevisionHandler {

        /**
         * Invoked when the mouse button has been moved on a component
         * (with no buttons no down).
         */
        public void mouseMoved(MouseEvent e) {
            showToolTip(e.getX());
        }

        /**
         * Shows a toolTip with the dataObjects parameters (c-dist, r-dist, key, attributes . . .)
         * @param x MouseCoordinate X
         * @return false if the cursor is still over the same bar as last time,
         *         true otherwise
         */
        private boolean showToolTip(int x) {
            // i = pixel width per object; the divisor maps the x coordinate
            // back to the index of the bar under the cursor. All four flag
            // combinations are covered, so i is always 6 or 11 (never 0).
            int i = 0;
            if (isShowCoreDistances() && isShowReachabilityDistances())
                i = 11;
            else if ((isShowCoreDistances() && !isShowReachabilityDistances()) ||
                    !isShowCoreDistances() && isShowReachabilityDistances() ||
                    !isShowCoreDistances() && !isShowReachabilityDistances())
                i = 6;

            if ((x / i) == recentIndex) return false;
            else recentIndex = x / i;

            DataObject dataObject = null;
            try {
                dataObject = (DataObject) resultVector.elementAt(recentIndex);
            } catch (Exception e) {
                // cursor is right of the last bar — leave dataObject null,
                // the tooltip is simply not updated
            }

            if (dataObject != null) {
                if (!isShowCoreDistances() && !isShowReachabilityDistances()) {
                    setNewToolTip("<html><body><b>Please select a distance" +
                            "</b></body></html>"
                    );
                } else
                    // The currently-shown distance(s) are rendered in bold.
                    setNewToolTip("<html><body><table>" +
                            "<tr><td>DataObject:</td><td>" + dataObject + "</td></tr>" +
                            "<tr><td>Key:</td><td>" + dataObject.getKey() + "</td></tr>" +
                            "<tr><td>" + (isShowCoreDistances() ? "<b>" : "") + "Core-Distance:" +
                            (isShowCoreDistances() ? "</b>" : "") + "</td><td>" +
                            (isShowCoreDistances() ? "<b>" : "") +
                            ((dataObject.getCoreDistance() == DataObject.UNDEFINED) ? "UNDEFINED" :
                            Utils.doubleToString(dataObject.getCoreDistance(), 3, 5)) +
                            (isShowCoreDistances() ? "</b>" : "") + "</td></tr>" +
                            "<tr><td>" + (isShowReachabilityDistances() ? "<b>" : "") + "Reachability-Distance:" +
                            (isShowReachabilityDistances() ? "</b>" : "") + "</td><td>" +
                            (isShowReachabilityDistances() ? "<b>" : "") +
                            ((dataObject.getReachabilityDistance() == DataObject.UNDEFINED) ? "UNDEFINED" :
                            Utils.doubleToString(dataObject.getReachabilityDistance(), 3, 5)) +
                            (isShowReachabilityDistances() ? "</b>" : "") + "</td></tr>" +
                            "</table></body></html>"
                    );
            }
            return true;
        }

        /**
         * Returns the revision string.
         *
         * @return the revision
         */
        public String getRevision() {
            return RevisionUtils.extract("$Revision: 1.4 $");
        }
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 1.4 $");
    }
}
12,431
33.437673
120
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/forOPTICSAndDBScan/OPTICS_GUI/ResultVectorTableModel.java
/*
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 *    Copyright (C) 2004
 *    & Matthias Schubert (schubert@dbs.ifi.lmu.de)
 *    & Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de)
 *    & Rainer Holzmann (holzmann@cip.ifi.lmu.de)
 */

package weka.clusterers.forOPTICSAndDBScan.OPTICS_GUI;

import weka.clusterers.forOPTICSAndDBScan.DataObjects.DataObject;
import weka.core.FastVector;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;
import weka.core.Utils;

import javax.swing.table.AbstractTableModel;

/**
 * <p>
 * ResultVectorTableModel.java <br/>
 * Authors: Rainer Holzmann, Zhanna Melnikova-Albrecht <br/>
 * Date: Sep 12, 2004 <br/>
 * Time: 9:23:31 PM <br/>
 * $ Revision 1.4 $ <br/>
 * </p>
 *
 * Read-only table model exposing the OPTICS cluster order as four columns:
 * key, dataObject, core-distance and reachability-distance.
 *
 * @author Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de)
 * @author Rainer Holzmann (holzmann@cip.ifi.lmu.de)
 * @version $Revision: 1.4 $
 */
public class ResultVectorTableModel extends AbstractTableModel implements RevisionHandler {

    /** for serialization */
    private static final long serialVersionUID = -7732711470435549210L;

    /** Cluster-ordered dataObjects (with their r_dist and c_dist) backing this model. */
    private FastVector resultVector;

    /**
     * Creates a table model over the given OPTICS results; a null vector
     * yields an empty (0 rows x 0 columns) table.
     *
     * @param resultVector the clustering results to display
     */
    public ResultVectorTableModel(FastVector resultVector) {
        this.resultVector = resultVector;
    }

    /**
     * Returns the number of rows of this model, i.e. the number of
     * dataObjects stored in the resultVector.
     *
     * @return the row count (0 when no results are set)
     */
    public int getRowCount() {
        return (resultVector == null) ? 0 : resultVector.size();
    }

    /**
     * Returns the number of columns of this model
     * (dataObject.key, dataObject, c_dist, r_dist).
     *
     * @return 4, or 0 when no results are set
     */
    public int getColumnCount() {
        return (resultVector == null) ? 0 : 4;
    }

    /**
     * Returns the cell value for a given position.
     *
     * @param row the row (index into the resultVector)
     * @param column the column (0=key, 1=dataObject, 2=c_dist, 3=r_dist)
     * @return the cell value, or "" for an unknown column
     */
    public Object getValueAt(int row, int column) {
        DataObject dataObject = (DataObject) resultVector.elementAt(row);
        if (column == 0) {
            return dataObject.getKey();
        }
        if (column == 1) {
            return dataObject;
        }
        if (column == 2) {
            return distanceString(dataObject.getCoreDistance());
        }
        if (column == 3) {
            return distanceString(dataObject.getReachabilityDistance());
        }
        return "";
    }

    /**
     * Renders a distance value the way the table displays it.
     *
     * @param distance the raw distance value
     * @return "UNDEFINED" for undefined distances, otherwise the formatted number
     */
    private String distanceString(double distance) {
        return (distance == DataObject.UNDEFINED)
                ? "UNDEFINED"
                : Utils.doubleToString(distance, 3, 5);
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 1.4 $");
    }
}
4,508
32.4
120
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/forOPTICSAndDBScan/OPTICS_GUI/SERFileFilter.java
/*
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 *    Copyright (C) 2004
 *    & Matthias Schubert (schubert@dbs.ifi.lmu.de)
 *    & Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de)
 *    & Rainer Holzmann (holzmann@cip.ifi.lmu.de)
 */

package weka.clusterers.forOPTICSAndDBScan.OPTICS_GUI;

import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

import java.io.File;

import javax.swing.filechooser.FileFilter;

/**
 * <p>
 * SERFileFilter.java <br/>
 * Authors: Rainer Holzmann, Zhanna Melnikova-Albrecht <br/>
 * Date: Sep 15, 2004 <br/>
 * Time: 6:54:56 PM <br/>
 * $ Revision 1.4 $ <br/>
 * </p>
 *
 * File-chooser filter that accepts directories plus files carrying the
 * configured extension.
 *
 * @author Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de)
 * @author Rainer Holzmann (holzmann@cip.ifi.lmu.de)
 * @version $Revision: 1.3 $
 */
public class SERFileFilter extends FileFilter implements RevisionHandler {

    /**
     * Holds the extension of the FileFilter (without the leading dot).
     * Now final: accept() no longer mutates it.
     */
    private final String extension;

    /**
     * Holds the description for this File-Type
     */
    private final String description;

    // *****************************************************************************************************************
    // constructors
    // *****************************************************************************************************************

    /**
     * Creates a filter for the given extension.
     *
     * @param extension the accepted extension without the leading dot, e.g. "ser"
     * @param description the human-readable description shown in the chooser
     */
    public SERFileFilter(String extension, String description) {
        this.extension = extension;
        this.description = description;
    }

    // *****************************************************************************************************************
    // methods
    // *****************************************************************************************************************

    /**
     * Whether the given file is accepted by this filter. Directories are
     * always accepted so the user can navigate the file tree.
     *
     * BUGFIX: the previous implementation overwrote the configured
     * {@link #extension} field with the extension of each examined file and
     * then compared against a hard-coded "ser" — so the constructor argument
     * was ignored and the filter's state was clobbered on every call (files
     * without a dot were even matched against the previous file's extension).
     * The comparison is now side-effect free and uses the configured
     * extension, case-insensitively.
     *
     * @param f the file to test (may be null)
     * @return true if f is a directory or its name ends in the configured extension
     */
    public boolean accept(File f) {
        if (f == null) {
            return false;
        }
        if (f.isDirectory()) {
            return true;
        }
        String filename = f.getName();
        int i = filename.lastIndexOf('.');
        // Require a dot that is neither the first nor the last character,
        // so hidden files like ".ser" and trailing dots are rejected.
        if (i > 0 && i < filename.length() - 1) {
            return filename.substring(i + 1).equalsIgnoreCase(extension);
        }
        return false;
    }

    /**
     * The description of this filter.
     * @see javax.swing.filechooser.FileView#getName
     */
    public String getDescription() {
        return description;
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 1.3 $");
    }
}
3,310
29.1
120
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/forOPTICSAndDBScan/OPTICS_GUI/SERObject.java
/*
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 *    Copyright (C) 2004
 *    & Matthias Schubert (schubert@dbs.ifi.lmu.de)
 *    & Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de)
 *    & Rainer Holzmann (holzmann@cip.ifi.lmu.de)
 */

package weka.clusterers.forOPTICSAndDBScan.OPTICS_GUI;

import weka.core.FastVector;
import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

import java.io.Serializable;

/**
 * <p>
 * SERObject.java <br/>
 * Authors: Rainer Holzmann, Zhanna Melnikova-Albrecht <br/>
 * Date: Sep 15, 2004 <br/>
 * Time: 9:43:00 PM <br/>
 * $ Revision 1.4 $ <br/>
 * </p>
 *
 * Serializable snapshot of one OPTICS run: the cluster-ordered results plus
 * the parameters and statistics of the run that produced them.
 *
 * @author Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de)
 * @author Rainer Holzmann (holzmann@cip.ifi.lmu.de)
 * @version $Revision: 1.4 $
 */
public class SERObject implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -6022057864970639151L;

  /** Cluster-ordered dataObjects produced by the OPTICS run. */
  private FastVector resultVector;
  /** Number of dataObjects in the clustered database. */
  private int databaseSize;
  /** Number of attributes per dataObject. */
  private int numberOfAttributes;
  /** Epsilon parameter the run was started with. */
  private double epsilon;
  /** MinPoints parameter the run was started with. */
  private int minPoints;
  /** Whether the outputs were written to a file. */
  private boolean opticsOutputs;
  /** Type (index structure) of the database that was used. */
  private String database_Type;
  /** Distance function that was used. */
  private String database_distanceType;
  /** Number of clusters the run produced. */
  private int numberOfGeneratedClusters;
  /** Wall-clock time of the run (without unit suffix). */
  private String elapsedTime;

  // *****************************************************************************************************************
  // constructors
  // *****************************************************************************************************************

  /**
   * Captures the results and settings of a finished OPTICS run.
   *
   * @param resultVector cluster-ordered dataObjects
   * @param databaseSize number of dataObjects
   * @param numberOfAttributes attributes per dataObject
   * @param epsilon epsilon parameter of the run
   * @param minPoints minPoints parameter of the run
   * @param opticsOutputs whether outputs were written to a file
   * @param database_Type index type of the database
   * @param database_distanceType distance function used
   * @param numberOfGeneratedClusters number of generated clusters
   * @param elapsedTime elapsed time (without unit)
   */
  public SERObject(FastVector resultVector, int databaseSize, int numberOfAttributes,
                   double epsilon, int minPoints, boolean opticsOutputs,
                   String database_Type, String database_distanceType,
                   int numberOfGeneratedClusters, String elapsedTime) {
    this.resultVector = resultVector;
    this.databaseSize = databaseSize;
    this.numberOfAttributes = numberOfAttributes;
    this.epsilon = epsilon;
    this.minPoints = minPoints;
    this.opticsOutputs = opticsOutputs;
    this.database_Type = database_Type;
    this.database_distanceType = database_distanceType;
    this.numberOfGeneratedClusters = numberOfGeneratedClusters;
    this.elapsedTime = elapsedTime;
  }

  // *****************************************************************************************************************
  // methods
  // *****************************************************************************************************************

  /** @return the cluster-ordered result vector */
  public FastVector getResultVector() {
    return resultVector;
  }

  /** @return the number of dataObjects in the database */
  public int getDatabaseSize() {
    return databaseSize;
  }

  /** @return the number of attributes per dataObject */
  public int getNumberOfAttributes() {
    return numberOfAttributes;
  }

  /** @return the epsilon parameter of the run */
  public double getEpsilon() {
    return epsilon;
  }

  /** @return the minPoints parameter of the run */
  public int getMinPoints() {
    return minPoints;
  }

  /** @return true if the outputs were written to a file, else false */
  public boolean isOpticsOutputs() {
    return opticsOutputs;
  }

  /** @return the type of the used index (database) */
  public String getDatabase_Type() {
    return database_Type;
  }

  /** @return the distance-type that was used */
  public String getDatabase_distanceType() {
    return database_distanceType;
  }

  /** @return the number of generated clusters */
  public int getNumberOfGeneratedClusters() {
    return numberOfGeneratedClusters;
  }

  /** @return the elapsed time with a " sec" unit suffix appended */
  public String getElapsedTime() {
    return elapsedTime + " sec";
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 1.4 $");
  }
}
5,335
28.480663
120
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/forOPTICSAndDBScan/Utils/EpsilonRange_ListElement.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Copyright (C) 2004 * & Matthias Schubert (schubert@dbs.ifi.lmu.de) * & Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * & Rainer Holzmann (holzmann@cip.ifi.lmu.de) */ package weka.clusterers.forOPTICSAndDBScan.Utils; import weka.clusterers.forOPTICSAndDBScan.DataObjects.DataObject; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * <p> * EpsilonRange_ListElement.java <br/> * Authors: Rainer Holzmann, Zhanna Melnikova-Albrecht, Matthias Schubert <br/> * Date: Sep 7, 2004 <br/> * Time: 2:12:34 PM <br/> * $ Revision 1.4 $ <br/> * </p> * * @author Matthias Schubert (schubert@dbs.ifi.lmu.de) * @author Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * @author Rainer Holzmann (holzmann@cip.ifi.lmu.de) * @version $Revision: 1.3 $ */ public class EpsilonRange_ListElement implements RevisionHandler { /** * Holds the dataObject */ private DataObject dataObject; /** * Holds the distance that was calculated for this dataObject */ private double distance; // ***************************************************************************************************************** // constructors // ***************************************************************************************************************** /** * Constructs a new Element that is stored in the ArrayList which 
is * built in the k_nextNeighbourQuery-method from a specified database. * This structure is chosen to deliver not only the DataObjects that * are within the epsilon-range but also deliver the distances that * were calculated. This reduces the amount of distance-calculations * within some data-mining-algorithms. * @param distance The calculated distance for this dataObject * @param dataObject A dataObject that is within the epsilon-range */ public EpsilonRange_ListElement(double distance, DataObject dataObject) { this.distance = distance; this.dataObject = dataObject; } // ***************************************************************************************************************** // methods // ***************************************************************************************************************** /** * Returns the distance that was calulcated for this dataObject * (The distance between this dataObject and the dataObject for which an epsilon-range-query * was performed.) * @return distance */ public double getDistance() { return distance; } /** * Returns this dataObject * @return dataObject */ public DataObject getDataObject() { return dataObject; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.3 $"); } }
3,678
33.383178
120
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/forOPTICSAndDBScan/Utils/PriorityQueue.java
/*
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 *    Copyright (C) 2004
 *    & Matthias Schubert (schubert@dbs.ifi.lmu.de)
 *    & Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de)
 *    & Rainer Holzmann (holzmann@cip.ifi.lmu.de)
 */

package weka.clusterers.forOPTICSAndDBScan.Utils;

import weka.core.RevisionHandler;
import weka.core.RevisionUtils;

import java.util.ArrayList;

/**
 * <p>
 * PriorityQueue.java <br/>
 * Authors: Rainer Holzmann, Zhanna Melnikova-Albrecht, Matthias Schubert <br/>
 * Date: Aug 27, 2004 <br/>
 * Time: 5:36:35 PM <br/>
 * $ Revision 1.4 $ <br/>
 * </p>
 *
 * @author Matthias Schubert (schubert@dbs.ifi.lmu.de)
 * @author Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de)
 * @author Rainer Holzmann (holzmann@cip.ifi.lmu.de)
 * @version $Revision: 1.3 $
 */
public class PriorityQueue implements RevisionHandler {

    /**
     * Used to store the binary heap.
     * List index 0 is the root. The heap math below uses 1-based positions:
     * for position a, the parent sits at a/2 and the children at 2a and 2a+1;
     * list index = position - 1.
     */
    private ArrayList queue;

    // *****************************************************************************************************************
    // constructors
    // *****************************************************************************************************************

    /**
     * Creates a new PriorityQueue backed on a binary heap. The queue is
     * dynamically growing and shrinking and it is descending, that is: the highest
     * priority is always in the root.
     */
    public PriorityQueue() {
        queue = new ArrayList();
    }

    // *****************************************************************************************************************
    // methods
    // *****************************************************************************************************************

    /**
     * Adds a new Object to the queue
     * @param priority The priority associated with the object
     * @param o the object to store
     */
    public void add(double priority, Object o) {
        // Append at the end of the heap, then sift the new element upwards.
        queue.add(new PriorityQueueElement(priority, o));
        heapValueUpwards();
    }

    /**
     * Returns the priority for the object at the specified index
     * @param index the (0-based list) index of the object
     * @return priority
     */
    public double getPriority(int index) {
        return ((PriorityQueueElement) queue.get(index)).getPriority();
    }

    /**
     * Restores the heap after inserting a new object
     * (sift-up of the last element; a and c are 1-based heap positions,
     * c = a/2 is a's parent).
     */
    private void heapValueUpwards() {
        int a = size();
        int c = a / 2;

        PriorityQueueElement recentlyInsertedElement = (PriorityQueueElement) queue.get(a - 1);

        // Move parents down while their priority is smaller than the new element's.
        while (c > 0 && getPriority(c - 1) < recentlyInsertedElement.getPriority()) {
            queue.set(a - 1, queue.get(c - 1));   //shift parent-node down
            a = c;                                //(c <= 0) => no parent-node remains
            c = a / 2;
        }
        // a is now the position where the new element belongs.
        queue.set(a - 1, recentlyInsertedElement);
    }

    /**
     * Restores the heap after removing the next element
     * (sift-down of the element placed into the root; a is the current 1-based
     * position, c the 1-based position of its larger child).
     */
    private void heapValueDownwards() {
        int a = 1;
        int c = 2 * a; //descendant

        PriorityQueueElement priorityQueueElement = (PriorityQueueElement) queue.get(a - 1);

        // If a right child exists and outranks the left one, use it instead.
        if (c < size() && (getPriority(c) > getPriority(c - 1)))
            c++;

        // Pull children up while they outrank the sifted element.
        while (c <= size() && getPriority(c - 1) > priorityQueueElement.getPriority()) {
            queue.set(a - 1, queue.get(c - 1));
            a = c;
            c = 2 * a;
            if (c < size() && (getPriority(c) > getPriority(c - 1)))
                c++;
        }
        queue.set(a - 1, priorityQueueElement);
    }

    /**
     * Returns the queue's size
     * @return size
     */
    public int size() {
        return queue.size();
    }

    /**
     * Tests, if the queue has some more elements left
     * @return true, if there are any elements left, else false
     */
    public boolean hasNext() {
        return !(size() == 0);
    }

    /**
     * Returns the element with the highest priority
     * @return next element
     */
    public PriorityQueueElement next() {
        PriorityQueueElement next = (PriorityQueueElement) queue.get(0);
        // Move the last element into the root, shrink the heap by one and
        // restore the heap property if anything is left.
        queue.set(0, queue.get(size() - 1));
        queue.remove(size() - 1);
        if (hasNext()) {
            heapValueDownwards();
        }
        return next;
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
        return RevisionUtils.extract("$Revision: 1.3 $");
    }
}
5,159
30.272727
120
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/forOPTICSAndDBScan/Utils/PriorityQueueElement.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Copyright (C) 2004 * & Matthias Schubert (schubert@dbs.ifi.lmu.de) * & Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * & Rainer Holzmann (holzmann@cip.ifi.lmu.de) */ package weka.clusterers.forOPTICSAndDBScan.Utils; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * <p> * PriorityQueueElement.java <br/> * Authors: Rainer Holzmann, Zhanna Melnikova-Albrecht, Matthias Schubert <br/> * Date: Aug 31, 2004 <br/> * Time: 6:43:18 PM <br/> * $ Revision 1.4 $ <br/> * </p> * * @author Matthias Schubert (schubert@dbs.ifi.lmu.de) * @author Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * @author Rainer Holzmann (holzmann@cip.ifi.lmu.de) * @version $Revision: 1.3 $ */ public class PriorityQueueElement implements RevisionHandler { /** * Holds the priority for the object (in this case: the distance) */ private double priority; /** * Holds the original object */ private Object o; // ***************************************************************************************************************** // constructors // ***************************************************************************************************************** public PriorityQueueElement(double priority, Object o) { this.priority = priority; this.o = o; } // 
***************************************************************************************************************** // methods // ***************************************************************************************************************** /** * Returns the priority for this object * @return priority */ public double getPriority() { return priority; } /** * Returns the object * @return */ public Object getObject() { return o; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.3 $"); } }
2,825
29.387097
120
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/forOPTICSAndDBScan/Utils/UpdateQueue.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Copyright (C) 2004 * & Matthias Schubert (schubert@dbs.ifi.lmu.de) * & Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * & Rainer Holzmann (holzmann@cip.ifi.lmu.de) */ package weka.clusterers.forOPTICSAndDBScan.Utils; import weka.core.RevisionHandler; import weka.core.RevisionUtils; import java.util.ArrayList; import java.util.TreeMap; /** * <p> * UpdateQueue.java <br/> * Authors: Rainer Holzmann, Zhanna Melnikova-Albrecht, Matthias Schubert <br/> * Date: Aug 27, 2004 <br/> * Time: 5:36:35 PM <br/> * $ Revision 1.4 $ <br/> * </p> * * @author Matthias Schubert (schubert@dbs.ifi.lmu.de) * @author Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * @author Rainer Holzmann (holzmann@cip.ifi.lmu.de) * @version $Revision: 1.3 $ */ public class UpdateQueue implements RevisionHandler { /** * Used to store the binary heap */ private ArrayList queue; /** * Used to get efficient access to the stored Objects */ private TreeMap objectPositionsInHeap; // ***************************************************************************************************************** // constructors // ***************************************************************************************************************** /** * Creates a new PriorityQueue (backed on a binary heap) with the ability to efficiently * 
update the priority of the stored objects in the heap. The ascending (!) queue is * dynamically growing and shrinking. */ public UpdateQueue() { queue = new ArrayList(); objectPositionsInHeap = new TreeMap(); } // ***************************************************************************************************************** // methods // ***************************************************************************************************************** /** * Adds a new Object to the queue * @param priority The priority associated with the object (in this case: the reachability-distance) * @param objectKey The key for this object * @param o */ public void add(double priority, Object o, String objectKey) { int objectPosition = 0; if (objectPositionsInHeap.containsKey(objectKey)) { objectPosition = ((Integer) objectPositionsInHeap.get(objectKey)).intValue(); if (((UpdateQueueElement) queue.get(objectPosition)).getPriority() <= priority) return; queue.set(objectPosition++, new UpdateQueueElement(priority, o, objectKey)); } else { queue.add(new UpdateQueueElement(priority, o, objectKey)); objectPosition = size(); } heapValueUpwards(objectPosition); } /** * Returns the priority for the object at the specified index * @param index the index of the object * @return priority */ public double getPriority(int index) { return ((UpdateQueueElement) queue.get(index)).getPriority(); } /** * Restores the heap after inserting a new object */ private void heapValueUpwards(int pos) { int a = pos; int c = a / 2; UpdateQueueElement recentlyInsertedElement = (UpdateQueueElement) queue.get(a - 1); /** ascending order! 
*/ while (c > 0 && getPriority(c - 1) > recentlyInsertedElement.getPriority()) { queue.set(a - 1, queue.get(c - 1)); //shift parent-node down objectPositionsInHeap.put(((UpdateQueueElement) queue.get(a - 1)).getObjectKey(), new Integer(a - 1)); a = c; //(c <= 0) => no parent-node remains c = a / 2; } queue.set(a - 1, recentlyInsertedElement); objectPositionsInHeap.put(((UpdateQueueElement) queue.get(a - 1)).getObjectKey(), new Integer(a - 1)); } /** * Restores the heap after removing the next element */ private void heapValueDownwards() { int a = 1; int c = 2 * a; //descendant UpdateQueueElement updateQueueElement = (UpdateQueueElement) queue.get(a - 1); if (c < size() && (getPriority(c) < getPriority(c - 1))) c++; while (c <= size() && getPriority(c - 1) < updateQueueElement.getPriority()) { queue.set(a - 1, queue.get(c - 1)); objectPositionsInHeap.put(((UpdateQueueElement) queue.get(a - 1)).getObjectKey(), new Integer(a - 1)); a = c; c = 2 * a; if (c < size() && (getPriority(c) < getPriority(c - 1))) c++; } queue.set(a - 1, updateQueueElement); objectPositionsInHeap.put(((UpdateQueueElement) queue.get(a - 1)).getObjectKey(), new Integer(a - 1)); } /** * Returns the queue's size * @return size */ public int size() { return queue.size(); } /** * Tests, if the queue has some more elements left * @return true, if there are any elements left, else false */ public boolean hasNext() { return !(queue.size() == 0); } /** * Returns the element with the lowest priority * @return next element */ public UpdateQueueElement next() { UpdateQueueElement next = (UpdateQueueElement) queue.get(0); queue.set(0, queue.get(size() - 1)); queue.remove(size() - 1); objectPositionsInHeap.remove(next.getObjectKey()); if (hasNext()) { heapValueDownwards(); } return next; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.3 $"); } }
6,473
33.43617
120
java
tsml-java
tsml-java-master/src/main/java/weka/clusterers/forOPTICSAndDBScan/Utils/UpdateQueueElement.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Copyright (C) 2004 * & Matthias Schubert (schubert@dbs.ifi.lmu.de) * & Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * & Rainer Holzmann (holzmann@cip.ifi.lmu.de) */ package weka.clusterers.forOPTICSAndDBScan.Utils; import weka.core.RevisionHandler; import weka.core.RevisionUtils; /** * <p> * UpdateQueueElement.java <br/> * Authors: Rainer Holzmann, Zhanna Melnikova-Albrecht, Matthias Schubert <br/> * Date: Aug 31, 2004 <br/> * Time: 6:43:18 PM <br/> * $ Revision 1.4 $ <br/> * </p> * * @author Matthias Schubert (schubert@dbs.ifi.lmu.de) * @author Zhanna Melnikova-Albrecht (melnikov@cip.ifi.lmu.de) * @author Rainer Holzmann (holzmann@cip.ifi.lmu.de) * @version $Revision: 1.3 $ */ public class UpdateQueueElement implements RevisionHandler { /** * Holds the priority for the object (in this case: the reachability-distance) */ private double priority; /** * Holds the original object */ private Object o; /** * Holds the key for this object */ private String objectKey; // ***************************************************************************************************************** // constructors // ***************************************************************************************************************** public UpdateQueueElement(double priority, Object o, String objectKey) { 
this.priority = priority; this.o = o; this.objectKey = objectKey; } // ***************************************************************************************************************** // methods // ***************************************************************************************************************** /** * Returns the priority for this object * @return priority */ public double getPriority() { return priority; } /** * Returns the object * @return */ public Object getObject() { return o; } /** * Returns the key * @return objectKey */ public String getObjectKey() { return objectKey; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 1.3 $"); } }
3,102
28
120
java
tsml-java
tsml-java-master/src/main/java/weka/core/AbstractInstance.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * DenseInstance.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.core; import java.io.Serializable; import java.util.Enumeration; /** * Abstract class providing common functionality for the original instance * implementations. * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 9134 $ */ public abstract class AbstractInstance implements Instance, Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = 1482635194499365155L; /** * The dataset the instance has access to. Null if the instance doesn't have * access to any dataset. Only if an instance has access to a dataset, it * knows about the actual attribute types. */ protected/* @spec_public@ */Instances m_Dataset; /** The instance's attribute values. */ protected/* @spec_public non_null@ */double[] m_AttValues; /** The instance's weight. */ protected double m_Weight; /** Default max number of digits after the decimal point for numeric values */ public static int s_numericAfterDecimalPoint = 6; /** * Returns the attribute with the given index. 
* * @param index the attribute's index * @return the attribute at the given position * @throws UnassignedDatasetException if instance doesn't have access to a * dataset */ // @ requires m_Dataset != null; @Override public/* @pure@ */Attribute attribute(int index) { if (m_Dataset == null) { throw new UnassignedDatasetException( "DenseInstance doesn't have access to a dataset!"); } return m_Dataset.attribute(index); } /** * Returns the attribute with the given index in the sparse representation. * * @param indexOfIndex the index of the attribute's index * @return the attribute at the given position * @throws UnassignedDatasetException if instance doesn't have access to a * dataset */ // @ requires m_Dataset != null; @Override public/* @pure@ */Attribute attributeSparse(int indexOfIndex) { if (m_Dataset == null) { throw new UnassignedDatasetException( "DenseInstance doesn't have access to a dataset!"); } return m_Dataset.attribute(index(indexOfIndex)); } /** * Returns class attribute. * * @return the class attribute * @throws UnassignedDatasetException if the class is not set or the instance * doesn't have access to a dataset */ // @ requires m_Dataset != null; @Override public/* @pure@ */Attribute classAttribute() { if (m_Dataset == null) { throw new UnassignedDatasetException( "DenseInstance doesn't have access to a dataset!"); } return m_Dataset.classAttribute(); } /** * Returns the class attribute's index. * * @return the class index as an integer * @throws UnassignedDatasetException if instance doesn't have access to a * dataset */ // @ requires m_Dataset != null; // @ ensures \result == m_Dataset.classIndex(); @Override public/* @pure@ */int classIndex() { if (m_Dataset == null) { throw new UnassignedDatasetException( "DenseInstance doesn't have access to a dataset!"); } return m_Dataset.classIndex(); } /** * Tests if an instance's class is missing. 
* * @return true if the instance's class is missing * @throws UnassignedClassException if the class is not set or the instance * doesn't have access to a dataset */ // @ requires classIndex() >= 0; @Override public/* @pure@ */boolean classIsMissing() { if (classIndex() < 0) { throw new UnassignedClassException("Class is not set!"); } return isMissing(classIndex()); } /** * Returns an instance's class value in internal format. (ie. as a * floating-point number) * * @return the corresponding value as a double (If the corresponding attribute * is nominal (or a string) then it returns the value's index as a * double). * @throws UnassignedClassException if the class is not set or the instance * doesn't have access to a dataset */ // @ requires classIndex() >= 0; @Override public/* @pure@ */double classValue() { if (classIndex() < 0) { throw new UnassignedClassException("Class is not set!"); } return value(classIndex()); } /** * Returns the dataset this instance has access to. (ie. obtains information * about attribute types from) Null if the instance doesn't have access to a * dataset. * * @return the dataset the instance has accesss to */ // @ ensures \result == m_Dataset; @Override public/* @pure@ */Instances dataset() { return m_Dataset; } /** * Deletes an attribute at the given position (0 to numAttributes() - 1). Only * succeeds if the instance does not have access to any dataset because * otherwise inconsistencies could be introduced. * * @param position the attribute's position * @throws RuntimeException if the instance has access to a dataset */ // @ requires m_Dataset != null; @Override public void deleteAttributeAt(int position) { if (m_Dataset != null) { throw new RuntimeException("DenseInstance has access to a dataset!"); } forceDeleteAttributeAt(position); } /** * Returns an enumeration of all the attributes. 
* * @return enumeration of all the attributes * @throws UnassignedDatasetException if the instance doesn't have access to a * dataset */ // @ requires m_Dataset != null; @Override public/* @pure@ */Enumeration enumerateAttributes() { if (m_Dataset == null) { throw new UnassignedDatasetException( "DenseInstance doesn't have access to a dataset!"); } return m_Dataset.enumerateAttributes(); } /** * Tests if the headers of two instances are equivalent. * * @param inst another instance * @return true if the header of the given instance is equivalent to this * instance's header * @throws UnassignedDatasetException if instance doesn't have access to any * dataset */ // @ requires m_Dataset != null; @Override public/* @pure@ */boolean equalHeaders(Instance inst) { if (m_Dataset == null) { throw new UnassignedDatasetException( "DenseInstance doesn't have access to a dataset!"); } return m_Dataset.equalHeaders(inst.dataset()); } /** * Checks if the headers of two instances are equivalent. If not, then returns * a message why they differ. * * @param dataset another instance * @return null if the header of the given instance is equivalent to this * instance's header, otherwise a message with details on why they * differ */ @Override public String equalHeadersMsg(Instance inst) { if (m_Dataset == null) throw new UnassignedDatasetException( "DenseInstance doesn't have access to a dataset!"); return m_Dataset.equalHeadersMsg(inst.dataset()); } /** * Tests whether an instance has a missing value. Skips the class attribute if * set. * * @return true if instance has a missing value. 
* @throws UnassignedDatasetException if instance doesn't have access to any * dataset */ // @ requires m_Dataset != null; @Override public/* @pure@ */boolean hasMissingValue() { if (m_Dataset == null) { throw new UnassignedDatasetException( "DenseInstance doesn't have access to a dataset!"); } for (int i = 0; i < numValues(); i++) { if (index(i) != classIndex()) { if (isMissingSparse(i)) { return true; } } } return false; } /** * Inserts an attribute at the given position (0 to numAttributes()). Only * succeeds if the instance does not have access to any dataset because * otherwise inconsistencies could be introduced. * * @param position the attribute's position * @throws RuntimeException if the instance has accesss to a dataset * @throws IllegalArgumentException if the position is out of range */ // @ requires m_Dataset == null; // @ requires 0 <= position && position <= numAttributes(); @Override public void insertAttributeAt(int position) { if (m_Dataset != null) { throw new RuntimeException("DenseInstance has accesss to a dataset!"); } if ((position < 0) || (position > numAttributes())) { throw new IllegalArgumentException("Can't insert attribute: index out " + "of range"); } forceInsertAttributeAt(position); } /** * Tests if a specific value is "missing". * * @param attIndex the attribute's index * @return true if the value is "missing" */ @Override public/* @pure@ */boolean isMissing(int attIndex) { if (Utils.isMissingValue(value(attIndex))) { return true; } return false; } /** * Tests if a specific value is "missing", given an index in the sparse * representation. * * @param indexOfIndex the index of the attribute's index * @return true if the value is "missing" */ @Override public/* @pure@ */boolean isMissingSparse(int indexOfIndex) { if (Utils.isMissingValue(valueSparse(indexOfIndex))) { return true; } return false; } /** * Tests if a specific value is "missing". The given attribute has to belong * to a dataset. 
* * @param att the attribute * @return true if the value is "missing" */ @Override public/* @pure@ */boolean isMissing(Attribute att) { return isMissing(att.index()); } /** * Returns the number of class labels. * * @return the number of class labels as an integer if the class attribute is * nominal, 1 otherwise. * @throws UnassignedDatasetException if instance doesn't have access to any * dataset */ // @ requires m_Dataset != null; @Override public/* @pure@ */int numClasses() { if (m_Dataset == null) { throw new UnassignedDatasetException( "DenseInstance doesn't have access to a dataset!"); } return m_Dataset.numClasses(); } /** * Sets the class value of an instance to be "missing". A deep copy of the * vector of attribute values is performed before the value is set to be * missing. * * @throws UnassignedClassException if the class is not set * @throws UnassignedDatasetException if the instance doesn't have access to a * dataset */ // @ requires classIndex() >= 0; @Override public void setClassMissing() { if (classIndex() < 0) { throw new UnassignedClassException("Class is not set!"); } setMissing(classIndex()); } /** * Sets the class value of an instance to the given value (internal * floating-point format). A deep copy of the vector of attribute values is * performed before the value is set. * * @param value the new attribute value (If the corresponding attribute is * nominal (or a string) then this is the new value's index as a * double). * @throws UnassignedClassException if the class is not set * @throws UnaddignedDatasetException if the instance doesn't have access to a * dataset */ // @ requires classIndex() >= 0; @Override public void setClassValue(double value) { if (classIndex() < 0) { throw new UnassignedClassException("Class is not set!"); } setValue(classIndex(), value); } /** * Sets the class value of an instance to the given value. A deep copy of the * vector of attribute values is performed before the value is set. 
* * @param value the new class value (If the class is a string attribute and * the value can't be found, the value is added to the attribute). * @throws UnassignedClassException if the class is not set * @throws UnassignedDatasetException if the dataset is not set * @throws IllegalArgumentException if the attribute is not nominal or a * string, or the value couldn't be found for a nominal attribute */ // @ requires classIndex() >= 0; @Override public final void setClassValue(String value) { if (classIndex() < 0) { throw new UnassignedClassException("Class is not set!"); } setValue(classIndex(), value); } /** * Sets the reference to the dataset. Does not check if the instance is * compatible with the dataset. Note: the dataset does not know about this * instance. If the structure of the dataset's header gets changed, this * instance will not be adjusted automatically. * * @param instances the reference to the dataset */ @Override public final void setDataset(Instances instances) { m_Dataset = instances; } /** * Sets a specific value to be "missing". Performs a deep copy of the vector * of attribute values before the value is set to be missing. * * @param attIndex the attribute's index */ @Override public final void setMissing(int attIndex) { setValue(attIndex, Utils.missingValue()); } /** * Sets a specific value to be "missing". Performs a deep copy of the vector * of attribute values before the value is set to be missing. The given * attribute has to belong to a dataset. * * @param att the attribute */ @Override public final void setMissing(Attribute att) { setMissing(att.index()); } /** * Sets a value of a nominal or string attribute to the given value. Performs * a deep copy of the vector of attribute values before the value is set. * * @param attIndex the attribute's index * @param value the new attribute value (If the attribute is a string * attribute and the value can't be found, the value is added to the * attribute). 
* @throws UnassignedDatasetException if the dataset is not set * @throws IllegalArgumentException if the selected attribute is not nominal * or a string, or the supplied value couldn't be found for a * nominal attribute */ // @ requires m_Dataset != null; @Override public final void setValue(int attIndex, String value) { int valIndex; if (m_Dataset == null) { throw new UnassignedDatasetException( "DenseInstance doesn't have access to a dataset!"); } if (!attribute(attIndex).isNominal() && !attribute(attIndex).isString()) { throw new IllegalArgumentException( "Attribute neither nominal nor string!"); } valIndex = attribute(attIndex).indexOfValue(value); if (valIndex == -1) { if (attribute(attIndex).isNominal()) { throw new IllegalArgumentException( "Value not defined for given nominal attribute!"); } else { attribute(attIndex).forceAddValue(value); valIndex = attribute(attIndex).indexOfValue(value); } } setValue(attIndex, valIndex); } /** * Sets a specific value in the instance to the given value (internal * floating-point format). Performs a deep copy of the vector of attribute * values before the value is set, so if you are planning on calling setValue * many times it may be faster to create a new instance using toDoubleArray. * The given attribute has to belong to a dataset. * * @param att the attribute * @param value the new attribute value (If the corresponding attribute is * nominal (or a string) then this is the new value's index as a * double). */ @Override public final void setValue(Attribute att, double value) { setValue(att.index(), value); } /** * Sets a value of an nominal or string attribute to the given value. Performs * a deep copy of the vector of attribute values before the value is set, so * if you are planning on calling setValue many times it may be faster to * create a new instance using toDoubleArray. The given attribute has to * belong to a dataset. 
* * @param att the attribute * @param value the new attribute value (If the attribute is a string * attribute and the value can't be found, the value is added to the * attribute). * @throws IllegalArgumentException if the the attribute is not nominal or a * string, or the value couldn't be found for a nominal attribute */ @Override public final void setValue(Attribute att, String value) { if (!att.isNominal() && !att.isString()) { throw new IllegalArgumentException( "Attribute neither nominal nor string!"); } int valIndex = att.indexOfValue(value); if (valIndex == -1) { if (att.isNominal()) { throw new IllegalArgumentException( "Value not defined for given nominal attribute!"); } else { att.forceAddValue(value); valIndex = att.indexOfValue(value); } } setValue(att.index(), valIndex); } /** * Sets the weight of an instance. * * @param weight the weight */ @Override public final void setWeight(double weight) { m_Weight = weight; } /** * Returns the relational value of a relational attribute. * * @param attIndex the attribute's index * @return the corresponding relation as an Instances object * @throws IllegalArgumentException if the attribute is not a relation-valued * attribute * @throws UnassignedDatasetException if the instance doesn't belong to a * dataset. */ // @ requires m_Dataset != null; @Override public final/* @pure@ */Instances relationalValue(int attIndex) { if (m_Dataset == null) { throw new UnassignedDatasetException( "DenseInstance doesn't have access to a dataset!"); } return relationalValue(m_Dataset.attribute(attIndex)); } /** * Returns the relational value of a relational attribute. * * @param att the attribute * @return the corresponding relation as an Instances object, null if missing * @throws IllegalArgumentException if the attribute is not a relation-valued * attribute * @throws UnassignedDatasetException if the instance doesn't belong to a * dataset. 
*/ @Override public final/* @pure@ */Instances relationalValue(Attribute att) { int attIndex = att.index(); if (att.isRelationValued()) { if (isMissing(attIndex)) { return null; } return att.relation((int) value(attIndex)); } else { throw new IllegalArgumentException("Attribute isn't relation-valued!"); } } /** * Returns the value of a nominal, string, date, or relational attribute for * the instance as a string. * * @param attIndex the attribute's index * @return the value as a string * @throws IllegalArgumentException if the attribute is not a nominal, string, * date, or relation-valued attribute. * @throws UnassignedDatasetException if the instance doesn't belong to a * dataset. */ // @ requires m_Dataset != null; @Override public final/* @pure@ */String stringValue(int attIndex) { if (m_Dataset == null) { throw new UnassignedDatasetException( "DenseInstance doesn't have access to a dataset!"); } return stringValue(m_Dataset.attribute(attIndex)); } /** * Returns the value of a nominal, string, date, or relational attribute for * the instance as a string. * * @param att the attribute * @return the value as a string * @throws IllegalArgumentException if the attribute is not a nominal, string, * date, or relation-valued attribute. * @throws UnassignedDatasetException if the instance doesn't belong to a * dataset. */ @Override public final/* @pure@ */String stringValue(Attribute att) { int attIndex = att.index(); if (isMissing(attIndex)) { return "?"; } switch (att.type()) { case Attribute.NOMINAL: case Attribute.STRING: return att.value((int) value(attIndex)); case Attribute.DATE: return att.formatDate(value(attIndex)); case Attribute.RELATIONAL: return att.relation((int) value(attIndex)).stringWithoutHeader(); default: throw new IllegalArgumentException( "Attribute isn't nominal, string or date!"); } } /** * Returns the description of one instance with any numeric values printed at * the supplied maximum number of decimal places. 
If the instance doesn't have * access to a dataset, it returns the internal floating-point values. Quotes * string values that contain whitespace characters. * * @param afterDecimalPoint the maximum number of digits permitted after the * decimal point for a numeric value * * @return the instance's description as a string */ @Override public final String toStringMaxDecimalDigits(int afterDecimalPoint) { StringBuffer text = new StringBuffer(toStringNoWeight(afterDecimalPoint)); if (m_Weight != 1.0) { text.append(",{" + Utils.doubleToString(m_Weight, afterDecimalPoint) + "}"); } return text.toString(); } /** * Returns the description of one instance. If the instance doesn't have * access to a dataset, it returns the internal floating-point values. Quotes * string values that contain whitespace characters. * * @return the instance's description as a string */ @Override public String toString() { return toStringMaxDecimalDigits(s_numericAfterDecimalPoint); } /** * Returns the description of one value of the instance as a string. If the * instance doesn't have access to a dataset, it returns the internal * floating-point value. Quotes string values that contain whitespace * characters, or if they are a question mark. * * @param attIndex the attribute's index * @return the value's description as a string */ @Override public final String toString(int attIndex) { return toString(attIndex, s_numericAfterDecimalPoint); } /** * Returns the description of one value of the instance as a string. If the * instance doesn't have access to a dataset, it returns the internal * floating-point value. Quotes string values that contain whitespace * characters, or if they are a question mark. 
* * @param attIndex the attribute's index * @param afterDecimalPoint the maximum number of digits permitted after the * decimal point for numeric values * @return the value's description as a string */ @Override public final/* @pure@ */String toString(int attIndex, int afterDecimalPoint) { StringBuffer text = new StringBuffer(); if (isMissing(attIndex)) { text.append("?"); } else { if (m_Dataset == null) { text.append(Utils.doubleToString(value(attIndex), afterDecimalPoint)); } else { switch (m_Dataset.attribute(attIndex).type()) { case Attribute.NOMINAL: case Attribute.STRING: case Attribute.DATE: case Attribute.RELATIONAL: text.append(Utils.quote(stringValue(attIndex))); break; case Attribute.NUMERIC: text.append(Utils.doubleToString(value(attIndex), afterDecimalPoint)); break; default: throw new IllegalStateException("Unknown attribute type"); } } } return text.toString(); } /** * Returns the description of one value of the instance as a string. If the * instance doesn't have access to a dataset it returns the internal * floating-point value. Quotes string values that contain whitespace * characters, or if they are a question mark. The given attribute has to * belong to a dataset. * * @param att the attribute * @return the value's description as a string */ @Override public final String toString(Attribute att) { return toString(att.index()); } /** * Returns the description of one value of the instance as a string. If the * instance doesn't have access to a dataset it returns the internal * floating-point value. Quotes string values that contain whitespace * characters, or if they are a question mark. The given attribute has to * belong to a dataset. 
* * @param att the attribute * @param afterDecimalPoint the maximum number of decimal places to print * @return the value's description as a string */ @Override public final String toString(Attribute att, int afterDecimalPoint) { return toString(att.index(), afterDecimalPoint); } /** * Returns an instance's attribute value in internal format. The given * attribute has to belong to a dataset. * * @param att the attribute * @return the specified value as a double (If the corresponding attribute is * nominal (or a string) then it returns the value's index as a * double). */ @Override public/* @pure@ */double value(Attribute att) { return value(att.index()); } /** * Returns an instance's attribute value in internal format, given an index in * the sparse representation. * * @param indexOfIndex the index of the attribute's index * @return the specified value as a double (If the corresponding attribute is * nominal (or a string) then it returns the value's index as a * double). */ @Override public/* @pure@ */double valueSparse(int indexOfIndex) { return m_AttValues[indexOfIndex]; } /** * Returns the instance's weight. * * @return the instance's weight as a double */ @Override public final/* @pure@ */double weight() { return m_Weight; } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9134 $"); } /** * Deletes an attribute at the given position (0 to numAttributes() - 1). * * @param position the attribute's position */ protected abstract void forceDeleteAttributeAt(int position); /** * Inserts an attribute at the given position (0 to numAttributes()) and sets * its value to be missing. * * @param position the attribute's position */ protected abstract void forceInsertAttributeAt(int position); }
27,060
30.429733
80
java
tsml-java
tsml-java-master/src/main/java/weka/core/AbstractStringDistanceFunction.java
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * AbstractStringDistanceFunction.java * Copyright (C) 2008 Bruno Woltzenlogel Paleo (http://www.logic.at/people/bruno/ ; http://bruno-wp.blogspot.com/) * */ package weka.core; import weka.core.neighboursearch.PerformanceStats; /** * Represents the abstract ancestor for string-based distance functions, like * EditDistance. * * @author Bruno Woltzenlogel Paleo * @version $Revision: 1.1 $ */ public abstract class AbstractStringDistanceFunction extends NormalizableDistance { /** * Constructor that doesn't set the data */ public AbstractStringDistanceFunction() { super(); } /** * Constructor that sets the data * * @param data the set of instances that will be used for * later distance comparisons */ public AbstractStringDistanceFunction(Instances data) { super(data); } /** * Updates the current distance calculated so far with the new difference * between two attributes. The difference between the attributes was * calculated with the difference(int,double,double) method. 
* * @param currDist the current distance calculated so far * @param diff the difference between two new attributes * @return the update distance * @see #difference(int, double, double) */ protected double updateDistance(double currDist, double diff) { return (currDist + (diff * diff)); } /** * Computes the difference between two given attribute * values. * * @param index the attribute index * @param val1 the first value * @param val2 the second value * @return the difference */ protected double difference(int index, String string1, String string2) { switch (m_Data.attribute(index).type()) { case Attribute.STRING: double diff = stringDistance(string1, string2); if (m_DontNormalize == true) { return diff; } else { if (string1.length() > string2.length()) { return diff/((double) string1.length()); } else { return diff/((double) string2.length()); } } default: return 0; } } /** * Calculates the distance between two instances. Offers speed up (if the * distance function class in use supports it) in nearest neighbour search by * taking into account the cutOff or maximum distance. Depending on the * distance function class, post processing of the distances by * postProcessDistances(double []) may be required if this function is used. * * @param first the first instance * @param second the second instance * @param cutOffValue If the distance being calculated becomes larger than * cutOffValue then the rest of the calculation is * discarded. * @param stats the performance stats object * @return the distance between the two given instances or * Double.POSITIVE_INFINITY if the distance being * calculated becomes larger than cutOffValue. 
*/ @Override public double distance(Instance first, Instance second, double cutOffValue, PerformanceStats stats) { double sqDistance = 0; int numAttributes = m_Data.numAttributes(); validate(); double diff; for (int i = 0; i < numAttributes; i++) { diff = 0; if (m_ActiveIndices[i]) { diff = difference(i, first.stringValue(i), second.stringValue(i)); } sqDistance = updateDistance(sqDistance, diff); if (sqDistance > (cutOffValue * cutOffValue)) return Double.POSITIVE_INFINITY; } double distance = Math.sqrt(sqDistance); return distance; } /** * Calculates the distance between two strings. * Must be implemented by any non-abstract StringDistance class * * @param stringA the first string * @param stringB the second string * @return the distance between the two given strings */ abstract double stringDistance(String stringA, String stringB); }
4,724
31.142857
117
java
tsml-java
tsml-java-master/src/main/java/weka/core/AdditionalMeasureProducer.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * AdditionalMeasureProducer.java
 * Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.core;

import java.util.Enumeration;

/**
 * Interface to something that can produce measures other than those
 * calculated by evaluation modules.
 *
 * @author Mark Hall (mhall@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public interface AdditionalMeasureProducer {

  /**
   * Returns an enumeration of the measure names. Additional measures
   * must follow the naming convention of starting with "measure", eg.
   * double measureBlah()
   *
   * @return an enumeration of the measure names
   */
  Enumeration<String> enumerateMeasures();

  /**
   * Returns the value of the named measure.
   *
   * @param measureName the name of the measure to query for its value
   * @return the value of the named measure
   * @throws IllegalArgumentException if the named measure is not supported
   */
  double getMeasure(String measureName);
}
1,646
31.294118
78
java
tsml-java
tsml-java-master/src/main/java/weka/core/Aggregateable.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Aggregateable.java * Copyright (C) 2013 University of Waikato, Hamilton, New Zealand */ package weka.core; /** * Interface to something that can aggregate an object of the same type with * itself. * * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) * @version $Revision: 9784 $ */ public interface Aggregateable<E> { /** * Aggregate an object with this one * * @param toAggregate the object to aggregate * @return the result of aggregation * @throws Exception if the supplied object can't be aggregated for some * reason */ E aggregate(E toAggregate) throws Exception; /** * Call to complete the aggregation process. Allows implementers to do any * final processing based on how many objects were aggregated. * * @throws Exception if the aggregation can't be finalized for some reason */ void finalizeAggregation() throws Exception; }
1,591
30.84
76
java
tsml-java
tsml-java-master/src/main/java/weka/core/AlgVector.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * AlgVector.java
 * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.core;

import java.io.Serializable;
import java.util.Random;

/**
 * Class for performing operations on an algebraic vector
 * of floating-point values.
 *
 * @author Gabi Schmidberger (gabi@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public class AlgVector implements Cloneable, Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -4023736016850256591L;

  /** The values of the vector */
  protected double[] m_Elements;

  /**
   * Constructs a vector and initializes its elements to 0.0.
   *
   * @param n the number of elements
   */
  public AlgVector(int n) {
    m_Elements = new double[n];
    initialize();
  }

  /**
   * Constructs a vector using a given array (deep copy).
   *
   * @param array the values of the vector
   */
  public AlgVector(double[] array) {
    m_Elements = array.clone();
  }

  /**
   * Constructs a vector using a given data format.
   * The vector has an element for each numerical attribute.
   * The other attributes (nominal, string) are ignored.
   * Random is used to initialize the attributes.
   *
   * @param format the data format to use
   * @param random for initializing the attributes
   * @throws Exception if something goes wrong
   */
  public AlgVector(Instances format, Random random) throws Exception {
    int len = format.numAttributes();
    for (int i = 0; i < format.numAttributes(); i++) {
      if (!format.attribute(i).isNumeric()) {
        len--;
      }
    }
    // NOTE(review): if the format has no numeric attributes, m_Elements is
    // deliberately left null (historical behavior); operations on such a
    // vector return null or 0 rather than failing.
    if (len > 0) {
      m_Elements = new double[len];
      initialize(random);
    }
  }

  /**
   * Constructs a vector using an instance.
   * The vector has an element for each numerical attribute.
   * The other attributes (nominal, string) are ignored.
   *
   * @param instance with numeric attributes, that AlgVector gets built from
   * @throws Exception if instance doesn't have access to the data format or
   *           no numeric attributes in the data
   */
  public AlgVector(Instance instance) throws Exception {
    int len = instance.numAttributes();
    for (int i = 0; i < instance.numAttributes(); i++) {
      if (!instance.attribute(i).isNumeric()) {
        len--;
      }
    }
    if (len <= 0) {
      throw new IllegalArgumentException("No numeric attributes in data!");
    }
    m_Elements = new double[len];
    int n = 0;
    for (int i = 0; i < instance.numAttributes(); i++) {
      if (!instance.attribute(i).isNumeric()) {
        continue;
      }
      m_Elements[n++] = instance.value(i);
    }
  }

  /**
   * Creates and returns a clone of this object (deep copy of the elements).
   *
   * @return a clone of this instance.
   * @throws CloneNotSupportedException if an error occurs
   */
  @Override
  public Object clone() throws CloneNotSupportedException {
    AlgVector v = (AlgVector) super.clone();
    v.m_Elements = m_Elements.clone();
    return v;
  }

  /**
   * Resets the elements to the default value which is 0.0.
   */
  protected void initialize() {
    for (int i = 0; i < m_Elements.length; i++) {
      m_Elements[i] = 0.0;
    }
  }

  /**
   * Initializes the values with random numbers between 0 and 1.
   *
   * @param random the random number generator to use for initializing
   */
  protected void initialize(Random random) {
    for (int i = 0; i < m_Elements.length; i++) {
      m_Elements[i] = random.nextDouble();
    }
  }

  /**
   * Returns the value of an element of the vector.
   *
   * @param index the element's index
   * @return the value of the element
   */
  public final double getElement(int index) {
    return m_Elements[index];
  }

  /**
   * Returns the number of elements in the vector.
   *
   * @return the number of elements
   */
  public final int numElements() {
    return m_Elements.length;
  }

  /**
   * Sets an element of the vector to the given value.
   *
   * @param index the element's index
   * @param value the new value
   */
  public final void setElement(int index, double value) {
    m_Elements[index] = value;
  }

  /**
   * Sets the elements of the vector to values of the given array
   * (deep copy into the existing element array).
   *
   * @param elements an array of doubles
   */
  public final void setElements(double[] elements) {
    System.arraycopy(elements, 0, m_Elements, 0, elements.length);
  }

  /**
   * Gets the elements of the vector and returns them as double array
   * (deep copy).
   *
   * @return an array of doubles
   */
  public double[] getElements() {
    return m_Elements.clone();
  }

  /**
   * Gets the elements of the vector as an instance.
   * !!
   * NON-numeric data is ignored so far.
   *
   * @param model the dataset structure to fit the data to
   * @param random in case of nominal values a random label is taken
   * @return the built instance, or null if this vector has no elements
   * @throws Exception if length of vector is not number of numerical attributes
   */
  public Instance getAsInstance(Instances model, Random random)
    throws Exception {

    Instance newInst = null;

    if (m_Elements != null) {
      newInst = new DenseInstance(model.numAttributes());
      newInst.setDataset(model);

      for (int i = 0, j = 0; i < model.numAttributes(); i++) {
        if (model.attribute(i).isNumeric()) {
          if (j >= m_Elements.length) {
            throw new Exception("Datatypes are not compatible.");
          }
          newInst.setValue(i, m_Elements[j++]);
        }
        if (model.attribute(i).isNominal()) {
          // pick a uniformly random label for nominal attributes
          int newVal = (int) (random.nextDouble()
            * (double) (model.attribute(i).numValues()));
          // defensive clamp against an out-of-range index
          if (newVal == (int) model.attribute(i).numValues()) {
            newVal -= 1;
          }
          newInst.setValue(i, newVal);
        }
      }
    }
    return newInst;
  }

  /**
   * Returns the sum of this vector with another.
   *
   * @param other the vector to add
   * @return a vector containing the sum, or null if this vector has no elements
   */
  public final AlgVector add(AlgVector other) {
    if (m_Elements == null) {
      return null;
    }
    int n = m_Elements.length;
    AlgVector b;
    try {
      b = (AlgVector) clone();
    } catch (CloneNotSupportedException ex) {
      b = new AlgVector(n);
    }
    for (int i = 0; i < n; i++) {
      b.m_Elements[i] = m_Elements[i] + other.m_Elements[i];
    }
    return b;
  }

  /**
   * Returns the difference of this vector minus another.
   *
   * @param other the vector to subtract
   * @return a vector containing the difference, or null if this vector has
   *         no elements (consistent with {@link #add(AlgVector)})
   */
  public final AlgVector substract(AlgVector other) {
    // guard added for consistency with add(): a vector built from a format
    // without numeric attributes has null elements
    if (m_Elements == null) {
      return null;
    }
    int n = m_Elements.length;
    AlgVector b;
    try {
      b = (AlgVector) clone();
    } catch (CloneNotSupportedException ex) {
      b = new AlgVector(n);
    }
    for (int i = 0; i < n; i++) {
      b.m_Elements[i] = m_Elements[i] - other.m_Elements[i];
    }
    return b;
  }

  /**
   * Returns the inner (or dot) product of two vectors.
   *
   * @param b the other vector
   * @return the double representing the dot product
   */
  public final double dotMultiply(AlgVector b) {
    double sum = 0.0;
    if (m_Elements != null) {
      for (int i = 0; i < m_Elements.length; i++) {
        sum += m_Elements[i] * b.m_Elements[i];
      }
    }
    return sum;
  }

  /**
   * Multiplies this vector in place by a scalar.
   *
   * @param s the scalar
   */
  public final void scalarMultiply(double s) {
    if (m_Elements != null) {
      for (int i = 0; i < m_Elements.length; i++) {
        m_Elements[i] = s * m_Elements[i];
      }
    }
  }

  /**
   * Changes the length (Euclidean norm) of the vector.
   * NOTE(review): a zero-norm vector yields a division by zero here
   * (NaN elements), matching the historical behavior.
   *
   * @param len the new length of the vector
   */
  public void changeLength(double len) {
    scalarMultiply(len / norm());
  }

  /**
   * Returns the Euclidean norm of the vector.
   *
   * @return the norm of the vector, 0.0 if it has no elements
   */
  public double norm() {
    if (m_Elements == null) {
      return 0.0;
    }
    double sum = 0.0;
    for (int i = 0; i < m_Elements.length; i++) {
      sum += m_Elements[i] * m_Elements[i];
    }
    return Math.sqrt(sum);
  }

  /**
   * Norms this vector to length 1.0.
   * NOTE(review): a zero-norm vector yields NaN elements, matching the
   * historical behavior.
   */
  public final void normVector() {
    scalarMultiply(1.0 / norm());
  }

  /**
   * Converts the vector to a comma-separated string, terminated by a newline.
   *
   * @return the converted string
   */
  @Override
  public String toString() {
    StringBuffer text = new StringBuffer();
    for (int i = 0; i < m_Elements.length; i++) {
      if (i > 0) {
        text.append(",");
      }
      text.append(Utils.doubleToString(m_Elements[i], 6));
    }
    text.append("\n");
    return text.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Main method for testing this class.
   *
   * @param args commandline options
   * @throws Exception if something goes wrong in testing
   */
  public static void main(String[] args) throws Exception {
    double[] first = {2.3, 1.2, 5.0};

    try {
      AlgVector test = new AlgVector(first);
      System.out.println("test:\n " + test);
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
}
10,336
23.265258
81
java
tsml-java
tsml-java-master/src/main/java/weka/core/AllJavadoc.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * AllJavadoc.java
 * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.core;

import java.util.HashSet;
import java.util.Vector;

/**
 * Applies all known Javadoc-derived classes to a source file.
 * <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -W &lt;classname&gt;
 * The class to load.</pre>
 *
 * <pre> -nostars
 * Suppresses the '*' in the Javadoc.</pre>
 *
 * <pre> -dir &lt;dir&gt;
 * The directory above the package hierarchy of the class.</pre>
 *
 * <pre> -silent
 * Suppresses printing in the console.</pre>
 * <!-- options-end -->
 *
 * @author fracpete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 */
public class AllJavadoc extends Javadoc {

  /** contains one instance of every other Javadoc-derived generator */
  protected static Vector<Javadoc> m_Javadocs;

  /** determine all classes derived from Javadoc and instantiate them */
  static {
    // get all classnames, besides this one (an aggregator must not
    // recursively apply itself)
    HashSet<String> set = new HashSet<String>(ClassDiscovery.find(Javadoc.class, Javadoc.class.getPackage().getName()));
    set.remove(AllJavadoc.class.getName());

    // instantiate them
    m_Javadocs = new Vector<Javadoc>();
    for (String classname : set) {
      try {
        Class<?> cls = Class.forName(classname);
        m_Javadocs.add((Javadoc) cls.getDeclaredConstructor().newInstance());
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
  }

  /**
   * sets the classname of the class to generate the Javadoc for; forwarded to
   * all contained generators.
   *
   * @param value the new classname
   */
  @Override
  public void setClassname(String value) {
    super.setClassname(value);
    for (Javadoc doc : m_Javadocs) {
      doc.setClassname(value);
    }
  }

  /**
   * sets whether to prefix the Javadoc with "*"; forwarded to all contained
   * generators.
   *
   * @param value true if stars are to be used
   */
  @Override
  public void setUseStars(boolean value) {
    super.setUseStars(value);
    for (Javadoc doc : m_Javadocs) {
      doc.setUseStars(value);
    }
  }

  /**
   * sets whether to suppress output in the console; forwarded to all contained
   * generators.
   *
   * @param value true if output is to be suppressed
   */
  @Override
  public void setSilent(boolean value) {
    super.setSilent(value);
    for (Javadoc doc : m_Javadocs) {
      doc.setSilent(value);
    }
  }

  /**
   * generates and returns the Javadoc for the specified start/end tag pair.
   * Not used by this aggregating class - see updateJavadoc(String) instead.
   *
   * @param index the index in the start/end tag array
   * @return the generated Javadoc
   * @throws Exception always, since this method is not used here
   */
  @Override
  protected String generateJavadoc(int index) throws Exception {
    throw new Exception("Not used!");
  }

  /**
   * updates the Javadoc in the given source code, using all the found
   * Javadoc updaters, applied one after the other.
   *
   * @param content the source code
   * @return the updated source code
   * @throws Exception in case the generation fails
   */
  @Override
  protected String updateJavadoc(String content) throws Exception {
    String result = content;
    for (Javadoc doc : m_Javadocs) {
      result = doc.updateJavadoc(result);
    }
    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Parses the given commandline parameters and generates the Javadoc.
   *
   * @param args the commandline parameters for the object
   */
  public static void main(String[] args) {
    runJavadoc(new AllJavadoc(), args);
  }
}
26.259494
120
java
tsml-java
tsml-java-master/src/main/java/weka/core/Attribute.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Attribute.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.core; import java.io.IOException; import java.io.Serializable; import java.io.StreamTokenizer; import java.io.StringReader; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Date; import java.util.Enumeration; import java.util.Hashtable; import java.util.List; import java.util.Properties; /** * Class for handling an attribute. Once an attribute has been created, * it can't be changed. <p> * * The following attribute types are supported: * <ul> * <li> numeric: <br/> * This type of attribute represents a floating-point number. * </li> * <li> nominal: <br/> * This type of attribute represents a fixed set of nominal values. * </li> * <li> string: <br/> * This type of attribute represents a dynamically expanding set of * nominal values. Usually used in text classification. * </li> * <li> date: <br/> * This type of attribute represents a date, internally represented as * floating-point number storing the milliseconds since January 1, * 1970, 00:00:00 GMT. The string representation of the date must be * <a href="http://www.iso.org/iso/en/prods-services/popstds/datesandtime.html" target="_blank"> * ISO-8601</a> compliant, the default is <code>yyyy-MM-dd'T'HH:mm:ss</code>. 
* </li> * <li> relational: <br/> * This type of attribute can contain other attributes and is, e.g., * used for representing Multi-Instance data. (Multi-Instance data * consists of a nominal attribute containing the bag-id, then a * relational attribute with all the attributes of the bag, and * finally the class attribute.) * </li> * </ul> * * Typical usage (code from the main() method of this class): <p> * * <code> * ... <br> * * // Create numeric attributes "length" and "weight" <br> * Attribute length = new Attribute("length"); <br> * Attribute weight = new Attribute("weight"); <br><br> * * // Create list to hold nominal values "first", "second", "third" <br> * List<String> my_nominal_values = new ArrayList<String>(3); <br> * my_nominal_values.add("first"); <br> * my_nominal_values.add("second"); <br> * my_nominal_values.add("third"); <br><br> * * // Create nominal attribute "position" <br> * Attribute position = new Attribute("position", my_nominal_values);<br> * * ... <br> * </code><p> * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 9515 $ */ public class Attribute implements Copyable, Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = -742180568732916383L; /** Constant set for numeric attributes. */ public static final int NUMERIC = 0; /** Constant set for nominal attributes. */ public static final int NOMINAL = 1; /** Constant set for attributes with string values. */ public static final int STRING = 2; /** Constant set for attributes with date values. */ public static final int DATE = 3; /** Constant set for relation-valued attributes. */ public static final int RELATIONAL = 4; /** Constant set for symbolic attributes. */ public static final int ORDERING_SYMBOLIC = 0; /** Constant set for ordered attributes. */ public static final int ORDERING_ORDERED = 1; /** Constant set for modulo-ordered attributes. 
*/ public static final int ORDERING_MODULO = 2; /** The keyword used to denote the start of an arff attribute declaration */ public final static String ARFF_ATTRIBUTE = "@attribute"; /** A keyword used to denote a numeric attribute */ public final static String ARFF_ATTRIBUTE_INTEGER = "integer"; /** A keyword used to denote a numeric attribute */ public final static String ARFF_ATTRIBUTE_REAL = "real"; /** A keyword used to denote a numeric attribute */ public final static String ARFF_ATTRIBUTE_NUMERIC = "numeric"; /** The keyword used to denote a string attribute */ public final static String ARFF_ATTRIBUTE_STRING = "string"; /** The keyword used to denote a date attribute */ public final static String ARFF_ATTRIBUTE_DATE = "date"; /** The keyword used to denote a relation-valued attribute */ public final static String ARFF_ATTRIBUTE_RELATIONAL = "relational"; /** The keyword used to denote the end of the declaration of a subrelation */ public final static String ARFF_END_SUBRELATION = "@end"; /** Dummy first value for String attributes (useful for sparse instances) */ public final static String DUMMY_STRING_VAL = "*WEKA*DUMMY*STRING*FOR*STRING*ATTRIBUTES*"; /** Strings longer than this will be stored compressed. */ private static final int STRING_COMPRESS_THRESHOLD = 200; /** The attribute's name. */ private /*@ spec_public non_null @*/ String m_Name; /** The attribute's type. */ private /*@ spec_public @*/ int m_Type; /*@ invariant m_Type == NUMERIC || m_Type == DATE || m_Type == STRING || m_Type == NOMINAL || m_Type == RELATIONAL; */ /** The attribute's values (if nominal or string). */ private /*@ spec_public @*/ ArrayList<Object> m_Values; /** Mapping of values to indices (if nominal or string). */ private Hashtable<Object,Integer> m_Hashtable; /** The header information for a relation-valued attribute. */ private Instances m_Header; /** Date format specification for date attributes */ private SimpleDateFormat m_DateFormat; /** The attribute's index. 
*/ private /*@ spec_public @*/ int m_Index; /** The attribute's metadata. */ private ProtectedProperties m_Metadata; /** The attribute's ordering. */ private int m_Ordering; /** Whether the attribute is regular. */ private boolean m_IsRegular; /** Whether the attribute is averagable. */ private boolean m_IsAveragable; /** Whether the attribute has a zeropoint. */ private boolean m_HasZeropoint; /** The attribute's weight. */ private double m_Weight; /** The attribute's lower numeric bound. */ private double m_LowerBound; /** Whether the lower bound is open. */ private boolean m_LowerBoundIsOpen; /** The attribute's upper numeric bound. */ private double m_UpperBound; /** Whether the upper bound is open */ private boolean m_UpperBoundIsOpen; /** * Constructor for a numeric attribute. * * @param attributeName the name for the attribute */ //@ requires attributeName != null; //@ ensures m_Name == attributeName; public Attribute(String attributeName) { this(attributeName, new ProtectedProperties(new Properties())); } /** * Constructor for a numeric attribute, where metadata is supplied. * * @param attributeName the name for the attribute * @param metadata the attribute's properties */ //@ requires attributeName != null; //@ requires metadata != null; //@ ensures m_Name == attributeName; public Attribute(String attributeName, ProtectedProperties metadata) { m_Name = attributeName; m_Index = -1; m_Values = null; m_Hashtable = null; m_Header = null; m_Type = NUMERIC; setMetadata(metadata); } /** * Constructor for a date attribute. * * @param attributeName the name for the attribute * @param dateFormat a string suitable for use with * SimpleDateFormatter for parsing dates. 
*/
//@ requires attributeName != null;
//@ requires dateFormat != null;
//@ ensures m_Name == attributeName;
public Attribute(String attributeName, String dateFormat) {

  // Delegates to the metadata constructor with an empty property set.
  this(attributeName, dateFormat,
       new ProtectedProperties(new Properties()));
}

/**
 * Constructor for a date attribute, where metadata is supplied.
 *
 * @param attributeName the name for the attribute
 * @param dateFormat a string suitable for use with
 * SimpleDateFormatter for parsing dates; if null, a default
 * ISO-8601 style pattern ("yyyy-MM-dd'T'HH:mm:ss") is used
 * @param metadata the attribute's properties
 */
//@ requires attributeName != null;
//@ requires dateFormat != null;
//@ requires metadata != null;
//@ ensures m_Name == attributeName;
public Attribute(String attributeName, String dateFormat,
                 ProtectedProperties metadata) {

  m_Name = attributeName;
  m_Index = -1;        // not attached to any Instances object yet
  m_Values = null;     // date attributes keep no value pool
  m_Hashtable = null;
  m_Header = null;     // only used by relation-valued attributes
  m_Type = DATE;
  if (dateFormat != null) {
    m_DateFormat = new SimpleDateFormat(dateFormat);
  } else {
    m_DateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
  }
  // Strict parsing: reject strings that do not exactly match the pattern.
  m_DateFormat.setLenient(false);
  setMetadata(metadata);
}

/**
 * Constructor for nominal attributes and string attributes.
 * If a null vector of attribute values is passed to the method,
 * the attribute is assumed to be a string.
 *
 * @param attributeName the name for the attribute
 * @param attributeValues a vector of strings denoting the
 * attribute values. Null if the attribute is a string attribute.
 */
//@ requires attributeName != null;
//@ ensures m_Name == attributeName;
public Attribute(String attributeName, List<String> attributeValues) {

  // Delegates to the metadata constructor with an empty property set.
  this(attributeName, attributeValues,
       new ProtectedProperties(new Properties()));
}

/**
 * Constructor for nominal attributes and string attributes, where
 * metadata is supplied. If a null vector of attribute values is passed
 * to the method, the attribute is assumed to be a string.
 *
 * @param attributeName the name for the attribute
 * @param attributeValues a vector of strings denoting the
 * attribute values. Null if the attribute is a string attribute.
 * @param metadata the attribute's properties
 * @throws IllegalArgumentException if attributeValues contains
 * duplicate labels
 */
//@ requires attributeName != null;
//@ requires metadata != null;
/*@ ensures m_Name == attributeName;
    ensures m_Index == -1;
    ensures attributeValues == null && m_Type == STRING
         || attributeValues != null && m_Type == NOMINAL
              && m_Values.size() == attributeValues.size();
    signals (IllegalArgumentException ex)
         (* if duplicate strings in attributeValues *);
*/
public Attribute(String attributeName, List<String> attributeValues,
                 ProtectedProperties metadata) {

  m_Name = attributeName;
  m_Index = -1;
  if (attributeValues == null) {
    // String attribute: starts with an empty (but non-null) value pool.
    m_Values = new ArrayList<Object>();
    m_Hashtable = new Hashtable<Object,Integer>();
    m_Header = null;
    m_Type = STRING;

    // Make sure there is at least one value so that string attribute
    // values are always represented when output as part of a sparse instance.
    addStringValue(DUMMY_STRING_VAL);
  } else {
    // Nominal attribute: copy labels, storing long ones compressed.
    m_Values = new ArrayList<Object>(attributeValues.size());
    m_Hashtable = new Hashtable<Object,Integer>(attributeValues.size());
    m_Header = null;
    for (int i = 0; i < attributeValues.size(); i++) {
      Object store = attributeValues.get(i);
      if (((String)store).length() > STRING_COMPRESS_THRESHOLD) {
        try {
          // Labels above the threshold are stored as compressed
          // SerializedObjects to save memory.
          store = new SerializedObject(attributeValues.get(i), true);
        } catch (Exception ex) {
          System.err.println("Couldn't compress nominal attribute value -"
                             + " storing uncompressed.");
        }
      }
      if (m_Hashtable.containsKey(store)) {
        throw new IllegalArgumentException("A nominal attribute (" +
                                           attributeName + ") cannot"
                                           + " have duplicate labels ("
                                           + store + ").");
      }
      m_Values.add(store);
      m_Hashtable.put(store, new Integer(i));
    }
    m_Type = NOMINAL;
  }
  setMetadata(metadata);
}

/**
 * Constructor for relation-valued attributes.
 *
 * @param attributeName the name for the attribute
 * @param header an Instances object specifying the header of the relation.
 */
public Attribute(String attributeName, Instances header) {

  // Delegates to the metadata constructor with an empty property set.
  this(attributeName, header,
       new ProtectedProperties(new Properties()));
}

/**
 * Constructor for relation-valued attributes.
 *
 * @param attributeName the name for the attribute
 * @param header an Instances object specifying the header of the relation.
 * @param metadata the attribute's properties
 * @throws IllegalArgumentException if the header contains instances
 */
public Attribute(String attributeName, Instances header,
                 ProtectedProperties metadata) {

  // The header only describes structure; actual relations are added later.
  if (header.numInstances() > 0) {
    throw new IllegalArgumentException("Header for relation-valued " +
                                       "attribute should not contain " +
                                       "any instances");
  }
  m_Name = attributeName;
  m_Index = -1;
  m_Values = new ArrayList<Object>();
  m_Hashtable = new Hashtable<Object,Integer>();
  m_Header = header;
  m_Type = RELATIONAL;
  setMetadata(metadata);
}

/**
 * Produces a shallow copy of this attribute. The value list, hash table,
 * date format and relational header are shared with the original,
 * not cloned.
 *
 * @return a copy of this attribute with the same index
 */
//@ also ensures \result instanceof Attribute;
public /*@ pure non_null @*/ Object copy() {

  Attribute copy = new Attribute(m_Name);

  copy.m_Index = m_Index;
  copy.m_Type = m_Type;
  copy.m_Values = m_Values;
  copy.m_Hashtable = m_Hashtable;
  copy.m_DateFormat = m_DateFormat;
  copy.m_Header = m_Header;
  copy.setMetadata(m_Metadata);

  return copy;
}

/**
 * Returns an enumeration of all the attribute's values if the
 * attribute is nominal or a string, null otherwise (note: relation-valued
 * attributes also yield null here — only nominal and string are checked).
 * Compressed string values are uncompressed on the fly.
 *
 * @return enumeration of all the attribute's values
 */
public final /*@ pure @*/ Enumeration enumerateValues() {

  if (isNominal() || isString()) {
    final Enumeration ee = new WekaEnumeration(m_Values);

    // Wrap the raw enumeration so that compressed (serialized) values
    // are transparently expanded back into strings.
    return new Enumeration () {
        public boolean hasMoreElements() {
          return ee.hasMoreElements();
        }
        public Object nextElement() {
          Object oo = ee.nextElement();
          if (oo instanceof SerializedObject) {
            return ((SerializedObject)oo).getObject();
          } else {
            return oo;
          }
        }
      };
  }
  return null;
}

/**
 * Tests if given attribute is equal to this attribute.
* * @param other the Object to be compared to this attribute * @return true if the given attribute is equal to this attribute */ public final /*@ pure @*/ boolean equals(Object other) { return (equalsMsg(other) == null); } /** * Tests if given attribute is equal to this attribute. If they're not * the same a message detailing why they differ will be returned, otherwise * null. * * @param other the Object to be compared to this attribute * @return null if the given attribute is equal to this attribute */ public final String equalsMsg(Object other) { if (other == null) return "Comparing with null object"; if (!(other.getClass().equals(this.getClass()))) return "Object has wrong class"; Attribute att = (Attribute) other; if (!m_Name.equals(att.m_Name)) return "Names differ: " + m_Name + " != " + att.m_Name; if (isNominal() && att.isNominal()) { if (m_Values.size() != att.m_Values.size()) return "Different number of labels: " + m_Values.size() + " != " + att.m_Values.size(); for (int i = 0; i < m_Values.size(); i++) { if (!m_Values.get(i).equals(att.m_Values.get(i))) return "Labels differ at position " + (i+1) + ": " + m_Values.get(i) + " != " + att.m_Values.get(i); } return null; } if (isRelationValued() && att.isRelationValued()) return m_Header.equalHeadersMsg(att.m_Header); if ((type() != att.type())) return "Types differ: " + typeToString(this) + " != " + typeToString(att); return null; } /** * Returns a string representation of the attribute type. * * @param att the attribute to return the type string for * @return the string representation of the attribute type */ public static String typeToString(Attribute att) { return typeToString(att.type()); } /** * Returns a string representation of the attribute type. 
* * @param type the type of the attribute * @return the string representation of the attribute type */ public static String typeToString(int type) { String result; switch(type) { case NUMERIC: result = "numeric"; break; case NOMINAL: result = "nominal"; break; case STRING: result = "string"; break; case DATE: result = "date"; break; case RELATIONAL: result = "relational"; break; default: result = "unknown(" + type + ")"; } return result; } /** * Returns a short string representation of the attribute type. * * @param att the attribute to return the type string for * @return the string representation of the attribute type */ public static String typeToStringShort(Attribute att) { return typeToStringShort(att.type()); } /** * Returns a short string representation of the attribute type. * * @param type the type of the attribute * @return the string representation of the attribute type */ public static String typeToStringShort(int type) { String result; switch(type) { case NUMERIC: result = "Num"; break; case NOMINAL: result = "Nom"; break; case STRING: result = "Str"; break; case DATE: result = "Dat"; break; case RELATIONAL: result = "Rel"; break; default: result = "???"; } return result; } /** * Returns the index of this attribute. * * @return the index of this attribute */ //@ ensures \result == m_Index; public final /*@ pure @*/ int index() { return m_Index; } /** * Returns the index of a given attribute value. (The index of * the first occurence of this value.) 
* * @param value the value for which the index is to be returned * @return the index of the given attribute value if attribute * is nominal or a string, -1 if it is not or the value * can't be found */ public final int indexOfValue(String value) { if (!isNominal() && !isString()) return -1; Object store = value; if (value.length() > STRING_COMPRESS_THRESHOLD) { try { store = new SerializedObject(value, true); } catch (Exception ex) { System.err.println("Couldn't compress string attribute value -" + " searching uncompressed."); } } Integer val = (Integer)m_Hashtable.get(store); if (val == null) return -1; else return val.intValue(); } /** * Test if the attribute is nominal. * * @return true if the attribute is nominal */ //@ ensures \result <==> (m_Type == NOMINAL); public final /*@ pure @*/ boolean isNominal() { return (m_Type == NOMINAL); } /** * Tests if the attribute is numeric. * * @return true if the attribute is numeric */ //@ ensures \result <==> ((m_Type == NUMERIC) || (m_Type == DATE)); public final /*@ pure @*/ boolean isNumeric() { return ((m_Type == NUMERIC) || (m_Type == DATE)); } /** * Tests if the attribute is relation valued. * * @return true if the attribute is relation valued */ //@ ensures \result <==> (m_Type == RELATIONAL); public final /*@ pure @*/ boolean isRelationValued() { return (m_Type == RELATIONAL); } /** * Tests if the attribute is a string. * * @return true if the attribute is a string */ //@ ensures \result <==> (m_Type == STRING); public final /*@ pure @*/ boolean isString() { return (m_Type == STRING); } /** * Tests if the attribute is a date type. * * @return true if the attribute is a date type */ //@ ensures \result <==> (m_Type == DATE); public final /*@ pure @*/ boolean isDate() { return (m_Type == DATE); } /** * Returns the attribute's name. * * @return the attribute's name as a string */ //@ ensures \result == m_Name; public final /*@ pure @*/ String name() { return m_Name; } /** * Returns the number of attribute values. 
Returns 0 for * attributes that are not either nominal, string, or * relation-valued. * * @return the number of attribute values */ public final /*@ pure @*/ int numValues() { if (!isNominal() && !isString() && !isRelationValued()) { return 0; } else { return m_Values.size(); } } /** * Returns a description of this attribute in ARFF format. Quotes * strings if they contain whitespace characters, or if they * are a question mark. * * @return a description of this attribute as a string */ public final String toString() { StringBuffer text = new StringBuffer(); text.append(ARFF_ATTRIBUTE).append(" ").append(Utils.quote(m_Name)).append(" "); switch (m_Type) { case NOMINAL: text.append('{'); Enumeration enu = enumerateValues(); while (enu.hasMoreElements()) { text.append(Utils.quote((String) enu.nextElement())); if (enu.hasMoreElements()) text.append(','); } text.append('}'); break; case NUMERIC: text.append(ARFF_ATTRIBUTE_NUMERIC); break; case STRING: text.append(ARFF_ATTRIBUTE_STRING); break; case DATE: text.append(ARFF_ATTRIBUTE_DATE).append(" ").append(Utils.quote(m_DateFormat.toPattern())); break; case RELATIONAL: text.append(ARFF_ATTRIBUTE_RELATIONAL).append("\n"); Enumeration enm = m_Header.enumerateAttributes(); while (enm.hasMoreElements()) { text.append(enm.nextElement()).append("\n"); } text.append(ARFF_END_SUBRELATION).append(" ").append(Utils.quote(m_Name)); break; default: text.append("UNKNOWN"); break; } return text.toString(); } /** * Returns the attribute's type as an integer. * * @return the attribute's type. */ //@ ensures \result == m_Type; public final /*@ pure @*/ int type() { return m_Type; } /** * Returns the Date format pattern in case this attribute is of type DATE, * otherwise an empty string. * * @return the date format pattern * @see SimpleDateFormat */ public final String getDateFormat() { if (isDate()) return m_DateFormat.toPattern(); else return ""; } /** * Returns a value of a nominal or string attribute. 
Returns an * empty string if the attribute is neither a string nor a nominal * attribute. * * @param valIndex the value's index * @return the attribute's value as a string */ public final /*@ non_null pure @*/ String value(int valIndex) { if (!isNominal() && !isString()) { return ""; } else { Object val = m_Values.get(valIndex); // If we're storing strings compressed, uncompress it. if (val instanceof SerializedObject) { val = ((SerializedObject)val).getObject(); } return (String) val; } } /** * Returns the header info for a relation-valued attribute, * null if the attribute is not relation-valued. * * @return the attribute's value as an Instances object */ public final /*@ non_null pure @*/ Instances relation() { if (!isRelationValued()) { return null; } else { return m_Header; } } /** * Returns a value of a relation-valued attribute. Returns * null if the attribute is not relation-valued. * * @param valIndex the value's index * @return the attribute's value as an Instances object */ public final /*@ non_null pure @*/ Instances relation(int valIndex) { if (!isRelationValued()) { return null; } else { return (Instances) m_Values.get(valIndex); } } /** * Constructor for a numeric attribute with a particular index. * * @param attributeName the name for the attribute * @param index the attribute's index */ //@ requires attributeName != null; //@ requires index >= 0; //@ ensures m_Name == attributeName; //@ ensures m_Index == index; public Attribute(String attributeName, int index) { this(attributeName); m_Index = index; } /** * Constructor for date attributes with a particular index. * * @param attributeName the name for the attribute * @param dateFormat a string suitable for use with * SimpleDateFormatter for parsing dates. Null for a default format * string. 
* @param index the attribute's index */ //@ requires attributeName != null; //@ requires index >= 0; //@ ensures m_Name == attributeName; //@ ensures m_Index == index; public Attribute(String attributeName, String dateFormat, int index) { this(attributeName, dateFormat); m_Index = index; } /** * Constructor for nominal attributes and string attributes with * a particular index. * If a null vector of attribute values is passed to the method, * the attribute is assumed to be a string. * * @param attributeName the name for the attribute * @param attributeValues a vector of strings denoting the attribute values. * Null if the attribute is a string attribute. * @param index the attribute's index */ //@ requires attributeName != null; //@ requires index >= 0; //@ ensures m_Name == attributeName; //@ ensures m_Index == index; public Attribute(String attributeName, List<String> attributeValues, int index) { this(attributeName, attributeValues); m_Index = index; } /** * Constructor for a relation-valued attribute with a particular index. * * @param attributeName the name for the attribute * @param header the header information for this attribute * @param index the attribute's index */ //@ requires attributeName != null; //@ requires index >= 0; //@ ensures m_Name == attributeName; //@ ensures m_Index == index; public Attribute(String attributeName, Instances header, int index) { this(attributeName, header); m_Index = index; } /** * Adds a string value to the list of valid strings for attributes * of type STRING and returns the index of the string. * * @param value The string value to add * @return the index assigned to the string, or -1 if the attribute is not * of type Attribute.STRING */ /*@ requires value != null; ensures isString() && 0 <= \result && \result < m_Values.size() || ! 
isString() && \result == -1; */ public int addStringValue(String value) { if (!isString()) { return -1; } Object store = value; if (value.length() > STRING_COMPRESS_THRESHOLD) { try { store = new SerializedObject(value, true); } catch (Exception ex) { System.err.println("Couldn't compress string attribute value -" + " storing uncompressed."); } } Integer index = (Integer)m_Hashtable.get(store); if (index != null) { return index.intValue(); } else { int intIndex = m_Values.size(); m_Values.add(store); m_Hashtable.put(store, new Integer(intIndex)); return intIndex; } } /** * Clear the map and list of values and set them to contain * just the supplied value * * @param value the current (and only) value of this String attribute */ public void setStringValue(String value) { if (!isString()) { return; } m_Hashtable.clear(); m_Values.clear(); addStringValue(value); } /** * Adds a string value to the list of valid strings for attributes * of type STRING and returns the index of the string. This method is * more efficient than addStringValue(String) for long strings. * * @param src The Attribute containing the string value to add. * @param index the index of the string value in the source attribute. * @return the index assigned to the string, or -1 if the attribute is not * of type Attribute.STRING */ /*@ requires src != null; requires 0 <= index && index < src.m_Values.size(); ensures isString() && 0 <= \result && \result < m_Values.size() || ! isString() && \result == -1; */ public int addStringValue(Attribute src, int index) { if (!isString()) { return -1; } Object store = src.m_Values.get(index); Integer oldIndex = (Integer)m_Hashtable.get(store); if (oldIndex != null) { return oldIndex.intValue(); } else { int intIndex = m_Values.size(); m_Values.add(store); m_Hashtable.put(store, new Integer(intIndex)); return intIndex; } } /** * Adds a relation to a relation-valued attribute. 
* * @param value The value to add * @return the index assigned to the value, or -1 if the attribute is not * of type Attribute.RELATIONAL */ public int addRelation(Instances value) { if (!isRelationValued()) { return -1; } if (!m_Header.equalHeaders(value)) { throw new IllegalArgumentException("Incompatible value for " + "relation-valued attribute.\n" + m_Header.equalHeadersMsg(value)); } Integer index = (Integer)m_Hashtable.get(value); if (index != null) { return index.intValue(); } else { int intIndex = m_Values.size(); m_Values.add(value); m_Hashtable.put(value, new Integer(intIndex)); return intIndex; } } /** * Adds an attribute value. Creates a fresh list of attribute * values before adding it. * * @param value the attribute value */ final void addValue(String value) { m_Values = Utils.cast(m_Values.clone()); m_Hashtable = Utils.cast(m_Hashtable.clone()); forceAddValue(value); } /** * Produces a shallow copy of this attribute with a new name. * * @param newName the name of the new attribute * @return a copy of this attribute with the same index */ //@ requires newName != null; //@ ensures \result.m_Name == newName; //@ ensures \result.m_Index == m_Index; //@ ensures \result.m_Type == m_Type; public final /*@ pure non_null @*/ Attribute copy(String newName) { Attribute copy = new Attribute(newName); copy.m_Index = m_Index; copy.m_DateFormat = m_DateFormat; copy.m_Type = m_Type; copy.m_Values = m_Values; copy.m_Hashtable = m_Hashtable; copy.m_Header = m_Header; copy.setMetadata(m_Metadata); return copy; } /** * Removes a value of a nominal, string, or relation-valued * attribute. Creates a fresh list of attribute values before * removing it. 
* * @param index the value's index * @throws IllegalArgumentException if the attribute is not * of the correct type */ //@ requires isNominal() || isString() || isRelationValued(); //@ requires 0 <= index && index < m_Values.size(); final void delete(int index) { if (!isNominal() && !isString() && !isRelationValued()) throw new IllegalArgumentException("Can only remove value of " + "nominal, string or relation-" + " valued attribute!"); else { m_Values = Utils.cast(m_Values.clone()); m_Values.remove(index); if (!isRelationValued()) { Hashtable<Object,Integer> hash = new Hashtable<Object,Integer>(m_Hashtable.size()); Enumeration enu = m_Hashtable.keys(); while (enu.hasMoreElements()) { Object string = enu.nextElement(); Integer valIndexObject = (Integer)m_Hashtable.get(string); int valIndex = valIndexObject.intValue(); if (valIndex > index) { hash.put(string, new Integer(valIndex - 1)); } else if (valIndex < index) { hash.put(string, valIndexObject); } } m_Hashtable = hash; } } } /** * Adds an attribute value. * * @param value the attribute value */ //@ requires value != null; //@ ensures m_Values.size() == \old(m_Values.size()) + 1; final void forceAddValue(String value) { Object store = value; if (value.length() > STRING_COMPRESS_THRESHOLD) { try { store = new SerializedObject(value, true); } catch (Exception ex) { System.err.println("Couldn't compress string attribute value -" + " storing uncompressed."); } } m_Values.add(store); m_Hashtable.put(store, new Integer(m_Values.size() - 1)); } /** * Sets the index of this attribute. * * @param index the index of this attribute */ //@ requires 0 <= index; //@ assignable m_Index; //@ ensures m_Index == index; final void setIndex(int index) { m_Index = index; } /** * Sets a value of a nominal attribute or string attribute. * Creates a fresh list of attribute values before it is set. * * @param index the value's index * @param string the value * @throws IllegalArgumentException if the attribute is not nominal or * string. 
*/ //@ requires string != null; //@ requires isNominal() || isString(); //@ requires 0 <= index && index < m_Values.size(); final void setValue(int index, String string) { switch (m_Type) { case NOMINAL: case STRING: m_Values = Utils.cast(m_Values.clone()); m_Hashtable = Utils.cast(m_Hashtable.clone()); Object store = string; if (string.length() > STRING_COMPRESS_THRESHOLD) { try { store = new SerializedObject(string, true); } catch (Exception ex) { System.err.println("Couldn't compress string attribute value -" + " storing uncompressed."); } } m_Hashtable.remove(m_Values.get(index)); m_Values.set(index, store); m_Hashtable.put(store, new Integer(index)); break; default: throw new IllegalArgumentException("Can only set values for nominal" + " or string attributes!"); } } /** * Sets a value of a relation-valued attribute. * Creates a fresh list of attribute values before it is set. * * @param index the value's index * @param data the value * @throws IllegalArgumentException if the attribute is not * relation-valued. */ final void setValue(int index, Instances data) { if (isRelationValued()) { if (!data.equalHeaders(m_Header)) { throw new IllegalArgumentException("Can't set relational value. " + "Headers not compatible.\n" + data.equalHeadersMsg(m_Header)); } m_Values = Utils.cast(m_Values.clone()); m_Values.set(index, data); } else { throw new IllegalArgumentException("Can only set value for" + " relation-valued attributes!"); } } /** * Returns the given amount of milliseconds formatted according to the * current Date format. 
* * @param date the date, represented in milliseconds since * January 1, 1970, 00:00:00 GMT, to return as string * @return the formatted date */ //@ requires isDate(); public /*@pure@*/ String formatDate(double date) { switch (m_Type) { case DATE: return m_DateFormat.format(new Date((long)date)); default: throw new IllegalArgumentException("Can only format date values for date" + " attributes!"); } } /** * Parses the given String as Date, according to the current format and * returns the corresponding amount of milliseconds. * * @param string the date to parse * @return the date in milliseconds since January 1, 1970, 00:00:00 GMT * @throws ParseException if parsing fails */ //@ requires isDate(); //@ requires string != null; public double parseDate(String string) throws ParseException { switch (m_Type) { case DATE: long time = m_DateFormat.parse(string).getTime(); // TODO put in a safety check here if we can't store the value in a double. return (double)time; default: throw new IllegalArgumentException("Can only parse date values for date" + " attributes!"); } } /** * Returns the properties supplied for this attribute. * * @return metadata for this attribute */ public final /*@ pure @*/ ProtectedProperties getMetadata() { return m_Metadata; } /** * Returns the ordering of the attribute. One of the following: * * ORDERING_SYMBOLIC - attribute values should be treated as symbols. * ORDERING_ORDERED - attribute values have a global ordering. * ORDERING_MODULO - attribute values have an ordering which wraps. * * @return the ordering type of the attribute */ public final /*@ pure @*/ int ordering() { return m_Ordering; } /** * Returns whether the attribute values are equally spaced. * * @return whether the attribute is regular or not */ public final /*@ pure @*/ boolean isRegular() { return m_IsRegular; } /** * Returns whether the attribute can be averaged meaningfully. 
* * @return whether the attribute can be averaged or not */ public final /*@ pure @*/ boolean isAveragable() { return m_IsAveragable; } /** * Returns whether the attribute has a zeropoint and may be * added meaningfully. * * @return whether the attribute has a zeropoint or not */ public final /*@ pure @*/ boolean hasZeropoint() { return m_HasZeropoint; } /** * Returns the attribute's weight. * * @return the attribute's weight as a double */ public final /*@ pure @*/ double weight() { return m_Weight; } /** * Sets the new attribute's weight * * @param value the new weight */ public void setWeight(double value) { Properties props; Enumeration names; String name; m_Weight = value; // generate new metadata object props = new Properties(); names = m_Metadata.propertyNames(); while (names.hasMoreElements()) { name = (String) names.nextElement(); if (!name.equals("weight")) props.setProperty(name, m_Metadata.getProperty(name)); } props.setProperty("weight", "" + m_Weight); m_Metadata = new ProtectedProperties(props); } /** * Returns the lower bound of a numeric attribute. * * @return the lower bound of the specified numeric range */ public final /*@ pure @*/ double getLowerNumericBound() { return m_LowerBound; } /** * Returns whether the lower numeric bound of the attribute is open. * * @return whether the lower numeric bound is open or not (closed) */ public final /*@ pure @*/ boolean lowerNumericBoundIsOpen() { return m_LowerBoundIsOpen; } /** * Returns the upper bound of a numeric attribute. * * @return the upper bound of the specified numeric range */ public final /*@ pure @*/ double getUpperNumericBound() { return m_UpperBound; } /** * Returns whether the upper numeric bound of the attribute is open. * * @return whether the upper numeric bound is open or not (closed) */ public final /*@ pure @*/ boolean upperNumericBoundIsOpen() { return m_UpperBoundIsOpen; } /** * Determines whether a value lies within the bounds of the attribute. 
*
 * @param value the value to check
 * @return whether the value is in range
 */
public final /*@ pure @*/ boolean isInRange(double value) {

  // dates and missing values are a special case
  if (m_Type == DATE || Utils.isMissingValue(value)) return true;
  if (m_Type != NUMERIC) {
    // do label range check: the value is interpreted as a label index
    int intVal = (int) value;
    if (intVal < 0 || intVal >= m_Hashtable.size()) return false;
  } else {
    // do numeric bounds check, honouring open vs. closed interval ends
    if (m_LowerBoundIsOpen) {
      if (value <= m_LowerBound) return false;
    } else {
      if (value < m_LowerBound) return false;
    }
    if (m_UpperBoundIsOpen) {
      if (value >= m_UpperBound) return false;
    } else {
      if (value > m_UpperBound) return false;
    }
  }
  return true;
}

/**
 * Sets the metadata for the attribute. Processes the strings stored in the
 * metadata of the attribute so that the properties can be set up for the
 * easy-access metadata methods. Any strings sought that are omitted will
 * cause default values to be set.
 *
 * The following properties are recognised:
 * ordering, averageable, zeropoint, regular, weight, and range.
 *
 * All other properties can be queried and handled appropriately by classes
 * calling the getMetadata() method.
 *
 * @param metadata the metadata
 * @throws IllegalArgumentException if the properties are not consistent
 */
//@ requires metadata != null;
private void setMetadata(ProtectedProperties metadata) {

  m_Metadata = metadata;

  if (m_Type == DATE) {
    // Dates have fixed semantics irrespective of the supplied properties.
    m_Ordering = ORDERING_ORDERED;
    m_IsRegular = true;
    m_IsAveragable = false;
    m_HasZeropoint = false;
  } else {

    // get ordering
    String orderString = m_Metadata.getProperty("ordering","");

    // numeric ordered attributes are averagable and zeropoint by default
    String def;
    if (m_Type == NUMERIC
        && orderString.compareTo("modulo") != 0
        && orderString.compareTo("symbolic") != 0)
      def = "true";
    else def = "false";

    // determine boolean states
    m_IsAveragable =
      (m_Metadata.getProperty("averageable",def).compareTo("true") == 0);
    m_HasZeropoint =
      (m_Metadata.getProperty("zeropoint",def).compareTo("true") == 0);
    // averagable or zeropoint implies regular
    if (m_IsAveragable || m_HasZeropoint) def = "true";
    m_IsRegular =
      (m_Metadata.getProperty("regular",def).compareTo("true") == 0);

    // determine ordering
    if (orderString.compareTo("symbolic") == 0)
      m_Ordering = ORDERING_SYMBOLIC;
    else if (orderString.compareTo("ordered") == 0)
      m_Ordering = ORDERING_ORDERED;
    else if (orderString.compareTo("modulo") == 0)
      m_Ordering = ORDERING_MODULO;
    else {
      // no explicit ordering given: infer from type and boolean states
      if (m_Type == NUMERIC || m_IsAveragable || m_HasZeropoint)
        m_Ordering = ORDERING_ORDERED;
      else m_Ordering = ORDERING_SYMBOLIC;
    }
  }

  // consistency checks
  if (m_IsAveragable && !m_IsRegular)
    throw new IllegalArgumentException("An averagable attribute must be" +
                                       " regular");
  if (m_HasZeropoint && !m_IsRegular)
    throw new IllegalArgumentException("A zeropoint attribute must be" +
                                       " regular");
  if (m_IsRegular && m_Ordering == ORDERING_SYMBOLIC)
    throw new IllegalArgumentException("A symbolic attribute cannot be" +
                                       " regular");
  if (m_IsAveragable && m_Ordering != ORDERING_ORDERED)
    throw new IllegalArgumentException("An averagable attribute must be" +
                                       " ordered");
  if (m_HasZeropoint && m_Ordering != ORDERING_ORDERED)
    throw
new IllegalArgumentException("A zeropoint attribute must be" +
                             " ordered");

  // determine weight
  m_Weight = 1.0;
  String weightString = m_Metadata.getProperty("weight");
  if (weightString != null) {
    try{
      m_Weight = Double.valueOf(weightString).doubleValue();
    } catch (NumberFormatException e) {
      // Check if value is really a number
      throw new IllegalArgumentException("Not a valid attribute weight: '"
                                         + weightString + "'");
    }
  }

  // determine numeric range
  if (m_Type == NUMERIC) setNumericRange(m_Metadata.getProperty("range"));
}

/**
 * Sets the numeric range based on a string. If the string is null the range
 * will default to [-inf,+inf]. A square brace represents a closed interval, a
 * curved brace represents an open interval, and 'inf' represents infinity.
 * Examples of valid range strings: "[-inf,20)","(-13.5,-5.2)","(5,inf]"
 *
 * @param rangeString the string to parse as the attribute's numeric range
 * @throws IllegalArgumentException if the range is not valid
 */
//@ requires rangeString != null;
private void setNumericRange(String rangeString) {
  // set defaults
  m_LowerBound = Double.NEGATIVE_INFINITY;
  m_LowerBoundIsOpen = false;
  m_UpperBound = Double.POSITIVE_INFINITY;
  m_UpperBoundIsOpen = false;

  if (rangeString == null) return;

  // set up a tokenizer to parse the string
  StreamTokenizer tokenizer =
    new StreamTokenizer(new StringReader(rangeString));
  tokenizer.resetSyntax();
  tokenizer.whitespaceChars(0, ' ');
  tokenizer.wordChars(' '+1,'\u00FF');
  // braces and the comma are returned as individual tokens
  tokenizer.ordinaryChar('[');
  tokenizer.ordinaryChar('(');
  tokenizer.ordinaryChar(',');
  tokenizer.ordinaryChar(']');
  tokenizer.ordinaryChar(')');

  try {

    // get opening brace
    tokenizer.nextToken();

    if (tokenizer.ttype == '[') m_LowerBoundIsOpen = false;
    else if (tokenizer.ttype == '(') m_LowerBoundIsOpen = true;
    else throw new IllegalArgumentException("Expected opening brace on range,"
                                            + " found: "
                                            + tokenizer.toString());

    // get lower bound
    tokenizer.nextToken();
    if (tokenizer.ttype != tokenizer.TT_WORD)
      throw new IllegalArgumentException("Expected lower bound in range,"
                                         + " found: "
                                         + tokenizer.toString());
    // note: a bare "inf" is interpreted as negative infinity here
    if (tokenizer.sval.compareToIgnoreCase("-inf") == 0)
      m_LowerBound = Double.NEGATIVE_INFINITY;
    else if (tokenizer.sval.compareToIgnoreCase("+inf") == 0)
      m_LowerBound = Double.POSITIVE_INFINITY;
    else if (tokenizer.sval.compareToIgnoreCase("inf") == 0)
      m_LowerBound = Double.NEGATIVE_INFINITY;
    else try {
      m_LowerBound = Double.valueOf(tokenizer.sval).doubleValue();
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("Expected lower bound in range,"
                                         + " found: '" + tokenizer.sval + "'");
    }

    // get separating comma
    if (tokenizer.nextToken() != ',')
      throw new IllegalArgumentException("Expected comma in range,"
                                         + " found: "
                                         + tokenizer.toString());

    // get upper bound
    tokenizer.nextToken();
    if (tokenizer.ttype != tokenizer.TT_WORD)
      throw new IllegalArgumentException("Expected upper bound in range,"
                                         + " found: "
                                         + tokenizer.toString());
    // note: a bare "inf" is interpreted as positive infinity here
    if (tokenizer.sval.compareToIgnoreCase("-inf") == 0)
      m_UpperBound = Double.NEGATIVE_INFINITY;
    else if (tokenizer.sval.compareToIgnoreCase("+inf") == 0)
      m_UpperBound = Double.POSITIVE_INFINITY;
    else if (tokenizer.sval.compareToIgnoreCase("inf") == 0)
      m_UpperBound = Double.POSITIVE_INFINITY;
    else try {
      m_UpperBound = Double.valueOf(tokenizer.sval).doubleValue();
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("Expected upper bound in range,"
                                         + " found: '" + tokenizer.sval + "'");
    }

    // get closing brace
    tokenizer.nextToken();

    if (tokenizer.ttype == ']') m_UpperBoundIsOpen = false;
    else if (tokenizer.ttype == ')') m_UpperBoundIsOpen = true;
    else throw new IllegalArgumentException("Expected closing brace on range,"
                                            + " found: "
                                            + tokenizer.toString());

    // check for rubbish on end
    if (tokenizer.nextToken() != tokenizer.TT_EOF)
      throw new IllegalArgumentException("Expected end of range string,"
                                         + " found: "
                                         + tokenizer.toString());

  } catch (IOException e) {
    throw new IllegalArgumentException("IOException reading attribute range"
                                       + " string: " + e.getMessage());
  }

  if (m_UpperBound < m_LowerBound)
    throw new IllegalArgumentException("Upper bound (" + m_UpperBound
                                       + ") on numeric range is"
                                       + " less than lower bound ("
                                       + m_LowerBound + ")!");
}

/**
 * Returns the revision string.
 *
 * @return the revision
 */
public String getRevision() {
  return RevisionUtils.extract("$Revision: 9515 $");
}

/**
 * Simple main method for testing this class.
 *
 * @param ops the commandline options
 */
//@ requires ops != null;
//@ requires \nonnullelements(ops);
public static void main(String[] ops) {

  try {

    // Create numeric attributes "length" and "weight"
    Attribute length = new Attribute("length");
    Attribute weight = new Attribute("weight");

    // Create date attribute "date"
    Attribute date = new Attribute("date", "yyyy-MM-dd HH:mm:ss");

    System.out.println(date);
    double dd = date.parseDate("2001-04-04 14:13:55");
    System.out.println("Test date = " + dd);
    System.out.println(date.formatDate(dd));

    dd = new Date().getTime();
    System.out.println("Date now = " + dd);
    System.out.println(date.formatDate(dd));

    // Create vector to hold nominal values "first", "second", "third"
    List<String> my_nominal_values = new ArrayList<String>(3);
    my_nominal_values.add("first");
    my_nominal_values.add("second");
    my_nominal_values.add("third");

    // Create nominal attribute "position"
    Attribute position = new Attribute("position", my_nominal_values);

    // Print the name of "position"
    System.out.println("Name of \"position\": " + position.name());

    // Print the values of "position"
    Enumeration attValues = position.enumerateValues();
    while (attValues.hasMoreElements()) {
      String string = (String)attValues.nextElement();
      System.out.println("Value of \"position\": " + string);
    }

    // Shallow copy attribute "position"
    Attribute copy = (Attribute) position.copy();

    // Test if attributes are the same
    System.out.println("Copy is the same as original: " +
                       copy.equals(position));

    // Print index of attribute "weight" (should be unset: -1)
    System.out.println("Index of attribute \"weight\" (should be -1): " +
                       weight.index());

    // Print index of value "first" of attribute "position"
    System.out.println("Index of value \"first\" of \"position\" (should be 0): "
                       + position.indexOfValue("first"));

    // Tests type of attribute "position"
    System.out.println("\"position\" is numeric: " + position.isNumeric());
    System.out.println("\"position\" is nominal: " + position.isNominal());
    System.out.println("\"position\" is string: " + position.isString());

    // Prints name of attribute "position"
    System.out.println("Name of \"position\": " + position.name());

    // Prints number of values of attribute "position"
    System.out.println("Number of values for \"position\": " +
                       position.numValues());

    // Prints the values (againg)
    for (int i = 0; i < position.numValues(); i++) {
      System.out.println("Value " + i + ": " + position.value(i));
    }

    // Prints the attribute "position" in ARFF format
    System.out.println(position);

    // Checks type of attribute "position" using constants
    switch (position.type()) {
    case Attribute.NUMERIC:
      System.out.println("\"position\" is numeric");
      break;
    case Attribute.NOMINAL:
      System.out.println("\"position\" is nominal");
      break;
    case Attribute.STRING:
      System.out.println("\"position\" is string");
      break;
    case Attribute.DATE:
      System.out.println("\"position\" is date");
      break;
    case Attribute.RELATIONAL:
      System.out.println("\"position\" is relation-valued");
      break;
    default:
      System.out.println("\"position\" has unknown type");
    }

    // Build a relation-valued attribute from a header with no instances
    ArrayList<Attribute> atts = new ArrayList<Attribute>(1);
    atts.add(position);
    Instances relation = new Instances("Test", atts, 0);
    Attribute relationValuedAtt = new Attribute("test", relation);
    System.out.println(relationValuedAtt);
  } catch (Exception e) {
    e.printStackTrace();
  }
}
}
52,706
29.343696
110
java
tsml-java
tsml-java-master/src/main/java/weka/core/AttributeExpression.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    AttributeExpression.java
 *    Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.core;

import java.io.Serializable;
import java.util.Stack;
import java.util.StringTokenizer;
import java.util.Vector;

/**
 * A general purpose class for parsing mathematical expressions
 * involving attribute values. Values can be provided in an array
 * or in an Instance. Values are accessed in the expression by
 * prefixing their index (starting at 1) with the character 'a'.
 *
 * <pre> Example expression: a1^2*a5/log(a7*4.0) </pre>
 *
 * Supported opperators: +, -, *, /, ^, log, abs, cos, exp, sqrt,
 * floor, ceil, rint, tan, sin, (, ).
 *
 * @author Mark Hall
 * @version $Revision: 8034 $
 */
public class AttributeExpression implements Serializable, RevisionHandler {

  /** for serialization */
  static final long serialVersionUID = 402130123261736245L;

  /**
   * Marker interface implemented by operators and operands so they can
   * share the postfix vector.
   */
  private interface ExpressionComponent {}

  /**
   * Inner class handling an attribute index as an operand.
   * NOTE(review): kept as a non-static inner class to preserve the
   * original serialized form.
   */
  private class AttributeOperand implements ExpressionComponent, Serializable, RevisionHandler {

    /** for serialization */
    static final long serialVersionUID = -7674280127286031105L;

    /** the index of the attribute (0-based) */
    protected int m_attributeIndex;

    /** true if the value of the attribute is to be multiplied by -1 */
    protected boolean m_negative;

    /**
     * Constructor.
     *
     * @param operand the operand token, e.g. "a3" (1-based attribute index)
     * @param sign true if the operand is negated
     * @throws Exception if the index cannot be parsed
     */
    public AttributeOperand(String operand, boolean sign) throws Exception {
      // strip the leading 'a' and convert the 1-based index to 0-based
      m_attributeIndex = Integer.parseInt(operand.substring(1)) - 1;
      m_negative = sign;
    }

    /**
     * Return a string describing this object.
     *
     * @return a string describing the attribute operand
     */
    public String toString() {
      String result = "";
      if (m_negative) {
        result += '-';
      }
      return result + "a" + (m_attributeIndex + 1);
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 8034 $");
    }
  }

  /**
   * Inner class for storing numeric constant operands.
   */
  private class NumericOperand implements ExpressionComponent, Serializable, RevisionHandler {

    /** for serialization */
    static final long serialVersionUID = 9037007836243662859L;

    /** numeric constant */
    protected double m_numericConst;

    /**
     * Constructor.
     *
     * @param operand the numeric constant as a string
     * @param sign true if the constant is negated
     * @throws Exception if the constant cannot be parsed
     */
    public NumericOperand(String operand, boolean sign) throws Exception {
      // was: Double.valueOf(operand).doubleValue() -- avoid the needless box
      m_numericConst = Double.parseDouble(operand);
      if (sign) {
        m_numericConst *= -1.0;
      }
    }

    /**
     * Return a string describing this object.
     *
     * @return a string describing the numeric operand
     */
    public String toString() {
      return "" + m_numericConst;
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 8034 $");
    }
  }

  /**
   * Inner class for storing operators.
   */
  private class Operator implements ExpressionComponent, Serializable, RevisionHandler {

    /** for serialization */
    static final long serialVersionUID = -2760353522666004638L;

    /** the operator (single-char encoding, see OPERATORS) */
    protected char m_operator;

    /**
     * Constructor.
     *
     * @param opp the operator
     * @throws IllegalArgumentException if the character is not a known operator
     */
    public Operator(char opp) {
      if (!isOperator(opp)) {
        throw new IllegalArgumentException("Unrecognized operator:" + opp);
      }
      m_operator = opp;
    }

    /**
     * Apply this binary operator to the supplied arguments.
     *
     * @param first the first argument
     * @param second the second argument
     * @return the result, or NaN for an unknown operator
     */
    protected double applyOperator(double first, double second) {
      switch (m_operator) {
      case '+': return (first + second);
      case '-': return (first - second);
      case '*': return (first * second);
      case '/': return (first / second);
      case '^': return Math.pow(first, second);
      }
      return Double.NaN;
    }

    /**
     * Apply this operator (unary function) to the supplied argument.
     *
     * @param value the argument
     * @return the result, or NaN for an unknown function
     */
    protected double applyFunction(double value) {
      switch (m_operator) {
      case 'l': return Math.log(value);
      case 'b': return Math.abs(value);
      case 'c': return Math.cos(value);
      case 'e': return Math.exp(value);
      case 's': return Math.sqrt(value);
      case 'f': return Math.floor(value);
      case 'h': return Math.ceil(value);
      case 'r': return Math.rint(value);
      case 't': return Math.tan(value);
      case 'n': return Math.sin(value);
      }
      return Double.NaN;
    }

    /**
     * Return a string describing this object.
     *
     * @return a string describing the operator
     */
    public String toString() {
      return "" + m_operator;
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 8034 $");
    }
  }

  /** Operator stack used while converting infix to postfix */
  private Stack<String> m_operatorStack = new Stack<String>();

  /** Supported operators. l = log, b = abs, c = cos, e = exp, s = sqrt,
      f = floor, h = ceil, r = rint, t = tan, n = sin */
  private static final String OPERATORS = "+-*/()^lbcesfhrtn";

  /** Unary functions. l = log, b = abs, c = cos, e = exp, s = sqrt,
      f = floor, h = ceil, r = rint, t = tan, n = sin */
  private static final String UNARY_FUNCTIONS = "lbcesfhrtn";

  /** Holds the original infix expression */
  private String m_originalInfix;

  /** Holds the expression in postfix form */
  private Vector<ExpressionComponent> m_postFixExpVector;

  /** True if the next numeric constant or attribute index is negative */
  private boolean m_signMod = false;

  /** Holds the previous token */
  private String m_previousTok = "";

  /**
   * Handles the processing of an infix operand to postfix.
   *
   * @param tok the infix operand
   * @throws Exception if there is difficulty parsing the operand
   */
  private void handleOperand(String tok) throws Exception {
    // if it contains an 'a' then it's an attribute index
    if (tok.indexOf('a') != -1) {
      m_postFixExpVector.addElement(new AttributeOperand(tok, m_signMod));
    } else {
      try {
        // should be a numeric constant
        m_postFixExpVector.addElement(new NumericOperand(tok, m_signMod));
      } catch (NumberFormatException ne) {
        // fix: preserve the cause instead of dropping it
        throw new Exception("Trouble parsing numeric constant", ne);
      }
    }
    m_signMod = false;
  }

  /**
   * Handles the processing of an infix operator to postfix.
   *
   * @param tok the infix operator
   * @throws Exception if there is difficulty parsing the operator
   */
  private void handleOperator(String tok) throws Exception {
    boolean push = true;

    char tokchar = tok.charAt(0);
    if (tokchar == ')') {
      // pop everything back to the matching open parenthesis
      String popop = " ";
      do {
        popop = m_operatorStack.pop();
        if (popop.charAt(0) != '(') {
          m_postFixExpVector.addElement(new Operator(popop.charAt(0)));
        }
      } while (popop.charAt(0) != '(');
    } else {
      int infixToc = infixPriority(tok.charAt(0));
      while (!m_operatorStack.empty()
          && stackPriority(m_operatorStack.peek().charAt(0)) >= infixToc) {

        // try and catch double operators and see if the current one can
        // be interpreted as the sign of an upcoming number
        if (m_previousTok.length() == 1
            && isOperator(m_previousTok.charAt(0))
            && m_previousTok.charAt(0) != ')') {
          if (tok.charAt(0) == '-') {
            m_signMod = true;
          } else {
            m_signMod = false;
          }
          push = false;
          break;
        } else {
          String popop = m_operatorStack.pop();
          m_postFixExpVector.addElement(new Operator(popop.charAt(0)));
        }
      }

      // a leading '-' is the sign of the first operand, not an operator
      if (m_postFixExpVector.size() == 0) {
        if (tok.charAt(0) == '-') {
          m_signMod = true;
          push = false;
        }
      }

      if (push) {
        m_operatorStack.push(tok);
      }
    }
  }

  /**
   * Converts a string containing a mathematical expression in infix form
   * to postfix form. The result is stored in the vector m_postFixExpVector.
   *
   * @param infixExp the infix expression to convert
   * @throws Exception if something goes wrong during the conversion
   */
  public void convertInfixToPostfix(String infixExp) throws Exception {
    m_originalInfix = infixExp;

    infixExp = Utils.removeSubstring(infixExp, " ");
    // rewrite the named functions to their single-char encoding
    infixExp = Utils.replaceSubstring(infixExp, "log", "l");
    infixExp = Utils.replaceSubstring(infixExp, "abs", "b");
    infixExp = Utils.replaceSubstring(infixExp, "cos", "c");
    infixExp = Utils.replaceSubstring(infixExp, "exp", "e");
    infixExp = Utils.replaceSubstring(infixExp, "sqrt", "s");
    infixExp = Utils.replaceSubstring(infixExp, "floor", "f");
    infixExp = Utils.replaceSubstring(infixExp, "ceil", "h");
    infixExp = Utils.replaceSubstring(infixExp, "rint", "r");
    infixExp = Utils.replaceSubstring(infixExp, "tan", "t");
    infixExp = Utils.replaceSubstring(infixExp, "sin", "n");

    StringTokenizer tokenizer = new StringTokenizer(infixExp, OPERATORS, true);
    m_postFixExpVector = new Vector<ExpressionComponent>();

    while (tokenizer.hasMoreTokens()) {
      String tok = tokenizer.nextToken();

      if (tok.length() > 1) {
        handleOperand(tok);
      } else {
        // probably an operator, but could be a single char operand
        if (isOperator(tok.charAt(0))) {
          handleOperator(tok);
        } else {
          // should be a numeric constant
          handleOperand(tok);
        }
      }
      m_previousTok = tok;
    }
    while (!m_operatorStack.empty()) {
      String popop = m_operatorStack.pop();
      if (popop.charAt(0) == '(' || popop.charAt(0) == ')') {
        throw new Exception("Mis-matched parenthesis!");
      }
      m_postFixExpVector.addElement(new Operator(popop.charAt(0)));
    }
  }

  /**
   * Evaluate the expression using the supplied Instance.
   * Assumes that the infix expression has been converted to
   * postfix and stored in m_postFixExpVector.
   *
   * @param instance the Instance containing values to apply
   * the expression to
   * @return the value of the expression
   * @throws Exception if something goes wrong
   */
  public double evaluateExpression(Instance instance) throws Exception {
    // last slot receives the result of the evaluation
    double[] vals = new double[instance.numAttributes() + 1];
    for (int i = 0; i < instance.numAttributes(); i++) {
      if (instance.isMissing(i)) {
        vals[i] = Utils.missingValue();
      } else {
        vals[i] = instance.value(i);
      }
    }

    evaluateExpression(vals);
    return vals[vals.length - 1];
  }

  /**
   * Evaluate the expression using the supplied array of attribute values.
   * The result is stored in the last element of the array. Assumes that
   * the infix expression has been converted to postfix and stored in
   * m_postFixExpVector.
   *
   * @param vals the values to apply the expression to
   * @throws Exception if something goes wrong
   */
  public void evaluateExpression(double[] vals) throws Exception {
    Stack<Double> operands = new Stack<Double>();

    for (ExpressionComponent nextob : m_postFixExpVector) {
      if (nextob instanceof NumericOperand) {
        operands.push(((NumericOperand) nextob).m_numericConst);
      } else if (nextob instanceof AttributeOperand) {
        double value = vals[((AttributeOperand) nextob).m_attributeIndex];
        if (((AttributeOperand) nextob).m_negative) {
          value = -value;
        }
        operands.push(value);
      } else if (nextob instanceof Operator) {
        char op = ((Operator) nextob).m_operator;
        if (isUnaryFunction(op)) {
          double operand = operands.pop();
          double result = ((Operator) nextob).applyFunction(operand);
          operands.push(result);
        } else {
          double second = operands.pop();
          double first = operands.pop();
          double result = ((Operator) nextob).applyOperator(first, second);
          operands.push(result);
        }
      } else {
        throw new Exception("Unknown object in postfix vector!");
      }
    }

    if (operands.size() != 1) {
      throw new Exception("Problem applying function");
    }

    Double result = operands.pop();
    // NaN / infinite results are reported as a missing value
    if (result.isNaN() || result.isInfinite()) {
      vals[vals.length - 1] = Utils.missingValue();
    } else {
      vals[vals.length - 1] = result.doubleValue();
    }
  }

  /**
   * Returns true if a token is an operator.
   *
   * @param tok the token to check
   * @return true if the supplied token is an operator
   */
  private boolean isOperator(char tok) {
    if (OPERATORS.indexOf(tok) == -1) {
      return false;
    }
    return true;
  }

  /**
   * Returns true if a token is a unary function.
   *
   * @param tok the token to check
   * @return true if the supplied token is a unary function
   */
  private boolean isUnaryFunction(char tok) {
    if (UNARY_FUNCTIONS.indexOf(tok) == -1) {
      return false;
    }
    return true;
  }

  /**
   * Return the infix priority of an operator.
   *
   * @param opp the operator
   * @return the infix priority
   * @throws IllegalArgumentException if the operator is unknown
   */
  private int infixPriority(char opp) {
    switch (opp) {
    case 'l':
    case 'b':
    case 'c':
    case 'e':
    case 's':
    case 'f':
    case 'h':
    case 'r':
    case 't':
    case 'n':
      return 3;
    case '^':
      return 2;
    case '*':
      return 2;
    case '/':
      return 2;
    case '+':
      return 1;
    case '-':
      return 1;
    case '(':
      return 4;
    case ')':
      return 0;
    default:
      throw new IllegalArgumentException("Unrecognized operator:" + opp);
    }
  }

  /**
   * Return the stack priority of an operator.
   *
   * @param opp the operator
   * @return the stack priority
   * @throws IllegalArgumentException if the operator is unknown
   */
  private int stackPriority(char opp) {
    switch (opp) {
    case 'l':
    case 'b':
    case 'c':
    case 'e':
    case 's':
    case 'f':
    case 'h':
    case 'r':
    case 't':
    case 'n':
      return 3;
    case '^':
      return 2;
    case '*':
      return 2;
    case '/':
      return 2;
    case '+':
      return 1;
    case '-':
      return 1;
    case '(':
      return 0;
    case ')':
      return -1;
    default:
      throw new IllegalArgumentException("Unrecognized operator:" + opp);
    }
  }

  /**
   * Return the postfix expression.
   *
   * @return the postfix expression as a String
   */
  public String getPostFixExpression() {
    return m_postFixExpVector.toString();
  }

  /**
   * Return the original infix expression.
   *
   * @return the infix expression as a String
   */
  public String toString() {
    return m_originalInfix;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
15,819
25.904762
79
java
tsml-java
tsml-java-master/src/main/java/weka/core/AttributeLocator.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * StringLocator.java
 * Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.core;

import java.io.Serializable;
import java.util.Arrays;
import java.util.Vector;

/**
 * This class locates and records the indices of a certain type of attributes,
 * recursively in case of Relational attributes.
 *
 * @author  fracpete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 * @see Attribute#RELATIONAL
 */
public class AttributeLocator
  implements Serializable, Comparable<AttributeLocator>, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -2932848827681070345L;

  /** the attribute indices that may be inspected */
  protected int[] m_AllowedIndices = null;

  /** contains the attribute locations, either true or false Boolean objects */
  protected Vector<Boolean> m_Attributes = null;

  /** contains the locator locations, either null or a AttributeLocator reference */
  protected Vector<AttributeLocator> m_Locators = null;

  /** the type of the attribute */
  protected int m_Type = -1;

  /** the referenced data */
  protected Instances m_Data = null;

  /** the indices */
  protected int[] m_Indices = null;

  /** the indices of locator objects */
  protected int[] m_LocatorIndices = null;

  /**
   * Initializes the AttributeLocator with the given data for the specified
   * type of attribute. Checks all attributes.
   *
   * @param data the data to work on
   * @param type the type of attribute to locate
   */
  public AttributeLocator(Instances data, int type) {
    this(data, type, 0, data.numAttributes() - 1);
  }

  /**
   * Initializes the AttributeLocator with the given data for the specified
   * type of attribute. Checks only the given range.
   *
   * @param data      the data to work on
   * @param type      the type of attribute to locate
   * @param fromIndex the first index to inspect (including)
   * @param toIndex   the last index to check (including)
   */
  public AttributeLocator(Instances data, int type, int fromIndex, int toIndex) {
    super();

    int[] indices = new int[toIndex - fromIndex + 1];
    for (int i = 0; i < indices.length; i++)
      indices[i] = fromIndex + i;

    initialize(data, type, indices);
  }

  /**
   * initializes the AttributeLocator with the given data for the specified
   * type of attribute. Checks only the given attribute indices.
   *
   * @param data    the data to work on
   * @param type    the type of attribute to locate
   * @param indices the attribute indices to check
   */
  public AttributeLocator(Instances data, int type, int[] indices) {
    super();

    initialize(data, type, indices);
  }

  /**
   * initializes the AttributeLocator
   *
   * @param data    the data to base the search for attributes on
   * @param type    the type of attribute to look for
   * @param indices the indices that are allowed to check
   */
  protected void initialize(Instances data, int type, int[] indices) {
    m_Data = new Instances(data, 0);
    m_Type = type;

    m_AllowedIndices = new int[indices.length];
    System.arraycopy(indices, 0, m_AllowedIndices, 0, indices.length);

    locate();

    m_Indices        = find(true);
    m_LocatorIndices = find(false);
  }

  /**
   * returns the type of attribute that is located
   *
   * @return the type of attribute
   */
  public int getType() {
    return m_Type;
  }

  /**
   * returns the indices that are allowed to check for the attribute type
   *
   * @return the indices that are checked for the attribute type
   */
  public int[] getAllowedIndices() {
    return m_AllowedIndices;
  }

  /**
   * sets up the structure: one flag per allowed index, plus a recursive
   * locator for every relational attribute
   */
  protected void locate() {
    int   i;

    m_Attributes = new Vector<Boolean>();
    m_Locators   = new Vector<AttributeLocator>();

    for (i = 0; i < m_AllowedIndices.length; i++) {
      if (m_Data.attribute(m_AllowedIndices[i]).type() == Attribute.RELATIONAL)
        m_Locators.add(new AttributeLocator(
            m_Data.attribute(m_AllowedIndices[i]).relation(), getType()));
      else
        m_Locators.add(null);

      // was: new Boolean(...) -- use the canonical instances instead
      if (m_Data.attribute(m_AllowedIndices[i]).type() == getType())
        m_Attributes.add(Boolean.TRUE);
      else
        m_Attributes.add(Boolean.FALSE);
    }
  }

  /**
   * returns the underlying data
   *
   * @return the underlying Instances object
   */
  public Instances getData() {
    return m_Data;
  }

  /**
   * returns the indices of the searched-for attributes (if TRUE) or the indices
   * of AttributeLocator objects (if FALSE)
   *
   * @param findAtts if true the indices of attributes are located,
   *                 otherwise the ones of AttributeLocator objects
   * @return the indices of the attributes or the AttributeLocator objects
   */
  protected int[] find(boolean findAtts) {
    int             i;
    int[]           result;
    Vector<Integer> indices;

    // determine locations
    indices = new Vector<Integer>();
    if (findAtts) {
      for (i = 0; i < m_Attributes.size(); i++) {
        if (m_Attributes.get(i).booleanValue())
          indices.add(i);  // autoboxing instead of new Integer(i)
      }
    } else {
      for (i = 0; i < m_Locators.size(); i++) {
        if (m_Locators.get(i) != null)
          indices.add(i);
      }
    }

    // fill array
    result = new int[indices.size()];
    for (i = 0; i < indices.size(); i++)
      result[i] = indices.get(i).intValue();

    return result;
  }

  /**
   * returns actual index in the Instances object.
   *
   * @param index the index in the m_AllowedIndices array
   * @return the actual index in the instances object
   */
  public int getActualIndex(int index) {
    return m_AllowedIndices[index];
  }

  /**
   * Returns the indices of the attributes. These indices are referring
   * to the m_AllowedIndices array, not the actual indices in the Instances
   * object.
   *
   * @return the indices of the attributes
   * @see #getActualIndex(int)
   */
  public int[] getAttributeIndices() {
    return m_Indices;
  }

  /**
   * Returns the indices of the AttributeLocator objects. These indices are
   * referring to the m_AllowedIndices array, not the actual indices in the
   * Instances object.
   *
   * @return the indices of the AttributeLocator objects
   * @see #getActualIndex(int)
   */
  public int[] getLocatorIndices() {
    return m_LocatorIndices;
  }

  /**
   * Returns the AttributeLocator at the given index. This index refers to
   * the index of the m_AllowedIndices array, not the actual Instances object.
   *
   * @param index the index of the locator to retrieve
   * @return the AttributeLocator at the given index
   */
  public AttributeLocator getLocator(int index) {
    return (AttributeLocator) m_Locators.get(index);
  }

  /**
   * Compares this object with the specified object for order. Returns a
   * negative integer, zero, or a positive integer as this object is less
   * than, equal to, or greater than the specified object. Only type and
   * indices are checked.
   *
   * @param o the object to compare with
   * @return -1 if less than, 0 if equal, +1 if greater than the
   *         given object
   */
  public int compareTo(AttributeLocator o) {
    int result;
    int i;

    result = 0;

    // 1. check type
    if (this.getType() < o.getType()) {
      result = -1;
    } else if (this.getType() > o.getType()) {
      result = 1;
    } else {
      // 2. check indices
      if (this.getAllowedIndices().length < o.getAllowedIndices().length) {
        result = -1;
      } else if (this.getAllowedIndices().length > o.getAllowedIndices().length) {
        result = 1;
      } else {
        for (i = 0; i < this.getAllowedIndices().length; i++) {
          if (this.getAllowedIndices()[i] < o.getAllowedIndices()[i]) {
            result = -1;
            break;
          } else if (this.getAllowedIndices()[i] > o.getAllowedIndices()[i]) {
            result = 1;
            break;
          } else {
            result = 0;
          }
        }
      }
    }

    return result;
  }

  /**
   * Indicates whether some other object is "equal to" this one. Only type
   * and indices are checked.
   *
   * @param o the AttributeLocator to check for equality
   * @return true if the AttributeLocators have the same type and
   *         indices
   */
  public boolean equals(Object o) {
    // fix: the original blindly cast o, throwing ClassCastException for
    // foreign types instead of returning false as the equals contract requires
    if (this == o)
      return true;
    if (!(o instanceof AttributeLocator))
      return false;
    return (compareTo((AttributeLocator) o) == 0);
  }

  /**
   * Returns a hash code consistent with {@link #equals(Object)} (which
   * compares only type and allowed indices).
   *
   * @return the hash code
   */
  public int hashCode() {
    return 31 * m_Type + Arrays.hashCode(m_AllowedIndices);
  }

  /**
   * returns a string representation of this object
   *
   * @return a string representation
   */
  public String toString() {
    return m_Attributes.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
9,459
27.154762
99
java
tsml-java
tsml-java-master/src/main/java/weka/core/AttributeStats.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    AttributeStats.java
 *    Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.core;

import java.io.Serializable;

/**
 * A Utility class that contains summary information on an
 * the values that appear in a dataset for a particular attribute.
 *
 * @author <a href="mailto:len@reeltwo.com">Len Trigg</a>
 * @version $Revision: 8034 $
 */
public class AttributeStats implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 4434688832743939380L;

  /** The number of int-like values */
  public int intCount = 0;

  /** The number of real-like values (i.e. have a fractional part) */
  public int realCount = 0;

  /** The number of missing values */
  public int missingCount = 0;

  /** The number of distinct values */
  public int distinctCount = 0;

  /** The number of values that only appear once */
  public int uniqueCount = 0;

  /** The total number of values (i.e. number of instances) */
  public int totalCount = 0;

  /** Stats on numeric value distributions */
  // perhaps Stats should be moved from weka.experiment to weka.core
  public Stats numericStats;

  /** Counts of each nominal value */
  public int[] nominalCounts;

  /** Weight mass for each nominal value */
  public double[] nominalWeights;

  /**
   * Updates the counters for one more observed distinct value.
   *
   * @param value  the value that has just been seen
   * @param count  the number of times the value appeared
   * @param weight the weight mass of the value
   */
  protected void addDistinct(double value, int count, double weight) {
    if (count > 0) {
      if (count == 1) {
        uniqueCount++;
      }
      // an integral double counts as int-like, otherwise real-like
      if (Utils.eq(value, (double) ((int) value))) {
        intCount += count;
      } else {
        realCount += count;
      }
      // nominal attributes additionally record the per-value tallies
      if (nominalCounts != null) {
        nominalCounts[(int) value] = count;
        nominalWeights[(int) value] = weight;
      }
      // numeric attributes feed the running weighted statistics
      if (numericStats != null) {
        numericStats.add(value, weight);
        numericStats.calculateDerived();
      }
    }
    distinctCount++;
  }

  /**
   * Returns a human readable representation of this AttributeStats instance.
   *
   * @return a String representing these AttributeStats.
   */
  public String toString() {
    // fix: local single-threaded buffer -- StringBuilder, not the
    // synchronized StringBuffer
    StringBuilder sb = new StringBuilder();

    // header row
    sb.append(Utils.padLeft("Type", 4)).append(Utils.padLeft("Nom", 5));
    sb.append(Utils.padLeft("Int", 5)).append(Utils.padLeft("Real", 5));
    sb.append(Utils.padLeft("Missing", 12));
    sb.append(Utils.padLeft("Unique", 12));
    sb.append(Utils.padLeft("Dist", 6));
    if (nominalCounts != null) {
      sb.append(' ');
      for (int i = 0; i < nominalCounts.length; i++) {
        sb.append(Utils.padLeft("C[" + i + "]", 5));
      }
    }
    sb.append('\n');

    // type column plus int/nominal percentages
    long percent;
    percent = Math.round(100.0 * intCount / totalCount);
    if (nominalCounts != null) {
      sb.append(Utils.padLeft("Nom", 4)).append(' ');
      sb.append(Utils.padLeft("" + percent, 3)).append("% ");
      sb.append(Utils.padLeft("" + 0, 3)).append("% ");
    } else {
      sb.append(Utils.padLeft("Num", 4)).append(' ');
      sb.append(Utils.padLeft("" + 0, 3)).append("% ");
      sb.append(Utils.padLeft("" + percent, 3)).append("% ");
    }

    // real / missing / unique / distinct columns
    percent = Math.round(100.0 * realCount / totalCount);
    sb.append(Utils.padLeft("" + percent, 3)).append("% ");
    sb.append(Utils.padLeft("" + missingCount, 5)).append(" /");
    percent = Math.round(100.0 * missingCount / totalCount);
    sb.append(Utils.padLeft("" + percent, 3)).append("% ");
    sb.append(Utils.padLeft("" + uniqueCount, 5)).append(" /");
    percent = Math.round(100.0 * uniqueCount / totalCount);
    sb.append(Utils.padLeft("" + percent, 3)).append("% ");
    sb.append(Utils.padLeft("" + distinctCount, 5)).append(' ');

    // per-value counts for nominal attributes
    if (nominalCounts != null) {
      for (int i = 0; i < nominalCounts.length; i++) {
        sb.append(Utils.padLeft("" + nominalCounts[i], 5));
      }
    }
    sb.append('\n');
    return sb.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
4,944
30.496815
77
java
tsml-java
tsml-java-master/src/main/java/weka/core/BatchPredictor.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    BatchPredictor.java
 *    Copyright (C) 2012 University of Waikato, Hamilton, New Zealand.
 *
 */

package weka.core;

/**
 * Interface to something that can produce predictions in a batch manner
 * when presented with a set of Instances.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: 8937 $
 */
public interface BatchPredictor {

  /**
   * Set the batch size to use. The implementer will
   * prefer (but not necessarily expect) this many instances
   * to be passed in to distributionsForInstances().
   *
   * @param size the batch size to use
   */
  void setBatchSize(String size);

  /**
   * Get the batch size to use. The implementer will prefer (but not
   * necessarily expect) this many instances to be passed in to
   * distributionsForInstances(). Allows the preferred batch size
   * to be encapsulated with the client.
   *
   * @return the batch size to use
   */
  String getBatchSize();

  /**
   * Batch scoring method.
   *
   * @param insts the instances to get predictions for
   * @return an array of probability distributions, one for each instance
   * @throws Exception if a problem occurs
   */
  double[][] distributionsForInstances(Instances insts) throws Exception;
}
1,923
30.540984
74
java
tsml-java
tsml-java-master/src/main/java/weka/core/BinarySparseInstance.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * BinarySparseInstance.java * Copyright (C) 2002-2012 University of Waikato, Hamilton, New Zealand * */ package weka.core; import java.util.ArrayList; import java.util.Enumeration; /** * Class for storing a binary-data-only instance as a sparse vector. A sparse * instance only requires storage for those attribute values that are non-zero. * Since the objective is to reduce storage requirements for datasets with large * numbers of default values, this also includes nominal attributes -- the first * nominal value (i.e. that which has index 0) will not require explicit * storage, so rearrange your nominal attribute value orderings if necessary. * Missing values are not supported, and will be treated as 1 (true). * * @version $Revision: 9028 $ */ public class BinarySparseInstance extends SparseInstance { /** for serialization */ private static final long serialVersionUID = -5297388762342528737L; /** * Constructor that generates a sparse instance from the given instance. * Reference to the dataset is set to null. (ie. 
the instance doesn't have * access to information about the attribute types) * * @param instance the instance from which the attribute values and the weight * are to be copied */ public BinarySparseInstance(Instance instance) { m_Weight = instance.weight(); m_Dataset = null; m_NumAttributes = instance.numAttributes(); if (instance instanceof SparseInstance) { m_AttValues = null; m_Indices = ((SparseInstance) instance).m_Indices; } else { int[] tempIndices = new int[instance.numAttributes()]; int vals = 0; for (int i = 0; i < instance.numAttributes(); i++) { if (instance.value(i) != 0) { tempIndices[vals] = i; vals++; } } m_AttValues = null; m_Indices = new int[vals]; System.arraycopy(tempIndices, 0, m_Indices, 0, vals); } } /** * Constructor that copies the info from the given instance. Reference to the * dataset is set to null. (ie. the instance doesn't have access to * information about the attribute types) * * @param instance the instance from which the attribute info is to be copied */ public BinarySparseInstance(SparseInstance instance) { m_AttValues = null; m_Indices = instance.m_Indices; m_Weight = instance.m_Weight; m_NumAttributes = instance.m_NumAttributes; m_Dataset = null; } /** * Constructor that generates a sparse instance from the given parameters. * Reference to the dataset is set to null. (ie. the instance doesn't have * access to information about the attribute types) * * @param weight the instance's weight * @param attValues a vector of attribute values */ public BinarySparseInstance(double weight, double[] attValues) { m_Weight = weight; m_Dataset = null; m_NumAttributes = attValues.length; int[] tempIndices = new int[m_NumAttributes]; int vals = 0; for (int i = 0; i < m_NumAttributes; i++) { if (attValues[i] != 0) { tempIndices[vals] = i; vals++; } } m_AttValues = null; m_Indices = new int[vals]; System.arraycopy(tempIndices, 0, m_Indices, 0, vals); } /** * Constructor that inititalizes instance variable with given values. 
* Reference to the dataset is set to null. (ie. the instance doesn't have * access to information about the attribute types) * * @param weight the instance's weight * @param indices the indices of the given values in the full vector * @param maxNumValues the maximium number of values that can be stored */ public BinarySparseInstance(double weight, int[] indices, int maxNumValues) { m_AttValues = null; m_Indices = indices; m_Weight = weight; m_NumAttributes = maxNumValues; m_Dataset = null; } /** * Constructor of an instance that sets weight to one, all values to 1, and * the reference to the dataset to null. (ie. the instance doesn't have access * to information about the attribute types) * * @param numAttributes the size of the instance */ public BinarySparseInstance(int numAttributes) { m_AttValues = null; m_NumAttributes = numAttributes; m_Indices = new int[numAttributes]; for (int i = 0; i < m_Indices.length; i++) { m_Indices[i] = i; } m_Weight = 1; m_Dataset = null; } /** * Produces a shallow copy of this instance. The copy doesn't have access to a * dataset. * * @return the shallow copy */ @Override public Object copy() { return new BinarySparseInstance(this); } /** * Merges this instance with the given instance and returns the result. * Dataset is set to null. * * @param inst the instance to be merged with this one * @return the merged instances */ @Override public Instance mergeInstance(Instance inst) { int[] indices = new int[numValues() + inst.numValues()]; int m = 0; for (int j = 0; j < numValues(); j++) { indices[m++] = index(j); } for (int j = 0; j < inst.numValues(); j++) { if (inst.valueSparse(j) != 0) { indices[m++] = numAttributes() + inst.index(j); } } if (m != indices.length) { // Need to truncate int[] newInd = new int[m]; System.arraycopy(indices, 0, newInd, 0, m); indices = newInd; } return new BinarySparseInstance(1.0, indices, numAttributes() + inst.numAttributes()); } /** * Does nothing, since we don't support missing values. 
* * @param array containing the means and modes */ @Override public void replaceMissingValues(double[] array) { // Does nothing, since we don't store missing values. } /** * Sets a specific value in the instance to the given value (internal * floating-point format). Performs a deep copy of the vector of attribute * values before the value is set. * * @param attIndex the attribute's index * @param value the new attribute value (If the corresponding attribute is * nominal (or a string) then this is the new value's index as a * double). */ @Override public void setValue(int attIndex, double value) { int index = locateIndex(attIndex); if ((index >= 0) && (m_Indices[index] == attIndex)) { if (value == 0) { int[] tempIndices = new int[m_Indices.length - 1]; System.arraycopy(m_Indices, 0, tempIndices, 0, index); System.arraycopy(m_Indices, index + 1, tempIndices, index, m_Indices.length - index - 1); m_Indices = tempIndices; } } else { if (value != 0) { int[] tempIndices = new int[m_Indices.length + 1]; System.arraycopy(m_Indices, 0, tempIndices, 0, index + 1); tempIndices[index + 1] = attIndex; System.arraycopy(m_Indices, index + 1, tempIndices, index + 2, m_Indices.length - index - 1); m_Indices = tempIndices; } } } /** * Sets a specific value in the instance to the given value (internal * floating-point format). Performs a deep copy of the vector of attribute * values before the value is set. * * @param indexOfIndex the index of the attribute's index * @param value the new attribute value (If the corresponding attribute is * nominal (or a string) then this is the new value's index as a * double). 
*/ @Override public void setValueSparse(int indexOfIndex, double value) { if (value == 0) { int[] tempIndices = new int[m_Indices.length - 1]; System.arraycopy(m_Indices, 0, tempIndices, 0, indexOfIndex); System.arraycopy(m_Indices, indexOfIndex + 1, tempIndices, indexOfIndex, m_Indices.length - indexOfIndex - 1); m_Indices = tempIndices; } } /** * Returns the values of each attribute as an array of doubles. * * @return an array containing all the instance attribute values */ @Override public double[] toDoubleArray() { double[] newValues = new double[m_NumAttributes]; for (int i = 0; i < m_Indices.length; i++) { newValues[m_Indices[i]] = 1.0; } return newValues; } /** * Returns the description of one instance in sparse format. If the instance * doesn't have access to a dataset, it returns the internal floating-point * values. Quotes string values that contain whitespace characters. * * @return the instance's description as a string */ @Override public String toString() { StringBuffer text = new StringBuffer(); text.append('{'); for (int i = 0; i < m_Indices.length; i++) { if (i > 0) { text.append(","); } if (m_Dataset == null) { text.append(m_Indices[i] + " 1"); } else { if (m_Dataset.attribute(m_Indices[i]).isNominal() || m_Dataset.attribute(m_Indices[i]).isString()) { text.append(m_Indices[i] + " " + Utils.quote(m_Dataset.attribute(m_Indices[i]).value(1))); } else { text.append(m_Indices[i] + " 1"); } } } text.append('}'); if (m_Weight != 1.0) { text.append(",{" + Utils.doubleToString(m_Weight, AbstractInstance.s_numericAfterDecimalPoint) + "}"); } return text.toString(); } /** * Returns an instance's attribute value in internal format. * * @param attIndex the attribute's index * @return the specified value as a double (If the corresponding attribute is * nominal (or a string) then it returns the value's index as a * double). 
*/ @Override public double value(int attIndex) { int index = locateIndex(attIndex); if ((index >= 0) && (m_Indices[index] == attIndex)) { return 1.0; } else { return 0.0; } } /** * Returns an instance's attribute value in internal format. Does exactly the * same thing as value() if applied to an Instance. * * @param indexOfIndex the index of the attribute's index * @return the specified value as a double (If the corresponding attribute is * nominal (or a string) then it returns the value's index as a * double). */ @Override public final double valueSparse(int indexOfIndex) { int index = m_Indices[indexOfIndex]; // Throws if out of bounds return 1; } /** * Deletes an attribute at the given position (0 to numAttributes() - 1). * * @param position the attribute's position */ @Override protected void forceDeleteAttributeAt(int position) { int index = locateIndex(position); m_NumAttributes--; if ((index >= 0) && (m_Indices[index] == position)) { int[] tempIndices = new int[m_Indices.length - 1]; System.arraycopy(m_Indices, 0, tempIndices, 0, index); for (int i = index; i < m_Indices.length - 1; i++) { tempIndices[i] = m_Indices[i + 1] - 1; } m_Indices = tempIndices; } else { int[] tempIndices = new int[m_Indices.length]; System.arraycopy(m_Indices, 0, tempIndices, 0, index + 1); for (int i = index + 1; i < m_Indices.length - 1; i++) { tempIndices[i] = m_Indices[i] - 1; } m_Indices = tempIndices; } } /** * Inserts an attribute at the given position (0 to numAttributes()) and sets * its value to 1. 
* * @param position the attribute's position */ @Override protected void forceInsertAttributeAt(int position) { int index = locateIndex(position); m_NumAttributes++; if ((index >= 0) && (m_Indices[index] == position)) { int[] tempIndices = new int[m_Indices.length + 1]; System.arraycopy(m_Indices, 0, tempIndices, 0, index); tempIndices[index] = position; for (int i = index; i < m_Indices.length; i++) { tempIndices[i + 1] = m_Indices[i] + 1; } m_Indices = tempIndices; } else { int[] tempIndices = new int[m_Indices.length + 1]; System.arraycopy(m_Indices, 0, tempIndices, 0, index + 1); tempIndices[index + 1] = position; for (int i = index + 1; i < m_Indices.length; i++) { tempIndices[i + 1] = m_Indices[i] + 1; } m_Indices = tempIndices; } } /** * Main method for testing this class. * * @param options the command line options - ignored */ public static void main(String[] options) { try { // Create numeric attributes "length" and "weight" Attribute length = new Attribute("length"); Attribute weight = new Attribute("weight"); // Create vector to hold nominal values "first", "second", "third" ArrayList<String> my_nominal_values = new ArrayList<String>(3); my_nominal_values.add("first"); my_nominal_values.add("second"); // Create nominal attribute "position" Attribute position = new Attribute("position", my_nominal_values); // Create vector of the above attributes ArrayList<Attribute> attributes = new ArrayList<Attribute>(3); attributes.add(length); attributes.add(weight); attributes.add(position); // Create the empty dataset "race" with above attributes Instances race = new Instances("race", attributes, 0); // Make position the class attribute race.setClassIndex(position.index()); // Create empty instance with three attribute values BinarySparseInstance inst = new BinarySparseInstance(3); // Set instance's values for the attributes "length", "weight", and // "position" inst.setValue(length, 5.3); inst.setValue(weight, 300); inst.setValue(position, "first"); // Set 
instance's dataset to be the dataset "race" inst.setDataset(race); // Print the instance System.out.println("The instance: " + inst); // Print the first attribute System.out.println("First attribute: " + inst.attribute(0)); // Print the class attribute System.out.println("Class attribute: " + inst.classAttribute()); // Print the class index System.out.println("Class index: " + inst.classIndex()); // Say if class is missing System.out.println("Class is missing: " + inst.classIsMissing()); // Print the instance's class value in internal format System.out.println("Class value (internal format): " + inst.classValue()); // Print a shallow copy of this instance SparseInstance copy = (SparseInstance) inst.copy(); System.out.println("Shallow copy: " + copy); // Set dataset for shallow copy copy.setDataset(inst.dataset()); System.out.println("Shallow copy with dataset set: " + copy); // Print out all values in internal format System.out.print("All stored values in internal format: "); for (int i = 0; i < inst.numValues(); i++) { if (i > 0) { System.out.print(","); } System.out.print(inst.valueSparse(i)); } System.out.println(); // Set all values to zero System.out.print("All values set to zero: "); while (inst.numValues() > 0) { inst.setValueSparse(0, 0); } for (int i = 0; i < inst.numValues(); i++) { if (i > 0) { System.out.print(","); } System.out.print(inst.valueSparse(i)); } System.out.println(); // Set all values to one System.out.print("All values set to one: "); for (int i = 0; i < inst.numAttributes(); i++) { inst.setValue(i, 1); } for (int i = 0; i < inst.numValues(); i++) { if (i > 0) { System.out.print(","); } System.out.print(inst.valueSparse(i)); } System.out.println(); // Unset dataset for copy, delete first attribute, and insert it again copy.setDataset(null); copy.deleteAttributeAt(0); copy.insertAttributeAt(0); copy.setDataset(inst.dataset()); System.out.println("Copy with first attribute deleted and inserted: " + copy); // Same for second attribute 
copy.setDataset(null); copy.deleteAttributeAt(1); copy.insertAttributeAt(1); copy.setDataset(inst.dataset()); System.out.println("Copy with second attribute deleted and inserted: " + copy); // Same for last attribute copy.setDataset(null); copy.deleteAttributeAt(2); copy.insertAttributeAt(2); copy.setDataset(inst.dataset()); System.out.println("Copy with third attribute deleted and inserted: " + copy); // Enumerate attributes (leaving out the class attribute) System.out.println("Enumerating attributes (leaving out class):"); Enumeration enu = inst.enumerateAttributes(); while (enu.hasMoreElements()) { Attribute att = (Attribute) enu.nextElement(); System.out.println(att); } // Headers are equivalent? System.out.println("Header of original and copy equivalent: " + inst.equalHeaders(copy)); // Test for missing values System.out.println("Length of copy missing: " + copy.isMissing(length)); System.out.println("Weight of copy missing: " + copy.isMissing(weight.index())); System.out.println("Length of copy missing: " + Utils.isMissingValue(copy.value(length))); // Prints number of attributes and classes System.out.println("Number of attributes: " + copy.numAttributes()); System.out.println("Number of classes: " + copy.numClasses()); // Replace missing values double[] meansAndModes = { 2, 3, 0 }; copy.replaceMissingValues(meansAndModes); System.out.println("Copy with missing value replaced: " + copy); // Setting and getting values and weights copy.setClassMissing(); System.out.println("Copy with missing class: " + copy); copy.setClassValue(0); System.out.println("Copy with class value set to first value: " + copy); copy.setClassValue("second"); System.out.println("Copy with class value set to \"second\": " + copy); copy.setMissing(1); System.out.println("Copy with second attribute set to be missing: " + copy); copy.setMissing(length); System.out.println("Copy with length set to be missing: " + copy); copy.setValue(0, 0); System.out.println("Copy with first attribute set 
to 0: " + copy); copy.setValue(weight, 1); System.out.println("Copy with weight attribute set to 1: " + copy); copy.setValue(position, "second"); System.out.println("Copy with position set to \"second\": " + copy); copy.setValue(2, "first"); System.out.println("Copy with last attribute set to \"first\": " + copy); System.out.println("Current weight of instance copy: " + copy.weight()); copy.setWeight(2); System.out.println("Current weight of instance copy (set to 2): " + copy.weight()); System.out.println("Last value of copy: " + copy.toString(2)); System.out.println("Value of position for copy: " + copy.toString(position)); System.out.println("Last value of copy (internal format): " + copy.value(2)); System.out.println("Value of position for copy (internal format): " + copy.value(position)); } catch (Exception e) { e.printStackTrace(); } } /** * Returns the revision string. * * @return the revision */ @Override public String getRevision() { return RevisionUtils.extract("$Revision: 9028 $"); } }
20,311
31.920583
80
java
tsml-java
tsml-java-master/src/main/java/weka/core/Capabilities.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Capabilities.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand */ package weka.core; import java.io.Serializable; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Properties; import java.util.Vector; import weka.core.converters.ConverterUtils.DataSource; /** * A class that describes the capabilites (e.g., handling certain types of * attributes, missing values, types of classes, etc.) of a specific * classifier. By default, the classifier is capable of nothing. This * ensures that new features have to be enabled explicitly. <p/> * * A common code fragment for making use of the capabilities in a classifier * would be this: * <pre> * public void <b>buildClassifier</b>(Instances instances) throws Exception { * // can the classifier handle the data? * getCapabilities().<b>testWithFail(instances)</b>; * ... * // possible deletion of instances with missing class labels, etc. * </pre> * For only testing a single attribute, use this: * <pre> * ... * Attribute att = instances.attribute(0); * getCapabilities().<b>testWithFail(att)</b>; * ... * </pre> * Or for testing the class attribute (uses the capabilities that are * especially for the class): * <pre> * ... 
* Attribute att = instances.classAttribute(); * getCapabilities().<b>testWithFail(att, <i>true</i>)</b>; * ... * </pre> * * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision: 9134 $ */ public class Capabilities implements Cloneable, Serializable, RevisionHandler { /** serialversion UID */ static final long serialVersionUID = -5478590032325567849L; /** the properties file for managing the tests */ public final static String PROPERTIES_FILE = "weka/core/Capabilities.props"; /** the actual properties */ protected static Properties PROPERTIES; /** defines an attribute type */ private final static int ATTRIBUTE = 1; /** defines a class type */ private final static int CLASS = 2; /** defines an attribute capability */ private final static int ATTRIBUTE_CAPABILITY = 4; /** defines a class capability */ private final static int CLASS_CAPABILITY = 8; /** defines a other capability */ private final static int OTHER_CAPABILITY = 16; /** enumeration of all capabilities */ public enum Capability { // attributes /** can handle nominal attributes */ NOMINAL_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "Nominal attributes"), /** can handle binary attributes */ BINARY_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "Binary attributes"), /** can handle unary attributes */ UNARY_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "Unary attributes"), /** can handle empty nominal attributes */ EMPTY_NOMINAL_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "Empty nominal attributes"), /** can handle numeric attributes */ NUMERIC_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "Numeric attributes"), /** can handle date attributes */ DATE_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "Date attributes"), /** can handle string attributes */ STRING_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "String attributes"), /** can handle relational attributes */ RELATIONAL_ATTRIBUTES(ATTRIBUTE + ATTRIBUTE_CAPABILITY, "Relational attributes"), /** can handle missing values in attributes */ 
MISSING_VALUES(ATTRIBUTE_CAPABILITY, "Missing values"), // class /** can handle data without class attribute, eg clusterers */ NO_CLASS(CLASS_CAPABILITY, "No class"), /** can handle nominal classes */ NOMINAL_CLASS(CLASS + CLASS_CAPABILITY, "Nominal class"), /** can handle binary classes */ BINARY_CLASS(CLASS + CLASS_CAPABILITY, "Binary class"), /** can handle unary classes */ UNARY_CLASS(CLASS + CLASS_CAPABILITY, "Unary class"), /** can handle empty nominal classes */ EMPTY_NOMINAL_CLASS(CLASS + CLASS_CAPABILITY, "Empty nominal class"), /** can handle numeric classes */ NUMERIC_CLASS(CLASS + CLASS_CAPABILITY, "Numeric class"), /** can handle date classes */ DATE_CLASS(CLASS + CLASS_CAPABILITY, "Date class"), /** can handle string classes */ STRING_CLASS(CLASS + CLASS_CAPABILITY, "String class"), /** can handle relational classes */ RELATIONAL_CLASS(CLASS + CLASS_CAPABILITY, "Relational class"), /** can handle missing values in class attribute */ MISSING_CLASS_VALUES(CLASS_CAPABILITY, "Missing class values"), // other /** can handle multi-instance data */ ONLY_MULTIINSTANCE(OTHER_CAPABILITY, "Only multi-Instance data"); /** the flags for the capabilities */ private int m_Flags = 0; /** the display string */ private String m_Display; /** * initializes the capability with the given flags * * @param flags "meta-data" for the capability * @param display the display string (must be unique!) 
*/ private Capability(int flags, String display) { m_Flags = flags; m_Display = display; } /** * returns true if the capability is an attribute * * @return true if the capability is an attribute */ public boolean isAttribute() { return ((m_Flags & ATTRIBUTE) == ATTRIBUTE); } /** * returns true if the capability is a class * * @return true if the capability is a class */ public boolean isClass() { return ((m_Flags & CLASS) == CLASS); } /** * returns true if the capability is an attribute capability * * @return true if the capability is an attribute capability */ public boolean isAttributeCapability() { return ((m_Flags & ATTRIBUTE_CAPABILITY) == ATTRIBUTE_CAPABILITY); } /** * returns true if the capability is a class capability * * @return true if the capability is a class capability */ public boolean isOtherCapability() { return ((m_Flags & OTHER_CAPABILITY) == OTHER_CAPABILITY); } /** * returns true if the capability is a other capability * * @return true if the capability is a other capability */ public boolean isClassCapability() { return ((m_Flags & CLASS_CAPABILITY) == CLASS_CAPABILITY); } /** * returns the display string of the capability * * @return the display string */ public String toString() { return m_Display; } }; /** the object that owns this capabilities instance */ protected CapabilitiesHandler m_Owner; /** the hashset for storing the active capabilities */ protected HashSet<Capability> m_Capabilities; /** the hashset for storing dependent capabilities, eg for meta-classifiers */ protected HashSet<Capability> m_Dependencies; /** the reason why the test failed, used to throw an exception */ protected Exception m_FailReason = null; /** the minimum number of instances in a dataset */ protected int m_MinimumNumberInstances = 1; /** whether to perform any tests at all */ protected boolean m_Test; /** whether to perform data based tests */ protected boolean m_InstancesTest; /** whether to perform attribute based tests */ protected boolean m_AttributeTest; 
/** whether to test for missing values */ protected boolean m_MissingValuesTest; /** whether to test for missing class values */ protected boolean m_MissingClassValuesTest; /** whether to test for minimum number of instances */ protected boolean m_MinimumNumberInstancesTest; /** * initializes the capabilities for the given owner * * @param owner the object that produced this Capabilities instance */ public Capabilities(CapabilitiesHandler owner) { super(); setOwner(owner); m_Capabilities = new HashSet<Capability>(); m_Dependencies = new HashSet<Capability>(); // load properties if (PROPERTIES == null) { try { PROPERTIES = Utils.readProperties(PROPERTIES_FILE); } catch (Exception e) { e.printStackTrace(); PROPERTIES = new Properties(); } } m_Test = Boolean.parseBoolean(PROPERTIES.getProperty("Test", "true")); m_InstancesTest = Boolean.parseBoolean(PROPERTIES.getProperty("InstancesTest", "true")) && m_Test; m_AttributeTest = Boolean.parseBoolean(PROPERTIES.getProperty("AttributeTest", "true")) && m_Test; m_MissingValuesTest = Boolean.parseBoolean(PROPERTIES.getProperty("MissingValuesTest", "true")) && m_Test; m_MissingClassValuesTest = Boolean.parseBoolean(PROPERTIES.getProperty("MissingClassValuesTest", "true")) && m_Test; m_MinimumNumberInstancesTest = Boolean.parseBoolean(PROPERTIES.getProperty("MinimumNumberInstancesTest", "true")) && m_Test; if (owner instanceof weka.classifiers.UpdateableClassifier || owner instanceof weka.clusterers.UpdateableClusterer) { setMinimumNumberInstances(0); } } /** * Creates and returns a copy of this object. 
* * @return a clone of this object */ public Object clone() { Capabilities result; result = new Capabilities(m_Owner); result.assign(this); return result; } /** * retrieves the data from the given Capabilities object * * @param c the capabilities object to initialize with */ public void assign(Capabilities c) { for (Capability cap: Capability.values()) { // capability if (c.handles(cap)) enable(cap); else disable(cap); // dependency if (c.hasDependency(cap)) enableDependency(cap); else disableDependency(cap); } setMinimumNumberInstances(c.getMinimumNumberInstances()); } /** * performs an AND conjunction with the capabilities of the given * Capabilities object and updates itself * * @param c the capabilities to AND with */ public void and(Capabilities c) { for (Capability cap: Capability.values()) { // capability if (handles(cap) && c.handles(cap)) m_Capabilities.add(cap); else m_Capabilities.remove(cap); // dependency if (hasDependency(cap) && c.hasDependency(cap)) m_Dependencies.add(cap); else m_Dependencies.remove(cap); } // minimum number of instances that both handlers need at least to work if (c.getMinimumNumberInstances() > getMinimumNumberInstances()) setMinimumNumberInstances(c.getMinimumNumberInstances()); } /** * performs an OR conjunction with the capabilities of the given * Capabilities object and updates itself * * @param c the capabilities to OR with */ public void or(Capabilities c) { for (Capability cap: Capability.values()) { // capability if (handles(cap) || c.handles(cap)) m_Capabilities.add(cap); else m_Capabilities.remove(cap); // dependency if (hasDependency(cap) || c.hasDependency(cap)) m_Dependencies.add(cap); else m_Dependencies.remove(cap); } if (c.getMinimumNumberInstances() < getMinimumNumberInstances()) setMinimumNumberInstances(c.getMinimumNumberInstances()); } /** * Returns true if the currently set capabilities support at least all of * the capabiliites of the given Capabilities object (checks only the enum!) 
* * @param c the capabilities to support at least * @return true if all the requested capabilities are supported */ public boolean supports(Capabilities c) { boolean result; result = true; for (Capability cap: Capability.values()) { if (c.handles(cap) && !handles(cap)) { result = false; break; } } return result; } /** * Returns true if the currently set capabilities support (or have a * dependency) at least all of the capabilities of the given Capabilities * object (checks only the enum!) * * @param c the capabilities (or dependencies) to support at least * @return true if all the requested capabilities are supported (or at * least have a dependency) */ public boolean supportsMaybe(Capabilities c) { boolean result; result = true; for (Capability cap: Capability.values()) { if (c.handles(cap) && !(handles(cap) || hasDependency(cap))) { result = false; break; } } return result; } /** * sets the owner of this capabilities object * * @param value the new owner */ public void setOwner(CapabilitiesHandler value) { m_Owner = value; } /** * returns the owner of this capabilities object * * @return the current owner of this capabilites object */ public CapabilitiesHandler getOwner() { return m_Owner; } /** * sets the minimum number of instances that have to be in the dataset * * @param value the minimum number of instances */ public void setMinimumNumberInstances(int value) { if (value >= 0) m_MinimumNumberInstances = value; } /** * returns the minimum number of instances that have to be in the dataset * * @return the minimum number of instances */ public int getMinimumNumberInstances() { return m_MinimumNumberInstances; } /** * Returns an Iterator over the stored capabilities * * @return iterator over the current capabilities */ public Iterator capabilities() { return m_Capabilities.iterator(); } /** * Returns an Iterator over the stored dependencies * * @return iterator over the current dependencies */ public Iterator dependencies() { return m_Dependencies.iterator(); } 
/** * enables the given capability. * Enabling NOMINAL_ATTRIBUTES also enables BINARY_ATTRIBUTES, * UNARY_ATTRIBUTES and EMPTY_NOMINAL_ATTRIBUTES. * Enabling BINARY_ATTRIBUTES also enables UNARY_ATTRIBUTES and * EMPTY_NOMINAL_ATTRIBUTES. * Enabling UNARY_ATTRIBUTES also enables EMPTY_NOMINAL_ATTRIBUTES. * But NOMINAL_CLASS only enables BINARY_CLASS, since normal schemes in Weka * don't work with datasets that have only 1 class label (or none). * * @param c the capability to enable */ public void enable(Capability c) { // attributes if (c == Capability.NOMINAL_ATTRIBUTES) { enable(Capability.BINARY_ATTRIBUTES); } else if (c == Capability.BINARY_ATTRIBUTES) { enable(Capability.UNARY_ATTRIBUTES); } else if (c == Capability.UNARY_ATTRIBUTES) { enable(Capability.EMPTY_NOMINAL_ATTRIBUTES); } // class else if (c == Capability.NOMINAL_CLASS) { enable(Capability.BINARY_CLASS); } m_Capabilities.add(c); } /** * enables the dependency flag for the given capability * Enabling NOMINAL_ATTRIBUTES also enables BINARY_ATTRIBUTES, * UNARY_ATTRIBUTES and EMPTY_NOMINAL_ATTRIBUTES. * Enabling BINARY_ATTRIBUTES also enables UNARY_ATTRIBUTES and * EMPTY_NOMINAL_ATTRIBUTES. * Enabling UNARY_ATTRIBUTES also enables EMPTY_NOMINAL_ATTRIBUTES. * But NOMINAL_CLASS only enables BINARY_CLASS, since normal schemes in Weka * don't work with datasets that have only 1 class label (or none). 
* * @param c the capability to enable the dependency flag for */ public void enableDependency(Capability c) { // attributes if (c == Capability.NOMINAL_ATTRIBUTES) { enableDependency(Capability.BINARY_ATTRIBUTES); } else if (c == Capability.BINARY_ATTRIBUTES) { enableDependency(Capability.UNARY_ATTRIBUTES); } else if (c == Capability.UNARY_ATTRIBUTES) { enableDependency(Capability.EMPTY_NOMINAL_ATTRIBUTES); } // class else if (c == Capability.NOMINAL_CLASS) { enableDependency(Capability.BINARY_CLASS); } m_Dependencies.add(c); } /** * enables all class types * * @see #disableAllClasses() * @see #getClassCapabilities() */ public void enableAllClasses() { for (Capability cap: Capability.values()) { if (cap.isClass()) enable(cap); } } /** * enables all class type dependencies * * @see #disableAllClassDependencies() * @see #getClassCapabilities() */ public void enableAllClassDependencies() { for (Capability cap: Capability.values()) { if (cap.isClass()) enableDependency(cap); } } /** * enables all attribute types * * @see #disableAllAttributes() * @see #getAttributeCapabilities() */ public void enableAllAttributes() { for (Capability cap: Capability.values()) { if (cap.isAttribute()) enable(cap); } } /** * enables all attribute type dependencies * * @see #disableAllAttributeDependencies() * @see #getAttributeCapabilities() */ public void enableAllAttributeDependencies() { for (Capability cap: Capability.values()) { if (cap.isAttribute()) enableDependency(cap); } } /** * enables all attribute and class types (including dependencies) */ public void enableAll() { enableAllAttributes(); enableAllAttributeDependencies(); enableAllClasses(); enableAllClassDependencies(); enable(Capability.MISSING_VALUES); enable(Capability.MISSING_CLASS_VALUES); } /** * disables the given capability * Disabling NOMINAL_ATTRIBUTES also disables BINARY_ATTRIBUTES, * UNARY_ATTRIBUTES and EMPTY_NOMINAL_ATTRIBUTES. 
* Disabling BINARY_ATTRIBUTES also disables UNARY_ATTRIBUTES and * EMPTY_NOMINAL_ATTRIBUTES. * Disabling UNARY_ATTRIBUTES also disables EMPTY_NOMINAL_ATTRIBUTES. * The same hierarchy applies to the class capabilities. * * @param c the capability to disable */ public void disable(Capability c) { // attributes if (c == Capability.NOMINAL_ATTRIBUTES) { disable(Capability.BINARY_ATTRIBUTES); } else if (c == Capability.BINARY_ATTRIBUTES) { disable(Capability.UNARY_ATTRIBUTES); } else if (c == Capability.UNARY_ATTRIBUTES) { disable(Capability.EMPTY_NOMINAL_ATTRIBUTES); } // class else if (c == Capability.NOMINAL_CLASS) { disable(Capability.BINARY_CLASS); } else if (c == Capability.BINARY_CLASS) { disable(Capability.UNARY_CLASS); } else if (c == Capability.UNARY_CLASS) { disable(Capability.EMPTY_NOMINAL_CLASS); } m_Capabilities.remove(c); } /** * disables the dependency of the given capability * Disabling NOMINAL_ATTRIBUTES also disables BINARY_ATTRIBUTES, * UNARY_ATTRIBUTES and EMPTY_NOMINAL_ATTRIBUTES. * Disabling BINARY_ATTRIBUTES also disables UNARY_ATTRIBUTES and * EMPTY_NOMINAL_ATTRIBUTES. * Disabling UNARY_ATTRIBUTES also disables EMPTY_NOMINAL_ATTRIBUTES. * The same hierarchy applies to the class capabilities. 
* * @param c the capability to disable the dependency flag for */ public void disableDependency(Capability c) { // attributes if (c == Capability.NOMINAL_ATTRIBUTES) { disableDependency(Capability.BINARY_ATTRIBUTES); } else if (c == Capability.BINARY_ATTRIBUTES) { disableDependency(Capability.UNARY_ATTRIBUTES); } else if (c == Capability.UNARY_ATTRIBUTES) { disableDependency(Capability.EMPTY_NOMINAL_ATTRIBUTES); } // class else if (c == Capability.NOMINAL_CLASS) { disableDependency(Capability.BINARY_CLASS); } else if (c == Capability.BINARY_CLASS) { disableDependency(Capability.UNARY_CLASS); } else if (c == Capability.UNARY_CLASS) { disableDependency(Capability.EMPTY_NOMINAL_CLASS); } m_Dependencies.remove(c); } /** * disables all class types * * @see #enableAllClasses() * @see #getClassCapabilities() */ public void disableAllClasses() { for (Capability cap: Capability.values()) { if (cap.isClass()) disable(cap); } } /** * disables all class type dependencies * * @see #enableAllClassDependencies() * @see #getClassCapabilities() */ public void disableAllClassDependencies() { for (Capability cap: Capability.values()) { if (cap.isClass()) disableDependency(cap); } } /** * disables all attribute types * * @see #enableAllAttributes() * @see #getAttributeCapabilities() */ public void disableAllAttributes() { for (Capability cap: Capability.values()) { if (cap.isAttribute()) disable(cap); } } /** * disables all attribute type dependencies * * @see #enableAllAttributeDependencies() * @see #getAttributeCapabilities() */ public void disableAllAttributeDependencies() { for (Capability cap: Capability.values()) { if (cap.isAttribute()) disableDependency(cap); } } /** * disables all attribute and class types (including dependencies) */ public void disableAll() { disableAllAttributes(); disableAllAttributeDependencies(); disableAllClasses(); disableAllClassDependencies(); disable(Capability.MISSING_VALUES); disable(Capability.MISSING_CLASS_VALUES); disable(Capability.NO_CLASS); 
} /** * returns all class capabilities * * @return all capabilities regarding the class * @see #enableAllClasses() * @see #disableAllClasses() */ public Capabilities getClassCapabilities() { Capabilities result; result = new Capabilities(getOwner()); for (Capability cap: Capability.values()) { if (cap.isClassCapability()) { if (handles(cap)) result.m_Capabilities.add(cap); } } return result; } /** * returns all attribute capabilities * * @return all capabilities regarding attributes * @see #enableAllAttributes() * @see #disableAllAttributes() */ public Capabilities getAttributeCapabilities() { Capabilities result; result = new Capabilities(getOwner()); for (Capability cap: Capability.values()) { if (cap.isAttributeCapability()) { if (handles(cap)) result.m_Capabilities.add(cap); } } return result; } /** * returns all other capabilities, besides class and attribute related ones * * @return all other capabilities, besides class and attribute * related ones */ public Capabilities getOtherCapabilities() { Capabilities result; result = new Capabilities(getOwner()); for (Capability cap: Capability.values()) { if (cap.isOtherCapability()) { if (handles(cap)) result.m_Capabilities.add(cap); } } return result; } /** * returns true if the classifier handler has the specified capability * * @param c the capability to test * @return true if the classifier handler has the capability */ public boolean handles(Capability c) { return m_Capabilities.contains(c); } /** * returns true if the classifier handler has a dependency for the specified * capability * * @param c the capability to test * @return true if the classifier handler has a dependency for the * capability */ public boolean hasDependency(Capability c) { return m_Dependencies.contains(c); } /** * Checks whether there are any dependencies at all * * @return true if there is at least one dependency for a capability */ public boolean hasDependencies() { return (m_Dependencies.size() > 0); } /** * returns the reason why the 
tests failed, is null if tests succeeded * * @return the reason why the tests failed */ public Exception getFailReason() { return m_FailReason; } /** * Generates the message for, e.g., an exception. Adds the classname before the * actual message and returns that string. * * @param msg the actual content of the message, e.g., exception * @return the new message */ protected String createMessage(String msg) { String result; result = ""; if (getOwner() != null) result = getOwner().getClass().getName(); else result = "<anonymous>"; result += ": " + msg; return result; } /** * Test the given attribute, whether it can be processed by the handler, * given its capabilities. The method assumes that the specified attribute * is not the class attribute. * * @param att the attribute to test * @return true if all the tests succeeded * @see #test(Attribute, boolean) */ public boolean test(Attribute att) { return test(att, false); } /** * Test the given attribute, whether it can be processed by the handler, * given its capabilities. * * @param att the attribute to test * @param isClass whether this attribute is the class attribute * @return true if all the tests succeeded * @see #m_AttributeTest */ public boolean test(Attribute att, boolean isClass) { boolean result; Capability cap; Capability capBinary; Capability capUnary; Capability capEmpty; String errorStr; result = true; // shall we test the data? 
if (!m_AttributeTest) return result; // for exception if (isClass) errorStr = "class"; else errorStr = "attributes"; switch (att.type()) { case Attribute.NOMINAL: if (isClass) { cap = Capability.NOMINAL_CLASS; capBinary = Capability.BINARY_CLASS; capUnary = Capability.UNARY_CLASS; capEmpty = Capability.EMPTY_NOMINAL_CLASS; } else { cap = Capability.NOMINAL_ATTRIBUTES; capBinary = Capability.BINARY_ATTRIBUTES; capUnary = Capability.UNARY_ATTRIBUTES; capEmpty = Capability.EMPTY_NOMINAL_ATTRIBUTES; } if (handles(cap) && (att.numValues() > 2)) break; else if (handles(capBinary) && (att.numValues() == 2)) break; else if (handles(capUnary) && (att.numValues() == 1)) break; else if (handles(capEmpty) && (att.numValues() == 0)) break; if (att.numValues() == 0) { m_FailReason = new UnsupportedAttributeTypeException( createMessage("Cannot handle empty nominal " + errorStr + "!")); result = false; } if (att.numValues() == 1) { m_FailReason = new UnsupportedAttributeTypeException( createMessage("Cannot handle unary " + errorStr + "!")); result = false; } else if (att.numValues() == 2) { m_FailReason = new UnsupportedAttributeTypeException( createMessage("Cannot handle binary " + errorStr + "!")); result = false; } else { m_FailReason = new UnsupportedAttributeTypeException( createMessage("Cannot handle multi-valued nominal " + errorStr + "!")); result = false; } break; case Attribute.NUMERIC: if (isClass) cap = Capability.NUMERIC_CLASS; else cap = Capability.NUMERIC_ATTRIBUTES; if (!handles(cap)) { m_FailReason = new UnsupportedAttributeTypeException( createMessage("Cannot handle numeric " + errorStr + "!")); result = false; } break; case Attribute.DATE: if (isClass) cap = Capability.DATE_CLASS; else cap = Capability.DATE_ATTRIBUTES; if (!handles(cap)) { m_FailReason = new UnsupportedAttributeTypeException( createMessage("Cannot handle date " + errorStr + "!")); result = false; } break; case Attribute.STRING: if (isClass) cap = Capability.STRING_CLASS; else cap = 
Capability.STRING_ATTRIBUTES; if (!handles(cap)) { m_FailReason = new UnsupportedAttributeTypeException( createMessage("Cannot handle string " + errorStr + "!")); result = false; } break; case Attribute.RELATIONAL: if (isClass) cap = Capability.RELATIONAL_CLASS; else cap = Capability.RELATIONAL_ATTRIBUTES; if (!handles(cap)) { m_FailReason = new UnsupportedAttributeTypeException( createMessage("Cannot handle relational " + errorStr + "!")); result = false; } // attributes in the relation of this attribute must be tested // separately with a different Capabilites object break; default: m_FailReason = new UnsupportedAttributeTypeException( createMessage("Cannot handle unknown attribute type '" + att.type() + "'!")); result = false; } return result; } /** * Tests the given data, whether it can be processed by the handler, * given its capabilities. Classifiers implementing the * <code>MultiInstanceCapabilitiesHandler</code> interface are checked * automatically for their multi-instance Capabilities (if no bags, then * only the bag-structure, otherwise only the first bag). * * @param data the data to test * @return true if all the tests succeeded * @see #test(Instances, int, int) */ public boolean test(Instances data) { return test(data, 0, data.numAttributes() - 1); } /** * Tests a certain range of attributes of the given data, whether it can be * processed by the handler, given its capabilities. Classifiers * implementing the <code>MultiInstanceCapabilitiesHandler</code> interface * are checked automatically for their multi-instance Capabilities (if no * bags, then only the bag-structure, otherwise only the first bag). * * @param data the data to test * @param fromIndex the range of attributes - start (incl.) * @param toIndex the range of attributes - end (incl.) 
* @return true if all the tests succeeded * @see MultiInstanceCapabilitiesHandler * @see #m_InstancesTest * @see #m_MissingValuesTest * @see #m_MissingClassValuesTest * @see #m_MinimumNumberInstancesTest */ public boolean test(Instances data, int fromIndex, int toIndex) { int i; int n; int m; Attribute att; Instance inst; boolean testClass; Capabilities cap; boolean missing; Iterator iter; // shall we test the data? if (!m_InstancesTest) return true; // no Capabilities? -> warning if ( (m_Capabilities.size() == 0) || ((m_Capabilities.size() == 1) && handles(Capability.NO_CLASS)) ) System.err.println(createMessage("No capabilities set!")); // any attributes? if (toIndex - fromIndex < 0) { m_FailReason = new WekaException( createMessage("No attributes!")); return false; } // do wee need to test the class attribute, i.e., is the class attribute // within the range of attributes? testClass = (data.classIndex() > -1) && (data.classIndex() >= fromIndex) && (data.classIndex() <= toIndex); // attributes for (i = fromIndex; i <= toIndex; i++) { att = data.attribute(i); // class is handled separately if (i == data.classIndex()) continue; // check attribute types if (!test(att)) return false; } // class if (!handles(Capability.NO_CLASS) && (data.classIndex() == -1)) { m_FailReason = new UnassignedClassException( createMessage("Class attribute not set!")); return false; } // special case: no class attribute can be handled if (handles(Capability.NO_CLASS) && (data.classIndex() > -1)) { cap = getClassCapabilities(); cap.disable(Capability.NO_CLASS); iter = cap.capabilities(); if (!iter.hasNext()) { m_FailReason = new WekaException( createMessage("Cannot handle any class attribute!")); return false; } } if (testClass && !handles(Capability.NO_CLASS)) { att = data.classAttribute(); if (!test(att, true)) return false; // special handling of RELATIONAL class // TODO: store additional Capabilities for this case // missing class labels if (m_MissingClassValuesTest) { if 
(!handles(Capability.MISSING_CLASS_VALUES)) { for (i = 0; i < data.numInstances(); i++) { if (data.instance(i).classIsMissing()) { m_FailReason = new WekaException( createMessage("Cannot handle missing class values!")); return false; } } } else { if (m_MinimumNumberInstancesTest) { int hasClass = 0; for (i = 0; i < data.numInstances(); i++) { if (!data.instance(i).classIsMissing()) hasClass++; } // not enough instances with class labels? if (hasClass < getMinimumNumberInstances()) { m_FailReason = new WekaException( createMessage("Not enough training instances with class labels (required: " + getMinimumNumberInstances() + ", provided: " + hasClass + ")!")); return false; } } } } } // missing values if (m_MissingValuesTest) { if (!handles(Capability.MISSING_VALUES)) { missing = false; for (i = 0; i < data.numInstances(); i++) { inst = data.instance(i); if (inst instanceof SparseInstance) { for (m = 0; m < inst.numValues(); m++) { n = inst.index(m); // out of scope? if (n < fromIndex) continue; if (n > toIndex) break; // skip class if (n == inst.classIndex()) continue; if (inst.isMissing(n)) { missing = true; break; } } } else { for (n = fromIndex; n <= toIndex; n++) { // skip class if (n == inst.classIndex()) continue; if (inst.isMissing(n)) { missing = true; break; } } } if (missing) { m_FailReason = new NoSupportForMissingValuesException( createMessage("Cannot handle missing values!")); return false; } } } } // instances if (m_MinimumNumberInstancesTest) { if (data.numInstances() < getMinimumNumberInstances()) { m_FailReason = new WekaException( createMessage("Not enough training instances (required: " + getMinimumNumberInstances() + ", provided: " + data.numInstances() + ")!")); return false; } } // Multi-Instance? -> check structure (regardless of attribute range!) if (handles(Capability.ONLY_MULTIINSTANCE)) { // number of attributes? 
if (data.numAttributes() != 3) { m_FailReason = new WekaException( createMessage("Incorrect Multi-Instance format, must be 'bag-id, bag, class'!")); return false; } // type of attributes and position of class? if ( !data.attribute(0).isNominal() || !data.attribute(1).isRelationValued() || (data.classIndex() != data.numAttributes() - 1) ) { m_FailReason = new WekaException( createMessage("Incorrect Multi-Instance format, must be 'NOMINAL att, RELATIONAL att, CLASS att'!")); return false; } // check data immediately if (getOwner() instanceof MultiInstanceCapabilitiesHandler) { MultiInstanceCapabilitiesHandler handler = (MultiInstanceCapabilitiesHandler) getOwner(); cap = handler.getMultiInstanceCapabilities(); boolean result; if (data.numInstances() > 0 && data.attribute(1).numValues() > 0) result = cap.test(data.attribute(1).relation(0)); else result = cap.test(data.attribute(1).relation()); if (!result) { m_FailReason = cap.m_FailReason; return false; } } } // passed all tests! return true; } /** * tests the given attribute by calling the test(Attribute,boolean) method * and throws an exception if the test fails. The method assumes that the * specified attribute is not the class attribute. * * @param att the attribute to test * @throws Exception in case the attribute doesn't pass the tests * @see #test(Attribute,boolean) */ public void testWithFail(Attribute att) throws Exception { test(att, false); } /** * tests the given attribute by calling the test(Attribute,boolean) method * and throws an exception if the test fails. * * @param att the attribute to test * @param isClass whether this attribute is the class attribute * @throws Exception in case the attribute doesn't pass the tests * @see #test(Attribute,boolean) */ public void testWithFail(Attribute att, boolean isClass) throws Exception { if (!test(att, isClass)) throw m_FailReason; } /** * tests the given data by calling the test(Instances,int,int) method and * throws an exception if the test fails. 
* * @param data the data to test * @param fromIndex the range of attributes - start (incl.) * @param toIndex the range of attributes - end (incl.) * @throws Exception in case the data doesn't pass the tests * @see #test(Instances,int,int) */ public void testWithFail(Instances data, int fromIndex, int toIndex) throws Exception { if (!test(data, fromIndex, toIndex)) throw m_FailReason; } /** * tests the given data by calling the test(Instances) method and throws * an exception if the test fails. * * @param data the data to test * @throws Exception in case the data doesn't pass the tests * @see #test(Instances) */ public void testWithFail(Instances data) throws Exception { if (!test(data)) throw m_FailReason; } /** * returns a string representation of the capabilities * * @return a string representation of this object */ public String toString() { Vector<Capability> sorted; StringBuffer result; result = new StringBuffer(); // capabilities sorted = new Vector<Capability>(m_Capabilities); Collections.sort(sorted); result.append("Capabilities: " + sorted.toString() + "\n"); // dependencies sorted = new Vector<Capability>(m_Dependencies); Collections.sort(sorted); result.append("Dependencies: " + sorted.toString() + "\n"); // other stuff result.append("min # Instance: " + getMinimumNumberInstances() + "\n"); return result.toString(); } /** * turns the capabilities object into source code. The returned source code * is a block that creates a Capabilities object named 'objectname' and * enables all the capabilities of this Capabilities object. * * @param objectname the name of the Capabilities object being instantiated * @return the generated source code */ public String toSource(String objectname) { return toSource(objectname, 0); } /** * turns the capabilities object into source code. The returned source code * is a block that creates a Capabilities object named 'objectname' and * enables all the capabilities of this Capabilities object. 
* * @param objectname the name of the Capabilities object being instantiated * @param indent the number of blanks to indent * @return the generated source code */ public String toSource(String objectname, int indent) { StringBuffer result; String capsName; String capName; String indentStr; int i; result = new StringBuffer(); capsName = Capabilities.class.getName(); capName = Capabilities.Capability.class.getName().replaceAll("\\$", "."); indentStr = ""; for (i = 0; i < indent; i++) indentStr += " "; // object name result.append(indentStr + capsName + " " + objectname + " = new " + capsName + "(this);\n"); List<Capability> capsList = new ArrayList<Capability>(); boolean hasNominalAtt = false; boolean hasBinaryAtt = false; boolean hasUnaryAtt = false; boolean hasEmptyNomAtt = false; boolean hasNominalClass = false; // capabilities result.append("\n"); for (Capability cap: Capability.values()) { // capability if (handles(cap)) { if (cap == Capability.NOMINAL_ATTRIBUTES) { hasNominalAtt = true; } if (cap == Capability.NOMINAL_CLASS) { hasNominalClass = true; } if (cap == Capability.BINARY_ATTRIBUTES) { hasBinaryAtt = true; } if (cap == Capability.UNARY_ATTRIBUTES) { hasUnaryAtt = true; } if (cap == Capability.EMPTY_NOMINAL_ATTRIBUTES) { hasEmptyNomAtt = true; } capsList.add(cap); } } for (Capability cap : capsList) { if ((cap == Capability.BINARY_ATTRIBUTES && hasNominalAtt) || (cap == Capability.UNARY_ATTRIBUTES && hasBinaryAtt) || (cap == Capability.EMPTY_NOMINAL_ATTRIBUTES && hasUnaryAtt) || (cap == Capability.BINARY_CLASS && hasNominalClass)) { continue; } result.append( indentStr + objectname + ".enable(" + capName + "." + cap.name() + ");\n"); // dependency if (hasDependency(cap)) result.append( indentStr + objectname + ".enableDependency(" + capName + "." 
+ cap.name() + ");\n"); } // capabilities result.append("\n"); // other result.append("\n"); result.append( indentStr + objectname + ".setMinimumNumberInstances(" + getMinimumNumberInstances() + ");\n"); result.append("\n"); return result.toString(); } /** * returns a Capabilities object specific for this data. The multi-instance * capability is not checked as well as the minimum number of instances * is not set. * * @param data the data to base the capabilities on * @return a data-specific capabilities object * @throws Exception in case an error occurrs, e.g., an unknown attribute * type */ public static Capabilities forInstances(Instances data) throws Exception { return forInstances(data, false); } /** * returns a Capabilities object specific for this data. The minimum number * of instances is not set, the check for multi-instance data is optional. * * @param data the data to base the capabilities on * @param multi if true then the structure is checked, too * @return a data-specific capabilities object * @throws Exception in case an error occurrs, e.g., an unknown attribute * type */ public static Capabilities forInstances(Instances data, boolean multi) throws Exception { Capabilities result; Capabilities multiInstance; int i; int n; int m; Instance inst; boolean missing; result = new Capabilities(null); // class if (data.classIndex() == -1) { result.enable(Capability.NO_CLASS); } else { switch (data.classAttribute().type()) { case Attribute.NOMINAL: if (data.classAttribute().numValues() == 1) result.enable(Capability.UNARY_CLASS); else if (data.classAttribute().numValues() == 2) result.enable(Capability.BINARY_CLASS); else result.enable(Capability.NOMINAL_CLASS); break; case Attribute.NUMERIC: result.enable(Capability.NUMERIC_CLASS); break; case Attribute.STRING: result.enable(Capability.STRING_CLASS); break; case Attribute.DATE: result.enable(Capability.DATE_CLASS); break; case Attribute.RELATIONAL: result.enable(Capability.RELATIONAL_CLASS); break; default: 
throw new UnsupportedAttributeTypeException( "Unknown class attribute type '" + data.classAttribute() + "'!"); } // missing class values for (i = 0; i < data.numInstances(); i++) { if (data.instance(i).classIsMissing()) { result.enable(Capability.MISSING_CLASS_VALUES); break; } } } // attributes for (i = 0; i < data.numAttributes(); i++) { // skip class if (i == data.classIndex()) continue; switch (data.attribute(i).type()) { case Attribute.NOMINAL: result.enable(Capability.UNARY_ATTRIBUTES); if (data.attribute(i).numValues() == 2) result.enable(Capability.BINARY_ATTRIBUTES); else if (data.attribute(i).numValues() > 2) result.enable(Capability.NOMINAL_ATTRIBUTES); break; case Attribute.NUMERIC: result.enable(Capability.NUMERIC_ATTRIBUTES); break; case Attribute.DATE: result.enable(Capability.DATE_ATTRIBUTES); break; case Attribute.STRING: result.enable(Capability.STRING_ATTRIBUTES); break; case Attribute.RELATIONAL: result.enable(Capability.RELATIONAL_ATTRIBUTES); break; default: throw new UnsupportedAttributeTypeException( "Unknown attribute type '" + data.attribute(i).type() + "'!"); } } // missing values missing = false; for (i = 0; i < data.numInstances(); i++) { inst = data.instance(i); if (inst instanceof SparseInstance) { for (m = 0; m < inst.numValues(); m++) { n = inst.index(m); // skip class if (n == inst.classIndex()) continue; if (inst.isMissing(n)) { missing = true; break; } } } else { for (n = 0; n < data.numAttributes(); n++) { // skip class if (n == inst.classIndex()) continue; if (inst.isMissing(n)) { missing = true; break; } } } if (missing) { result.enable(Capability.MISSING_VALUES); break; } } // multi-instance data? 
if (multi) { if ( (data.numAttributes() == 3) && (data.attribute(0).isNominal()) // bag-id && (data.attribute(1).isRelationValued()) // bag && (data.classIndex() == data.numAttributes() - 1) ) { multiInstance = new Capabilities(null); multiInstance.or(result.getClassCapabilities()); multiInstance.enable(Capability.NOMINAL_ATTRIBUTES); multiInstance.enable(Capability.RELATIONAL_ATTRIBUTES); multiInstance.enable(Capability.ONLY_MULTIINSTANCE); result.assign(multiInstance); } } return result; } /** * loads the given dataset and prints the Capabilities necessary to * process it. <p/> * * Valid parameters: <p/> * * -file filename <br/> * the file to load * * -c index * the explicit index of the class attribute (default: none) * * @param args the commandline arguments * @throws Exception if something goes wrong */ public static void main(String[] args) throws Exception { String tmpStr; String filename; DataSource source; Instances data; int classIndex; Capabilities cap; Iterator iter; if (args.length == 0) { System.out.println( "\nUsage: " + Capabilities.class.getName() + " -file <dataset> [-c <class index>]\n"); return; } // get parameters tmpStr = Utils.getOption("file", args); if (tmpStr.length() == 0) throw new Exception("No file provided with option '-file'!"); else filename = tmpStr; tmpStr = Utils.getOption("c", args); if (tmpStr.length() != 0) { if (tmpStr.equals("first")) classIndex = 0; else if (tmpStr.equals("last")) classIndex = -2; // last else classIndex = Integer.parseInt(tmpStr) - 1; } else { classIndex = -3; // not set } // load data source = new DataSource(filename); if (classIndex == -3) data = source.getDataSet(); else if (classIndex == -2) data = source.getDataSet(source.getStructure().numAttributes() - 1); else data = source.getDataSet(classIndex); // determine and print capabilities cap = forInstances(data); System.out.println("File: " + filename); System.out.println("Class index: " + ((data.classIndex() == -1) ? 
"not set" : "" + (data.classIndex() + 1))); System.out.println("Capabilities:"); iter = cap.capabilities(); while (iter.hasNext()) System.out.println("- " + iter.next()); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 9134 $"); } }
49,077
28.161022
128
java
tsml-java
tsml-java-master/src/main/java/weka/core/CapabilitiesHandler.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CapabilitiesHandler.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand */ package weka.core; /** * Classes implementing this interface return their capabilities in regards * to datasets. * * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision: 8034 $ * @see Capabilities */ public interface CapabilitiesHandler { /** * Returns the capabilities of this object. * * @return the capabilities of this object * @see Capabilities */ public Capabilities getCapabilities(); }
1,257
29.682927
75
java
tsml-java
tsml-java-master/src/main/java/weka/core/ChebyshevDistance.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ChebyshevDistance.java * Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand * */ package weka.core; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; /** <!-- globalinfo-start --> * Implements the Chebyshev distance. The distance between two vectors is the greatest of their differences along any coordinate dimension.<br/> * <br/> * For more information, see:<br/> * <br/> * Wikipedia. Chebyshev distance. URL http://en.wikipedia.org/wiki/Chebyshev_distance. * <p/> <!-- globalinfo-end --> * <!-- technical-bibtex-start --> * BibTeX: * <pre> * &#64;misc{missing_id, * author = {Wikipedia}, * title = {Chebyshev distance}, * URL = {http://en.wikipedia.org/wiki/Chebyshev_distance} * } * </pre> * <p/> <!-- technical-bibtex-end --> * <!-- options-start --> * Valid options are: <p/> * * <pre> -D * Turns off the normalization of attribute * values in distance calculation.</pre> * * <pre> -R &lt;col1,col2-col4,...&gt; * Specifies list of columns to used in the calculation of the * distance. 'first' and 'last' are valid indices. 
* (default: first-last)</pre> * * <pre> -V * Invert matching sense of column indices.</pre> * <!-- options-end --> * * @author Fracpete (fracpete at waikato dot ac dot nz) * @version $Revision: 8034 $ */ public class ChebyshevDistance extends NormalizableDistance implements TechnicalInformationHandler { /** for serialization. */ private static final long serialVersionUID = -7739904999895461429L; /** * Constructs an Chebyshev Distance object, Instances must be still set. */ public ChebyshevDistance() { super(); } /** * Constructs an Chebyshev Distance object and automatically initializes the * ranges. * * @param data the instances the distance function should work on */ public ChebyshevDistance(Instances data) { super(data); } /** * Returns a string describing this object. * * @return a description of the evaluator suitable for * displaying in the explorer/experimenter gui */ public String globalInfo() { return "Implements the Chebyshev distance. The distance between two vectors " + "is the greatest of their differences along any coordinate dimension.\n\n" + "For more information, see:\n\n" + getTechnicalInformation().toString(); } /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. * * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; result = new TechnicalInformation(Type.MISC); result.setValue(Field.AUTHOR, "Wikipedia"); result.setValue(Field.TITLE, "Chebyshev distance"); result.setValue(Field.URL, "http://en.wikipedia.org/wiki/Chebyshev_distance"); return result; } /** * Updates the current distance calculated so far with the new difference * between two attributes. The difference between the attributes was * calculated with the difference(int,double,double) method. 
* * @param currDist the current distance calculated so far * @param diff the difference between two new attributes * @return the update distance * @see #difference(int, double, double) */ protected double updateDistance(double currDist, double diff) { double result; result = currDist; diff = Math.abs(diff); if (diff > result) result = diff; return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
4,564
28.262821
144
java
tsml-java
tsml-java-master/src/main/java/weka/core/Check.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Check.java
 * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.core;

import java.util.Enumeration;
import java.util.Vector;

/**
 * Abstract general class for testing in Weka. Concrete subclasses implement
 * {@link #doTests()} and report their results to stdout (unless silent mode
 * is enabled).
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 */
public abstract class Check implements OptionHandler, RevisionHandler {

  /** Debugging mode, gives extra output if true */
  protected boolean m_Debug = false;

  /** Silent mode, for no output at all to stdout */
  protected boolean m_Silent = false;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector<Option> result = new Vector<Option>();

    result.addElement(new Option(
        "\tTurn on debugging output.",
        "D", 0, "-D"));

    result.addElement(new Option(
        "\tSilent mode - prints nothing to stdout.",
        "S", 0, "-S"));

    return result.elements();
  }

  /**
   * Parses a given list of options.
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    setDebug(Utils.getFlag('D', options));

    setSilent(Utils.getFlag('S', options));
  }

  /**
   * Gets the current settings of this Check object.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector<String> result = new Vector<String>();

    if (getDebug())
      result.add("-D");

    if (getSilent())
      result.add("-S");

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Tries to instantiate a new instance of the given class and checks whether
   * it is an instance of the specified class. For convenience one can also
   * specify a classname prefix (e.g., "weka.classifiers") to avoid long
   * classnames and then instantiate it with the shortened classname (e.g.,
   * "trees.J48").
   *
   * @param prefix the classname prefix (without trailing dot)
   * @param cls the class to check whether the generated object is an
   *        instance of
   * @param classname the classname to instantiate
   * @param options optional options for the object
   * @return the configured object
   * @throws Exception if instantiation fails
   */
  protected Object forName(String prefix, Class cls, String classname,
      String[] options) throws Exception {

    Object result = null;

    try {
      result = Utils.forName(cls, classname, options);
    } catch (Exception e) {
      // guard against a null message to avoid a NullPointerException
      String msg = (e.getMessage() == null) ? "" : e.getMessage().toLowerCase();
      // shall we try with prefix?
      if (msg.indexOf("can't find") > -1) {
        try {
          result = Utils.forName(cls, prefix + "." + classname, options);
        } catch (Exception ex) {
          // bug fix: inspect the SECOND failure (ex), not the first (e) —
          // previously "e" was tested here, which is always true in this
          // branch and made the else-clause below unreachable
          String msg2 =
            (ex.getMessage() == null) ? "" : ex.getMessage().toLowerCase();
          if (msg2.indexOf("can't find") > -1) {
            throw new Exception(
                "Can't find class called '" + classname + "' or '" + prefix + "."
                + classname + "'!");
          } else {
            throw new Exception(ex);
          }
        }
      } else {
        throw new Exception(e);
      }
    }

    return result;
  }

  /**
   * Begin the tests, reporting results to System.out
   */
  public abstract void doTests();

  /**
   * Set debugging mode
   *
   * @param debug true if debug output should be printed
   */
  public void setDebug(boolean debug) {
    m_Debug = debug;
    // debug output is useless when nothing is printed at all,
    // so disable silent mode if necessary
    if (getDebug())
      setSilent(false);
  }

  /**
   * Get whether debugging is turned on
   *
   * @return true if debugging output is on
   */
  public boolean getDebug() {
    return m_Debug;
  }

  /**
   * Set silent mode, i.e., no output at all to stdout
   *
   * @param value whether silent mode is active or not
   */
  public void setSilent(boolean value) {
    m_Silent = value;
  }

  /**
   * Get whether silent mode is turned on
   *
   * @return true if silent mode is on
   */
  public boolean getSilent() {
    return m_Silent;
  }

  /**
   * prints the given message to stdout, if not silent mode
   *
   * @param msg the text to print to stdout
   */
  protected void print(Object msg) {
    if (!getSilent())
      System.out.print(msg);
  }

  /**
   * prints the given message (+ LF) to stdout, if not silent mode
   *
   * @param msg the message to println to stdout
   */
  protected void println(Object msg) {
    print(msg + "\n");
  }

  /**
   * prints a LF to stdout, if not silent mode
   */
  protected void println() {
    print("\n");
  }

  /**
   * runs the given Check instance with the given options; a usage message
   * listing all options is produced if option parsing fails
   *
   * @param check the check to setup and run
   * @param options the commandline parameters to use
   */
  protected static void runCheck(Check check, String[] options) {
    try {
      try {
        check.setOptions(options);
        Utils.checkForRemainingOptions(options);
      } catch (Exception ex) {
        String result = ex.getMessage() + "\n\n"
          + check.getClass().getName().replaceAll(".*\\.", "")
          + " Options:\n\n";
        Enumeration enm = check.listOptions();
        while (enm.hasMoreElements()) {
          Option option = (Option) enm.nextElement();
          result += option.synopsis() + "\n" + option.description() + "\n";
        }
        throw new Exception(result);
      }

      check.doTests();
    } catch (Exception ex) {
      System.err.println(ex.getMessage());
    }
  }
}
6,272
25.029046
120
java
tsml-java
tsml-java-master/src/main/java/weka/core/CheckGOE.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * CheckGOE.java
 * Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.core;

import java.beans.BeanInfo;
import java.beans.Introspector;
import java.beans.PropertyDescriptor;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Vector;

/**
 * Simple command line checking of classes that are editable in the GOE
 * (Generic Object Editor): verifies the presence of a globalInfo() method
 * and of a ...TipText() method for every editable bean property.<p/>
 *
 * Usage: <p/>
 * <code>
 *   CheckGOE -W classname -- test options
 * </code> <p/>
 *
 * <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -D
 *  Turn on debugging output.</pre>
 *
 * <pre> -S
 *  Silent mode - prints nothing to stdout.</pre>
 *
 * <pre> -ignored &lt;comma-separated list of properties&gt;
 *  Skipped properties.
 *  (default: capabilities,options)</pre>
 *
 * <pre> -W
 *  Full name of the class analysed.
 *  eg: weka.classifiers.rules.ZeroR
 *  (default weka.classifiers.rules.ZeroR)</pre>
 * <!-- options-end -->
 *
 * @author  FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 */
public class CheckGOE extends Check {

  /** the object to test */
  protected Object m_Object = new weka.classifiers.rules.ZeroR();

  /** whether the tests were successful */
  protected boolean m_Success;

  /** properties that are skipped in the checkToolTips method
   * @see #checkToolTips() */
  protected HashSet<String> m_IgnoredProperties = new HashSet<String>();

  /**
   * default constructor; initializes the checker with the default options
   */
  public CheckGOE() {
    super();

    // set default options
    try {
      setOptions(new String[0]);
    } catch (Exception e) {
      e.printStackTrace();
    }
  }

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector<Option> result = new Vector<Option>();

    // inherit -D and -S from the superclass
    Enumeration en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement((Option)en.nextElement());

    result.addElement(new Option(
        "\tSkipped properties.\n"
        + "\t(default: capabilities,options)",
        "ignored", 1, "-ignored <comma-separated list of properties>"));

    result.addElement(new Option(
        "\tFull name of the class analysed.\n"
        +"\teg: weka.classifiers.rules.ZeroR\n"
        + "\t(default weka.classifiers.rules.ZeroR)",
        "W", 1, "-W"));

    return result.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   * <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -D
   *  Turn on debugging output.</pre>
   *
   * <pre> -S
   *  Silent mode - prints nothing to stdout.</pre>
   *
   * <pre> -ignored &lt;comma-separated list of properties&gt;
   *  Skipped properties.
   *  (default: capabilities,options)</pre>
   *
   * <pre> -W
   *  Full name of the class analysed.
   *  eg: weka.classifiers.rules.ZeroR
   *  (default weka.classifiers.rules.ZeroR)</pre>
   * <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String      tmpStr;

    super.setOptions(options);

    // -W: class to analyse (defaults to ZeroR)
    tmpStr = Utils.getOption('W', options);
    if (tmpStr.length() == 0)
      tmpStr = weka.classifiers.rules.ZeroR.class.getName();
    setObject(Utils.forName(Object.class, tmpStr, null));

    // -ignored: properties excluded from the tooltip check
    tmpStr = Utils.getOption("ignored", options);
    if (tmpStr.length() == 0)
      tmpStr = "capabilities,options";
    setIgnoredProperties(tmpStr);
  }

  /**
   * Gets the current settings of the object.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector<String>  result;
    String[]        options;
    int             i;

    result = new Vector<String>();

    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);

    result.add("-ignored");
    result.add(getIgnoredProperties());

    if (getObject() != null) {
      result.add("-W");
      result.add(getObject().getClass().getName());
    }

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Set the object to work on.
   *
   * @param value the object to use.
   */
  public void setObject(Object value) {
    m_Object = value;
  }

  /**
   * Get the object used in the tests.
   *
   * @return the object used in the tests.
   */
  public Object getObject() {
    return m_Object;
  }

  /**
   * Sets the properties to ignore in checkToolTips(). Comma-separated list.
   *
   * @param value the list of properties
   * @see #checkToolTips()
   */
  public void setIgnoredProperties(String value) {
    String[]  props;
    int       i;

    m_IgnoredProperties.clear();
    props = value.split(",");
    for (i = 0; i < props.length; i++)
      m_IgnoredProperties.add(props[i]);
  }

  /**
   * Get the ignored properties used in checkToolTips() as comma-separated
   * list (sorted).
   *
   * @return the ignored properties
   * @see #checkToolTips()
   */
  public String getIgnoredProperties() {
    String          result;
    Vector<String>  list;
    Iterator        iter;
    int             i;

    list = new Vector<String>();
    iter = m_IgnoredProperties.iterator();
    while (iter.hasNext())
      list.add((String) iter.next());

    // sort for a stable, canonical representation
    if (list.size() > 1)
      Collections.sort(list);

    result = "";
    for (i = 0; i < list.size(); i++) {
      if (i > 0)
        result += ",";
      result += list.get(i);
    }

    return result;
  }

  /**
   * returns the success of the tests
   *
   * @return true if the tests were successful
   */
  public boolean getSuccess() {
    return m_Success;
  }

  /**
   * checks whether the object declares a globalInfo method.
   *
   * @return true if the test was passed
   */
  public boolean checkGlobalInfo() {
    boolean     result;
    Class<?>    cls;

    print("Global info...");

    result = true;
    cls    = getObject().getClass();

    // test for globalInfo method; getMethod throws if it does not exist
    try {
      cls.getMethod("globalInfo", (Class[]) null);
    } catch (Exception e) {
      result = false;
    }

    if (result)
      println("yes");
    else
      println("no");

    return result;
  }

  /**
   * checks whether the object declares a tip text method for all its
   * properties.
   *
   * @return true if the test was passed
   */
  public boolean checkToolTips() {
    boolean                     result;
    Class<?>                    cls;
    BeanInfo                    info;
    PropertyDescriptor[]        desc;
    int                         i;
    Vector<String>              missing;
    String                      suffix;

    print("Tool tips...");

    result = true;
    suffix = "TipText";
    cls    = getObject().getClass();

    // get bean properties via introspection (stopping at Object)
    try {
      info = Introspector.getBeanInfo(cls, Object.class);
      desc = info.getPropertyDescriptors();
    } catch (Exception e) {
      e.printStackTrace();
      desc = null;
    }

    // test for TipText methods
    if (desc != null) {
      missing = new Vector<String>();

      for (i = 0; i < desc.length; i++) {
        // skip property?
        if (m_IgnoredProperties.contains(desc[i].getName()))
          continue;
        // only fully editable properties (getter AND setter) need a tooltip
        if ((desc[i].getReadMethod() == null) || (desc[i].getWriteMethod() == null))
          continue;

        try {
          cls.getMethod(desc[i].getName() + suffix, (Class[]) null);
        } catch (Exception e) {
          result = false;
          missing.add(desc[i].getName() + suffix);
        }
      }

      if (result)
        println("yes");
      else
        println("no (missing: " + missing + ")");
    } else {
      // introspection failed, result is inconclusive
      println("maybe");
    }

    return result;
  }

  /**
   * Runs some diagnostic tests on the object. Output is
   * printed to System.out (if not silent).
   */
  public void doTests() {
    println("Object: " + m_Object.getClass().getName() + "\n");

    println("--> Tests");

    m_Success = checkGlobalInfo();

    if (m_Success)
      m_Success = checkToolTips();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Main method for using the CheckGOE; exits with 0 on success, 1 otherwise.
   *
   * @param args the options to the CheckGOE
   */
  public static void main(String[] args) {
    CheckGOE check = new CheckGOE();
    runCheck(check, args);
    if (check.getSuccess())
      System.exit(0);
    else
      System.exit(1);
  }
}
9,254
22.489848
77
java
tsml-java
tsml-java-master/src/main/java/weka/core/CheckOptionHandler.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * CheckOptionHandler.java
 * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.core;

import java.util.Enumeration;
import java.util.Vector;

/**
 * Simple command line checking of classes that implement OptionHandler:
 * verifies that options can be listed, set, round-tripped (set/get/set) and
 * reset to defaults without loss.<p/>
 *
 * Usage: <p/>
 * <code>
 *   CheckOptionHandler -W optionHandlerClassName -- test options
 * </code> <p/>
 *
 * <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -D
 *  Turn on debugging output.</pre>
 *
 * <pre> -S
 *  Silent mode - prints nothing to stdout.</pre>
 *
 * <pre> -W
 *  Full name of the OptionHandler analysed.
 *  eg: weka.classifiers.rules.ZeroR
 *  (default weka.classifiers.rules.ZeroR)</pre>
 *
 * <pre>
 * Options specific to option handler weka.classifiers.rules.ZeroR:
 * </pre>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 * <!-- options-end -->
 *
 * Options after -- are used as user options in testing the
 * OptionHandler
 *
 * @author Len Trigg (trigg@cs.waikato.ac.nz)
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 */
public class CheckOptionHandler extends Check {

  /** the optionhandler to test */
  protected OptionHandler m_OptionHandler = new weka.classifiers.rules.ZeroR();

  /** the user-supplied options */
  protected String[] m_UserOptions = new String[0];

  /** whether the tests were successful */
  protected boolean m_Success;

  /**
   * Returns an enumeration describing the available options.
   *
   * @return an enumeration of all the available options.
   */
  public Enumeration listOptions() {
    Vector<Option> result = new Vector<Option>();

    // inherit -D and -S from the superclass
    Enumeration en = super.listOptions();
    while (en.hasMoreElements())
      result.addElement((Option)en.nextElement());

    result.addElement(new Option(
        "\tFull name of the OptionHandler analysed.\n"
        +"\teg: weka.classifiers.rules.ZeroR\n"
        + "\t(default weka.classifiers.rules.ZeroR)",
        "W", 1, "-W"));

    // also expose the options of the analysed handler itself
    if (m_OptionHandler != null) {
      result.addElement(new Option(
          "", "", 0,
          "\nOptions specific to option handler "
          + m_OptionHandler.getClass().getName() + ":"));

      Enumeration enm = m_OptionHandler.listOptions();
      while (enm.hasMoreElements())
        result.addElement((Option)enm.nextElement());
    }

    return result.elements();
  }

  /**
   * Parses a given list of options. <p/>
   *
   * <!-- options-start -->
   * Valid options are: <p/>
   *
   * <pre> -D
   *  Turn on debugging output.</pre>
   *
   * <pre> -S
   *  Silent mode - prints nothing to stdout.</pre>
   *
   * <pre> -W
   *  Full name of the OptionHandler analysed.
   *  eg: weka.classifiers.rules.ZeroR
   *  (default weka.classifiers.rules.ZeroR)</pre>
   *
   * <pre>
   * Options specific to option handler weka.classifiers.rules.ZeroR:
   * </pre>
   *
   * <pre> -D
   *  If set, classifier is run in debug mode and
   *  may output additional info to the console</pre>
   * <!-- options-end -->
   *
   * @param options the list of options as an array of strings
   * @throws Exception if an option is not supported
   */
  public void setOptions(String[] options) throws Exception {
    String      tmpStr;

    super.setOptions(options);

    tmpStr = Utils.getOption('W', options);
    if (tmpStr.length() == 0)
      tmpStr = weka.classifiers.rules.ZeroR.class.getName();

    // everything after "--" is kept as user options for the handler
    setUserOptions(Utils.partitionOptions(options));
    setOptionHandler(
        (OptionHandler) Utils.forName(
            OptionHandler.class, tmpStr, null));
  }

  /**
   * Gets the current settings of the CheckOptionHandler.
   *
   * @return an array of strings suitable for passing to setOptions
   */
  public String[] getOptions() {
    Vector<String>  result;
    String[]        options;
    int             i;

    result = new Vector<String>();

    options = super.getOptions();
    for (i = 0; i < options.length; i++)
      result.add(options[i]);

    if (getOptionHandler() != null) {
      result.add("-W");
      result.add(getOptionHandler().getClass().getName());
    }

    // append the handler's own options after the "--" separator
    if (m_OptionHandler != null) {
      options = m_OptionHandler.getOptions();
      result.add("--");
      for (i = 0; i < options.length; i++)
        result.add(options[i]);
    }

    return (String[]) result.toArray(new String[result.size()]);
  }

  /**
   * Set the OptionHandler to work on.
   *
   * @param value the OptionHandler to use.
   */
  public void setOptionHandler(OptionHandler value) {
    m_OptionHandler = value;
  }

  /**
   * Get the OptionHandler used in the tests.
   *
   * @return the OptionHandler used in the tests.
   */
  public OptionHandler getOptionHandler() {
    return m_OptionHandler;
  }

  /**
   * Sets the user-supplied options (creates a copy)
   *
   * @param value the user-supplied options to use
   */
  public void setUserOptions(String[] value) {
    m_UserOptions = getCopy(value);
  }

  /**
   * Gets the current user-supplied options (creates a copy)
   *
   * @return the user-supplied options
   */
  public String[] getUserOptions() {
    return getCopy(m_UserOptions);
  }

  /**
   * returns the success of the tests
   *
   * @return true if the tests were successful
   */
  public boolean getSuccess() {
    return m_Success;
  }

  /**
   * Prints the given options to a string.
   *
   * @param options the options to be joined
   * @return the options as one long string
   */
  protected String printOptions(String[] options) {
    if (options == null) {
      return("<null>");
    } else {
      return Utils.joinOptions(options);
    }
  }

  /**
   * Compares the two given sets of options, element by element.
   *
   * @param options1 the first set of options
   * @param options2 the second set of options
   * @throws Exception if the two sets of options differ
   */
  protected void compareOptions(String[] options1, String[] options2)
    throws Exception {

    if (options1 == null) {
      throw new Exception("first set of options is null!");
    }

    if (options2 == null) {
      throw new Exception("second set of options is null!");
    }

    if (options1.length != options2.length) {
      throw new Exception("problem found!\n"
          + "First set: " + printOptions(options1) + '\n'
          + "Second set: " + printOptions(options2) + '\n'
          + "options differ in length");
    }

    for (int i = 0; i < options1.length; i++) {
      if (!options1[i].equals(options2[i])) {
        throw new Exception("problem found!\n"
            + "\tFirst set: " + printOptions(options1) + '\n'
            + "\tSecond set: " + printOptions(options2) + '\n'
            + '\t' + options1[i] + " != " + options2[i]);
      }
    }
  }

  /**
   * creates a copy of the given options
   *
   * @param options the options to copy
   * @return the copy
   */
  protected String[] getCopy(String[] options) {
    String[]  result;

    result = new String[options.length];
    System.arraycopy(options, 0, result, 0, options.length);

    return result;
  }

  /**
   * returns a new instance of the OptionHandler's class, or null if
   * instantiation failed
   *
   * @return a new instance
   */
  protected OptionHandler getDefaultHandler() {
    OptionHandler  result;

    try {
      result = (OptionHandler) m_OptionHandler.getClass().newInstance();
    } catch (Exception e) {
      e.printStackTrace();
      result = null;
    }

    return result;
  }

  /**
   * returns the default options the default OptionHandler will return
   *
   * @return the default options
   */
  protected String[] getDefaultOptions() {
    String[]        result;
    OptionHandler   o;

    o = getDefaultHandler();
    if (o == null) {
      println("WARNING: couldn't create default handler, cannot use default options!");
      result = new String[0];
    } else {
      result = o.getOptions();
    }

    return result;
  }

  /**
   * checks whether the listOptions method works
   *
   * @return true if the test was passed
   */
  public boolean checkListOptions() {
    boolean  result;

    print("ListOptions...");

    try {
      Enumeration enu = getOptionHandler().listOptions();
      if (getDebug() && enu.hasMoreElements())
        println("");
      while (enu.hasMoreElements()) {
        Option option = (Option) enu.nextElement();
        if (getDebug()) {
          println(option.synopsis());
          println(option.description());
        }
      }

      println("yes");
      result = true;
    } catch (Exception e) {
      println("no");
      result = false;

      if (getDebug())
        println(e);
    }

    return result;
  }

  /**
   * checks whether the user-supplied options can be processed at all
   *
   * @return true if the test was passed
   */
  public boolean checkSetOptions() {
    boolean  result;

    print("SetOptions...");

    try {
      getDefaultHandler().setOptions(getUserOptions());
      println("yes");
      result = true;
    } catch (Exception e) {
      println("no");
      result = false;

      if (getDebug())
        println(e);
    }

    return result;
  }

  /**
   * checks whether the default options can be processed completely
   * or some invalid options are returned by the getOptions() method.
   *
   * @return true if the test was passed
   */
  public boolean checkDefaultOptions() {
    boolean   result;
    String[]  options;

    print("Default options...");

    options = getDefaultOptions();

    try {
      getDefaultHandler().setOptions(options);
      // any options still left unconsumed are invalid
      Utils.checkForRemainingOptions(options);
      println("yes");
      result = true;
    } catch (Exception e) {
      println("no");
      result = false;

      if (getDebug())
        println(e);
    }

    return result;
  }

  /**
   * checks whether the user-supplied options can be processed completely
   * or some "left-over" options remain
   *
   * @return true if the test was passed
   */
  public boolean checkRemainingOptions() {
    boolean   result;
    String[]  options;

    print("Remaining options...");

    options = getUserOptions();

    try {
      getDefaultHandler().setOptions(options);
      if (getDebug())
        println("\n  remaining: " + printOptions(options));
      println("yes");
      result = true;
    } catch (Exception e) {
      println("no");
      result = false;

      if (getDebug())
        println(e);
    }

    return result;
  }

  /**
   * checks whether the user-supplied options stay the same after setting,
   * getting and re-setting again
   *
   * @return true if the test was passed
   */
  public boolean checkCanonicalUserOptions() {
    boolean        result;
    OptionHandler  handler;
    String[]       userOptions;
    String[]       userOptionsCheck;

    print("Canonical user options...");

    try {
      handler = getDefaultHandler();
      handler.setOptions(getUserOptions());
      if (getDebug())
        print("\n  Getting canonical user options: ");
      userOptions = handler.getOptions();
      if (getDebug())
        println(printOptions(userOptions));

      if (getDebug())
        println("  Setting canonical user options");
      handler.setOptions((String[])userOptions.clone());
      if (getDebug())
        println("  Checking canonical user options");
      userOptionsCheck = handler.getOptions();
      // the round-trip must be lossless
      compareOptions(userOptions, userOptionsCheck);

      println("yes");
      result = true;
    } catch (Exception e) {
      println("no");
      result = false;

      if (getDebug())
        println(e);
    }

    return result;
  }

  /**
   * checks whether the optionhandler can be reset again to default
   * options after the user-supplied options have been set.
   *
   * @return true if the test was passed
   */
  public boolean checkResettingOptions() {
    boolean        result;
    String[]       defaultOptions;
    String[]       defaultOptionsCheck;
    OptionHandler  handler;

    print("Resetting options...");

    try {
      if (getDebug())
        println("\n  Setting user options");
      handler = getDefaultHandler();
      handler.setOptions(getUserOptions());
      defaultOptions = getDefaultOptions();
      if (getDebug())
        println("  Resetting to default options");
      handler.setOptions(getCopy(defaultOptions));
      if (getDebug())
        println("  Checking default options match previous default");
      defaultOptionsCheck = handler.getOptions();
      compareOptions(defaultOptions, defaultOptionsCheck);

      println("yes");
      result = true;
    } catch (Exception e) {
      println("no");
      result = false;

      if (getDebug())
        println(e);
    }

    return result;
  }

  /**
   * Runs some diagnostic tests on an optionhandler object. Output is
   * printed to System.out (if not silent). Tests are chained: a failing
   * test aborts the remaining ones.
   */
  public void doTests() {
    println("OptionHandler: " + m_OptionHandler.getClass().getName() + "\n");

    if (getDebug()) {
      println("--> Info");
      print("Default options: ");
      println(printOptions(getDefaultOptions()));
      print("User options: ");
      println(printOptions(getUserOptions()));
    }

    println("--> Tests");
    m_Success = checkListOptions();

    if (m_Success)
      m_Success = checkSetOptions();

    if (m_Success)
      m_Success = checkDefaultOptions();

    if (m_Success)
      m_Success = checkRemainingOptions();

    if (m_Success)
      m_Success = checkCanonicalUserOptions();

    if (m_Success)
      m_Success = checkResettingOptions();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Main method for using the CheckOptionHandler; exits with 0 on success,
   * 1 otherwise.
   *
   * @param args the options to the CheckOptionHandler
   */
  public static void main(String[] args) {
    CheckOptionHandler check = new CheckOptionHandler();
    runCheck(check, args);
    if (check.getSuccess())
      System.exit(0);
    else
      System.exit(1);
  }
}
14,914
23.858333
87
java
tsml-java
tsml-java-master/src/main/java/weka/core/CheckScheme.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * CheckScheme.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand * */ package weka.core; import java.util.Enumeration; import java.util.Random; import java.util.StringTokenizer; import java.util.Vector; /** * Abstract general class for testing schemes in Weka. Derived classes are * also used for JUnit tests. * * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision: 8034 $ * @see TestInstances */ public abstract class CheckScheme extends Check { /** a class for postprocessing the test-data */ public static class PostProcessor implements RevisionHandler { /** * Provides a hook for derived classes to further modify the data. Currently, * the data is just passed through. * * @param data the data to process * @return the processed data */ public Instances process(Instances data) { return data; } /** * Returns the revision string. 
* * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } /** The number of instances in the datasets */ protected int m_NumInstances = 20; /** the number of nominal attributes */ protected int m_NumNominal = 2; /** the number of numeric attributes */ protected int m_NumNumeric = 1; /** the number of string attributes */ protected int m_NumString = 1; /** the number of date attributes */ protected int m_NumDate = 1; /** the number of relational attributes */ protected int m_NumRelational = 1; /** the number of instances in relational attributes (applies also for bags * in multi-instance) */ protected int m_NumInstancesRelational = 10; /** for generating String attributes/classes */ protected String[] m_Words = TestInstances.DEFAULT_WORDS; /** for generating String attributes/classes */ protected String m_WordSeparators = TestInstances.DEFAULT_SEPARATORS; /** for post-processing the data even further */ protected PostProcessor m_PostProcessor = null; /** whether classpath problems occurred */ protected boolean m_ClasspathProblems = false; /** * Returns an enumeration describing the available options. * * @return an enumeration of all the available options. 
*/ public Enumeration listOptions() { Vector<Option> result = new Vector<Option>(); Enumeration en = super.listOptions(); while (en.hasMoreElements()) result.addElement((Option)en.nextElement()); result.addElement(new Option( "\tThe number of instances in the datasets (default 20).", "N", 1, "-N <num>")); result.addElement(new Option( "\tThe number of nominal attributes (default 2).", "nominal", 1, "-nominal <num>")); result.addElement(new Option( "\tThe number of values for nominal attributes (default 1).", "nominal-values", 1, "-nominal-values <num>")); result.addElement(new Option( "\tThe number of numeric attributes (default 1).", "numeric", 1, "-numeric <num>")); result.addElement(new Option( "\tThe number of string attributes (default 1).", "string", 1, "-string <num>")); result.addElement(new Option( "\tThe number of date attributes (default 1).", "date", 1, "-date <num>")); result.addElement(new Option( "\tThe number of relational attributes (default 1).", "relational", 1, "-relational <num>")); result.addElement(new Option( "\tThe number of instances in relational/bag attributes (default 10).", "num-instances-relational", 1, "-num-instances-relational <num>")); result.addElement(new Option( "\tThe words to use in string attributes.", "words", 1, "-words <comma-separated-list>")); result.addElement(new Option( "\tThe word separators to use in string attributes.", "word-separators", 1, "-word-separators <chars>")); return result.elements(); } /** * Parses a given list of options. 
* * @param options the list of options as an array of strings * @throws Exception if an option is not supported */ public void setOptions(String[] options) throws Exception { String tmpStr; super.setOptions(options); tmpStr = Utils.getOption('N', options); if (tmpStr.length() != 0) setNumInstances(Integer.parseInt(tmpStr)); else setNumInstances(20); tmpStr = Utils.getOption("nominal", options); if (tmpStr.length() != 0) setNumNominal(Integer.parseInt(tmpStr)); else setNumNominal(2); tmpStr = Utils.getOption("numeric", options); if (tmpStr.length() != 0) setNumNumeric(Integer.parseInt(tmpStr)); else setNumNumeric(1); tmpStr = Utils.getOption("string", options); if (tmpStr.length() != 0) setNumString(Integer.parseInt(tmpStr)); else setNumString(1); tmpStr = Utils.getOption("date", options); if (tmpStr.length() != 0) setNumDate(Integer.parseInt(tmpStr)); else setNumDate(1); tmpStr = Utils.getOption("relational", options); if (tmpStr.length() != 0) setNumRelational(Integer.parseInt(tmpStr)); else setNumRelational(1); tmpStr = Utils.getOption("num-instances-relational", options); if (tmpStr.length() != 0) setNumInstancesRelational(Integer.parseInt(tmpStr)); else setNumInstancesRelational(10); tmpStr = Utils.getOption("words", options); if (tmpStr.length() != 0) setWords(tmpStr); else setWords(new TestInstances().getWords()); if (Utils.getOptionPos("word-separators", options) > -1) { tmpStr = Utils.getOption("word-separators", options); setWordSeparators(tmpStr); } else { setWordSeparators(TestInstances.DEFAULT_SEPARATORS); } } /** * Gets the current settings of the CheckClassifier. 
* * @return an array of strings suitable for passing to setOptions */ public String[] getOptions() { Vector<String> result; String[] options; int i; result = new Vector<String>(); options = super.getOptions(); for (i = 0; i < options.length; i++) result.add(options[i]); result.add("-N"); result.add("" + getNumInstances()); result.add("-nominal"); result.add("" + getNumNominal()); result.add("-numeric"); result.add("" + getNumNumeric()); result.add("-string"); result.add("" + getNumString()); result.add("-date"); result.add("" + getNumDate()); result.add("-relational"); result.add("" + getNumRelational()); result.add("-words"); result.add("" + getWords()); result.add("-word-separators"); result.add("" + getWordSeparators()); return (String[]) result.toArray(new String[result.size()]); } /** * sets the PostProcessor to use * * @param value the new PostProcessor * @see #m_PostProcessor */ public void setPostProcessor(PostProcessor value) { m_PostProcessor = value; } /** * returns the current PostProcessor, can be null * * @return the current PostProcessor */ public PostProcessor getPostProcessor() { return m_PostProcessor; } /** * returns TRUE if the classifier returned a "not in classpath" Exception * * @return true if CLASSPATH problems occurred */ public boolean hasClasspathProblems() { return m_ClasspathProblems; } /** * Begin the tests, reporting results to System.out */ public abstract void doTests(); /** * Sets the number of instances to use in the datasets (some classifiers * might require more instances). * * @param value the number of instances to use */ public void setNumInstances(int value) { m_NumInstances = value; } /** * Gets the current number of instances to use for the datasets. 
* * @return the number of instances */ public int getNumInstances() { return m_NumInstances; } /** * sets the number of nominal attributes * * @param value the number of nominal attributes */ public void setNumNominal(int value) { m_NumNominal = value; } /** * returns the current number of nominal attributes * * @return the number of nominal attributes */ public int getNumNominal() { return m_NumNominal; } /** * sets the number of numeric attributes * * @param value the number of numeric attributes */ public void setNumNumeric(int value) { m_NumNumeric = value; } /** * returns the current number of numeric attributes * * @return the number of numeric attributes */ public int getNumNumeric() { return m_NumNumeric; } /** * sets the number of string attributes * * @param value the number of string attributes */ public void setNumString(int value) { m_NumString = value; } /** * returns the current number of string attributes * * @return the number of string attributes */ public int getNumString() { return m_NumString; } /** * sets the number of data attributes * * @param value the number of date attributes */ public void setNumDate(int value) { m_NumDate = value; } /** * returns the current number of date attributes * * @return the number of date attributes */ public int getNumDate() { return m_NumDate; } /** * sets the number of relational attributes * * @param value the number of relational attributes */ public void setNumRelational(int value) { m_NumRelational = value; } /** * returns the current number of relational attributes * * @return the number of relational attributes */ public int getNumRelational() { return m_NumRelational; } /** * sets the number of instances in relational/bag attributes to produce * * @param value the number of instances */ public void setNumInstancesRelational(int value) { m_NumInstancesRelational = value; } /** * returns the current number of instances in relational/bag attributes to produce * * @return the number of instances */ public 
int getNumInstancesRelational() { return m_NumInstancesRelational; } /** * turns the comma-separated list into an array * * @param value the list to process * @return the list as array */ protected static String[] listToArray(String value) { StringTokenizer tok; Vector<String> list; list = new Vector<String>(); tok = new StringTokenizer(value, ","); while (tok.hasMoreTokens()) list.add(tok.nextToken()); return (String[]) list.toArray(new String[list.size()]); } /** * turns the array into a comma-separated list * * @param value the array to process * @return the array as list */ protected static String arrayToList(String[] value) { String result; int i; result = ""; for (i = 0; i < value.length; i++) { if (i > 0) result += ","; result += value[i]; } return result; } /** * returns a string representation of the attribute type * * @param type the attribute type to get a string rerpresentation for * @return the string representation */ public static String attributeTypeToString(int type) { String result; switch (type) { case Attribute.NUMERIC: result = "numeric"; break; case Attribute.NOMINAL: result = "nominal"; break; case Attribute.STRING: result = "string"; break; case Attribute.DATE: result = "date"; break; case Attribute.RELATIONAL: result = "relational"; break; default: result = "???"; } return result; } /** * Sets the comma-separated list of words to use for generating strings. The * list must contain at least 2 words, otherwise an exception will be thrown. * * @param value the list of words * @throws IllegalArgumentException if not at least 2 words are provided */ public void setWords(String value) { if (listToArray(value).length < 2) throw new IllegalArgumentException("At least 2 words must be provided!"); m_Words = listToArray(value); } /** * returns the words used for assembling strings in a comma-separated list. 
* * @return the words as comma-separated list */ public String getWords() { return arrayToList(m_Words); } /** * sets the word separators (chars) to use for assembling strings. * * @param value the characters to use as separators */ public void setWordSeparators(String value) { m_WordSeparators = value; } /** * returns the word separators (chars) to use for assembling strings. * * @return the current separators */ public String getWordSeparators() { return m_WordSeparators; } /** * Compare two datasets to see if they differ. * * @param data1 one set of instances * @param data2 the other set of instances * @throws Exception if the datasets differ */ protected void compareDatasets(Instances data1, Instances data2) throws Exception { if (!data2.equalHeaders(data1)) { throw new Exception("header has been modified\n" + data2.equalHeadersMsg(data1)); } if (!(data2.numInstances() == data1.numInstances())) { throw new Exception("number of instances has changed"); } for (int i = 0; i < data2.numInstances(); i++) { Instance orig = data1.instance(i); Instance copy = data2.instance(i); for (int j = 0; j < orig.numAttributes(); j++) { if (orig.isMissing(j)) { if (!copy.isMissing(j)) { throw new Exception("instances have changed"); } } else if (orig.value(j) != copy.value(j)) { throw new Exception("instances have changed"); } if (orig.weight() != copy.weight()) { throw new Exception("instance weights have changed"); } } } } /** * Add missing values to a dataset. 
* * @param data the instances to add missing values to * @param level the level of missing values to add (if positive, this * is the probability that a value will be set to missing, if negative * all but one value will be set to missing (not yet implemented)) * @param predictorMissing if true, predictor attributes will be modified * @param classMissing if true, the class attribute will be modified */ protected void addMissing(Instances data, int level, boolean predictorMissing, boolean classMissing) { int classIndex = data.classIndex(); Random random = new Random(1); for (int i = 0; i < data.numInstances(); i++) { Instance current = data.instance(i); for (int j = 0; j < data.numAttributes(); j++) { if (((j == classIndex) && classMissing) || ((j != classIndex) && predictorMissing)) { if (Math.abs(random.nextInt()) % 100 < level) current.setMissing(j); } } } } /** * Provides a hook for derived classes to further modify the data. * * @param data the data to process * @return the processed data * @see #m_PostProcessor */ protected Instances process(Instances data) { if (getPostProcessor() == null) return data; else return getPostProcessor().process(data); } }
16,368
25.529984
87
java
tsml-java
tsml-java-master/src/main/java/weka/core/ClassCache.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * ClassCache.java
 * Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.core;

import java.io.File;
import java.io.FileFilter;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;

/**
 * A singleton that stores all classes on the classpath.
 *
 * @author fracpete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 */
public class ClassCache implements RevisionHandler {

  /**
   * For filtering classes.
   *
   * @author fracpete (fracpete at waikato dot ac dot nz)
   * @version $Revision: 8034 $
   */
  public static class ClassFileFilter implements FileFilter {

    /**
     * Checks whether the file is a class.
     *
     * @param pathname the file to check
     * @return true if a class file
     */
    public boolean accept(File pathname) {
      return pathname.getName().endsWith(".class");
    }
  }

  /**
   * For filtering classes.
   *
   * @author fracpete (fracpete at waikato dot ac dot nz)
   * @version $Revision: 8034 $
   */
  public static class DirectoryFilter implements FileFilter {

    /**
     * Checks whether the file is a directory.
     *
     * @param pathname the file to check
     * @return true if a directory
     */
    public boolean accept(File pathname) {
      return pathname.isDirectory();
    }
  }

  /** whether to output some debug information. */
  public final static boolean VERBOSE = false;

  /** the key for the default package. */
  public final static String DEFAULT_PACKAGE = "DEFAULT";

  /** for caching all classes on the class path (package-name &lt;-&gt; HashSet with classnames). */
  protected Hashtable<String,HashSet<String>> m_Cache;

  static {
    // notify if VERBOSE is still on
    if (VERBOSE)
      System.err.println(ClassCache.class.getName() + ": VERBOSE ON");
  }

  /**
   * Initializes the cache by scanning the classpath.
   */
  public ClassCache() {
    super();
    initialize();
  }

  /**
   * Fixes the classname, turns "/" and "\" into "." and removes ".class".
   *
   * @param classname the classname to process
   * @return the processed classname
   */
  protected String cleanUp(String classname) {
    String result;

    result = classname;

    if (result.indexOf("/") > -1)
      result = result.replace("/", ".");
    if (result.indexOf("\\") > -1)
      result = result.replace("\\", ".");
    if (result.endsWith(".class"))
      result = result.substring(0, result.length() - 6);

    return result;
  }

  /**
   * Extracts the package name from the (clean) classname.
   *
   * @param classname the classname to extract the package from
   * @return the package name, {@link #DEFAULT_PACKAGE} for the default package
   */
  protected String extractPackage(String classname) {
    if (classname.indexOf(".") > -1)
      return classname.substring(0, classname.lastIndexOf("."));
    else
      return DEFAULT_PACKAGE;
  }

  /**
   * Adds the classname to the cache.
   *
   * @param classname the classname, automatically removes ".class" and
   * turns "/" or "\" into "."
   * @return true if adding changed the cache
   */
  public boolean add(String classname) {
    String pkgname;
    HashSet<String> names;

    // classname and package
    classname = cleanUp(classname);
    pkgname = extractPackage(classname);

    // add to cache
    if (!m_Cache.containsKey(pkgname))
      m_Cache.put(pkgname, new HashSet<String>());
    names = m_Cache.get(pkgname);
    return names.add(classname);
  }

  /**
   * Removes the classname from the cache.
   *
   * @param classname the classname to remove
   * @return true if the removal changed the cache
   */
  public boolean remove(String classname) {
    String pkgname;
    HashSet<String> names;

    classname = cleanUp(classname);
    pkgname = extractPackage(classname);
    names = m_Cache.get(pkgname);
    if (names != null)
      return names.remove(classname);
    else
      return false;
  }

  /**
   * Fills the class cache with classes in the specified directory.
   *
   * @param prefix the package prefix so far, null for default package
   * @param dir the directory to search
   */
  protected void initFromDir(String prefix, File dir) {
    File[] files;

    // check classes
    // listFiles can return null (e.g. I/O error or insufficient permissions),
    // so guard against NPE and simply skip such directories
    files = dir.listFiles(new ClassFileFilter());
    if (files != null) {
      for (File file: files) {
        if (prefix == null)
          add(file.getName());
        else
          add(prefix + "." + file.getName());
      }
    }

    // descend in directories
    files = dir.listFiles(new DirectoryFilter());
    if (files != null) {
      for (File file: files) {
        if (prefix == null)
          initFromDir(file.getName(), file);
        else
          initFromDir(prefix + "." + file.getName(), file);
      }
    }
  }

  /**
   * Fills the class cache with classes in the specified directory.
   *
   * @param dir the directory to search
   */
  protected void initFromDir(File dir) {
    if (VERBOSE)
      System.out.println("Analyzing directory: " + dir);
    initFromDir(null, dir);
  }

  /**
   * Fills the class cache with classes from the specified jar.
   *
   * @param file the jar to inspect
   */
  protected void initFromJar(File file) {
    JarFile jar;
    JarEntry entry;
    Enumeration<JarEntry> enm;

    if (VERBOSE)
      System.out.println("Analyzing jar: " + file);

    if (!file.exists()) {
      System.out.println("Jar does not exist: " + file);
      return;
    }

    jar = null;
    try {
      jar = new JarFile(file);
      enm = jar.entries();
      while (enm.hasMoreElements()) {
        entry = enm.nextElement();
        if (entry.getName().endsWith(".class"))
          add(entry.getName());
      }
    }
    catch (Exception e) {
      e.printStackTrace();
    }
    finally {
      // close the jar to avoid leaking file handles (one per classpath jar)
      if (jar != null) {
        try {
          jar.close();
        }
        catch (Exception ignored) {
          // best-effort close
        }
      }
    }
  }

  /**
   * Returns all the stored packages.
   *
   * @return the package names
   */
  public Enumeration<String> packages() {
    return m_Cache.keys();
  }

  /**
   * Returns all the classes for the given package.
   *
   * @param pkgname the package to get the classes for
   * @return the classes, an empty set if the package is unknown
   */
  public HashSet<String> getClassnames(String pkgname) {
    if (m_Cache.containsKey(pkgname))
      return m_Cache.get(pkgname);
    else
      return new HashSet<String>();
  }

  /**
   * Initializes the cache by scanning all directories and jars on the
   * classpath of this class' class loader.
   */
  protected void initialize() {
    String part;
    File file;
    URLClassLoader sysLoader;
    URL[] urls;

    m_Cache = new Hashtable<String,HashSet<String>>();

    // NOTE(review): this cast only succeeds when the class loader is a
    // URLClassLoader (pre-Java 9 application class loader) -- confirm the
    // supported JDK range.
    sysLoader = (URLClassLoader) getClass().getClassLoader();
    urls = sysLoader.getURLs();
    for (URL url: urls) {
      // determine the classpath part before logging it, otherwise the
      // debug output would show the value from the previous iteration
      part = url.toString();
      if (VERBOSE)
        System.out.println("Classpath-part: " + part);

      file = null;
      if (part.startsWith("file:")) {
        part = part.replace(" ", "%20");
        try {
          file = new File(new java.net.URI(part));
        }
        catch (URISyntaxException e) {
          e.printStackTrace();
        }
      }
      else {
        file = new File(part);
      }

      if (file == null) {
        System.err.println("Skipping: " + part);
        continue;
      }

      // find classes
      if (file.isDirectory())
        initFromDir(file);
      else if (file.exists())
        initFromJar(file);
    }
  }

  /**
   * Find all classes that have the supplied matchText String in
   * their suffix.
   *
   * @param matchText the text to match
   * @return an array list of matching fully qualified class names.
   */
  public ArrayList<String> find(String matchText) {
    ArrayList<String> result;
    Enumeration<String> packages;
    Iterator<String> names;
    String name;

    result = new ArrayList<String>();

    packages = m_Cache.keys();
    while (packages.hasMoreElements()) {
      names = m_Cache.get(packages.nextElement()).iterator();
      while (names.hasNext()) {
        name = names.next();
        if (name.contains(matchText))
          result.add(name);
      }
    }

    if (result.size() > 1)
      Collections.sort(result);

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * For testing only.
   *
   * @param args ignored
   */
  public static void main(String[] args) {
    ClassCache cache = new ClassCache();
    Enumeration<String> packages = cache.packages();
    while (packages.hasMoreElements()) {
      String key = packages.nextElement();
      System.out.println(key + ": " + cache.getClassnames(key).size());
    }
  }
}
9,294
23.786667
100
java
tsml-java
tsml-java-master/src/main/java/weka/core/ClassDiscovery.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ClassDiscovery.java
 * Copyright (C) 2005-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.core;

import java.io.File;
import java.lang.reflect.Modifier;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.StringTokenizer;
import java.util.Vector;
import java.util.jar.JarFile;

/**
 * This class is used for discovering classes that implement a certain
 * interface or a derived from a certain class.
 *
 * @author FracPete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 * @see StringCompare
 */
public class ClassDiscovery implements RevisionHandler {

  /** whether to output some debug information. */
  public final static boolean VERBOSE = false;

  /** for caching queries (classname+packagename &lt;-&gt; Vector with classnames). */
  protected static Hashtable<String,Vector<String>> m_Cache;

  /** the overall class cache. */
  protected static ClassCache m_ClassCache;

  /** notify if VERBOSE is still on */
  static {
    if (VERBOSE)
      System.err.println(ClassDiscovery.class.getName() + ": VERBOSE ON");
  }

  /**
   * Checks whether the "otherclass" is a subclass of the given "superclass".
   *
   * @param superclass the superclass to check against
   * @param otherclass this class is checked whether it is a subclass
   * of the the superclass
   * @return TRUE if "otherclass" is a true subclass
   */
  public static boolean isSubclass(String superclass, String otherclass) {
    try {
      return isSubclass(Class.forName(superclass), Class.forName(otherclass));
    }
    catch (Exception e) {
      // unresolvable class names simply don't match
      return false;
    }
  }

  /**
   * Checks whether the "otherclass" is a subclass of the given "superclass".
   *
   * @param superclass the superclass to check against
   * @param otherclass this class is checked whether it is a subclass
   * of the the superclass
   * @return TRUE if "otherclass" is a true subclass
   */
  public static boolean isSubclass(Class superclass, Class otherclass) {
    Class currentclass;
    boolean result;

    result = false;
    currentclass = otherclass;
    do {
      result = currentclass.equals(superclass);

      // topmost class reached?
      if (currentclass.equals(Object.class))
        break;

      if (!result)
        currentclass = currentclass.getSuperclass();
    }
    while (!result);

    return result;
  }

  /**
   * Checks whether the given class implements the given interface.
   *
   * @param intf the interface to look for in the given class
   * @param cls the class to check for the interface
   * @return TRUE if the class contains the interface
   */
  public static boolean hasInterface(String intf, String cls) {
    try {
      return hasInterface(Class.forName(intf), Class.forName(cls));
    }
    catch (Exception e) {
      // unresolvable class names simply don't match
      return false;
    }
  }

  /**
   * Checks whether the given class implements the given interface,
   * walking up the superclass chain.
   *
   * @param intf the interface to look for in the given class
   * @param cls the class to check for the interface
   * @return TRUE if the class contains the interface
   */
  public static boolean hasInterface(Class intf, Class cls) {
    Class[] intfs;
    int i;
    boolean result;
    Class currentclass;

    result = false;
    currentclass = cls;
    do {
      // check all the interfaces, this class implements
      intfs = currentclass.getInterfaces();
      for (i = 0; i < intfs.length; i++) {
        if (intfs[i].equals(intf)) {
          result = true;
          break;
        }
      }

      // get parent class
      if (!result) {
        currentclass = currentclass.getSuperclass();

        // topmost class reached or no superclass?
        if ( (currentclass == null) || (currentclass.equals(Object.class)) )
          break;
      }
    }
    while (!result);

    return result;
  }

  /**
   * If the given package can be found in this part of the classpath then
   * an URL object is returned, otherwise <code>null</code>.
   *
   * @param classpathPart the part of the classpath to look for the package
   * @param pkgname the package to look for
   * @return if found, the url as string, otherwise null
   */
  protected static URL getURL(String classpathPart, String pkgname) {
    String urlStr;
    URL result;
    File classpathFile;
    File file;
    JarFile jarfile;
    Enumeration enm;
    String pkgnameTmp;

    result = null;
    urlStr = null;

    try {
      classpathFile = new File(classpathPart);

      // directory or jar?
      if (classpathFile.isDirectory()) {
        // does the package exist in this directory?
        file = new File(classpathPart + pkgname);
        if (file.exists())
          urlStr = "file:" + classpathPart + pkgname;
      }
      else {
        // is package actually included in jar?
        jarfile = new JarFile(classpathPart);
        try {
          enm = jarfile.entries();
          pkgnameTmp = pkgname.substring(1);  // remove the leading "/"
          while (enm.hasMoreElements()) {
            if (enm.nextElement().toString().startsWith(pkgnameTmp)) {
              urlStr = "jar:file:" + classpathPart + "!" + pkgname;
              break;
            }
          }
        }
        finally {
          // always release the jar's file handle
          jarfile.close();
        }
      }
    }
    catch (Exception e) {
      // ignore: unreadable classpath entries just yield no URL
    }

    // try to generate URL from url string
    if (urlStr != null) {
      try {
        result = new URL(urlStr);
      }
      catch (Exception e) {
        System.err.println(
            "Trying to create URL from '" + urlStr
            + "' generates this exception:\n" + e);
        result = null;
      }
    }

    return result;
  }

  /**
   * Checks the given packages for classes that inherited from the given class,
   * in case it's a class, or implement this class, in case it's an interface.
   *
   * @param classname the class/interface to look for
   * @param pkgnames the packages to search in
   * @return a list with all the found classnames
   */
  public static Vector<String> find(String classname, String[] pkgnames) {
    Vector<String> result;
    Class cls;

    result = new Vector<String>();

    try {
      cls = Class.forName(classname);
      result = find(cls, pkgnames);
    }
    catch (Exception e) {
      e.printStackTrace();
    }

    return result;
  }

  /**
   * Checks the given package for classes that inherited from the given class,
   * in case it's a class, or implement this class, in case it's an interface.
   *
   * @param classname the class/interface to look for
   * @param pkgname the package to search in
   * @return a list with all the found classnames
   */
  public static Vector<String> find(String classname, String pkgname) {
    Vector<String> result;
    Class cls;

    result = new Vector<String>();

    try {
      cls = Class.forName(classname);
      result = find(cls, pkgname);
    }
    catch (Exception e) {
      e.printStackTrace();
    }

    return result;
  }

  /**
   * Checks the given packages for classes that inherited from the given class,
   * in case it's a class, or implement this class, in case it's an interface.
   *
   * @param cls the class/interface to look for
   * @param pkgnames the packages to search in
   * @return a list with all the found classnames
   */
  public static Vector<String> find(Class cls, String[] pkgnames) {
    Vector<String> result;
    int i;
    HashSet<String> names;

    result = new Vector<String>();

    // use a set to remove duplicates across packages
    names = new HashSet<String>();
    for (i = 0; i < pkgnames.length; i++)
      names.addAll(find(cls, pkgnames[i]));

    // sort result
    result.addAll(names);
    Collections.sort(result, new StringCompare());

    return result;
  }

  /**
   * Find all classes that have the supplied matchText String in
   * their suffix.
   *
   * @param matchText the text to match
   * @return an array list of matching fully qualified class names.
   */
  public static ArrayList<String> find(String matchText) {
    // make sure the class cache exists; all other entry points call
    // initCache(), this one previously didn't and could NPE
    initCache();
    return m_ClassCache.find(matchText);
  }

  /**
   * Checks the given package for classes that inherited from the given class,
   * in case it's a class, or implement this class, in case it's an interface.
   *
   * @param cls the class/interface to look for
   * @param pkgname the package to search in
   * @return a list with all the found classnames
   */
  public static Vector<String> find(Class cls, String pkgname) {
    Vector<String> result;
    int i;
    Class clsNew;

    // already cached?
    result = getCache(cls, pkgname);

    if (result == null) {
      if (VERBOSE)
        System.out.println(
            "Searching for '" + cls.getName() + "' in '" + pkgname + "':");

      result = new Vector<String>();
      if (m_ClassCache.getClassnames(pkgname) != null)
        result.addAll(m_ClassCache.getClassnames(pkgname));

      // check classes
      i = 0;
      while (i < result.size()) {
        try {
          clsNew = Class.forName((String) result.get(i));

          // no abstract classes
          if (Modifier.isAbstract(clsNew.getModifiers())) {
            m_ClassCache.remove(result.get(i));
            result.remove(i);
          }
          // must implement interface
          else if ( (cls.isInterface()) && (!hasInterface(cls, clsNew)) ) {
            result.remove(i);
          }
          // must be derived from class
          else if ( (!cls.isInterface()) && (!isSubclass(cls, clsNew)) ) {
            result.remove(i);
          }
          else {
            i++;
          }
        }
        catch (Exception e) {
          System.out.println("Accessing class '" + result.get(i) + "' resulted in error:");
          e.printStackTrace();
          // drop the unloadable entry; previously neither the index was
          // advanced nor the entry removed, looping forever on it
          result.remove(i);
        }
      }

      // sort result
      Collections.sort(result, new StringCompare());

      // add to cache
      addCache(cls, pkgname, result);
    }

    return result;
  }

  /**
   * adds all the sub-directories recursively to the list.
   *
   * @param prefix the path prefix
   * @param dir the directory to look in for sub-dirs
   * @param list the current list of sub-dirs
   * @return the new list of sub-dirs
   */
  protected static HashSet<String> getSubDirectories(String prefix, File dir, HashSet<String> list) {
    File[] files;
    int i;
    String newPrefix;

    // add directory to the list
    if (prefix == null)
      newPrefix = "";
    else if (prefix.length() == 0)
      newPrefix = dir.getName();
    else
      newPrefix = prefix + "." + dir.getName();

    if (newPrefix.length() != 0)
      list.add(newPrefix);

    // search for sub-directories
    files = dir.listFiles();
    if (files != null) {
      for (i = 0; i < files.length; i++) {
        if (files[i].isDirectory())
          list = getSubDirectories(newPrefix, files[i], list);
      }
    }

    return list;
  }

  /**
   * Lists all packages it can find in the classpath.
   *
   * @return a list with all the found packages
   */
  public static Vector<String> findPackages() {
    Vector<String> result;
    Enumeration<String> packages;

    initCache();

    result = new Vector<String>();
    packages = m_ClassCache.packages();
    while (packages.hasMoreElements())
      result.add(packages.nextElement());
    Collections.sort(result, new StringCompare());

    return result;
  }

  /**
   * initializes the cache for the classnames.
   */
  protected static void initCache() {
    if (m_Cache == null)
      m_Cache = new Hashtable<String,Vector<String>>();
    if (m_ClassCache == null)
      m_ClassCache = new ClassCache();
  }

  /**
   * adds the list of classnames to the cache.
   *
   * @param cls the class to cache the classnames for
   * @param pkgname the package name the classes were found in
   * @param classnames the list of classnames to cache
   */
  protected static void addCache(Class cls, String pkgname, Vector<String> classnames) {
    initCache();
    m_Cache.put(cls.getName() + "-" + pkgname, classnames);
  }

  /**
   * returns the list of classnames associated with this class and package, if
   * available, otherwise null.
   *
   * @param cls the class to get the classnames for
   * @param pkgname the package name for the classes
   * @return the classnames if found, otherwise null
   */
  protected static Vector<String> getCache(Class cls, String pkgname) {
    initCache();
    return m_Cache.get(cls.getName() + "-" + pkgname);
  }

  /**
   * clears the cache for class/classnames queries.
   */
  public static void clearCache() {
    initCache();
    m_Cache.clear();
  }

  /**
   * Calls clearCache() and resets the cache of classes on the classpath
   * (i.e. forces a rescan of the classpath).
   */
  public static void clearClassCache() {
    clearCache();
    // make sure that any new classes are picked up
    m_ClassCache = new ClassCache();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }

  /**
   * Possible calls:
   * <ul>
   * <li>
   * weka.core.ClassDiscovery &lt;packages&gt;<br/>
   * Prints all the packages in the current classpath
   * </li>
   * <li>
   * weka.core.ClassDiscovery &lt;classname&gt; &lt;packagename(s)&gt;<br/>
   * Prints the classes it found.
   * </li>
   * </ul>
   *
   * @param args the commandline arguments
   */
  public static void main(String[] args) {
    Vector<String> list;
    Vector<String> packages;
    int i;
    StringTokenizer tok;

    if ((args.length == 1) && (args[0].equals("packages"))) {
      list = findPackages();
      for (i = 0; i < list.size(); i++)
        System.out.println(list.get(i));
    }
    else if (args.length == 2) {
      // packages
      packages = new Vector<String>();
      tok = new StringTokenizer(args[1], ",");
      while (tok.hasMoreTokens())
        packages.add(tok.nextToken());

      // search
      list = ClassDiscovery.find(
          args[0],
          (String[]) packages.toArray(new String[packages.size()]));

      // print result, if any
      System.out.println(
          "Searching for '" + args[0] + "' in '" + args[1] + "':\n"
          + "  " + list.size() + " found.");
      for (i = 0; i < list.size(); i++)
        System.out.println("  " + (i+1) + ". " + list.get(i));
    }
    else {
      System.out.println("\nUsage:");
      System.out.println(
          ClassDiscovery.class.getName() + " packages");
      System.out.println("\tlists all packages in the classpath");
      System.out.println(
          ClassDiscovery.class.getName() + " <classname> <packagename(s)>");
      System.out.println("\tlists classes derived from/implementing 'classname' that");
      System.out.println("\tcan be found in 'packagename(s)' (comma-separated list");
      System.out.println();
      System.exit(1);
    }
  }

  /**
   * compares two strings. The following order is used:<br/>
   * <ul>
   * <li>case insensitive</li>
   * <li>german umlauts (&auml; , &ouml; etc.) or other non-ASCII letters
   * are treated as special chars</li>
   * <li>special chars &lt; numbers &lt; letters</li>
   * </ul>
   */
  public static class StringCompare implements Comparator, RevisionHandler {

    /**
     * appends blanks to the string if its shorter than <code>len</code>.
     *
     * @param s the string to pad
     * @param len the minimum length for the string to have
     * @return the padded string
     */
    private String fillUp(String s, int len) {
      while (s.length() < len)
        s += " ";
      return s;
    }

    /**
     * returns the group of the character: 0=special char, 1=number, 2=letter.
     *
     * @param c the character to check
     * @return the group
     */
    private int charGroup(char c) {
      int result;

      result = 0;

      if ( (c >= 'a') && (c <= 'z') )
        result = 2;
      else if ( (c >= '0') && (c <= '9') )
        result = 1;

      return result;
    }

    /**
     * Compares its two arguments for order.
     *
     * @param o1 the first object
     * @param o2 the second object
     * @return -1 if o1&lt;o2, 0 if o1=o2 and 1 if o1&gt;o2
     */
    public int compare(Object o1, Object o2) {
      String s1;
      String s2;
      int i;
      int result;
      int v1;
      int v2;

      result = 0;   // they're equal

      // get lower case string
      s1 = o1.toString().toLowerCase();
      s2 = o2.toString().toLowerCase();

      // pad to the same length so we can compare char by char
      s1 = fillUp(s1, s2.length());
      s2 = fillUp(s2, s1.length());

      for (i = 0; i < s1.length(); i++) {
        // same char?
        if (s1.charAt(i) == s2.charAt(i)) {
          result = 0;
        }
        else {
          v1 = charGroup(s1.charAt(i));
          v2 = charGroup(s2.charAt(i));

          // different type (special, number, letter)?
          if (v1 != v2) {
            if (v1 < v2)
              result = -1;
            else
              result = 1;
          }
          else {
            if (s1.charAt(i) < s2.charAt(i))
              result = -1;
            else
              result = 1;
          }

          break;
        }
      }

      return result;
    }

    /**
     * Indicates whether some other object is "equal to" this Comparator.
     *
     * @param obj the object to compare with this Comparator
     * @return true if the object is a StringCompare object as well
     */
    public boolean equals(Object obj) {
      return (obj instanceof StringCompare);
    }

    /**
     * Returns the revision string.
     *
     * @return the revision
     */
    public String getRevision() {
      return RevisionUtils.extract("$Revision: 8034 $");
    }
  }
}
18,730
26.790801
101
java
tsml-java
tsml-java-master/src/main/java/weka/core/ClassloaderUtil.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * ClassloaderUtil.java
 * Copyright (C) 2008-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.core;

import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLClassLoader;

/**
 * Utility class that can add jar files to the classpath dynamically.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}org
 * @version $Revision: 8034 $
 */
public class ClassloaderUtil implements RevisionHandler {

  /** Parameter types of URLClassLoader.addURL(URL), looked up reflectively. */
  private static final Class[] parameters = new Class[]{URL.class};

  /**
   * Add file to CLASSPATH
   * @param s File name
   * @throws IOException if something goes wrong when adding a file
   */
  public static void addFile(String s) throws IOException {
    File f = new File(s);
    addFile(f);
  }

  /**
   * Add file to CLASSPATH
   * @param f File object
   * @throws IOException if something goes wrong when adding a file
   */
  public static void addFile(File f) throws IOException {
    // File.toURL() is deprecated because it does not escape characters that
    // are illegal in URLs (e.g. spaces); converting via toURI() first yields
    // a properly encoded URL.
    addURL(f.toURI().toURL());
  }

  /**
   * Add URL to CLASSPATH
   * @param u URL
   * @throws IOException if something goes wrong when adding a url
   */
  public static void addURL(URL u) throws IOException {
    ClassloaderUtil clu = new ClassloaderUtil();
    // URLClassLoader sysLoader = (URLClassLoader) ClassLoader.getSystemClassLoader();
    // NOTE(review): this cast and the reflective addURL call below only work
    // when the class loader actually is a URLClassLoader (pre-Java 9
    // application class loader or a custom loader) -- confirm the supported
    // JDK range.
    URLClassLoader sysLoader = (URLClassLoader) clu.getClass().getClassLoader();
    URL urls[] = sysLoader.getURLs();

    // skip if the URL is already on the classpath
    for (int i = 0; i < urls.length; i++) {
      if (urls[i].toString().toLowerCase().equals(u.toString().toLowerCase())) {
        System.err.println("URL " + u + " is already in the CLASSPATH");
        return;
      }
    }
    Class<?> sysclass = URLClassLoader.class;
    try {
      Method method = sysclass.getDeclaredMethod("addURL", parameters);
      method.setAccessible(true);
      method.invoke(sysLoader, new Object[]{u});
    } catch (Throwable t) {
      t.printStackTrace();
      throw new IOException("Error, could not add URL to system classloader");
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}
2,909
29
93
java
tsml-java
tsml-java-master/src/main/java/weka/core/CommandlineRunnable.java
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * CommandlineRunnable.java
 * Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.core;

/**
 * Interface to something that can be run from the command line.
 *
 * @author Mark Hall (mhall{[at]}pentaho{[dot]}com)
 * @version $Revision: 8034 $
 */
public interface CommandlineRunnable {

  /**
   * Execute the supplied object.
   *
   * @param toRun the object to execute
   * @param options any options to pass to the object
   * @throws IllegalArgumentException if the object is not of the expected
   * type.
   */
  void run(Object toRun, String[] options) throws IllegalArgumentException;
}
1,322
30.5
75
java
tsml-java
tsml-java-master/src/main/java/weka/core/ConjugateGradientOptimization.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * ConjugateGradientOptimization.java * Copyright (C) 2012 University of Waikato, Hamilton, New Zealand * */ package weka.core; import weka.core.TechnicalInformation.Field; import weka.core.TechnicalInformation.Type; import java.util.Arrays; /** * This subclass of Optimization.java implements conjugate gradient * descent rather than BFGS updates, by overriding findArgmin(), with * the same tests for convergence, and applies the same line search * code. Note that constraints are NOT actually supported. Using this * class instead of Optimization.java can reduce runtime when there are * many parameters. * * Uses the second hybrid method proposed in "An Efficient Hybrid * Conjugate Gradient Method for Unconstrained Optimization" by Dai * and Yuan (2001). See also information in the * getTechnicalInformation() method. * * @author Eibe Frank * @version $Revision: 8078 $ */ public abstract class ConjugateGradientOptimization extends Optimization implements RevisionHandler { /** * Returns an instance of a TechnicalInformation object, containing * detailed information about the technical background of this class, * e.g., paper reference or book this class is based on. 
* * @return the technical information about this class */ public TechnicalInformation getTechnicalInformation() { TechnicalInformation result; TechnicalInformation additional; result = new TechnicalInformation(Type.ARTICLE); result.setValue(Field.AUTHOR, "Y.H. Dai and Y. Yuan"); result.setValue(Field.YEAR, "2001"); result.setValue(Field.TITLE, "An Efficient Hybrid Conjugate Gradient Method for Unconstrained Optimization"); result.setValue(Field.JOURNAL, "Annals of Operations Research"); result.setValue(Field.VOLUME, "103"); result.setValue(Field.PAGES, "33-47"); additional = result.add(Type.ARTICLE); result.setValue(Field.AUTHOR, "W.W. Hager and H. Zhang"); result.setValue(Field.YEAR, "2006"); result.setValue(Field.TITLE, "A survey of nonlinear conjugate gradient methods"); result.setValue(Field.JOURNAL, "Pacific Journal of Optimization"); result.setValue(Field.VOLUME, "2"); result.setValue(Field.PAGES, "35-58"); return result; } /** * Constructor that sets MAXITS to 2000 by default and the parameter * in the second weak Wolfe condition to 0.1. */ public ConjugateGradientOptimization() { setMaxIteration(2000); m_BETA = 0.1; // To make line search more exact, recommended for non-linear CGD } /** * Main algorithm. NOTE: constraints are not actually supported. * * @param initX initial point of x, assuming no value's on the bound! * @param constraints both arrays must contain Double.NaN * @return the solution of x, null if number of iterations not enough * @throws Exception if an error occurs */ public double[] findArgmin(double[] initX, double[][] constraints) throws Exception{ int l = initX.length; // Initial value of obj. 
function, gradient and inverse of the Hessian m_f = objectiveFunction(initX); if(Double.isNaN(m_f)) { throw new Exception("Objective function value is NaN!"); } // Get gradient at initial point double[] grad = evaluateGradient(initX), oldGrad, oldX, deltaGrad = new double[l], deltaX=new double[l], direct = new double[l], x = new double[l]; // Turn gradient into direction and calculate squared length double sum = 0; for (int i = 0; i < grad.length; i++) { direct[i] = -grad[i]; sum += grad[i] * grad[i]; } // Same as in Optimization.java double stpmax = m_STPMX * Math.max(Math.sqrt(sum), l); boolean[] isFixed = new boolean[initX.length]; DynamicIntArray wsBdsIndx = new DynamicIntArray(initX.length); double[][] consts = new double [2][initX.length]; for (int i = 0; i < initX.length; i++) { if (!Double.isNaN(constraints[0][i]) || (!Double.isNaN(constraints[1][i]))) { throw new Exception("Cannot deal with constraints, sorry."); } consts[0][i] = constraints[0][i]; consts[1][i] = constraints[1][i]; x[i] = initX[i]; } boolean finished = false; for (int step = 0; step < m_MAXITS; step++){ if (m_Debug) { System.err.println("\nIteration # " + step + ":"); } oldX = x; oldGrad = grad; // Make a copy of direction vector because it may get modified in lnsrch double[] directB = Arrays.copyOf(direct, direct.length); // Perform a line search based on new direction m_IsZeroStep = false; x = lnsrch(x, grad, directB, stpmax, isFixed, constraints, wsBdsIndx); if (m_IsZeroStep) { throw new Exception("Exiting due to zero step."); } // Check converge on x boolean finish = false; double test = 0.0; for (int h = 0; h < x.length; h++){ deltaX[h] = x[h] - oldX[h]; double tmp = Math.abs(deltaX[h]) / Math.max(Math.abs(x[h]), 1.0); if(tmp > test) test = tmp; } if (test < m_Zero){ if (m_Debug) { System.err.println("\nDeltaX converged: " + test); } finished = true; break; } // Check zero gradient grad = evaluateGradient(x); test = 0.0; for (int g = 0; g < l; g++){ double tmp = Math.abs(grad[g]) * 
Math.max(Math.abs(directB[g]),1.0) / Math.max(Math.abs(m_f),1.0); if (tmp > test) test = tmp; } if (test < m_Zero){ if (m_Debug) { for (int i = 0; i < l; i++) { System.out.println(grad[i] + " " + directB[i] + " " + m_f); } System.err.println("Gradient converged: " + test); } finished = true; break; } // Calculate multiplier double betaHSNumerator = 0, betaDYNumerator = 0; double betaHSandDYDenominator = 0; for (int i = 0; i < grad.length; i++) { betaDYNumerator += grad[i] * grad[i]; betaHSNumerator += (grad[i] - oldGrad[i]) * grad[i]; betaHSandDYDenominator += (grad[i] - oldGrad[i]) * direct[i]; } double betaHS = betaHSNumerator / betaHSandDYDenominator; double betaDY = betaDYNumerator / betaHSandDYDenominator; if (m_Debug) { System.err.println("Beta HS: " + betaHS); System.err.println("Beta DY: " + betaDY); } for (int i = 0; i < direct.length; i++) { direct[i] = -grad[i] + Math.max(0, Math.min(betaHS, betaDY)) * direct[i]; } } if (finished) { if (m_Debug){ System.err.println("Minimum found."); } m_f = objectiveFunction(x); if(Double.isNaN(m_f)) { throw new Exception("Objective function value is NaN!"); } return x; } if(m_Debug) { System.err.println("Cannot find minimum -- too many iterations!"); } m_X = x; return null; } }
7,800
33.365639
115
java
tsml-java
tsml-java-master/src/main/java/weka/core/ContingencyTables.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    ContingencyTables.java
 *    Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.core;

/**
 * Class implementing some statistical routines for contingency tables.
 * All routines operate on a double[][] where rows/columns are the two
 * categorical variables and each cell holds an observed count.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 8925 $
 */
public class ContingencyTables
  implements RevisionHandler {

  /** The natural logarithm of 2 (used to convert nats to bits) */
  private static double log2 = Math.log(2);

  /**
   * Returns chi-squared probability for a given matrix.
   *
   * @param matrix the contigency table
   * @param yates is Yates' correction to be used?
   * @return the chi-squared probability
   */
  public static double chiSquared(double [][] matrix, boolean yates) {

    // Degrees of freedom: (rows - 1) * (cols - 1)
    int df = (matrix.length - 1) * (matrix[0].length - 1);

    return Statistics.chiSquaredProbability(chiVal(matrix, yates), df);
  }

  /**
   * Computes chi-squared statistic for a contingency table.
   *
   * @param matrix the contigency table
   * @param useYates is Yates' correction to be used?
   * @return the value of the chi-squared statistic
   */
  public static double chiVal(double [][] matrix, boolean useYates) {

    int df, nrows, ncols, row, col;
    double[] rtotal, ctotal;
    double expect = 0, chival = 0, n = 0;
    boolean yates = true;

    nrows = matrix.length;
    ncols = matrix[0].length;
    rtotal = new double [nrows];
    ctotal = new double [ncols];

    // Row totals, column totals and grand total in one pass.
    for (row = 0; row < nrows; row++) {
      for (col = 0; col < ncols; col++) {
        rtotal[row] += matrix[row][col];
        ctotal[col] += matrix[row][col];
        n += matrix[row][col];
      }
    }

    df = (nrows - 1)*(ncols - 1);
    // Yates' correction is only applied for df == 1 (2x2 tables) and only
    // when requested; a degenerate table (df <= 0) contributes nothing.
    if ((df > 1) || (!useYates)) {
      yates = false;
    } else if (df <= 0) {
      return 0;
    }

    chival = 0.0;
    for (row = 0; row < nrows; row++) {
      if (Utils.gr(rtotal[row], 0)) {
        for (col = 0; col < ncols; col++) {
          if (Utils.gr(ctotal[col], 0)) {
            // Expected count under independence.
            expect = (ctotal[col] * rtotal[row]) / n;
            chival += chiCell (matrix[row][col], expect, yates);
          }
        }
      }
    }

    return chival;
  }

  /**
   * Tests if Cochran's criterion is fullfilled for the given
   * contingency table. Rows and columns with all zeros are not considered
   * relevant.
   *
   * @param matrix the contigency table to be tested
   * @return true if contingency table is ok, false if not
   */
  public static boolean cochransCriterion(double[][] matrix) {

    double[] rtotal, ctotal;
    double n = 0, expect, smallfreq = 5;
    int smallcount = 0, nonZeroRows = 0, nonZeroColumns = 0, nrows, ncols,
      row, col;

    nrows = matrix.length;
    ncols = matrix[0].length;

    rtotal = new double [nrows];
    ctotal = new double [ncols];
    for (row = 0; row < nrows; row++) {
      for (col = 0; col < ncols; col++) {
        rtotal[row] += matrix[row][col];
        ctotal[col] += matrix[row][col];
        n += matrix[row][col];
      }
    }

    // Only rows/columns with a non-zero total count towards the criterion.
    for (row = 0; row < nrows; row++) {
      if (Utils.gr(rtotal[row], 0)) {
        nonZeroRows++;
      }
    }
    for (col = 0; col < ncols; col++) {
      if (Utils.gr(ctotal[col], 0)) {
        nonZeroColumns++;
      }
    }

    for (row = 0; row < nrows; row++) {
      if (Utils.gr(rtotal[row], 0)) {
        for (col = 0; col < ncols; col++) {
          if (Utils.gr(ctotal[col], 0)) {
            expect = (ctotal[col] * rtotal[row]) / n;
            if (Utils.sm(expect, smallfreq)) {
              // Any expected count < 1 fails outright; expected counts < 5
              // may make up at most 20% of the (non-empty) cells.
              if (Utils.sm(expect, 1)) {
                return false;
              } else {
                smallcount++;
                if (smallcount > (nonZeroRows * nonZeroColumns) / smallfreq) {
                  return false;
                }
              }
            }
          }
        }
      }
    }

    return true;
  }

  /**
   * Computes Cramer's V for a contingency table.
   *
   * @param matrix the contingency table
   * @return Cramer's V
   */
  public static double CramersV(double [][] matrix) {

    int row, col, nrows,ncols, min;
    double n = 0;

    nrows = matrix.length;
    ncols = matrix[0].length;
    for (row = 0; row < nrows; row++) {
      for (col = 0; col < ncols; col++) {
        n += matrix[row][col];
      }
    }

    // Normalize by the smaller of (rows - 1, cols - 1).
    min = nrows < ncols ? nrows-1 : ncols-1;
    if ((min == 0) || Utils.eq(n, 0))
      return 0;
    return Math.sqrt(chiVal(matrix, false) / (n * (double)min));
  }

  /**
   * Computes the entropy of the given array (in bits).
   *
   * @param array the array (of non-negative counts)
   * @return the entropy
   */
  public static double entropy(double[] array) {

    double returnValue = 0, sum = 0;

    for (int i = 0; i < array.length; i++) {
      returnValue -= lnFunc(array[i]);
      sum += array[i];
    }
    if (Utils.eq(sum, 0)) {
      return 0;
    } else {
      // -(sum p_i ln p_i) rewritten in terms of counts; divide by log2 for bits.
      return (returnValue + lnFunc(sum)) / (sum * log2);
    }
  }

  /**
   * Computes conditional entropy of the rows given
   * the columns.
   *
   * @param matrix the contingency table
   * @return the conditional entropy of the rows given the columns
   */
  public static double entropyConditionedOnColumns(double[][] matrix) {

    double returnValue = 0, sumForColumn, total = 0;

    for (int j = 0; j < matrix[0].length; j++) {
      sumForColumn = 0;
      for (int i = 0; i < matrix.length; i++) {
        returnValue = returnValue + lnFunc(matrix[i][j]);
        sumForColumn += matrix[i][j];
      }
      returnValue = returnValue - lnFunc(sumForColumn);
      total += sumForColumn;
    }
    if (Utils.eq(total, 0)) {
      return 0;
    }
    return -returnValue / (total * log2);
  }

  /**
   * Computes conditional entropy of the columns given
   * the rows.
   *
   * @param matrix the contingency table
   * @return the conditional entropy of the columns given the rows
   */
  public static double entropyConditionedOnRows(double[][] matrix) {

    double returnValue = 0, sumForRow, total = 0;

    for (int i = 0; i < matrix.length; i++) {
      sumForRow = 0;
      for (int j = 0; j < matrix[0].length; j++) {
        returnValue = returnValue + lnFunc(matrix[i][j]);
        sumForRow += matrix[i][j];
      }
      returnValue = returnValue - lnFunc(sumForRow);
      total += sumForRow;
    }
    if (Utils.eq(total, 0)) {
      return 0;
    }
    return -returnValue / (total * log2);
  }

  /**
   * Computes conditional entropy of the columns given the rows
   * of the test matrix with respect to the train matrix. Uses a
   * Laplace prior. Does NOT normalize the entropy.
   *
   * @param train the train matrix
   * @param test the test matrix
   * @param numClasses the number of symbols for Laplace
   * @return the entropy
   */
  public static double entropyConditionedOnRows(double[][] train,
                                                double[][] test,
                                                double numClasses) {

    double returnValue = 0, trainSumForRow, testSumForRow, testSum = 0;

    for (int i = 0; i < test.length; i++) {
      trainSumForRow = 0;
      testSumForRow = 0;
      for (int j = 0; j < test[0].length; j++) {
        // Laplace-smoothed log-likelihood of the test counts under train.
        returnValue -= test[i][j] * Math.log(train[i][j] + 1);
        trainSumForRow += train[i][j];
        testSumForRow += test[i][j];
      }
      // NOTE(review): testSum is OVERWRITTEN here, not accumulated, so the
      // final normalization only uses the last row's test total. This matches
      // the upstream Weka source as-is — confirm whether it is intentional
      // before changing it.
      testSum = testSumForRow;
      returnValue += testSumForRow * Math.log(trainSumForRow +
                                              numClasses);
    }
    return returnValue / (testSum * log2);
  }

  /**
   * Computes the rows' entropy for the given contingency table.
   *
   * @param matrix the contingency table
   * @return the rows' entropy
   */
  public static double entropyOverRows(double[][] matrix) {

    double returnValue = 0, sumForRow, total = 0;

    for (int i = 0; i < matrix.length; i++) {
      sumForRow = 0;
      for (int j = 0; j < matrix[0].length; j++) {
        sumForRow += matrix[i][j];
      }
      returnValue = returnValue - lnFunc(sumForRow);
      total += sumForRow;
    }
    if (Utils.eq(total, 0)) {
      return 0;
    }
    return (returnValue + lnFunc(total)) / (total * log2);
  }

  /**
   * Computes the columns' entropy for the given contingency table.
   *
   * @param matrix the contingency table
   * @return the columns' entropy
   */
  public static double entropyOverColumns(double[][] matrix){

    double returnValue = 0, sumForColumn, total = 0;

    for (int j = 0; j < matrix[0].length; j++){
      sumForColumn = 0;
      for (int i = 0; i < matrix.length; i++) {
        sumForColumn += matrix[i][j];
      }
      returnValue = returnValue - lnFunc(sumForColumn);
      total += sumForColumn;
    }
    if (Utils.eq(total, 0)) {
      return 0;
    }
    return (returnValue + lnFunc(total)) / (total * log2);
  }

  /**
   * Computes gain ratio for contingency table (split on rows).
   * Returns Double.MAX_VALUE if the split entropy is 0.
   *
   * @param matrix the contingency table
   * @return the gain ratio
   */
  public static double gainRatio(double[][] matrix){

    double preSplit = 0, postSplit = 0, splitEnt = 0,
      sumForRow, sumForColumn, total = 0, infoGain;

    // Compute entropy before split (over the class/column distribution).
    for (int i = 0; i < matrix[0].length; i++) {
      sumForColumn = 0;
      for (int j = 0; j < matrix.length; j++)
        sumForColumn += matrix[j][i];
      preSplit += lnFunc(sumForColumn);
      total += sumForColumn;
    }
    preSplit -= lnFunc(total);

    // Compute entropy after split and split entropy.
    for (int i = 0; i < matrix.length; i++) {
      sumForRow = 0;
      for (int j = 0; j < matrix[0].length; j++) {
        postSplit += lnFunc(matrix[i][j]);
        sumForRow += matrix[i][j];
      }
      splitEnt += lnFunc(sumForRow);
    }
    postSplit -= splitEnt;
    splitEnt -= lnFunc(total);

    infoGain = preSplit - postSplit;
    if (Utils.eq(splitEnt, 0))
      return 0;
    return infoGain / splitEnt;
  }

  /**
   * Returns negative base 2 logarithm of multiple hypergeometric
   * probability for a contingency table.
   *
   * @param matrix the contingency table
   * @return the log of the hypergeometric probability of the contingency table
   */
  public static double log2MultipleHypergeometric(double[][] matrix) {

    double sum = 0, sumForRow, sumForColumn, total = 0;

    // Sum of log-factorials of row totals.
    for (int i = 0; i < matrix.length; i++) {
      sumForRow = 0;
      for (int j = 0; j < matrix[i].length; j++) {
        sumForRow += matrix[i][j];
      }
      sum += SpecialFunctions.lnFactorial(sumForRow);
      total += sumForRow;
    }
    // Plus log-factorials of column totals.
    for (int j = 0; j < matrix[0].length; j++) {
      sumForColumn = 0;
      for (int i = 0; i < matrix.length; i++) {
        sumForColumn += matrix [i][j];
      }
      sum += SpecialFunctions.lnFactorial(sumForColumn);
    }
    // Minus log-factorials of individual cells and the grand total.
    for (int i = 0; i < matrix.length; i++) {
      for (int j = 0; j < matrix[i].length; j++) {
        sum -= SpecialFunctions.lnFactorial(matrix[i][j]);
      }
    }
    sum -= SpecialFunctions.lnFactorial(total);

    return -sum / log2;
  }

  /**
   * Reduces a matrix by deleting all zero rows and columns.
   *
   * @param matrix the matrix to be reduced
   * @return the matrix with all zero rows and columns deleted
   */
  public static double[][] reduceMatrix(double[][] matrix) {

    int row, col, currCol, currRow, nrows, ncols,
      nonZeroRows = 0, nonZeroColumns = 0;
    double[] rtotal, ctotal;
    double[][] newMatrix;

    nrows = matrix.length;
    ncols = matrix[0].length;
    rtotal = new double [nrows];
    ctotal = new double [ncols];
    for (row = 0; row < nrows; row++) {
      for (col = 0; col < ncols; col++) {
        rtotal[row] += matrix[row][col];
        ctotal[col] += matrix[row][col];
      }
    }

    // Count the rows/columns that survive.
    for (row = 0; row < nrows; row++) {
      if (Utils.gr(rtotal[row],0)) {
        nonZeroRows++;
      }
    }
    for (col = 0; col < ncols; col++) {
      if (Utils.gr(ctotal[col],0)) {
        nonZeroColumns++;
      }
    }

    // Copy surviving cells into the reduced matrix, preserving order.
    newMatrix = new double[nonZeroRows][nonZeroColumns];
    currRow = 0;
    for (row = 0; row < nrows; row++) {
      if (Utils.gr(rtotal[row],0)) {
        currCol = 0;
        for (col = 0; col < ncols; col++) {
          if (Utils.gr(ctotal[col],0)) {
            newMatrix[currRow][currCol] = matrix[row][col];
            currCol++;
          }
        }
        currRow++;
      }
    }
    return newMatrix;
  }

  /**
   * Calculates the symmetrical uncertainty for base 2.
   *
   * @param matrix the contingency table
   * @return the calculated symmetrical uncertainty
   */
  public static double symmetricalUncertainty(double matrix[][]) {

    double sumForColumn, sumForRow, total = 0, columnEntropy = 0,
      rowEntropy = 0, entropyConditionedOnRows = 0, infoGain = 0;

    // Compute entropy for columns.
    for (int i = 0; i < matrix[0].length; i++) {
      sumForColumn = 0;
      for (int j = 0; j < matrix.length; j++) {
        sumForColumn += matrix[j][i];
      }
      columnEntropy += lnFunc(sumForColumn);
      total += sumForColumn;
    }
    columnEntropy -= lnFunc(total);

    // Compute entropy for rows and conditional entropy.
    for (int i = 0; i < matrix.length; i++) {
      sumForRow = 0;
      for (int j = 0; j < matrix[0].length; j++) {
        sumForRow += matrix[i][j];
        entropyConditionedOnRows += lnFunc(matrix[i][j]);
      }
      rowEntropy += lnFunc(sumForRow);
    }
    entropyConditionedOnRows -= rowEntropy;
    rowEntropy -= lnFunc(total);

    infoGain = columnEntropy - entropyConditionedOnRows;
    if (Utils.eq(columnEntropy, 0) || Utils.eq(rowEntropy, 0))
      return 0;
    // SU = 2 * gain / (H(column) + H(row))
    return 2.0 * (infoGain / (columnEntropy + rowEntropy));
  }

  /**
   * Computes Goodman and Kruskal's tau-value for a contingency table.
   *
   * @param matrix the contingency table
   * @return Goodman and Kruskal's tau-value
   */
  public static double tauVal(double[][] matrix) {

    int nrows, ncols, row, col;
    double [] ctotal;
    double maxcol = 0, max, maxtotal = 0, n = 0;

    nrows = matrix.length;
    ncols = matrix[0].length;
    ctotal = new double [ncols];
    for (row = 0; row < nrows; row++) {
      max = 0;
      for (col = 0; col < ncols; col++) {
        // Track the largest cell in each row and the column totals.
        if (Utils.gr(matrix[row][col], max))
          max = matrix[row][col];
        ctotal[col] += matrix[row][col];
        n += matrix[row][col];
      }
      maxtotal += max;
    }
    if (Utils.eq(n, 0)) {
      return 0;
    }
    maxcol = ctotal[Utils.maxIndex(ctotal)];
    return (maxtotal - maxcol)/(n - maxcol);
  }

  /**
   * Help method for computing entropy: x * ln(x), with 0 for x <= 0.
   */
  private static double lnFunc(double num){

    if (num <= 0) {
      return 0;
    } else {
      return num * Math.log(num);
    }
  }

  /**
   * Computes chi-value for one cell in a contingency table.
   *
   * @param freq the observed frequency in the cell
   * @param expected the expected frequency in the cell
   * @param yates whether to apply Yates' continuity correction
   * @return the chi-value for that cell; 0 if the expected value is
   * too close to zero
   */
  private static double chiCell(double freq, double expected,
                                boolean yates){

    // Cell in empty row and column?
    if (Utils.smOrEq(expected, 0)) {
      return 0;
    }

    // Compute difference between observed and expected value.
    double diff = Math.abs(freq - expected);
    if (yates) {
      // Apply Yates' correction if wanted.
      diff -= 0.5;

      // The difference should never be negative.
      if (diff < 0) {
        diff = 0;
      }
    }

    // Return chi-value for the cell.
    return (diff * diff / expected);
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8925 $");
  }

  /**
   * Main method for testing this class.
   */
  public static void main(String[] ops) {

    double[] firstRow = {10, 5, 20};
    double[] secondRow = {2, 10, 6};
    double[] thirdRow = {5, 10, 10};
    double[][] matrix = new double[3][0];

    matrix[0] = firstRow;
    matrix[1] = secondRow;
    matrix[2] = thirdRow;
    for (int i = 0; i < matrix.length; i++) {
      for (int j = 0; j < matrix[i].length; j++) {
        System.out.print(matrix[i][j] + " ");
      }
      System.out.println();
    }
    System.out.println("Chi-squared probability: " +
                       ContingencyTables.chiSquared(matrix, false));
    System.out.println("Chi-squared value: " +
                       ContingencyTables.chiVal(matrix, false));
    System.out.println("Cochran's criterion fullfilled: " +
                       ContingencyTables.cochransCriterion(matrix));
    System.out.println("Cramer's V: " +
                       ContingencyTables.CramersV(matrix));
    System.out.println("Entropy of first row: " +
                       ContingencyTables.entropy(firstRow));
    System.out.println("Entropy conditioned on columns: " +
                       ContingencyTables.entropyConditionedOnColumns(matrix));
    System.out.println("Entropy conditioned on rows: " +
                       ContingencyTables.entropyConditionedOnRows(matrix));
    System.out.println("Entropy conditioned on rows (with Laplace): " +
                       ContingencyTables.entropyConditionedOnRows(matrix, matrix, 3));
    System.out.println("Entropy of rows: " +
                       ContingencyTables.entropyOverRows(matrix));
    System.out.println("Entropy of columns: " +
                       ContingencyTables.entropyOverColumns(matrix));
    System.out.println("Gain ratio: " +
                       ContingencyTables.gainRatio(matrix));
    System.out.println("Negative log2 of multiple hypergeometric probability: " +
                       ContingencyTables.log2MultipleHypergeometric(matrix));
    System.out.println("Symmetrical uncertainty: " +
                       ContingencyTables.symmetricalUncertainty(matrix));
    System.out.println("Tau value: " +
                       ContingencyTables.tauVal(matrix));
    double[][] newMatrix = new double[3][3];
    newMatrix[0][0] = 1; newMatrix[0][1] = 0; newMatrix[0][2] = 1;
    newMatrix[1][0] = 0; newMatrix[1][1] = 0; newMatrix[1][2] = 0;
    newMatrix[2][0] = 1; newMatrix[2][1] = 0; newMatrix[2][2] = 1;
    System.out.println("Matrix with empty row and column: ");
    for (int i = 0; i < newMatrix.length; i++) {
      for (int j = 0; j < newMatrix[i].length; j++) {
        System.out.print(newMatrix[i][j] + " ");
      }
      System.out.println();
    }
    System.out.println("Reduced matrix: ");
    newMatrix = ContingencyTables.reduceMatrix(newMatrix);
    for (int i = 0; i < newMatrix.length; i++) {
      for (int j = 0; j < newMatrix[i].length; j++) {
        System.out.print(newMatrix[i][j] + " ");
      }
      System.out.println();
    }
  }
}
18,606
27.364329
81
java
tsml-java
tsml-java-master/src/main/java/weka/core/Copyable.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Copyable.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.core; /** * Interface implemented by classes that can produce "shallow" copies * of their objects. (As opposed to clone(), which is supposed to * produce a "deep" copy.) * * @author Eibe Frank (eibe@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public interface Copyable { /** * This method produces a shallow copy of an object. * It does the same as the clone() method in Object, which also produces * a shallow copy. */ Object copy(); }
1,261
29.780488
74
java
tsml-java
tsml-java-master/src/main/java/weka/core/Copyright.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    Copyright.java
 *    Copyright (C) 2007-2012 University of Waikato, Hamilton, New Zealand
 */

package weka.core;

import java.util.Calendar;
import java.util.Properties;

/**
 * A class for providing centralized Copyright information.
 * Values are read once from {@link #PROPERTY_FILE} at class-load time;
 * if that file cannot be read, each getter falls back to a hard-coded
 * default.
 *
 * @author fracpete (fracpete at waikato dot ac dot nz)
 * @version $Revision: 8034 $
 */
public class Copyright {

  /** the copyright file */
  public final static String PROPERTY_FILE = "weka/core/Copyright.props";

  /** Contains the properties */
  protected static Properties PROPERTIES;

  static {
    PROPERTIES = new Properties();

    try {
      // Use the class literal's classloader directly instead of the
      // original's needless "(new Copyright()).getClass()" instantiation.
      PROPERTIES.load(
        Copyright.class.getClassLoader().getResourceAsStream(PROPERTY_FILE));
    } catch (Exception e) {
      // Best-effort: fall back to the defaults built into the getters.
      System.err.println(
          "Could not read configuration file for the copyright "
          + "information - using default.");
    }
  }

  /**
   * returns the start year of the copyright
   *
   * @return the start year
   */
  public static String getFromYear() {
    return PROPERTIES.getProperty("FromYear", "1999");
  }

  /**
   * returns the end year of the copyright (i.e., current year)
   *
   * @return the end/current year
   */
  public static String getToYear() {
    return PROPERTIES.getProperty(
      "ToYear", "" + Calendar.getInstance().get(Calendar.YEAR));
  }

  /**
   * returns the entity owning the copyright
   *
   * @return the owner
   */
  public static String getOwner() {
    return PROPERTIES.getProperty("Owner", "The University of Waikato");
  }

  /**
   * returns the address of the owner
   *
   * @return the address
   */
  public static String getAddress() {
    return PROPERTIES.getProperty("Address", "Hamilton, New Zealand");
  }

  /**
   * returns the URL of the owner
   *
   * @return the URL
   */
  public static String getURL() {
    return PROPERTIES.getProperty("URL", "http://www.cs.waikato.ac.nz/~ml/");
  }

  /**
   * Only for testing
   *
   * @param args ignored
   */
  public static void main(String[] args) {
    System.out.println(PROPERTIES);
  }
}
2,835
25.018349
95
java
tsml-java
tsml-java-master/src/main/java/weka/core/CustomDisplayStringProvider.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * CustomDisplayStringProvider.java * Copyright (C) 2010-2012 University of Waikato, Hamilton, New Zealand */ package weka.core; /** * For classes that do not implement the OptionHandler interface and want to * provide a custom display string in the GenericObjectEditor, which is more * descriptive than the class name. * * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision: 8034 $ */ public interface CustomDisplayStringProvider { /** * Returns the custom display string. * * @return the string */ public String toDisplay(); }
1,263
31.410256
77
java
tsml-java
tsml-java-master/src/main/java/weka/core/Debug.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Debug.java * Copyright (C) 2006-2012 University of Waikato, Hamilton, New Zealand */ package weka.core; import java.io.BufferedWriter; import java.io.FileWriter; import java.io.PrintWriter; import java.io.Serializable; import java.io.StringWriter; import java.lang.management.ManagementFactory; import java.lang.management.ThreadMXBean; import java.text.SimpleDateFormat; import java.util.Date; import java.util.logging.FileHandler; import java.util.logging.Handler; import java.util.logging.Level; import java.util.logging.Logger; import java.util.logging.SimpleFormatter; /** * A helper class for debug output, logging, clocking, etc. 
* * @author fracpete (fracpete at waikato dot ac dot nz) * @version $Revision: 8034 $ */ public class Debug implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = 66171861743328020L; /** the log level All */ public final static Level ALL = Level.ALL; /** the log level Vonfig */ public final static Level CONFIG = Level.CONFIG; /** the log level Fine */ public final static Level FINE = Level.FINE; /** the log level Finer */ public final static Level FINER = Level.FINER; /** the log level Finest */ public final static Level FINEST = Level.FINEST; /** the log level Info */ public final static Level INFO = Level.INFO; /** the log level Off - i.e., no logging */ public final static Level OFF = Level.OFF; /** the log level Severe */ public final static Level SEVERE = Level.SEVERE; /** the log level Warning */ public final static Level WARNING = Level.WARNING; /** whether logging is enabled */ protected boolean m_Enabled = true; /** for logging */ protected Log m_Log; /** for clocking */ protected Clock m_Clock = new Clock(); /** * A little helper class for clocking and outputting times. It measures the * CPU time if possible, otherwise it's just based on the system time. In * case one just wants to measure time (e.g., database queries don't take up * much CPU time, but still might take a long time to finish), then one can * disable the use of CPU time as well. 
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision: 8034 $
* @see ThreadMXBean#isThreadCpuTimeEnabled()
*/
public static class Clock
  implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 4622161807307942201L;

  /** the output format in milli-seconds */
  public final static int FORMAT_MILLISECONDS = 0;

  /** the output format in seconds, with fraction of msecs */
  public final static int FORMAT_SECONDS = 1;

  /** the output format in hours:minutes:seconds, with fraction of msecs */
  public final static int FORMAT_HHMMSS = 2;

  /** the output formats */
  public static final Tag[] TAGS_FORMAT = {
    new Tag(FORMAT_MILLISECONDS, "milli-seconds"),
    new Tag(FORMAT_SECONDS, "seconds"),
    new Tag(FORMAT_HHMMSS, "hh:mm:ss")
  };

  /** the format of the output */
  public int m_OutputFormat = FORMAT_SECONDS;

  /** the start time */
  protected long m_Start;

  /** the end time */
  protected long m_Stop;

  /** whether the time is still clocked */
  protected boolean m_Running;

  /** the thread ID */
  protected long m_ThreadID;

  /** whether the system can measure the CPU time */
  protected boolean m_CanMeasureCpuTime;

  /** whether to use the CPU time (by default TRUE) */
  protected boolean m_UseCpuTime;

  /** the thread monitor, if the system can measure the CPU time */
  protected transient ThreadMXBean m_ThreadMonitor;

  /**
   * automatically starts the clock with FORMAT_SECONDS format and CPU
   * time if available
   *
   * @see #m_OutputFormat
   */
  public Clock() {
    this(true);
  }

  /**
   * automatically starts the clock with the given output format and CPU
   * time if available
   *
   * @param format the output format
   * @see #m_OutputFormat
   */
  public Clock(int format) {
    this(true, format);
  }

  /**
   * starts the clock depending on <code>start</code> immediately with the
   * FORMAT_SECONDS output format and CPU time if available
   *
   * @param start whether to start the clock immediately
   * @see #m_OutputFormat
   */
  public Clock(boolean start) {
    this(start, FORMAT_SECONDS);
  }

  /**
   * starts the clock depending on <code>start</code> immediately, using
   * CPU time if available
   * NOTE(review): all other constructors delegate to this one.
   *
   * @param start whether to start the clock immediately
   * @param format the format
   * @see #m_OutputFormat
   */
  public Clock(boolean start, int format) {
    m_Running = false;
    m_Start = 0;
    m_Stop = 0;
    m_UseCpuTime = true;
    setOutputFormat(format);

    if (start)
      start();
  }

  /**
   * initializes the clocking, ensure to get the correct thread ID.
   */
  protected void init() {
    m_ThreadMonitor = null;
    m_ThreadMonitor = getThreadMonitor();

    // can we measure cpu time?
    m_CanMeasureCpuTime = m_ThreadMonitor.isThreadCpuTimeSupported();
  }

  /**
   * whether the measurement is based on the msecs returned from the System
   * class or on the more accurate CPU time. Also depends on whether the
   * usage of the CPU time was disabled or enabled.
   *
   * @return true if the more accurate CPU time of the thread is
   * used and the use of CPU time hasn't been disabled
   * @see System#currentTimeMillis()
   * @see ThreadMXBean#isThreadCpuTimeEnabled()
   * @see #getUseCpuTime()
   */
  public boolean isCpuTime() {
    return m_UseCpuTime && m_CanMeasureCpuTime;
  }

  /**
   * enables/disables the use of CPU time (if measurement of CPU time is
   * available). The actual use of CPU time still depends on whether the
   * system supports it. Resets the current timer, if running.
   *
   * @param value if true the CPU time is used (if possible)
   */
  public void setUseCpuTime(boolean value) {
    m_UseCpuTime = value;

    // we have to re-initialize the start time, otherwise we get bogus
    // results
    if (m_Running) {
      stop();
      start();
    }
  }

  /**
   * returns whether the use of CPU is time is enabled/disabled (regardless
   * whether the system supports it or not)
   *
   * @return true the CPU time is used (if possible)
   */
  public boolean getUseCpuTime() {
    return m_UseCpuTime;
  }

  /**
   * Returns a new thread monitor if the current one is null (e.g., due to
   * serialization) or the currently set one. The thread ID is also updated
   * if necessary.
   * NOTE(review): m_ThreadMonitor is transient, so this lazy re-creation is
   * what keeps a deserialized Clock usable; it also re-binds m_ThreadID to
   * the calling thread.
   *
   * @return the thread monitor to use
   */
  protected ThreadMXBean getThreadMonitor() {
    if (m_ThreadMonitor == null) {
      m_ThreadMonitor = ManagementFactory.getThreadMXBean();
      if (m_CanMeasureCpuTime && !m_ThreadMonitor.isThreadCpuTimeEnabled())
        m_ThreadMonitor.setThreadCpuTimeEnabled(true);
      m_ThreadID = Thread.currentThread().getId();
    }

    return m_ThreadMonitor;
  }

  /**
   * returns the current time in msec
   * NOTE(review): in CPU-time mode this reads getThreadUserTime (user-mode
   * time only, reported in nanoseconds, hence the division by 1000000) --
   * confirm that user time rather than total CPU time is intended here.
   *
   * @return the current time
   */
  protected long getCurrentTime() {
    long result;

    if (isCpuTime())
      result = getThreadMonitor().getThreadUserTime(m_ThreadID) / 1000000;
    else
      result = System.currentTimeMillis();

    return result;
  }

  /**
   * saves the current system time (or CPU time) in msec as start time
   *
   * @see #m_Start
   */
  public void start() {
    // make sure that we get the right thread ID!
    init();

    m_Start = getCurrentTime();
    m_Stop = m_Start;
    m_Running = true;
  }

  /**
   * saves the current system (or CPU time) in msec as stop time
   *
   * @see #m_Stop
   */
  public void stop() {
    m_Stop = getCurrentTime();
    m_Running = false;
  }

  /**
   * returns the start time
   *
   * @return the start time
   */
  public long getStart() {
    return m_Start;
  }

  /**
   * returns the stop time or, if still running, the current time
   *
   * @return the stop time
   */
  public long getStop() {
    long result;

    if (isRunning())
      result = getCurrentTime();
    else
      result = m_Stop;

    return result;
  }

  /**
   * whether the time is still being clocked
   *
   * @return true if the time is still being clocked
   */
  public boolean isRunning() {
    return m_Running;
  }

  /**
   * sets the format of the output
   * NOTE(review): an unrecognized value is only reported on stdout and
   * leaves the current format unchanged.
   *
   * @param value the format of the output
   * @see #m_OutputFormat
   */
  public void setOutputFormat(int value) {
    if (value == FORMAT_MILLISECONDS)
      m_OutputFormat = value;
    else if (value == FORMAT_SECONDS)
      m_OutputFormat = value;
    else if (value == FORMAT_HHMMSS)
      m_OutputFormat = value;
    else
      System.out.println("Format '" + value + "' is not recognized!");
  }

  /**
   * returns the output format
   *
   * @return the output format
   * @see #m_OutputFormat
   */
  public int getOutputFormat() {
    return m_OutputFormat;
  }

  /**
   * returns the elapsed time, getStop() - getStart(), as string
   *
   * @return the elapsed time as string
   * @see #getStart()
   * @see #getStop()
   */
  public String toString() {
    String result;
    long elapsed;
    long hours;
    long mins;
    long secs;
    long msecs;

    result = "";
    elapsed = getStop() - getStart();

    switch (getOutputFormat()) {
      case FORMAT_HHMMSS:
        hours = elapsed / (3600 * 1000);
        elapsed = elapsed % (3600 * 1000);
        mins = elapsed / (60 * 1000);
        elapsed = elapsed % (60 * 1000);
        secs = elapsed / 1000;
        msecs = elapsed % 1000;

        if (hours > 0)
          result += "" + hours + ":";

        if (mins < 10)
          result += "0" + mins + ":";
        else
          result += "" + mins + ":";

        if (secs < 10)
          result += "0" + secs + ".";
        else
          result += "" + secs + ".";

        // keep only the fractional digits of the msec part
        result += Utils.doubleToString(
            (double) msecs / (double) 1000, 3).replaceAll(".*\\.", "");
        break;

      case FORMAT_SECONDS:
        result = Utils.doubleToString((double) elapsed / (double) 1000, 3) + "s";
        break;

      case FORMAT_MILLISECONDS:
        result = "" + elapsed + "ms";
        break;

      default:
        result = "<unknown time format>";
    }

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}

/**
 * A class that can be used for timestamps in files, The toString() method
 * simply returns the associated Date object in a timestamp format. For
 * formatting options, see java.text.SimpleDateFormat.
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision: 8034 $
* @see SimpleDateFormat
*/
public static class Timestamp
  implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -6099868388466922753L;

  /** the default format */
  public final static String DEFAULT_FORMAT = "yyyy-MM-dd HH:mm:ss";

  /** the actual date */
  protected Date m_Stamp;

  /** the format of the timestamp */
  protected String m_Format;

  /** handles the format of the output */
  protected SimpleDateFormat m_Formatter;

  /**
   * creates a timestamp with the current date and time and the default
   * format.
   */
  public Timestamp() {
    this(DEFAULT_FORMAT);
  }

  /**
   * creates a timestamp with the current date and time and the specified
   * format.
   *
   * @param format the format of the timestamp
   * @see SimpleDateFormat
   */
  public Timestamp(String format) {
    this(new Date(), format);
  }

  /**
   * creates a timestamp with the given date and the default format.
   *
   * @param stamp the associated date/time for the timestamp
   */
  public Timestamp(Date stamp) {
    this(stamp, DEFAULT_FORMAT);
  }

  /**
   * creates a timestamp with the given date and format.
   * NOTE(review): all other constructors delegate to this one.
   *
   * @param stamp the associated date/time for the timestamp
   * @param format the format of the timestamp
   * @see SimpleDateFormat
   */
  public Timestamp(Date stamp, String format) {
    super();

    m_Stamp = stamp;
    setFormat(format);
  }

  /**
   * sets the format for the timestamp
   * NOTE(review): an invalid pattern silently falls back to DEFAULT_FORMAT.
   *
   * @param value the format string
   * @see SimpleDateFormat
   */
  public void setFormat(String value) {
    try {
      m_Formatter = new SimpleDateFormat(value);
      m_Format = value;
    }
    catch (Exception e) {
      m_Formatter = new SimpleDateFormat(DEFAULT_FORMAT);
      m_Format = DEFAULT_FORMAT;
    }
  }

  /**
   * returns the current timestamp format
   *
   * @return the current format
   */
  public String getFormat() {
    return m_Format;
  }

  /**
   * returns the associated date/time
   *
   * @return the timestamp value
   */
  public Date getStamp() {
    return m_Stamp;
  }

  /**
   * returns the timestamp as string in the specified format
   *
   * @return the timestamp as string
   */
  public String toString() {
    return m_Formatter.format(getStamp());
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}

/**
 * A little, simple helper class for logging stuff. Uses simple file access
 * and not the java.util.logging stuff (see Log for that). Uses the
 * writeToFile methods of the Debug class.
 *
 * @see Debug.Log
 * @see Debug#writeToFile(String, String)
 * @see Debug#writeToFile(String, String, boolean)
 */
public static class SimpleLog
  implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = -2671928223819510830L;

  /** the file to write to (if null then only stdout is used) */
  protected String m_Filename = null;

  /**
   * default constructor, uses only stdout
   */
  public SimpleLog() {
    this(null);
  }

  /**
   * Creates a logger that writes into the specified file. Appends to the
   * file by default.
   * NOTE(review): a null filename makes the log write to stdout only.
   *
   * @param filename the file to write to, if null then only stdout is used
   */
  public SimpleLog(String filename) {
    this(filename, true);
  }

  /**
   * Creates a logger that writes into the specified file. Appends to the
   * file by default.
   *
   * @param filename the file to write to, if null then only stdout is used
   * @param append if false, the file will be deleted first
   */
  public SimpleLog(String filename, boolean append) {
    super();

    m_Filename = filename;

    Debug.writeToFile(m_Filename, "--> Log started", append);
  }

  /**
   * returns the filename of the log, can be null
   *
   * @return the filename of the log
   */
  public String getFilename() {
    return m_Filename;
  }

  /**
   * logs the given message to the file
   * NOTE(review): every message is echoed to stdout as well, prefixed with
   * a Timestamp in the default format.
   *
   * @param message the message to log
   */
  public void log(String message) {
    String log;

    log = new Timestamp() + " " + message;

    if (getFilename() != null)
      Debug.writeToFile(getFilename(), log);

    System.out.println(log);
  }

  /**
   * a convenience method for dumping the current system info in the
   * log file
   *
   * @see SystemInfo
   */
  public void logSystemInfo() {
    // log("SystemInfo:\n" + new SystemInfo().toString());
  }

  /**
   * returns a string representation of the logger
   *
   * @return a string representation of the logger
   */
  public String toString() {
    String result;

    result = "Filename: " + getFilename();

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}

/**
 * A helper class for logging stuff. Uses the java.util.logging
 * package. If this approach seems an "overkill" (it can create quite a few
 * log files if used in different threads), one can use the
 * Debug.SimpleLog class.
* @author FracPete (fracpete at waikato dot ac dot nz)
* @version $Revision: 8034 $
* @see Debug.SimpleLog
*/
public static class Log
  implements Serializable, RevisionHandler {

  /** for serialization */
  private static final long serialVersionUID = 1458435732111675823L;

  /** the actual logger, if null only stdout is used */
  protected transient Logger m_Logger = null;

  /** the filename, if any */
  protected String m_Filename = null;

  /** the size of the file (in bytes) */
  protected int m_Size;

  /** the number of files for rotating the logs */
  protected int m_NumFiles;

  /** whether the initialization of the logger failed */
  protected boolean m_LoggerInitFailed = false;

  /**
   * default constructor, uses only stdout
   */
  public Log() {
    this(null);
  }

  /**
   * creates a logger that logs into the specified file, if null then only
   * stdout is used. It uses 1,000,000 bytes for file size and 1 file.
   *
   * @param filename the file to log into
   */
  public Log(String filename) {
    this(filename, 1000000, 1);
  }

  /**
   * creates a logger that logs into the specified file, if null then only
   * stdout is used.
   *
   * @param filename the file to log into
   * @param size the size of the files in bytes
   * @param numFiles the number of files for rotating
   */
  public Log(String filename, int size, int numFiles) {
    m_Filename = filename;
    m_Size = size;
    m_NumFiles = numFiles;
  }

  /**
   * initializes and returns the logger if necessary (e.g., due to
   * serialization).
   * NOTE(review): a failed FileHandler creation is remembered via
   * m_LoggerInitFailed, so initialization is attempted only once; after a
   * failure all output falls back to stdout.
   *
   * @return the logger, can be null, e.g., if no filename provided
   */
  protected Logger getLogger() {
    if ( (m_Logger == null) && (!m_LoggerInitFailed) ) {
      if (m_Filename != null) {
        m_Logger = Logger.getLogger(m_Filename);
        Handler fh = null;
        try{
          fh = new FileHandler(m_Filename, m_Size, m_NumFiles);
          fh.setFormatter(new SimpleFormatter());
          m_Logger.addHandler(fh);
          m_LoggerInitFailed = false;
        }
        catch(Exception e) {
          System.out.println("Cannot init fileHandler for logger:" + e.toString());
          m_Logger = null;
          m_LoggerInitFailed = true;
        }
      }
    }

    return m_Logger;
  }

  /**
   * turns the string representing a level, e.g., "FINE" or "ALL" into
   * the corresponding level (case-insensitive). The default is ALL.
   *
   * @param level the string to return a level for
   * @return the corresponding level or the default
   */
  public static Level stringToLevel(String level) {
    Level result;

    if (level.equalsIgnoreCase("ALL"))
      result = ALL;
    else if (level.equalsIgnoreCase("CONFIG"))
      result = CONFIG;
    else if (level.equalsIgnoreCase("FINE"))
      result = FINE;
    else if (level.equalsIgnoreCase("FINER"))
      result = FINER;
    else if (level.equalsIgnoreCase("FINEST"))
      result = FINEST;
    else if (level.equalsIgnoreCase("INFO"))
      result = INFO;
    else if (level.equalsIgnoreCase("OFF"))
      result = OFF;
    else if (level.equalsIgnoreCase("SEVERE"))
      result = SEVERE;
    else if (level.equalsIgnoreCase("WARNING"))
      result = WARNING;
    else
      result = ALL;

    return result;
  }

  /**
   * returns the filename of the log, can be null
   *
   * @return the filename of the log
   */
  public String getFilename() {
    return m_Filename;
  }

  /**
   * returns the size of the files
   *
   * @return the size of a file
   */
  public int getSize() {
    return m_Size;
  }

  /**
   * returns the number of files being used
   *
   * @return the number of files
   */
  public int getNumFiles() {
    return m_NumFiles;
  }

  /**
   * logs the given message
   *
   * @param level the level of severity
   * @param message the message to log
   */
  public void log(Level level, String message) {
    log(level, "", message);
  }

  /**
   * prints the given message with the specified level
   *
   * @param level the level of logging
   * @param sourceclass the class that logs the message
   * @param message the message to print
   */
  public void log(Level level, String sourceclass, String message) {
    log(level, sourceclass, "", message);
  }

  /**
   * prints the given message with the specified level
   * NOTE(review): if no file logger could be created, the message goes to
   * stdout without any level/source information.
   *
   * @param level the level of logging
   * @param sourceclass the class that logs the message
   * @param sourcemethod the method that logs the message
   * @param message the message to print
   */
  public void log(Level level, String sourceclass, String sourcemethod, String message) {
    Logger logger;

    logger = getLogger();

    if (logger != null)
      logger.logp(level, sourceclass, sourcemethod, message);
    else
      System.out.println(message);
  }

  /**
   * a convenience method for dumping the current system info in the
   * log file
   *
   * @see SystemInfo
   */
  public void logSystemInfo() {
    // log(INFO, "SystemInfo:\n" + new SystemInfo().toString());
  }

  /**
   * returns a string representation of the logger
   *
   * @return a string representation of the logger
   */
  public String toString() {
    String result;

    result = "Filename: " + getFilename() + ", "
           + "Size: " + getSize() + ", "
           + "# Files: " + getNumFiles();

    return result;
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 8034 $");
  }
}

/**
 * This extended Random class enables one to print the generated random
 * numbers etc., before they are returned. It can either use stdout (default)
 * for outputting the logging information or a Log object (level is then
 * INFO).
* * @author FracPete (fracpete at waikato dot ac dot nz) * @version $Revision: 8034 $ */ public static class Random extends java.util.Random implements Serializable, RevisionHandler { /** for serialization */ private static final long serialVersionUID = 1256846887618333956L; /** whether to output debug information */ protected boolean m_Debug = false; /** the unique ID for this number generator */ protected long m_ID; /** for keeping track of unique IDs */ protected static long m_CurrentID; /** the log to use for outputting the data, otherwise just stdout */ protected Log m_Log = null; /** * Creates a new random number generator. With no debugging. */ public Random() { this(false); } /** * Creates a new random number generator using a single long seed. * With no debugging * * @param seed the seed value */ public Random(long seed) { this(seed, false); } /** * Creates a new random number generator. With optional debugging. * * @param debug if true, debugging output is enabled */ public Random(boolean debug) { super(); setDebug(debug); m_ID = nextID(); if (getDebug()) printStackTrace(); } /** * Creates a new random number generator using a single long seed. 
* With optional debugging * * @param seed the seed value * @param debug if true, debugging output is enabled */ public Random(long seed, boolean debug) { super(seed); setDebug(debug); m_ID = nextID(); if (getDebug()) printStackTrace(); } /** * sets whether to print the generated random values or not * * @param value if true debugging output is enabled */ public void setDebug(boolean value) { m_Debug = value; } /** * returns whether to print the generated random values or not * * @return true if debugging output is enabled */ public boolean getDebug() { return m_Debug; } /** * the log to use, if it is null then stdout is used * * @param value the log to use */ public void setLog(Log value) { m_Log = value; } /** * the currently used log, if null then stdout is used for outputting * the debugging information * * @return the log, can be null */ public Log getLog() { return m_Log; } /** * returns the next unique ID for a number generator * * @return the next unique ID */ protected static long nextID() { m_CurrentID++; return m_CurrentID; } /** * returns the unique ID of this number generator * * @return the unique ID of this number generator */ public long getID() { return m_ID; } /** * prints the given message only if m_Debug is TRUE * * @param msg the message to print * @see #m_Debug */ protected void println(String msg) { if (getDebug()) { if (getLog() != null) getLog().log(Level.INFO, m_ID + ": " + msg); else System.out.println(m_ID + ": " + msg); } } /** * prints the current stacktrace */ public void printStackTrace() { Throwable t; StringWriter writer; writer = new StringWriter(); // generate stacktrace t = new Throwable(); t.fillInStackTrace(); t.printStackTrace(new PrintWriter(writer)); println(writer.toString()); } /** * Returns the next pseudorandom, uniformly distributed boolean value from * this random number generator's sequence. 
* * @return random boolean */ public boolean nextBoolean() { boolean result = super.nextBoolean(); println("nextBoolean=" + result); return result; } /** * Generates random bytes and places them into a user-supplied byte array. * * @param bytes array to fill with random bytes */ public void nextBytes(byte[] bytes) { super.nextBytes(bytes); println("nextBytes=" + Utils.arrayToString(bytes)); } /** * Returns the next pseudorandom, uniformly distributed double value between * 0.0 and 1.0 from this random number generator's sequence. * * @return random double */ public double nextDouble() { double result = super.nextDouble(); println("nextDouble=" + result); return result; } /** * Returns the next pseudorandom, uniformly distributed float value between * 0.0 and 1.0 from this random number generator's sequence. * * @return random float */ public float nextFloat() { float result = super.nextFloat(); println("nextFloat=" + result); return result; } /** * Returns the next pseudorandom, Gaussian ("normally") distributed double * value with mean 0.0 and standard deviation 1.0 from this random number * generator's sequence. * * @return random double, gaussian distributed */ public double nextGaussian() { double result = super.nextGaussian(); println("nextGaussian=" + result); return result; } /** * Returns the next pseudorandom, uniformly distributed int value from this * random number generator's sequence. * * @return random int */ public int nextInt() { int result = super.nextInt(); println("nextInt=" + result); return result; } /** * Returns a pseudorandom, uniformly distributed int value between 0 * (inclusive) and the specified value (exclusive), drawn from this random * number generator's sequence. 
* * @param n the upper limit (exclusive) * @return random int */ public int nextInt(int n) { int result = super.nextInt(n); println("nextInt(" + n + ")=" + result); return result; } /** * Returns the next pseudorandom, uniformly distributed long value from this * random number generator's sequence. * * @return random long */ public long nextLong() { long result = super.nextLong(); println("nextLong=" + result); return result; } /** * Sets the seed of this random number generator using a single long seed. * * @param seed the seed value */ public void setSeed(long seed) { super.setSeed(seed); println("setSeed(" + seed + ")"); } /** * returns a string representation of this number generator * * @return a string representation */ public String toString() { return this.getClass().getName() + ": " + getID(); } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } } /** * contains debug methods * * @author Gabi Schmidberger (gabi at cs dot waikato dot ac dot nz) * @version $Revision: 8034 $ */ public static class DBO implements Serializable, RevisionHandler { /** for serialization */ static final long serialVersionUID = -5245628124742606784L; /** enables/disables output of debug information */ public boolean m_verboseOn = false; /** range of outputtyp */ public Range m_outputTypes = new Range(); /** * Set the verbose on flag on */ public void setVerboseOn() { m_verboseOn = true; } /** * Initialize ranges, upper limit must be set * * @param upper upper limit */ public void initializeRanges(int upper) { m_outputTypes.setUpper(upper); } /** * Return true if the outputtype is set * * @param num value that is reserved for a specific outputtype * @return return true if the output type is set */ public boolean outputTypeSet(int num) { return (m_outputTypes.isInRange(num)); } /** * Return true if the debug level is set * same method as outpuTypeSet but better name * * @param num value that 
is reserved for a specific outputtype
* @return return true if the debug level is set
*/
public boolean dl(int num) {
  return (outputTypeSet(num));
}

/**
 * Switches the outputs on that are requested from the option O
 * NOTE(review): also forces verbose mode on; the upper bound of 30 output
 * types is hard-coded here.
 *
 * @param list list of integers, all are used for an output type
 */
public void setOutputTypes(String list) {
  if (list.length() > 0) {
    m_verboseOn = true;

    m_outputTypes.setRanges(list);
    m_outputTypes.setUpper(30);
  }
}

/**
 * Gets the current output type selection
 *
 * @return a string containing a comma separated list of ranges
 */
public String getOutputTypes() {
  return m_outputTypes.getRanges();
}

/**
 * prints out text + endofline if verbose is on.
 * helps to make debug output commands more visible in text
 *
 * @param text the text to print
 */
public void dpln(String text) {
  if (m_verboseOn) {
    System.out.println(text);
  }
}

/**
 * prints out text + endofline but only if parameter debug type is set.
 * helps to make debug output commands more visible in text
 *
 * @param debugType the type of the output
 * @param text the text to print
 */
public void dpln(int debugType, String text) {
  if (outputTypeSet(debugType)) {
    System.out.println(text);
  }
}

/**
 * prints out text if verbose is on.
 * helps to make debug output commands more visible in text
 *
 * @param text the text to print
 */
public void dp(String text) {
  if (m_verboseOn) {
    System.out.print(text);
  }
}

/**
 * prints out text but only if debug level is set.
 * helps to make debug output commands more visible in text
 *
 * @param debugType the type of the output
 * @param text the text to print
 */
public void dp(int debugType, String text) {
  if (outputTypeSet(debugType)) {
    System.out.print(text);
  }
}

/**
 * prints out text + endofline.
 * helps to make debug output commands more visible in text
 * NOTE(review): static variant -- always prints, regardless of m_verboseOn.
 *
 * @param text the text to print
 */
public static void pln(String text) {
  System.out.println(text);
}

/**
 * prints out text.
 * helps to make debug output commands more visible in text
 * NOTE(review): static variant -- always prints, regardless of m_verboseOn.
 *
 * @param text the text to print
 */
public static void p (String text) {
  System.out.print(text);
}

/**
 * Returns the revision string.
 *
 * @return the revision
 */
public String getRevision() {
  return RevisionUtils.extract("$Revision: 8034 $");
}
}

/**
 * default constructor, prints only to stdout
 */
public Debug() {
  this(null);
}

/**
 * logs the output to the specified file (and stdout). Size is 1,000,000 bytes
 * and 1 file.
 *
 * @param filename the name of the log
 */
public Debug(String filename) {
  this(filename, 1000000, 1);
}

/**
 * logs the output
 *
 * @param filename the name of the log
 * @param size the size of the files in bytes
 * @param numFiles the number of files for rotating
 */
public Debug(String filename, int size, int numFiles) {
  super();

  m_Log = newLog(filename, size, numFiles);
}

/**
 * turns the string representing a level, e.g., "FINE" or "ALL" into
 * the corresponding level (case-insensitive). The default is ALL.
 *
 * @param level the string to return a level for
 * @return the corresponding level or the default
 */
public static Level stringToLevel(String level) {
  return Log.stringToLevel(level);
}

/**
 * returns a new Log instance
 *
 * @param filename the name of the log
 * @param size the size of the files in bytes
 * @param numFiles the number of files for rotating
 * @return the log instance
 */
public static Log newLog(String filename, int size, int numFiles) {
  return new Log(filename, size, numFiles);
}

/**
 * prints the given message with level INFO
 *
 * @param message the message to print
 */
public void log(String message) {
  log(INFO, message);
}

/**
 * prints the given message with the specified level and an empty sourceclass
 *
 * @param level the level of logging
 * @param message the message to print
 */
public void log(Level level, String message) {
  log(level, "", message);
}

/**
 * prints the given message with the specified level
 *
 * @param level the level of logging
 * @param sourceclass the
class that logs the message
* @param message the message to print
*/
public void log(Level level, String sourceclass, String message) {
  log(level, sourceclass, "", message);
}

/**
 * prints the given message with the specified level
 * NOTE(review): all log variants funnel into this method; output is dropped
 * entirely while the Debug instance is disabled.
 *
 * @param level the level of logging
 * @param sourceclass the class that logs the message
 * @param sourcemethod the method that logs the message
 * @param message the message to print
 */
public void log(Level level, String sourceclass, String sourcemethod, String message) {
  if (getEnabled())
    m_Log.log(level, sourceclass, sourcemethod, message);
}

/**
 * sets whether the logging is enabled or not
 *
 * @param value if true logging will be enabled
 */
public void setEnabled(boolean value) {
  m_Enabled = value;
}

/**
 * returns whether the logging is enabled
 *
 * @return true if the logging is enabled
 */
public boolean getEnabled() {
  return m_Enabled;
}

/**
 * returns a new instance of a clock
 *
 * @return a new instance of a Clock
 */
public static Clock newClock() {
  return new Clock();
}

/**
 * returns the instance of the Clock that is internally used
 *
 * @return the clock that's being used
 */
public Clock getClock() {
  return m_Clock;
}

/**
 * starts the clock
 */
public void startClock() {
  m_Clock.start();
}

/**
 * stops the clock and prints the message associated with the time, but only
 * if the logging is enabled.
 * NOTE(review): despite its name this only logs the elapsed time -- it never
 * calls m_Clock.stop(), so the internal clock keeps running (Clock.toString()
 * reads the current time while the clock is running).
 *
 * @param message the message to print
 * @see #getEnabled()
 */
public void stopClock(String message) {
  log(message + ": " + m_Clock);
}

/**
 * returns a default debug random object, with no particular seed and
 * debugging enabled.
 * NOTE(review): the returned Debug.Random prints every generated value.
 *
 * @return a new instance of a Random object
 */
public static java.util.Random newRandom() {
  return new Random(true);
}

/**
 * returns a debug random object with the specified seed and debugging
 * enabled.
 *
 * @param seed the seed value
 * @return a new instance of a Random object
 */
public static java.util.Random newRandom(int seed) {
  return new Random(seed, true);
}

/**
 * returns a default timestamp for the current date/time
 *
 * @return a new timestamp
 */
public static Timestamp newTimestamp() {
  return new Timestamp();
}

/**
 * returns the system temp directory
 *
 * @return the temp directory
 */
public static String getTempDir() {
  return System.getProperty("java.io.tmpdir");
}

/**
 * returns the home directory of the user
 *
 * @return the user's home directory
 */
public static String getHomeDir() {
  return System.getProperty("user.home");
}

/**
 * returns the current working directory of the user
 *
 * @return the user's current working directory
 */
public static String getCurrentDir() {
  return System.getProperty("user.dir");
}

/**
 * Writes the given object to the specified file. The string representation
 * of the object is appended to the file.
 *
 * @param filename the file to write to
 * @param obj the object to write to the file
 * @return true if writing was successful
 */
public static boolean writeToFile(String filename, Object obj) {
  return writeToFile(filename, obj, true);
}

/**
 * Writes the given message to the specified file. The message is appended
 * to the file.
 *
 * @param filename the file to write to
 * @param message the message to write
 * @return true if writing was successful
 */
public static boolean writeToFile(String filename, String message) {
  return writeToFile(filename, message, true);
}

/**
 * Writes the given object to the specified file. The string representation
 * of the object is either appended or replaces the current content of the
 * file.
* * @param filename the file to write to * @param obj the object to write to the file * @param append whether to append the message or not * @return true if writing was successful */ public static boolean writeToFile(String filename, Object obj, boolean append) { return writeToFile(filename, obj.toString(), append); } /** * Writes the given message to the specified file. The message is either * appended or replaces the current content of the file. * * @param filename the file to write to * @param message the message to write * @param append whether to append the message or not * @return true if writing was successful */ public static boolean writeToFile(String filename, String message, boolean append) { boolean result; BufferedWriter writer; try { writer = new BufferedWriter(new FileWriter(filename, append)); writer.write(message); writer.newLine(); writer.flush(); writer.close(); result = true; } catch (Exception e) { result = false; } return result; } /** * writes the serialized object to the speicified file * * @param filename the file to serialize the object to * @param o the object to serialize * @return true if writing was successful */ public static boolean saveToFile(String filename, Object o) { boolean result; if (SerializationHelper.isSerializable(o.getClass())) { try { SerializationHelper.write(filename, o); result = true; } catch (Exception e) { result = false; } } else { result = false; } return result; } /** * deserializes the content of the file and returns it, null if an error * occurred. * * @param filename the name of the file to deserialize * @return the deserialized content, null if problem occurred */ public static Object loadFromFile(String filename) { Object result; try { result = SerializationHelper.read(filename); } catch (Exception e) { result = null; } return result; } /** * Returns the revision string. * * @return the revision */ public String getRevision() { return RevisionUtils.extract("$Revision: 8034 $"); } }
43,075
24.9807
91
java
tsml-java
tsml-java-master/src/main/java/weka/core/DenseInstance.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    DenseInstance.java
 *    Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.core;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;

/**
 * Class for handling an instance. All values (numeric, date, nominal, string or
 * relational) are internally stored as floating-point numbers. If an attribute
 * is nominal (or a string or relational), the stored value is the index of the
 * corresponding nominal (or string or relational) value in the attribute's
 * definition. We have chosen this approach in favor of a more elegant
 * object-oriented approach because it is much faster.
 * <p>
 *
 * Typical usage (code from the main() method of this class):
 * <p>
 *
 * <code>
 * ... <br>
 *
 * // Create empty instance with three attribute values <br>
 * Instance inst = new DenseInstance(3); <br><br>
 *
 * // Set instance's values for the attributes "length", "weight", and "position"<br>
 * inst.setValue(length, 5.3); <br>
 * inst.setValue(weight, 300); <br>
 * inst.setValue(position, "first"); <br><br>
 *
 * // Set instance's dataset to be the dataset "race" <br>
 * inst.setDataset(race); <br><br>
 *
 * // Print the instance <br>
 * System.out.println("The instance: " + inst); <br>
 *
 * ... <br>
 * </code>
 * <p>
 *
 * All methods that change an instance's attribute values are safe, ie. a change
 * of an instance's attribute values does not affect any other instances. All
 * methods that change an instance's attribute values clone the attribute value
 * vector before it is changed. If your application heavily modifies instance
 * values, it may be faster to create a new instance from scratch.
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 9028 $
 */
public class DenseInstance extends AbstractInstance {

  /** for serialization */
  static final long serialVersionUID = 1482635194499365122L;

  /**
   * Constructor that copies the attribute values and the weight from the given
   * instance. It does NOT perform a deep copy of the attribute values if the
   * instance provided is also of type DenseInstance (it simply copies the
   * reference to the array of values), otherwise it does. Reference to the
   * dataset is set to null. (ie. the instance doesn't have access to
   * information about the attribute types)
   *
   * @param instance the instance from which the attribute values and the weight
   *          are to be copied
   */
  // @ ensures m_Dataset == null;
  public DenseInstance(/* @non_null@ */Instance instance) {

    if (instance instanceof DenseInstance) {
      // Safe to share the value array: all mutators copy-on-write via
      // freshAttributeVector() before modifying it.
      m_AttValues = ((DenseInstance) instance).m_AttValues;
    } else {
      // toDoubleArray() already returns a fresh array for other Instance types.
      m_AttValues = instance.toDoubleArray();
    }
    m_Weight = instance.weight();
    m_Dataset = null;
  }

  /**
   * Constructor that inititalizes instance variable with given values.
   * Reference to the dataset is set to null. (ie. the instance doesn't have
   * access to information about the attribute types)
   *
   * @param weight the instance's weight
   * @param attValues a vector of attribute values
   */
  // @ ensures m_Dataset == null;
  public DenseInstance(double weight, /* @non_null@ */double[] attValues) {

    m_AttValues = attValues;
    m_Weight = weight;
    m_Dataset = null;
  }

  /**
   * Constructor of an instance that sets weight to one, all values to be
   * missing, and the reference to the dataset to null. (ie. the instance
   * doesn't have access to information about the attribute types)
   *
   * @param numAttributes the size of the instance
   */
  // @ requires numAttributes > 0; // Or maybe == 0 is okay too?
  // @ ensures m_Dataset == null;
  public DenseInstance(int numAttributes) {

    m_AttValues = new double[numAttributes];
    Arrays.fill(m_AttValues, Utils.missingValue());
    m_Weight = 1;
    m_Dataset = null;
  }

  /**
   * Produces a shallow copy of this instance. The copy has access to the same
   * dataset. (if you want to make a copy that doesn't have access to the
   * dataset, use <code>new DenseInstance(instance)</code>
   *
   * @return the shallow copy
   */
  // @ also ensures \result != null;
  // @ also ensures \result instanceof DenseInstance;
  // @ also ensures ((DenseInstance)\result).m_Dataset == m_Dataset;
  @Override
  public/* @pure@ */Object copy() {

    DenseInstance result = new DenseInstance(this);
    result.m_Dataset = m_Dataset;
    return result;
  }

  /**
   * Returns the index of the attribute stored at the given position. Just
   * returns the given value.
   *
   * @param position the position
   * @return the index of the attribute stored at the given position
   */
  @Override
  public/* @pure@ */int index(int position) {

    // Dense storage: position and attribute index are the same thing.
    return position;
  }

  /**
   * Merges this instance with the given instance and returns the result.
   * Dataset is set to null. The returned instance is of the same type as this
   * instance.
   *
   * @param inst the instance to be merged with this one
   * @return the merged instances
   */
  @Override
  public Instance mergeInstance(Instance inst) {

    int m = 0;
    double[] newVals = new double[numAttributes() + inst.numAttributes()];
    for (int j = 0; j < numAttributes(); j++, m++) {
      newVals[m] = value(j);
    }
    for (int j = 0; j < inst.numAttributes(); j++, m++) {
      newVals[m] = inst.value(j);
    }
    return new DenseInstance(1.0, newVals);
  }

  /**
   * Returns the number of attributes.
   *
   * @return the number of attributes as an integer
   */
  // @ ensures \result == m_AttValues.length;
  @Override
  public/* @pure@ */int numAttributes() {

    return m_AttValues.length;
  }

  /**
   * Returns the number of values present. Always the same as numAttributes().
   *
   * @return the number of values
   */
  // @ ensures \result == m_AttValues.length;
  @Override
  public/* @pure@ */int numValues() {

    return m_AttValues.length;
  }

  /**
   * Replaces all missing values in the instance with the values contained in
   * the given array. A deep copy of the vector of attribute values is performed
   * before the values are replaced.
   *
   * @param array containing the means and modes
   * @throws IllegalArgumentException if numbers of attributes are unequal
   */
  @Override
  public void replaceMissingValues(double[] array) {

    if ((array == null) || (array.length != m_AttValues.length)) {
      throw new IllegalArgumentException("Unequal number of attributes!");
    }
    freshAttributeVector();
    for (int i = 0; i < m_AttValues.length; i++) {
      if (isMissing(i)) {
        m_AttValues[i] = array[i];
      }
    }
  }

  /**
   * Sets a specific value in the instance to the given value (internal
   * floating-point format). Performs a deep copy of the vector of attribute
   * values before the value is set.
   *
   * @param attIndex the attribute's index
   * @param value the new attribute value (If the corresponding attribute is
   *          nominal (or a string) then this is the new value's index as a
   *          double).
   */
  @Override
  public void setValue(int attIndex, double value) {

    freshAttributeVector();
    m_AttValues[attIndex] = value;
  }

  /**
   * Sets a specific value in the instance to the given value (internal
   * floating-point format). Performs a deep copy of the vector of attribute
   * values before the value is set. Does exactly the same thing as setValue().
   *
   * @param indexOfIndex the index of the attribute's index
   * @param value the new attribute value (If the corresponding attribute is
   *          nominal (or a string) then this is the new value's index as a
   *          double).
   */
  @Override
  public void setValueSparse(int indexOfIndex, double value) {

    freshAttributeVector();
    m_AttValues[indexOfIndex] = value;
  }

  /**
   * Returns the values of each attribute as an array of doubles.
   *
   * @return an array containing all the instance attribute values
   */
  @Override
  public double[] toDoubleArray() {

    return Arrays.copyOf(m_AttValues, m_AttValues.length);
  }

  /**
   * Returns the description of one instance (without weight appended). If the
   * instance doesn't have access to a dataset, it returns the internal
   * floating-point values. Quotes string values that contain whitespace
   * characters.
   *
   * This method is used by getRandomNumberGenerator() in Instances.java in
   * order to maintain backwards compatibility with weka 3.4.
   *
   * @return the instance's description as a string
   */
  @Override
  public String toStringNoWeight() {
    return toStringNoWeight(AbstractInstance.s_numericAfterDecimalPoint);
  }

  /**
   * Returns the description of one instance (without weight appended). If the
   * instance doesn't have access to a dataset, it returns the internal
   * floating-point values. Quotes string values that contain whitespace
   * characters.
   *
   * This method is used by getRandomNumberGenerator() in Instances.java in
   * order to maintain backwards compatibility with weka 3.4.
   *
   * @param afterDecimalPoint maximum number of digits after the decimal point
   *          for numeric values
   *
   * @return the instance's description as a string
   */
  @Override
  public String toStringNoWeight(int afterDecimalPoint) {
    // StringBuilder: local, single-threaded use -- no need for the
    // synchronized StringBuffer.
    StringBuilder text = new StringBuilder();

    for (int i = 0; i < m_AttValues.length; i++) {
      if (i > 0) {
        text.append(",");
      }
      text.append(toString(i, afterDecimalPoint));
    }

    return text.toString();
  }

  /**
   * Returns an instance's attribute value in internal format.
   *
   * @param attIndex the attribute's index
   * @return the specified value as a double (If the corresponding attribute is
   *         nominal (or a string) then it returns the value's index as a
   *         double).
   */
  @Override
  public/* @pure@ */double value(int attIndex) {

    return m_AttValues[attIndex];
  }

  /**
   * Deletes an attribute at the given position (0 to numAttributes() - 1).
   *
   * @param position the attribute's position
   */
  @Override
  protected void forceDeleteAttributeAt(int position) {

    double[] newValues = new double[m_AttValues.length - 1];

    System.arraycopy(m_AttValues, 0, newValues, 0, position);
    if (position < m_AttValues.length - 1) {
      System.arraycopy(m_AttValues, position + 1, newValues, position,
        m_AttValues.length - (position + 1));
    }
    m_AttValues = newValues;
  }

  /**
   * Inserts an attribute at the given position (0 to numAttributes()) and sets
   * its value to be missing.
   *
   * @param position the attribute's position
   */
  @Override
  protected void forceInsertAttributeAt(int position) {

    double[] newValues = new double[m_AttValues.length + 1];

    System.arraycopy(m_AttValues, 0, newValues, 0, position);
    newValues[position] = Utils.missingValue();
    System.arraycopy(m_AttValues, position, newValues, position + 1,
      m_AttValues.length - position);
    m_AttValues = newValues;
  }

  /**
   * Clones the attribute vector of the instance and overwrites it with the
   * clone. Gives this instance sole ownership of its value array before a
   * mutation (copy-on-write).
   */
  private void freshAttributeVector() {

    m_AttValues = toDoubleArray();
  }

  /**
   * Main method for testing this class.
   *
   * @param options the commandline options - ignored
   */
  // @ requires options != null;
  public static void main(String[] options) {

    try {

      // Create numeric attributes "length" and "weight"
      Attribute length = new Attribute("length");
      Attribute weight = new Attribute("weight");

      // Create vector to hold nominal values "first", "second", "third"
      ArrayList<String> my_nominal_values = new ArrayList<String>(3);
      my_nominal_values.add("first");
      my_nominal_values.add("second");
      my_nominal_values.add("third");

      // Create nominal attribute "position"
      Attribute position = new Attribute("position", my_nominal_values);

      // Create vector of the above attributes
      ArrayList<Attribute> attributes = new ArrayList<Attribute>(3);
      attributes.add(length);
      attributes.add(weight);
      attributes.add(position);

      // Create the empty dataset "race" with above attributes
      Instances race = new Instances("race", attributes, 0);

      // Make position the class attribute
      race.setClassIndex(position.index());

      // Create empty instance with three attribute values
      Instance inst = new DenseInstance(3);

      // Set instance's values for the attributes "length", "weight", and
      // "position"
      inst.setValue(length, 5.3);
      inst.setValue(weight, 300);
      inst.setValue(position, "first");

      // Set instance's dataset to be the dataset "race"
      inst.setDataset(race);

      // Print the instance
      System.out.println("The instance: " + inst);

      // Print the first attribute
      System.out.println("First attribute: " + inst.attribute(0));

      // Print the class attribute
      System.out.println("Class attribute: " + inst.classAttribute());

      // Print the class index
      System.out.println("Class index: " + inst.classIndex());

      // Say if class is missing
      System.out.println("Class is missing: " + inst.classIsMissing());

      // Print the instance's class value in internal format
      System.out.println("Class value (internal format): " + inst.classValue());

      // Print a shallow copy of this instance
      Instance copy = (Instance) inst.copy();
      System.out.println("Shallow copy: " + copy);

      // Set dataset for shallow copy
      copy.setDataset(inst.dataset());
      System.out.println("Shallow copy with dataset set: " + copy);

      // Unset dataset for copy, delete first attribute, and insert it again
      copy.setDataset(null);
      copy.deleteAttributeAt(0);
      copy.insertAttributeAt(0);
      copy.setDataset(inst.dataset());
      System.out.println("Copy with first attribute deleted and inserted: "
        + copy);

      // Enumerate attributes (leaving out the class attribute)
      System.out.println("Enumerating attributes (leaving out class):");
      Enumeration<?> enu = inst.enumerateAttributes();
      while (enu.hasMoreElements()) {
        Attribute att = (Attribute) enu.nextElement();
        System.out.println(att);
      }

      // Headers are equivalent?
      System.out.println("Header of original and copy equivalent: "
        + inst.equalHeaders(copy));

      // Test for missing values
      System.out.println("Length of copy missing: " + copy.isMissing(length));
      System.out.println("Weight of copy missing: "
        + copy.isMissing(weight.index()));
      System.out.println("Length of copy missing: "
        + Utils.isMissingValue(copy.value(length)));

      // Prints number of attributes and classes
      System.out.println("Number of attributes: " + copy.numAttributes());
      System.out.println("Number of classes: " + copy.numClasses());

      // Replace missing values
      double[] meansAndModes = { 2, 3, 0 };
      copy.replaceMissingValues(meansAndModes);
      System.out.println("Copy with missing value replaced: " + copy);

      // Setting and getting values and weights
      copy.setClassMissing();
      System.out.println("Copy with missing class: " + copy);
      copy.setClassValue(0);
      System.out.println("Copy with class value set to first value: " + copy);
      copy.setClassValue("third");
      System.out.println("Copy with class value set to \"third\": " + copy);
      copy.setMissing(1);
      System.out.println("Copy with second attribute set to be missing: "
        + copy);
      copy.setMissing(length);
      System.out.println("Copy with length set to be missing: " + copy);
      copy.setValue(0, 0);
      System.out.println("Copy with first attribute set to 0: " + copy);
      copy.setValue(weight, 1);
      System.out.println("Copy with weight attribute set to 1: " + copy);
      copy.setValue(position, "second");
      System.out.println("Copy with position set to \"second\": " + copy);
      copy.setValue(2, "first");
      System.out.println("Copy with last attribute set to \"first\": " + copy);
      System.out.println("Current weight of instance copy: " + copy.weight());
      copy.setWeight(2);
      System.out.println("Current weight of instance copy (set to 2): "
        + copy.weight());
      System.out.println("Last value of copy: " + copy.toString(2));
      System.out.println("Value of position for copy: "
        + copy.toString(position));
      System.out.println("Last value of copy (internal format): "
        + copy.value(2));
      System.out.println("Value of position for copy (internal format): "
        + copy.value(position));
    } catch (Exception e) {
      e.printStackTrace();
    }
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  @Override
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 9028 $");
  }
}
17,838
32.343925
85
java
tsml-java
tsml-java-master/src/main/java/weka/core/DistanceFunction.java
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * DistanceFunction.java * Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand * */ package weka.core; import weka.core.neighboursearch.PerformanceStats; /** * Interface for any class that can compute and return distances between two * instances. * * @author Ashraf M. Kibriya (amk14@cs.waikato.ac.nz) * @version $Revision: 8034 $ */ public interface DistanceFunction extends OptionHandler { /** * Sets the instances. * * @param insts the instances to use */ public void setInstances(Instances insts); /** * returns the instances currently set. * * @return the current instances */ public Instances getInstances(); /** * Sets the range of attributes to use in the calculation of the distance. * The indices start from 1, 'first' and 'last' are valid as well. * E.g.: first-3,5,6-last * * @param value the new attribute index range */ public void setAttributeIndices(String value); /** * Gets the range of attributes used in the calculation of the distance. * * @return the attribute index range */ public String getAttributeIndices(); /** * Sets whether the matching sense of attribute indices is inverted or not. * * @param value if true the matching sense is inverted */ public void setInvertSelection(boolean value); /** * Gets whether the matching sense of attribute indices is inverted or not. 
* * @return true if the matching sense is inverted */ public boolean getInvertSelection(); /** * Calculates the distance between two instances. * * @param first the first instance * @param second the second instance * @return the distance between the two given instances */ public double distance(Instance first, Instance second); /** * Calculates the distance between two instances. * * @param first the first instance * @param second the second instance * @param stats the performance stats object * @return the distance between the two given instances * @throws Exception if calculation fails */ public double distance(Instance first, Instance second, PerformanceStats stats) throws Exception; /** * Calculates the distance between two instances. Offers speed up (if the * distance function class in use supports it) in nearest neighbour search by * taking into account the cutOff or maximum distance. Depending on the * distance function class, post processing of the distances by * postProcessDistances(double []) may be required if this function is used. * * @param first the first instance * @param second the second instance * @param cutOffValue If the distance being calculated becomes larger than * cutOffValue then the rest of the calculation is * discarded. * @return the distance between the two given instances or * Double.POSITIVE_INFINITY if the distance being * calculated becomes larger than cutOffValue. */ public double distance(Instance first, Instance second, double cutOffValue); /** * Calculates the distance between two instances. Offers speed up (if the * distance function class in use supports it) in nearest neighbour search by * taking into account the cutOff or maximum distance. Depending on the * distance function class, post processing of the distances by * postProcessDistances(double []) may be required if this function is used. 
* * @param first the first instance * @param second the second instance * @param cutOffValue If the distance being calculated becomes larger than * cutOffValue then the rest of the calculation is * discarded. * @param stats the performance stats object * @return the distance between the two given instances or * Double.POSITIVE_INFINITY if the distance being * calculated becomes larger than cutOffValue. */ public double distance(Instance first, Instance second, double cutOffValue, PerformanceStats stats); /** * Does post processing of the distances (if necessary) returned by * distance(distance(Instance first, Instance second, double cutOffValue). It * may be necessary, depending on the distance function, to do post processing * to set the distances on the correct scale. Some distance function classes * may not return correct distances using the cutOffValue distance function to * minimize the inaccuracies resulting from floating point comparison and * manipulation. * * @param distances the distances to post-process */ public void postProcessDistances(double distances[]); /** * Update the distance function (if necessary) for the newly added instance. * * @param ins the instance to add */ public void update(Instance ins); }
5,601
34.232704
82
java
tsml-java
tsml-java-master/src/main/java/weka/core/Drawable.java
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    Drawable.java
 *    Copyright (C) 1999-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.core;

/**
 * Interface to something that can be drawn as a graph.
 *
 * @author Ashraf M. Kibriya(amk14@cs.waikato.ac.nz), Eibe Frank(eibe@cs.waikato.ac.nz)
 * @version $Revision: 8034 $
 */
public interface Drawable {

  /** the object cannot be rendered as a graph */
  int NOT_DRAWABLE = 0;

  /** the graph is a tree (dotty format) */
  int TREE = 1;

  /** the graph is a Bayesian network (XMLBIF ver. 0.3 format) */
  int BayesNet = 2;

  /** the graph is in Newick format */
  int Newick = 3;

  /**
   * Returns the type of graph representing the object.
   *
   * @return the type of graph representing the object
   */
  int graphType();

  /**
   * Returns a string that describes a graph representing the object. The
   * string should be in XMLBIF ver. 0.3 format if the graph is a BayesNet,
   * otherwise it should be in dotty format.
   *
   * @return the graph described by a string
   * @exception Exception if the graph can't be computed
   */
  String graph() throws Exception;
}
1,593
25.131148
87
java